1 /*
2  * Device driver for Microgate SyncLink GT serial adapters.
3  *
4  * written by Paul Fulghum for Microgate Corporation
5  * paulkf@microgate.com
6  *
7  * Microgate and SyncLink are trademarks of Microgate Corporation
8  *
9  * This code is released under the GNU General Public License (GPL)
10  *
11  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
12  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
13  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
14  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
15  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
16  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
18  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
19  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
21  * OF THE POSSIBILITY OF SUCH DAMAGE.
22  */
23 
24 /*
25  * DEBUG OUTPUT DEFINITIONS
26  *
27  * uncomment lines below to enable specific types of debug output
28  *
29  * DBGINFO   information - most verbose output
30  * DBGERR    serious errors
31  * DBGBH     bottom half service routine debugging
32  * DBGISR    interrupt service routine debugging
33  * DBGDATA   output receive and transmit data
34  * DBGTBUF   output transmit DMA buffers and registers
35  * DBGRBUF   output receive DMA buffers and registers
36  */
37 
38 #define DBGINFO(fmt) if (debug_level >= DEBUG_LEVEL_INFO) printk fmt
39 #define DBGERR(fmt) if (debug_level >= DEBUG_LEVEL_ERROR) printk fmt
40 #define DBGBH(fmt) if (debug_level >= DEBUG_LEVEL_BH) printk fmt
41 #define DBGISR(fmt) if (debug_level >= DEBUG_LEVEL_ISR) printk fmt
42 #define DBGDATA(info, buf, size, label) if (debug_level >= DEBUG_LEVEL_DATA) trace_block((info), (buf), (size), (label))
43 //#define DBGTBUF(info) dump_tbufs(info)
44 //#define DBGRBUF(info) dump_rbufs(info)
45 
46 
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/signal.h>
50 #include <linux/sched.h>
51 #include <linux/timer.h>
52 #include <linux/interrupt.h>
53 #include <linux/pci.h>
54 #include <linux/tty.h>
55 #include <linux/tty_flip.h>
56 #include <linux/serial.h>
57 #include <linux/major.h>
58 #include <linux/string.h>
59 #include <linux/fcntl.h>
60 #include <linux/ptrace.h>
61 #include <linux/ioport.h>
62 #include <linux/mm.h>
63 #include <linux/slab.h>
64 #include <linux/netdevice.h>
65 #include <linux/vmalloc.h>
66 #include <linux/init.h>
67 #include <linux/delay.h>
68 #include <linux/ioctl.h>
69 #include <linux/termios.h>
70 #include <linux/bitops.h>
71 #include <linux/workqueue.h>
72 #include <linux/hdlc.h>
73 #include <linux/synclink.h>
74 
75 #include <asm/system.h>
76 #include <asm/io.h>
77 #include <asm/irq.h>
78 #include <asm/dma.h>
79 #include <asm/types.h>
80 #include <asm/uaccess.h>
81 
82 #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
83 #define SYNCLINK_GENERIC_HDLC 1
84 #else
85 #define SYNCLINK_GENERIC_HDLC 0
86 #endif
87 
88 /*
89  * module identification
90  */
91 static char *driver_name     = "SyncLink GT";
92 static char *tty_driver_name = "synclink_gt";
93 static char *tty_dev_prefix  = "ttySLG";
94 MODULE_LICENSE("GPL");
95 #define MGSL_MAGIC 0x5401
96 #define MAX_DEVICES 32
97 
98 static struct pci_device_id pci_table[] = {
99 	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
100 	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT2_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
101 	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
102 	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
103 	{0,}, /* terminate list */
104 };
105 MODULE_DEVICE_TABLE(pci, pci_table);
106 
107 static int  init_one(struct pci_dev *dev,const struct pci_device_id *ent);
108 static void remove_one(struct pci_dev *dev);
109 static struct pci_driver pci_driver = {
110 	.name		= "synclink_gt",
111 	.id_table	= pci_table,
112 	.probe		= init_one,
113 	.remove		= __devexit_p(remove_one),
114 };
115 
116 static bool pci_registered;
117 
118 /*
119  * module configuration and status
120  */
121 static struct slgt_info *slgt_device_list;
122 static int slgt_device_count;
123 
124 static int ttymajor;
125 static int debug_level;
126 static int maxframe[MAX_DEVICES];
127 
128 module_param(ttymajor, int, 0);
129 module_param(debug_level, int, 0);
130 module_param_array(maxframe, int, NULL, 0);
131 
132 MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned");
133 MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail");
134 MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");
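/*
 * Example (editor's illustration, not part of the original source):
 * these parameters are normally supplied at module load time, e.g.
 *
 *   modprobe synclink_gt debug_level=3 maxframe=4096,8192
 *
 * debug_level selects syslog verbosity (1 to 5, increasing detail);
 * maxframe sets the maximum frame size per device in probe order.
 */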
135 
136 /*
137  * tty support and callbacks
138  */
139 static struct tty_driver *serial_driver;
140 
141 static int  open(struct tty_struct *tty, struct file * filp);
142 static void close(struct tty_struct *tty, struct file * filp);
143 static void hangup(struct tty_struct *tty);
144 static void set_termios(struct tty_struct *tty, struct ktermios *old_termios);
145 
146 static int  write(struct tty_struct *tty, const unsigned char *buf, int count);
147 static int put_char(struct tty_struct *tty, unsigned char ch);
148 static void send_xchar(struct tty_struct *tty, char ch);
149 static void wait_until_sent(struct tty_struct *tty, int timeout);
150 static int  write_room(struct tty_struct *tty);
151 static void flush_chars(struct tty_struct *tty);
152 static void flush_buffer(struct tty_struct *tty);
153 static void tx_hold(struct tty_struct *tty);
154 static void tx_release(struct tty_struct *tty);
155 
156 static int  ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg);
157 static int  read_proc(char *page, char **start, off_t off, int count,int *eof, void *data);
158 static int  chars_in_buffer(struct tty_struct *tty);
159 static void throttle(struct tty_struct * tty);
160 static void unthrottle(struct tty_struct * tty);
161 static int set_break(struct tty_struct *tty, int break_state);
162 
163 /*
164  * generic HDLC support and callbacks
165  */
166 #if SYNCLINK_GENERIC_HDLC
167 #define dev_to_port(D) (dev_to_hdlc(D)->priv)
168 static void hdlcdev_tx_done(struct slgt_info *info);
169 static void hdlcdev_rx(struct slgt_info *info, char *buf, int size);
170 static int  hdlcdev_init(struct slgt_info *info);
171 static void hdlcdev_exit(struct slgt_info *info);
172 #endif
173 
174 
175 /*
176  * device specific structures, macros and functions
177  */
178 
179 #define SLGT_MAX_PORTS 4
180 #define SLGT_REG_SIZE  256
181 
182 /*
183  * conditional wait facility
184  */
185 struct cond_wait {
186 	struct cond_wait *next;
187 	wait_queue_head_t q;
188 	wait_queue_t wait;
189 	unsigned int data;
190 };
191 static void init_cond_wait(struct cond_wait *w, unsigned int data);
192 static void add_cond_wait(struct cond_wait **head, struct cond_wait *w);
193 static void remove_cond_wait(struct cond_wait **head, struct cond_wait *w);
194 static void flush_cond_wait(struct cond_wait **head);
195 
196 /*
197  * DMA buffer descriptor and access macros
198  */
199 struct slgt_desc
200 {
201 	__le16 count;
202 	__le16 status;
203 	__le32 pbuf;  /* physical address of data buffer */
204 	__le32 next;  /* physical address of next descriptor */
205 
206 	/* driver bookkeeping */
207 	char *buf;          /* virtual  address of data buffer */
208     	unsigned int pdesc; /* physical address of this descriptor */
209 	dma_addr_t buf_dma_addr;
210 	unsigned short buf_count;
211 };
212 
213 #define set_desc_buffer(a,b) (a).pbuf = cpu_to_le32((unsigned int)(b))
214 #define set_desc_next(a,b) (a).next   = cpu_to_le32((unsigned int)(b))
215 #define set_desc_count(a,b)(a).count  = cpu_to_le16((unsigned short)(b))
216 #define set_desc_eof(a,b)  (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0))
217 #define desc_count(a)      (le16_to_cpu((a).count))
218 #define desc_status(a)     (le16_to_cpu((a).status))
219 #define desc_complete(a)   (le16_to_cpu((a).status) & BIT15)
220 #define desc_eof(a)        (le16_to_cpu((a).status) & BIT2)
221 #define desc_crc_error(a)  (le16_to_cpu((a).status) & BIT1)
222 #define desc_abort(a)      (le16_to_cpu((a).status) & BIT0)
223 #define desc_residue(a)    ((le16_to_cpu((a).status) & 0x38) >> 3)
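/*
 * Usage sketch (editor's illustration; process_frame() is a placeholder,
 * rbufs[] is the receive descriptor array defined further below):
 *
 *   if (desc_complete(info->rbufs[i]) && !desc_crc_error(info->rbufs[i]))
 *       process_frame(info->rbufs[i].buf, desc_count(info->rbufs[i]));
 */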
224 
225 struct _input_signal_events {
226 	int ri_up;
227 	int ri_down;
228 	int dsr_up;
229 	int dsr_down;
230 	int dcd_up;
231 	int dcd_down;
232 	int cts_up;
233 	int cts_down;
234 };
235 
236 /*
237  * device instance data structure
238  */
239 struct slgt_info {
240 	void *if_ptr;		/* General purpose pointer (used by SPPP) */
241 	struct tty_port port;
242 
243 	struct slgt_info *next_device;	/* device list link */
244 
245 	int magic;
246 
247 	char device_name[25];
248 	struct pci_dev *pdev;
249 
250 	int port_count;  /* count of ports on adapter */
251 	int adapter_num; /* adapter instance number */
252 	int port_num;    /* port instance number */
253 
254 	/* array of pointers to port contexts on this adapter */
255 	struct slgt_info *port_array[SLGT_MAX_PORTS];
256 
257 	int			line;		/* tty line instance number */
258 
259 	struct mgsl_icount	icount;
260 
261 	int			timeout;
262 	int			x_char;		/* xon/xoff character */
263 	unsigned int		read_status_mask;
264 	unsigned int 		ignore_status_mask;
265 
266 	wait_queue_head_t	status_event_wait_q;
267 	wait_queue_head_t	event_wait_q;
268 	struct timer_list	tx_timer;
269 	struct timer_list	rx_timer;
270 
271 	unsigned int            gpio_present;
272 	struct cond_wait        *gpio_wait_q;
273 
274 	spinlock_t lock;	/* spinlock for synchronizing with ISR */
275 
276 	struct work_struct task;
277 	u32 pending_bh;
278 	bool bh_requested;
279 	bool bh_running;
280 
281 	int isr_overflow;
282 	bool irq_requested;	/* true if IRQ requested */
283 	bool irq_occurred;	/* for diagnostics use */
284 
285 	/* device configuration */
286 
287 	unsigned int bus_type;
288 	unsigned int irq_level;
289 	unsigned long irq_flags;
290 
291 	unsigned char __iomem * reg_addr;  /* memory mapped registers address */
292 	u32 phys_reg_addr;
293 	bool reg_addr_requested;
294 
295 	MGSL_PARAMS params;       /* communications parameters */
296 	u32 idle_mode;
297 	u32 max_frame_size;       /* as set by device config */
298 
299 	unsigned int rbuf_fill_level;
300 	unsigned int if_mode;
301 
302 	/* device status */
303 
304 	bool rx_enabled;
305 	bool rx_restart;
306 
307 	bool tx_enabled;
308 	bool tx_active;
309 
310 	unsigned char signals;    /* serial signal states */
311 	int init_error;  /* initialization error */
312 
313 	unsigned char *tx_buf;
314 	int tx_count;
315 
316 	char flag_buf[MAX_ASYNC_BUFFER_SIZE];
317 	char char_buf[MAX_ASYNC_BUFFER_SIZE];
318 	bool drop_rts_on_tx_done;
319 	struct	_input_signal_events	input_signal_events;
320 
321 	int dcd_chkcount;	/* check counts to prevent */
322 	int cts_chkcount;	/* too many IRQs if a signal */
323 	int dsr_chkcount;	/* is floating */
324 	int ri_chkcount;
325 
326 	char *bufs;		/* virtual address of DMA buffer lists */
327 	dma_addr_t bufs_dma_addr; /* physical address of buffer descriptors */
328 
329 	unsigned int rbuf_count;
330 	struct slgt_desc *rbufs;
331 	unsigned int rbuf_current;
332 	unsigned int rbuf_index;
333 
334 	unsigned int tbuf_count;
335 	struct slgt_desc *tbufs;
336 	unsigned int tbuf_current;
337 	unsigned int tbuf_start;
338 
339 	unsigned char *tmp_rbuf;
340 	unsigned int tmp_rbuf_count;
341 
342 	/* SPPP/Cisco HDLC device parts */
343 
344 	int netcount;
345 	spinlock_t netlock;
346 #if SYNCLINK_GENERIC_HDLC
347 	struct net_device *netdev;
348 #endif
349 
350 };
351 
352 static MGSL_PARAMS default_params = {
353 	.mode            = MGSL_MODE_HDLC,
354 	.loopback        = 0,
355 	.flags           = HDLC_FLAG_UNDERRUN_ABORT15,
356 	.encoding        = HDLC_ENCODING_NRZI_SPACE,
357 	.clock_speed     = 0,
358 	.addr_filter     = 0xff,
359 	.crc_type        = HDLC_CRC_16_CCITT,
360 	.preamble_length = HDLC_PREAMBLE_LENGTH_8BITS,
361 	.preamble        = HDLC_PREAMBLE_PATTERN_NONE,
362 	.data_rate       = 9600,
363 	.data_bits       = 8,
364 	.stop_bits       = 1,
365 	.parity          = ASYNC_PARITY_NONE
366 };
367 
368 
369 #define BH_RECEIVE  1
370 #define BH_TRANSMIT 2
371 #define BH_STATUS   4
372 #define IO_PIN_SHUTDOWN_LIMIT 100
373 
374 #define DMABUFSIZE 256
375 #define DESC_LIST_SIZE 4096
376 
377 #define MASK_PARITY  BIT1
378 #define MASK_FRAMING BIT0
379 #define MASK_BREAK   BIT14
380 #define MASK_OVERRUN BIT4
381 
382 #define GSR   0x00 /* global status */
383 #define JCR   0x04 /* JTAG control */
384 #define IODR  0x08 /* GPIO direction */
385 #define IOER  0x0c /* GPIO interrupt enable */
386 #define IOVR  0x10 /* GPIO value */
387 #define IOSR  0x14 /* GPIO interrupt status */
388 #define TDR   0x80 /* tx data */
389 #define RDR   0x80 /* rx data */
390 #define TCR   0x82 /* tx control */
391 #define TIR   0x84 /* tx idle */
392 #define TPR   0x85 /* tx preamble */
393 #define RCR   0x86 /* rx control */
394 #define VCR   0x88 /* V.24 control */
395 #define CCR   0x89 /* clock control */
396 #define BDR   0x8a /* baud divisor */
397 #define SCR   0x8c /* serial control */
398 #define SSR   0x8e /* serial status */
399 #define RDCSR 0x90 /* rx DMA control/status */
400 #define TDCSR 0x94 /* tx DMA control/status */
401 #define RDDAR 0x98 /* rx DMA descriptor address */
402 #define TDDAR 0x9c /* tx DMA descriptor address */
403 
404 #define RXIDLE      BIT14
405 #define RXBREAK     BIT14
406 #define IRQ_TXDATA  BIT13
407 #define IRQ_TXIDLE  BIT12
408 #define IRQ_TXUNDER BIT11 /* HDLC */
409 #define IRQ_RXDATA  BIT10
410 #define IRQ_RXIDLE  BIT9  /* HDLC */
411 #define IRQ_RXBREAK BIT9  /* async */
412 #define IRQ_RXOVER  BIT8
413 #define IRQ_DSR     BIT7
414 #define IRQ_CTS     BIT6
415 #define IRQ_DCD     BIT5
416 #define IRQ_RI      BIT4
417 #define IRQ_ALL     0x3ff0
418 #define IRQ_MASTER  BIT0
419 
420 #define slgt_irq_on(info, mask) \
421 	wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) | (mask)))
422 #define slgt_irq_off(info, mask) \
423 	wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) & ~(mask)))
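/*
 * Usage sketch (editor's illustration): individual interrupt sources are
 * enabled or disabled by setting or clearing their SCR bits, e.g.
 *
 *   slgt_irq_on(info, IRQ_RXDATA + IRQ_RXOVER);
 *   slgt_irq_off(info, IRQ_TXDATA);
 */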
424 
425 static __u8  rd_reg8(struct slgt_info *info, unsigned int addr);
426 static void  wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value);
427 static __u16 rd_reg16(struct slgt_info *info, unsigned int addr);
428 static void  wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value);
429 static __u32 rd_reg32(struct slgt_info *info, unsigned int addr);
430 static void  wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value);
431 
432 static void  msc_set_vcr(struct slgt_info *info);
433 
434 static int  startup(struct slgt_info *info);
435 static int  block_til_ready(struct tty_struct *tty, struct file * filp,struct slgt_info *info);
436 static void shutdown(struct slgt_info *info);
437 static void program_hw(struct slgt_info *info);
438 static void change_params(struct slgt_info *info);
439 
440 static int  register_test(struct slgt_info *info);
441 static int  irq_test(struct slgt_info *info);
442 static int  loopback_test(struct slgt_info *info);
443 static int  adapter_test(struct slgt_info *info);
444 
445 static void reset_adapter(struct slgt_info *info);
446 static void reset_port(struct slgt_info *info);
447 static void async_mode(struct slgt_info *info);
448 static void sync_mode(struct slgt_info *info);
449 
450 static void rx_stop(struct slgt_info *info);
451 static void rx_start(struct slgt_info *info);
452 static void reset_rbufs(struct slgt_info *info);
453 static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
454 static void rdma_reset(struct slgt_info *info);
455 static bool rx_get_frame(struct slgt_info *info);
456 static bool rx_get_buf(struct slgt_info *info);
457 
458 static void tx_start(struct slgt_info *info);
459 static void tx_stop(struct slgt_info *info);
460 static void tx_set_idle(struct slgt_info *info);
461 static unsigned int free_tbuf_count(struct slgt_info *info);
462 static unsigned int tbuf_bytes(struct slgt_info *info);
463 static void reset_tbufs(struct slgt_info *info);
464 static void tdma_reset(struct slgt_info *info);
465 static void tdma_start(struct slgt_info *info);
466 static void tx_load(struct slgt_info *info, const char *buf, unsigned int count);
467 
468 static void get_signals(struct slgt_info *info);
469 static void set_signals(struct slgt_info *info);
470 static void enable_loopback(struct slgt_info *info);
471 static void set_rate(struct slgt_info *info, u32 data_rate);
472 
473 static int  bh_action(struct slgt_info *info);
474 static void bh_handler(struct work_struct *work);
475 static void bh_transmit(struct slgt_info *info);
476 static void isr_serial(struct slgt_info *info);
477 static void isr_rdma(struct slgt_info *info);
478 static void isr_txeom(struct slgt_info *info, unsigned short status);
479 static void isr_tdma(struct slgt_info *info);
480 
481 static int  alloc_dma_bufs(struct slgt_info *info);
482 static void free_dma_bufs(struct slgt_info *info);
483 static int  alloc_desc(struct slgt_info *info);
484 static void free_desc(struct slgt_info *info);
485 static int  alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
486 static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
487 
488 static int  alloc_tmp_rbuf(struct slgt_info *info);
489 static void free_tmp_rbuf(struct slgt_info *info);
490 
491 static void tx_timeout(unsigned long context);
492 static void rx_timeout(unsigned long context);
493 
494 /*
495  * ioctl handlers
496  */
497 static int  get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount);
498 static int  get_params(struct slgt_info *info, MGSL_PARAMS __user *params);
499 static int  set_params(struct slgt_info *info, MGSL_PARAMS __user *params);
500 static int  get_txidle(struct slgt_info *info, int __user *idle_mode);
501 static int  set_txidle(struct slgt_info *info, int idle_mode);
502 static int  tx_enable(struct slgt_info *info, int enable);
503 static int  tx_abort(struct slgt_info *info);
504 static int  rx_enable(struct slgt_info *info, int enable);
505 static int  modem_input_wait(struct slgt_info *info,int arg);
506 static int  wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr);
507 static int  tiocmget(struct tty_struct *tty, struct file *file);
508 static int  tiocmset(struct tty_struct *tty, struct file *file,
509 		     unsigned int set, unsigned int clear);
510 static int set_break(struct tty_struct *tty, int break_state);
511 static int  get_interface(struct slgt_info *info, int __user *if_mode);
512 static int  set_interface(struct slgt_info *info, int if_mode);
513 static int  set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
514 static int  get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
515 static int  wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
516 
517 /*
518  * driver functions
519  */
520 static void add_device(struct slgt_info *info);
521 static void device_init(int adapter_num, struct pci_dev *pdev);
522 static int  claim_resources(struct slgt_info *info);
523 static void release_resources(struct slgt_info *info);
524 
525 /*
526  * DEBUG OUTPUT CODE
527  */
528 #ifndef DBGINFO
529 #define DBGINFO(fmt)
530 #endif
531 #ifndef DBGERR
532 #define DBGERR(fmt)
533 #endif
534 #ifndef DBGBH
535 #define DBGBH(fmt)
536 #endif
537 #ifndef DBGISR
538 #define DBGISR(fmt)
539 #endif
540 
541 #ifdef DBGDATA
542 static void trace_block(struct slgt_info *info, const char *data, int count, const char *label)
543 {
544 	int i;
545 	int linecount;
546 	printk("%s %s data:\n",info->device_name, label);
547 	while(count) {
548 		linecount = (count > 16) ? 16 : count;
549 		for(i=0; i < linecount; i++)
550 			printk("%02X ",(unsigned char)data[i]);
551 		for(;i<17;i++)
552 			printk("   ");
553 		for(i=0;i<linecount;i++) {
554 			if (data[i]>=040 && data[i]<=0176)
555 				printk("%c",data[i]);
556 			else
557 				printk(".");
558 		}
559 		printk("\n");
560 		data  += linecount;
561 		count -= linecount;
562 	}
563 }
564 #else
565 #define DBGDATA(info, buf, size, label)
566 #endif
567 
568 #ifdef DBGTBUF
569 static void dump_tbufs(struct slgt_info *info)
570 {
571 	int i;
572 	printk("tbuf_current=%d\n", info->tbuf_current);
573 	for (i=0 ; i < info->tbuf_count ; i++) {
574 		printk("%d: count=%04X status=%04X\n",
575 			i, le16_to_cpu(info->tbufs[i].count), le16_to_cpu(info->tbufs[i].status));
576 	}
577 }
578 #else
579 #define DBGTBUF(info)
580 #endif
581 
582 #ifdef DBGRBUF
583 static void dump_rbufs(struct slgt_info *info)
584 {
585 	int i;
586 	printk("rbuf_current=%d\n", info->rbuf_current);
587 	for (i=0 ; i < info->rbuf_count ; i++) {
588 		printk("%d: count=%04X status=%04X\n",
589 			i, le16_to_cpu(info->rbufs[i].count), le16_to_cpu(info->rbufs[i].status));
590 	}
591 }
592 #else
593 #define DBGRBUF(info)
594 #endif
595 
596 static inline int sanity_check(struct slgt_info *info, char *devname, const char *name)
597 {
598 #ifdef SANITY_CHECK
599 	if (!info) {
600 		printk("null struct slgt_info for (%s) in %s\n", devname, name);
601 		return 1;
602 	}
603 	if (info->magic != MGSL_MAGIC) {
604 		printk("bad magic number struct slgt_info (%s) in %s\n", devname, name);
605 		return 1;
606 	}
607 #else
608 	if (!info)
609 		return 1;
610 #endif
611 	return 0;
612 }
613 
614 /**
615  * line discipline callback wrappers
616  *
617  * The wrappers maintain line discipline references
618  * while calling into the line discipline.
619  *
620  * ldisc_receive_buf  - pass receive data to line discipline
621  */
622 static void ldisc_receive_buf(struct tty_struct *tty,
623 			      const __u8 *data, char *flags, int count)
624 {
625 	struct tty_ldisc *ld;
626 	if (!tty)
627 		return;
628 	ld = tty_ldisc_ref(tty);
629 	if (ld) {
630 		if (ld->ops->receive_buf)
631 			ld->ops->receive_buf(tty, data, flags, count);
632 		tty_ldisc_deref(ld);
633 	}
634 }
635 
636 /* tty callbacks */
637 
638 static int open(struct tty_struct *tty, struct file *filp)
639 {
640 	struct slgt_info *info;
641 	int retval, line;
642 	unsigned long flags;
643 
644 	line = tty->index;
645 	if ((line < 0) || (line >= slgt_device_count)) {
646 		DBGERR(("%s: open with invalid line #%d.\n", driver_name, line));
647 		return -ENODEV;
648 	}
649 
650 	info = slgt_device_list;
651 	while(info && info->line != line)
652 		info = info->next_device;
653 	if (sanity_check(info, tty->name, "open"))
654 		return -ENODEV;
655 	if (info->init_error) {
656 		DBGERR(("%s init error=%d\n", info->device_name, info->init_error));
657 		return -ENODEV;
658 	}
659 
660 	tty->driver_data = info;
661 	info->port.tty = tty;
662 
663 	DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
664 
665 	/* If port is closing, signal caller to try again */
666 	if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
667 		if (info->port.flags & ASYNC_CLOSING)
668 			interruptible_sleep_on(&info->port.close_wait);
669 		retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
670 			-EAGAIN : -ERESTARTSYS);
671 		goto cleanup;
672 	}
673 
674 	info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
675 
676 	spin_lock_irqsave(&info->netlock, flags);
677 	if (info->netcount) {
678 		retval = -EBUSY;
679 		spin_unlock_irqrestore(&info->netlock, flags);
680 		goto cleanup;
681 	}
682 	info->port.count++;
683 	spin_unlock_irqrestore(&info->netlock, flags);
684 
685 	if (info->port.count == 1) {
686 		/* 1st open on this device, init hardware */
687 		retval = startup(info);
688 		if (retval < 0)
689 			goto cleanup;
690 	}
691 
692 	retval = block_til_ready(tty, filp, info);
693 	if (retval) {
694 		DBGINFO(("%s block_til_ready rc=%d\n", info->device_name, retval));
695 		goto cleanup;
696 	}
697 
698 	retval = 0;
699 
700 cleanup:
701 	if (retval) {
702 		if (tty->count == 1)
703 			info->port.tty = NULL; /* tty layer will release tty struct */
704 		if(info->port.count)
705 			info->port.count--;
706 	}
707 
708 	DBGINFO(("%s open rc=%d\n", info->device_name, retval));
709 	return retval;
710 }
711 
712 static void close(struct tty_struct *tty, struct file *filp)
713 {
714 	struct slgt_info *info = tty->driver_data;
715 
716 	if (sanity_check(info, tty->name, "close"))
717 		return;
718 	DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
719 
720 	if (tty_port_close_start(&info->port, tty, filp) == 0)
721 		goto cleanup;
722 
723  	if (info->port.flags & ASYNC_INITIALIZED)
724  		wait_until_sent(tty, info->timeout);
725 	flush_buffer(tty);
726 	tty_ldisc_flush(tty);
727 
728 	shutdown(info);
729 
730 	tty_port_close_end(&info->port, tty);
731 	info->port.tty = NULL;
732 cleanup:
733 	DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
734 }
735 
736 static void hangup(struct tty_struct *tty)
737 {
738 	struct slgt_info *info = tty->driver_data;
739 
740 	if (sanity_check(info, tty->name, "hangup"))
741 		return;
742 	DBGINFO(("%s hangup\n", info->device_name));
743 
744 	flush_buffer(tty);
745 	shutdown(info);
746 
747 	info->port.count = 0;
748 	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
749 	info->port.tty = NULL;
750 
751 	wake_up_interruptible(&info->port.open_wait);
752 }
753 
754 static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
755 {
756 	struct slgt_info *info = tty->driver_data;
757 	unsigned long flags;
758 
759 	DBGINFO(("%s set_termios\n", tty->driver->name));
760 
761 	change_params(info);
762 
763 	/* Handle transition to B0 status */
764 	if (old_termios->c_cflag & CBAUD &&
765 	    !(tty->termios->c_cflag & CBAUD)) {
766 		info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
767 		spin_lock_irqsave(&info->lock,flags);
768 		set_signals(info);
769 		spin_unlock_irqrestore(&info->lock,flags);
770 	}
771 
772 	/* Handle transition away from B0 status */
773 	if (!(old_termios->c_cflag & CBAUD) &&
774 	    tty->termios->c_cflag & CBAUD) {
775 		info->signals |= SerialSignal_DTR;
776  		if (!(tty->termios->c_cflag & CRTSCTS) ||
777  		    !test_bit(TTY_THROTTLED, &tty->flags)) {
778 			info->signals |= SerialSignal_RTS;
779  		}
780 		spin_lock_irqsave(&info->lock,flags);
781 	 	set_signals(info);
782 		spin_unlock_irqrestore(&info->lock,flags);
783 	}
784 
785 	/* Handle turning off CRTSCTS */
786 	if (old_termios->c_cflag & CRTSCTS &&
787 	    !(tty->termios->c_cflag & CRTSCTS)) {
788 		tty->hw_stopped = 0;
789 		tx_release(tty);
790 	}
791 }
792 
793 static int write(struct tty_struct *tty,
794 		 const unsigned char *buf, int count)
795 {
796 	int ret = 0;
797 	struct slgt_info *info = tty->driver_data;
798 	unsigned long flags;
799 	unsigned int bufs_needed;
800 
801 	if (sanity_check(info, tty->name, "write"))
802 		goto cleanup;
803 	DBGINFO(("%s write count=%d\n", info->device_name, count));
804 
805 	if (!info->tx_buf)
806 		goto cleanup;
807 
808 	if (count > info->max_frame_size) {
809 		ret = -EIO;
810 		goto cleanup;
811 	}
812 
813 	if (!count)
814 		goto cleanup;
815 
816 	if (!info->tx_active && info->tx_count) {
817 		/* send accumulated data from send_char() */
818 		tx_load(info, info->tx_buf, info->tx_count);
819 		goto start;
820 	}
821 	bufs_needed = (count/DMABUFSIZE);
822 	if (count % DMABUFSIZE)
823 		++bufs_needed;
824 	if (bufs_needed > free_tbuf_count(info))
825 		goto cleanup;
826 
827 	ret = info->tx_count = count;
828 	tx_load(info, buf, count);
829 	goto start;
830 
831 start:
832  	if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
833 		spin_lock_irqsave(&info->lock,flags);
834 		if (!info->tx_active)
835 		 	tx_start(info);
836 		else
837 			tdma_start(info);
838 		spin_unlock_irqrestore(&info->lock,flags);
839  	}
840 
841 cleanup:
842 	DBGINFO(("%s write rc=%d\n", info->device_name, ret));
843 	return ret;
844 }
845 
846 static int put_char(struct tty_struct *tty, unsigned char ch)
847 {
848 	struct slgt_info *info = tty->driver_data;
849 	unsigned long flags;
850 	int ret = 0;
851 
852 	if (sanity_check(info, tty->name, "put_char"))
853 		return 0;
854 	DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
855 	if (!info->tx_buf)
856 		return 0;
857 	spin_lock_irqsave(&info->lock,flags);
858 	if (!info->tx_active && (info->tx_count < info->max_frame_size)) {
859 		info->tx_buf[info->tx_count++] = ch;
860 		ret = 1;
861 	}
862 	spin_unlock_irqrestore(&info->lock,flags);
863 	return ret;
864 }
865 
866 static void send_xchar(struct tty_struct *tty, char ch)
867 {
868 	struct slgt_info *info = tty->driver_data;
869 	unsigned long flags;
870 
871 	if (sanity_check(info, tty->name, "send_xchar"))
872 		return;
873 	DBGINFO(("%s send_xchar(%d)\n", info->device_name, ch));
874 	info->x_char = ch;
875 	if (ch) {
876 		spin_lock_irqsave(&info->lock,flags);
877 		if (!info->tx_enabled)
878 		 	tx_start(info);
879 		spin_unlock_irqrestore(&info->lock,flags);
880 	}
881 }
882 
883 static void wait_until_sent(struct tty_struct *tty, int timeout)
884 {
885 	struct slgt_info *info = tty->driver_data;
886 	unsigned long orig_jiffies, char_time;
887 
888 	if (!info )
889 		return;
890 	if (sanity_check(info, tty->name, "wait_until_sent"))
891 		return;
892 	DBGINFO(("%s wait_until_sent entry\n", info->device_name));
893 	if (!(info->port.flags & ASYNC_INITIALIZED))
894 		goto exit;
895 
896 	orig_jiffies = jiffies;
897 
898 	/* Set check interval to 1/5 of estimated time to
899 	 * send a character, and make it at least 1. The check
900 	 * interval should also be less than the timeout.
901 	 * Note: use tight timings here to satisfy the NIST-PCTS.
902 	 */
903 
904 	lock_kernel();
905 
906 	if (info->params.data_rate) {
907 	       	char_time = info->timeout/(32 * 5);
908 		if (!char_time)
909 			char_time++;
910 	} else
911 		char_time = 1;
912 
913 	if (timeout)
914 		char_time = min_t(unsigned long, char_time, timeout);
915 
916 	while (info->tx_active) {
917 		msleep_interruptible(jiffies_to_msecs(char_time));
918 		if (signal_pending(current))
919 			break;
920 		if (timeout && time_after(jiffies, orig_jiffies + timeout))
921 			break;
922 	}
923 	unlock_kernel();
924 
925 exit:
926 	DBGINFO(("%s wait_until_sent exit\n", info->device_name));
927 }
928 
929 static int write_room(struct tty_struct *tty)
930 {
931 	struct slgt_info *info = tty->driver_data;
932 	int ret;
933 
934 	if (sanity_check(info, tty->name, "write_room"))
935 		return 0;
936 	ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
937 	DBGINFO(("%s write_room=%d\n", info->device_name, ret));
938 	return ret;
939 }
940 
941 static void flush_chars(struct tty_struct *tty)
942 {
943 	struct slgt_info *info = tty->driver_data;
944 	unsigned long flags;
945 
946 	if (sanity_check(info, tty->name, "flush_chars"))
947 		return;
948 	DBGINFO(("%s flush_chars entry tx_count=%d\n", info->device_name, info->tx_count));
949 
950 	if (info->tx_count <= 0 || tty->stopped ||
951 	    tty->hw_stopped || !info->tx_buf)
952 		return;
953 
954 	DBGINFO(("%s flush_chars start transmit\n", info->device_name));
955 
956 	spin_lock_irqsave(&info->lock,flags);
957 	if (!info->tx_active && info->tx_count) {
958 		tx_load(info, info->tx_buf,info->tx_count);
959 	 	tx_start(info);
960 	}
961 	spin_unlock_irqrestore(&info->lock,flags);
962 }
963 
964 static void flush_buffer(struct tty_struct *tty)
965 {
966 	struct slgt_info *info = tty->driver_data;
967 	unsigned long flags;
968 
969 	if (sanity_check(info, tty->name, "flush_buffer"))
970 		return;
971 	DBGINFO(("%s flush_buffer\n", info->device_name));
972 
973 	spin_lock_irqsave(&info->lock,flags);
974 	if (!info->tx_active)
975 		info->tx_count = 0;
976 	spin_unlock_irqrestore(&info->lock,flags);
977 
978 	tty_wakeup(tty);
979 }
980 
981 /*
982  * throttle (stop) transmitter
983  */
984 static void tx_hold(struct tty_struct *tty)
985 {
986 	struct slgt_info *info = tty->driver_data;
987 	unsigned long flags;
988 
989 	if (sanity_check(info, tty->name, "tx_hold"))
990 		return;
991 	DBGINFO(("%s tx_hold\n", info->device_name));
992 	spin_lock_irqsave(&info->lock,flags);
993 	if (info->tx_enabled && info->params.mode == MGSL_MODE_ASYNC)
994 	 	tx_stop(info);
995 	spin_unlock_irqrestore(&info->lock,flags);
996 }
997 
998 /*
999  * release (start) transmitter
1000  */
1001 static void tx_release(struct tty_struct *tty)
1002 {
1003 	struct slgt_info *info = tty->driver_data;
1004 	unsigned long flags;
1005 
1006 	if (sanity_check(info, tty->name, "tx_release"))
1007 		return;
1008 	DBGINFO(("%s tx_release\n", info->device_name));
1009 	spin_lock_irqsave(&info->lock,flags);
1010 	if (!info->tx_active && info->tx_count) {
1011 		tx_load(info, info->tx_buf, info->tx_count);
1012 	 	tx_start(info);
1013 	}
1014 	spin_unlock_irqrestore(&info->lock,flags);
1015 }
1016 
1017 /*
1018  * Service an IOCTL request
1019  *
1020  * Arguments
1021  *
1022  * 	tty	pointer to tty instance data
1023  * 	file	pointer to associated file object for device
1024  * 	cmd	IOCTL command code
1025  * 	arg	command argument/context
1026  *
1027  * Return 0 if success, otherwise error code
1028  */
1029 static int ioctl(struct tty_struct *tty, struct file *file,
1030 		 unsigned int cmd, unsigned long arg)
1031 {
1032 	struct slgt_info *info = tty->driver_data;
1033 	struct mgsl_icount cnow;	/* kernel counter temps */
1034 	struct serial_icounter_struct __user *p_cuser;	/* user space */
1035 	unsigned long flags;
1036 	void __user *argp = (void __user *)arg;
1037 	int ret;
1038 
1039 	if (sanity_check(info, tty->name, "ioctl"))
1040 		return -ENODEV;
1041 	DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd));
1042 
1043 	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
1044 	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
1045 		if (tty->flags & (1 << TTY_IO_ERROR))
1046 		    return -EIO;
1047 	}
1048 
1049 	lock_kernel();
1050 
1051 	switch (cmd) {
1052 	case MGSL_IOCGPARAMS:
1053 		ret = get_params(info, argp);
1054 		break;
1055 	case MGSL_IOCSPARAMS:
1056 		ret = set_params(info, argp);
1057 		break;
1058 	case MGSL_IOCGTXIDLE:
1059 		ret = get_txidle(info, argp);
1060 		break;
1061 	case MGSL_IOCSTXIDLE:
1062 		ret = set_txidle(info, (int)arg);
1063 		break;
1064 	case MGSL_IOCTXENABLE:
1065 		ret = tx_enable(info, (int)arg);
1066 		break;
1067 	case MGSL_IOCRXENABLE:
1068 		ret = rx_enable(info, (int)arg);
1069 		break;
1070 	case MGSL_IOCTXABORT:
1071 		ret = tx_abort(info);
1072 		break;
1073 	case MGSL_IOCGSTATS:
1074 		ret = get_stats(info, argp);
1075 		break;
1076 	case MGSL_IOCWAITEVENT:
1077 		ret = wait_mgsl_event(info, argp);
1078 		break;
1079 	case TIOCMIWAIT:
1080 		ret = modem_input_wait(info,(int)arg);
1081 		break;
1082 	case MGSL_IOCGIF:
1083 		ret = get_interface(info, argp);
1084 		break;
1085 	case MGSL_IOCSIF:
1086 		ret = set_interface(info,(int)arg);
1087 		break;
1088 	case MGSL_IOCSGPIO:
1089 		ret = set_gpio(info, argp);
1090 		break;
1091 	case MGSL_IOCGGPIO:
1092 		ret = get_gpio(info, argp);
1093 		break;
1094 	case MGSL_IOCWAITGPIO:
1095 		ret = wait_gpio(info, argp);
1096 		break;
1097 	case TIOCGICOUNT:
1098 		spin_lock_irqsave(&info->lock,flags);
1099 		cnow = info->icount;
1100 		spin_unlock_irqrestore(&info->lock,flags);
1101 		p_cuser = argp;
1102 		if (put_user(cnow.cts, &p_cuser->cts) ||
1103 		    put_user(cnow.dsr, &p_cuser->dsr) ||
1104 		    put_user(cnow.rng, &p_cuser->rng) ||
1105 		    put_user(cnow.dcd, &p_cuser->dcd) ||
1106 		    put_user(cnow.rx, &p_cuser->rx) ||
1107 		    put_user(cnow.tx, &p_cuser->tx) ||
1108 		    put_user(cnow.frame, &p_cuser->frame) ||
1109 		    put_user(cnow.overrun, &p_cuser->overrun) ||
1110 		    put_user(cnow.parity, &p_cuser->parity) ||
1111 		    put_user(cnow.brk, &p_cuser->brk) ||
1112 		    put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
1113 			ret = -EFAULT;
1114 		else ret = 0;
1115 		break;
1116 	default:
1117 		ret = -ENOIOCTLCMD;
1118 	}
1119 	unlock_kernel();
1120 	return ret;
1121 }
1122 
1123 /*
1124  * support for 32 bit ioctl calls on 64 bit systems
1125  */
1126 #ifdef CONFIG_COMPAT
1127 static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *user_params)
1128 {
1129 	struct MGSL_PARAMS32 tmp_params;
1130 
1131 	DBGINFO(("%s get_params32\n", info->device_name));
1132 	tmp_params.mode            = (compat_ulong_t)info->params.mode;
1133 	tmp_params.loopback        = info->params.loopback;
1134 	tmp_params.flags           = info->params.flags;
1135 	tmp_params.encoding        = info->params.encoding;
1136 	tmp_params.clock_speed     = (compat_ulong_t)info->params.clock_speed;
1137 	tmp_params.addr_filter     = info->params.addr_filter;
1138 	tmp_params.crc_type        = info->params.crc_type;
1139 	tmp_params.preamble_length = info->params.preamble_length;
1140 	tmp_params.preamble        = info->params.preamble;
1141 	tmp_params.data_rate       = (compat_ulong_t)info->params.data_rate;
1142 	tmp_params.data_bits       = info->params.data_bits;
1143 	tmp_params.stop_bits       = info->params.stop_bits;
1144 	tmp_params.parity          = info->params.parity;
1145 	if (copy_to_user(user_params, &tmp_params, sizeof(struct MGSL_PARAMS32)))
1146 		return -EFAULT;
1147 	return 0;
1148 }
1149 
1150 static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *new_params)
1151 {
1152 	struct MGSL_PARAMS32 tmp_params;
1153 
1154 	DBGINFO(("%s set_params32\n", info->device_name));
1155 	if (copy_from_user(&tmp_params, new_params, sizeof(struct MGSL_PARAMS32)))
1156 		return -EFAULT;
1157 
1158 	spin_lock(&info->lock);
1159 	info->params.mode            = tmp_params.mode;
1160 	info->params.loopback        = tmp_params.loopback;
1161 	info->params.flags           = tmp_params.flags;
1162 	info->params.encoding        = tmp_params.encoding;
1163 	info->params.clock_speed     = tmp_params.clock_speed;
1164 	info->params.addr_filter     = tmp_params.addr_filter;
1165 	info->params.crc_type        = tmp_params.crc_type;
1166 	info->params.preamble_length = tmp_params.preamble_length;
1167 	info->params.preamble        = tmp_params.preamble;
1168 	info->params.data_rate       = tmp_params.data_rate;
1169 	info->params.data_bits       = tmp_params.data_bits;
1170 	info->params.stop_bits       = tmp_params.stop_bits;
1171 	info->params.parity          = tmp_params.parity;
1172 	spin_unlock(&info->lock);
1173 
1174  	change_params(info);
1175 
1176 	return 0;
1177 }
1178 
1179 static long slgt_compat_ioctl(struct tty_struct *tty, struct file *file,
1180 			 unsigned int cmd, unsigned long arg)
1181 {
1182 	struct slgt_info *info = tty->driver_data;
1183 	int rc = -ENOIOCTLCMD;
1184 
1185 	if (sanity_check(info, tty->name, "compat_ioctl"))
1186 		return -ENODEV;
1187 	DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));
1188 
1189 	switch (cmd) {
1190 
1191 	case MGSL_IOCSPARAMS32:
1192 		rc = set_params32(info, compat_ptr(arg));
1193 		break;
1194 
1195 	case MGSL_IOCGPARAMS32:
1196 		rc = get_params32(info, compat_ptr(arg));
1197 		break;
1198 
1199 	case MGSL_IOCGPARAMS:
1200 	case MGSL_IOCSPARAMS:
1201 	case MGSL_IOCGTXIDLE:
1202 	case MGSL_IOCGSTATS:
1203 	case MGSL_IOCWAITEVENT:
1204 	case MGSL_IOCGIF:
1205 	case MGSL_IOCSGPIO:
1206 	case MGSL_IOCGGPIO:
1207 	case MGSL_IOCWAITGPIO:
1208 	case TIOCGICOUNT:
1209 		rc = ioctl(tty, file, cmd, (unsigned long)(compat_ptr(arg)));
1210 		break;
1211 
1212 	case MGSL_IOCSTXIDLE:
1213 	case MGSL_IOCTXENABLE:
1214 	case MGSL_IOCRXENABLE:
1215 	case MGSL_IOCTXABORT:
1216 	case TIOCMIWAIT:
1217 	case MGSL_IOCSIF:
1218 		rc = ioctl(tty, file, cmd, arg);
1219 		break;
1220 	}
1221 
1222 	DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
1223 	return rc;
1224 }
1225 #else
1226 #define slgt_compat_ioctl NULL
1227 #endif /* ifdef CONFIG_COMPAT */
1228 
1229 /*
1230  * proc fs support
1231  */
1232 static inline int line_info(char *buf, struct slgt_info *info)
1233 {
1234 	char stat_buf[30];
1235 	int ret;
1236 	unsigned long flags;
1237 
1238 	ret = sprintf(buf, "%s: IO=%08X IRQ=%d MaxFrameSize=%u\n",
1239 		      info->device_name, info->phys_reg_addr,
1240 		      info->irq_level, info->max_frame_size);
1241 
1242 	/* output current serial signal states */
1243 	spin_lock_irqsave(&info->lock,flags);
1244 	get_signals(info);
1245 	spin_unlock_irqrestore(&info->lock,flags);
1246 
1247 	stat_buf[0] = 0;
1248 	stat_buf[1] = 0;
1249 	if (info->signals & SerialSignal_RTS)
1250 		strcat(stat_buf, "|RTS");
1251 	if (info->signals & SerialSignal_CTS)
1252 		strcat(stat_buf, "|CTS");
1253 	if (info->signals & SerialSignal_DTR)
1254 		strcat(stat_buf, "|DTR");
1255 	if (info->signals & SerialSignal_DSR)
1256 		strcat(stat_buf, "|DSR");
1257 	if (info->signals & SerialSignal_DCD)
1258 		strcat(stat_buf, "|CD");
1259 	if (info->signals & SerialSignal_RI)
1260 		strcat(stat_buf, "|RI");
1261 
1262 	if (info->params.mode != MGSL_MODE_ASYNC) {
1263 		ret += sprintf(buf+ret, "\tHDLC txok:%d rxok:%d",
1264 			       info->icount.txok, info->icount.rxok);
1265 		if (info->icount.txunder)
1266 			ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
1267 		if (info->icount.txabort)
1268 			ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
1269 		if (info->icount.rxshort)
1270 			ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
1271 		if (info->icount.rxlong)
1272 			ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
1273 		if (info->icount.rxover)
1274 			ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
1275 		if (info->icount.rxcrc)
1276 			ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
1277 	} else {
1278 		ret += sprintf(buf+ret, "\tASYNC tx:%d rx:%d",
1279 			       info->icount.tx, info->icount.rx);
1280 		if (info->icount.frame)
1281 			ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
1282 		if (info->icount.parity)
1283 			ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
1284 		if (info->icount.brk)
1285 			ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
1286 		if (info->icount.overrun)
1287 			ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
1288 	}
1289 
1290 	/* Append serial signal status to end */
1291 	ret += sprintf(buf+ret, " %s\n", stat_buf+1);
1292 
1293 	ret += sprintf(buf+ret, "\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
1294 		       info->tx_active,info->bh_requested,info->bh_running,
1295 		       info->pending_bh);
1296 
1297 	return ret;
1298 }
1299 
1300 /* Called to print information about devices
1301  */
1302 static int read_proc(char *page, char **start, off_t off, int count,
1303 		     int *eof, void *data)
1304 {
1305 	int len = 0, l;
1306 	off_t	begin = 0;
1307 	struct slgt_info *info;
1308 
1309 	len += sprintf(page, "synclink_gt driver\n");
1310 
1311 	info = slgt_device_list;
1312 	while( info ) {
1313 		l = line_info(page + len, info);
1314 		len += l;
1315 		if (len+begin > off+count)
1316 			goto done;
1317 		if (len+begin < off) {
1318 			begin += len;
1319 			len = 0;
1320 		}
1321 		info = info->next_device;
1322 	}
1323 
1324 	*eof = 1;
1325 done:
1326 	if (off >= len+begin)
1327 		return 0;
1328 	*start = page + (off-begin);
1329 	return ((count < begin+len-off) ? count : begin+len-off);
1330 }
1331 
1332 /*
1333  * return count of bytes in transmit buffer
1334  */
1335 static int chars_in_buffer(struct tty_struct *tty)
1336 {
1337 	struct slgt_info *info = tty->driver_data;
1338 	int count;
1339 	if (sanity_check(info, tty->name, "chars_in_buffer"))
1340 		return 0;
1341 	count = tbuf_bytes(info);
1342 	DBGINFO(("%s chars_in_buffer()=%d\n", info->device_name, count));
1343 	return count;
1344 }
1345 
1346 /*
1347  * signal remote device to throttle send data (our receive data)
1348  */
1349 static void throttle(struct tty_struct * tty)
1350 {
1351 	struct slgt_info *info = tty->driver_data;
1352 	unsigned long flags;
1353 
1354 	if (sanity_check(info, tty->name, "throttle"))
1355 		return;
1356 	DBGINFO(("%s throttle\n", info->device_name));
1357 	if (I_IXOFF(tty))
1358 		send_xchar(tty, STOP_CHAR(tty));
1359  	if (tty->termios->c_cflag & CRTSCTS) {
1360 		spin_lock_irqsave(&info->lock,flags);
1361 		info->signals &= ~SerialSignal_RTS;
1362 	 	set_signals(info);
1363 		spin_unlock_irqrestore(&info->lock,flags);
1364 	}
1365 }
1366 
1367 /*
1368  * signal remote device to stop throttling send data (our receive data)
1369  */
1370 static void unthrottle(struct tty_struct * tty)
1371 {
1372 	struct slgt_info *info = tty->driver_data;
1373 	unsigned long flags;
1374 
1375 	if (sanity_check(info, tty->name, "unthrottle"))
1376 		return;
1377 	DBGINFO(("%s unthrottle\n", info->device_name));
1378 	if (I_IXOFF(tty)) {
1379 		if (info->x_char)
1380 			info->x_char = 0;
1381 		else
1382 			send_xchar(tty, START_CHAR(tty));
1383 	}
1384  	if (tty->termios->c_cflag & CRTSCTS) {
1385 		spin_lock_irqsave(&info->lock,flags);
1386 		info->signals |= SerialSignal_RTS;
1387 	 	set_signals(info);
1388 		spin_unlock_irqrestore(&info->lock,flags);
1389 	}
1390 }
1391 
1392 /*
1393  * set or clear transmit break condition
1394  * break_state	-1=set break condition, 0=clear
1395  */
1396 static int set_break(struct tty_struct *tty, int break_state)
1397 {
1398 	struct slgt_info *info = tty->driver_data;
1399 	unsigned short value;
1400 	unsigned long flags;
1401 
1402 	if (sanity_check(info, tty->name, "set_break"))
1403 		return -EINVAL;
1404 	DBGINFO(("%s set_break(%d)\n", info->device_name, break_state));
1405 
1406 	spin_lock_irqsave(&info->lock,flags);
1407 	value = rd_reg16(info, TCR);
1408  	if (break_state == -1)
1409 		value |= BIT6;
1410 	else
1411 		value &= ~BIT6;
1412 	wr_reg16(info, TCR, value);
1413 	spin_unlock_irqrestore(&info->lock,flags);
1414 	return 0;
1415 }
1416 
1417 #if SYNCLINK_GENERIC_HDLC
1418 
1419 /**
1420  * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
1421  * set encoding and frame check sequence (FCS) options
1422  *
1423  * dev       pointer to network device structure
1424  * encoding  serial encoding setting
1425  * parity    FCS setting
1426  *
1427  * returns 0 if success, otherwise error code
1428  */
1429 static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
1430 			  unsigned short parity)
1431 {
1432 	struct slgt_info *info = dev_to_port(dev);
1433 	unsigned char  new_encoding;
1434 	unsigned short new_crctype;
1435 
1436 	/* return error if TTY interface open */
1437 	if (info->port.count)
1438 		return -EBUSY;
1439 
1440 	DBGINFO(("%s hdlcdev_attach\n", info->device_name));
1441 
1442 	switch (encoding)
1443 	{
1444 	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
1445 	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
1446 	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
1447 	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
1448 	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
1449 	default: return -EINVAL;
1450 	}
1451 
1452 	switch (parity)
1453 	{
1454 	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
1455 	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
1456 	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
1457 	default: return -EINVAL;
1458 	}
1459 
1460 	info->params.encoding = new_encoding;
1461 	info->params.crc_type = new_crctype;
1462 
1463 	/* if network interface up, reprogram hardware */
1464 	if (info->netcount)
1465 		program_hw(info);
1466 
1467 	return 0;
1468 }
1469 
1470 /**
1471  * called by generic HDLC layer to send frame
1472  *
1473  * skb  socket buffer containing HDLC frame
1474  * dev  pointer to network device structure
1475  *
1476  * returns 0 if success, otherwise error code
1477  */
1478 static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
1479 {
1480 	struct slgt_info *info = dev_to_port(dev);
1481 	unsigned long flags;
1482 
1483 	DBGINFO(("%s hdlc_xmit\n", dev->name));
1484 
1485 	/* stop sending until this frame completes */
1486 	netif_stop_queue(dev);
1487 
1488 	/* copy data to device buffers */
1489 	info->tx_count = skb->len;
1490 	tx_load(info, skb->data, skb->len);
1491 
1492 	/* update network statistics */
1493 	dev->stats.tx_packets++;
1494 	dev->stats.tx_bytes += skb->len;
1495 
1496 	/* done with socket buffer, so free it */
1497 	dev_kfree_skb(skb);
1498 
1499 	/* save start time for transmit timeout detection */
1500 	dev->trans_start = jiffies;
1501 
1502 	/* start hardware transmitter if necessary */
1503 	spin_lock_irqsave(&info->lock,flags);
1504 	if (!info->tx_active)
1505 	 	tx_start(info);
1506 	spin_unlock_irqrestore(&info->lock,flags);
1507 
1508 	return 0;
1509 }
1510 
1511 /**
1512  * called by network layer when interface enabled
1513  * claim resources and initialize hardware
1514  *
1515  * dev  pointer to network device structure
1516  *
1517  * returns 0 if success, otherwise error code
1518  */
1519 static int hdlcdev_open(struct net_device *dev)
1520 {
1521 	struct slgt_info *info = dev_to_port(dev);
1522 	int rc;
1523 	unsigned long flags;
1524 
1525 	if (!try_module_get(THIS_MODULE))
1526 		return -EBUSY;
1527 
1528 	DBGINFO(("%s hdlcdev_open\n", dev->name));
1529 
1530 	/* generic HDLC layer open processing */
1531 	if ((rc = hdlc_open(dev)))
1532 		return rc;
1533 
1534 	/* arbitrate between network and tty opens */
1535 	spin_lock_irqsave(&info->netlock, flags);
1536 	if (info->port.count != 0 || info->netcount != 0) {
1537 		DBGINFO(("%s hdlc_open busy\n", dev->name));
1538 		spin_unlock_irqrestore(&info->netlock, flags);
1539 		return -EBUSY;
1540 	}
1541 	info->netcount=1;
1542 	spin_unlock_irqrestore(&info->netlock, flags);
1543 
1544 	/* claim resources and init adapter */
1545 	if ((rc = startup(info)) != 0) {
1546 		spin_lock_irqsave(&info->netlock, flags);
1547 		info->netcount=0;
1548 		spin_unlock_irqrestore(&info->netlock, flags);
1549 		return rc;
1550 	}
1551 
1552 	/* assert DTR and RTS, apply hardware settings */
1553 	info->signals |= SerialSignal_RTS + SerialSignal_DTR;
1554 	program_hw(info);
1555 
1556 	/* enable network layer transmit */
1557 	dev->trans_start = jiffies;
1558 	netif_start_queue(dev);
1559 
1560 	/* inform generic HDLC layer of current DCD status */
1561 	spin_lock_irqsave(&info->lock, flags);
1562 	get_signals(info);
1563 	spin_unlock_irqrestore(&info->lock, flags);
1564 	if (info->signals & SerialSignal_DCD)
1565 		netif_carrier_on(dev);
1566 	else
1567 		netif_carrier_off(dev);
1568 	return 0;
1569 }
1570 
1571 /**
1572  * called by network layer when interface is disabled
1573  * shutdown hardware and release resources
1574  *
1575  * dev  pointer to network device structure
1576  *
1577  * returns 0 if success, otherwise error code
1578  */
1579 static int hdlcdev_close(struct net_device *dev)
1580 {
1581 	struct slgt_info *info = dev_to_port(dev);
1582 	unsigned long flags;
1583 
1584 	DBGINFO(("%s hdlcdev_close\n", dev->name));
1585 
1586 	netif_stop_queue(dev);
1587 
1588 	/* shutdown adapter and release resources */
1589 	shutdown(info);
1590 
1591 	hdlc_close(dev);
1592 
1593 	spin_lock_irqsave(&info->netlock, flags);
1594 	info->netcount=0;
1595 	spin_unlock_irqrestore(&info->netlock, flags);
1596 
1597 	module_put(THIS_MODULE);
1598 	return 0;
1599 }
1600 
1601 /**
1602  * called by network layer to process IOCTL call to network device
1603  *
1604  * dev  pointer to network device structure
1605  * ifr  pointer to network interface request structure
1606  * cmd  IOCTL command code
1607  *
1608  * returns 0 if success, otherwise error code
1609  */
1610 static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1611 {
1612 	const size_t size = sizeof(sync_serial_settings);
1613 	sync_serial_settings new_line;
1614 	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
1615 	struct slgt_info *info = dev_to_port(dev);
1616 	unsigned int flags;
1617 
1618 	DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
1619 
1620 	/* return error if TTY interface open */
1621 	if (info->port.count)
1622 		return -EBUSY;
1623 
1624 	if (cmd != SIOCWANDEV)
1625 		return hdlc_ioctl(dev, ifr, cmd);
1626 
1627 	switch(ifr->ifr_settings.type) {
1628 	case IF_GET_IFACE: /* return current sync_serial_settings */
1629 
1630 		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
1631 		if (ifr->ifr_settings.size < size) {
1632 			ifr->ifr_settings.size = size; /* data size wanted */
1633 			return -ENOBUFS;
1634 		}
1635 
1636 		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1637 					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
1638 					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1639 					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
1640 
1641 		switch (flags){
1642 		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
1643 		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
1644 		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
1645 		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
1646 		default: new_line.clock_type = CLOCK_DEFAULT;
1647 		}
1648 
1649 		new_line.clock_rate = info->params.clock_speed;
1650 		new_line.loopback   = info->params.loopback ? 1:0;
1651 
1652 		if (copy_to_user(line, &new_line, size))
1653 			return -EFAULT;
1654 		return 0;
1655 
1656 	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
1657 
1658 		if(!capable(CAP_NET_ADMIN))
1659 			return -EPERM;
1660 		if (copy_from_user(&new_line, line, size))
1661 			return -EFAULT;
1662 
1663 		switch (new_line.clock_type)
1664 		{
1665 		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
1666 		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
1667 		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
1668 		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
1669 		case CLOCK_DEFAULT:  flags = info->params.flags &
1670 					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1671 					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
1672 					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1673 					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
1674 		default: return -EINVAL;
1675 		}
1676 
1677 		if (new_line.loopback != 0 && new_line.loopback != 1)
1678 			return -EINVAL;
1679 
1680 		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1681 					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
1682 					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1683 					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
1684 		info->params.flags |= flags;
1685 
1686 		info->params.loopback = new_line.loopback;
1687 
1688 		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
1689 			info->params.clock_speed = new_line.clock_rate;
1690 		else
1691 			info->params.clock_speed = 0;
1692 
1693 		/* if network interface up, reprogram hardware */
1694 		if (info->netcount)
1695 			program_hw(info);
1696 		return 0;
1697 
1698 	default:
1699 		return hdlc_ioctl(dev, ifr, cmd);
1700 	}
1701 }
1702 
1703 /**
1704  * called by network layer when transmit timeout is detected
1705  *
1706  * dev  pointer to network device structure
1707  */
1708 static void hdlcdev_tx_timeout(struct net_device *dev)
1709 {
1710 	struct slgt_info *info = dev_to_port(dev);
1711 	unsigned long flags;
1712 
1713 	DBGINFO(("%s hdlcdev_tx_timeout\n", dev->name));
1714 
1715 	dev->stats.tx_errors++;
1716 	dev->stats.tx_aborted_errors++;
1717 
1718 	spin_lock_irqsave(&info->lock,flags);
1719 	tx_stop(info);
1720 	spin_unlock_irqrestore(&info->lock,flags);
1721 
1722 	netif_wake_queue(dev);
1723 }
1724 
1725 /**
1726  * called by device driver when transmit completes
1727  * reenable network layer transmit if stopped
1728  *
1729  * info  pointer to device instance information
1730  */
1731 static void hdlcdev_tx_done(struct slgt_info *info)
1732 {
1733 	if (netif_queue_stopped(info->netdev))
1734 		netif_wake_queue(info->netdev);
1735 }
1736 
1737 /**
1738  * called by device driver when frame received
1739  * pass frame to network layer
1740  *
1741  * info  pointer to device instance information
1742  * buf   pointer to buffer containing frame data
1743  * size  count of data bytes in buf
1744  */
1745 static void hdlcdev_rx(struct slgt_info *info, char *buf, int size)
1746 {
1747 	struct sk_buff *skb = dev_alloc_skb(size);
1748 	struct net_device *dev = info->netdev;
1749 
1750 	DBGINFO(("%s hdlcdev_rx\n", dev->name));
1751 
1752 	if (skb == NULL) {
1753 		DBGERR(("%s: can't alloc skb, drop packet\n", dev->name));
1754 		dev->stats.rx_dropped++;
1755 		return;
1756 	}
1757 
1758 	memcpy(skb_put(skb, size), buf, size);
1759 
1760 	skb->protocol = hdlc_type_trans(skb, dev);
1761 
1762 	dev->stats.rx_packets++;
1763 	dev->stats.rx_bytes += size;
1764 
1765 	netif_rx(skb);
1766 
1767 	dev->last_rx = jiffies;
1768 }
1769 
1770 /**
1771  * called by device driver when adding device instance
1772  * do generic HDLC initialization
1773  *
1774  * info  pointer to device instance information
1775  *
1776  * returns 0 if success, otherwise error code
1777  */
1778 static int hdlcdev_init(struct slgt_info *info)
1779 {
1780 	int rc;
1781 	struct net_device *dev;
1782 	hdlc_device *hdlc;
1783 
1784 	/* allocate and initialize network and HDLC layer objects */
1785 
1786 	if (!(dev = alloc_hdlcdev(info))) {
1787 		printk(KERN_ERR "%s hdlc device alloc failure\n", info->device_name);
1788 		return -ENOMEM;
1789 	}
1790 
1791 	/* for network layer reporting purposes only */
1792 	dev->mem_start = info->phys_reg_addr;
1793 	dev->mem_end   = info->phys_reg_addr + SLGT_REG_SIZE - 1;
1794 	dev->irq       = info->irq_level;
1795 
1796 	/* network layer callbacks and settings */
1797 	dev->do_ioctl       = hdlcdev_ioctl;
1798 	dev->open           = hdlcdev_open;
1799 	dev->stop           = hdlcdev_close;
1800 	dev->tx_timeout     = hdlcdev_tx_timeout;
1801 	dev->watchdog_timeo = 10*HZ;
1802 	dev->tx_queue_len   = 50;
1803 
1804 	/* generic HDLC layer callbacks and settings */
1805 	hdlc         = dev_to_hdlc(dev);
1806 	hdlc->attach = hdlcdev_attach;
1807 	hdlc->xmit   = hdlcdev_xmit;
1808 
1809 	/* register objects with HDLC layer */
1810 	if ((rc = register_hdlc_device(dev))) {
1811 		printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
1812 		free_netdev(dev);
1813 		return rc;
1814 	}
1815 
1816 	info->netdev = dev;
1817 	return 0;
1818 }
1819 
1820 /**
1821  * called by device driver when removing device instance
1822  * do generic HDLC cleanup
1823  *
1824  * info  pointer to device instance information
1825  */
1826 static void hdlcdev_exit(struct slgt_info *info)
1827 {
1828 	unregister_hdlc_device(info->netdev);
1829 	free_netdev(info->netdev);
1830 	info->netdev = NULL;
1831 }
1832 
1833 #endif /* if SYNCLINK_GENERIC_HDLC */
1834 
1835 /*
1836  * get async data from rx DMA buffers
1837  */
1838 static void rx_async(struct slgt_info *info)
1839 {
1840  	struct tty_struct *tty = info->port.tty;
1841  	struct mgsl_icount *icount = &info->icount;
1842 	unsigned int start, end;
1843 	unsigned char *p;
1844 	unsigned char status;
1845 	struct slgt_desc *bufs = info->rbufs;
1846 	int i, count;
1847 	int chars = 0;
1848 	int stat;
1849 	unsigned char ch;
1850 
1851 	start = end = info->rbuf_current;
1852 
1853 	while(desc_complete(bufs[end])) {
1854 		count = desc_count(bufs[end]) - info->rbuf_index;
1855 		p     = bufs[end].buf + info->rbuf_index;
1856 
1857 		DBGISR(("%s rx_async count=%d\n", info->device_name, count));
1858 		DBGDATA(info, p, count, "rx");
1859 
1860 		for(i=0 ; i < count; i+=2, p+=2) {
1861 			ch = *p;
1862 			icount->rx++;
1863 
1864 			stat = 0;
1865 
1866 			if ((status = *(p+1) & (BIT1 + BIT0))) {
1867 				if (status & BIT1)
1868 					icount->parity++;
1869 				else if (status & BIT0)
1870 					icount->frame++;
1871 				/* discard char if tty control flags say so */
1872 				if (status & info->ignore_status_mask)
1873 					continue;
1874 				if (status & BIT1)
1875 					stat = TTY_PARITY;
1876 				else if (status & BIT0)
1877 					stat = TTY_FRAME;
1878 			}
1879 			if (tty) {
1880 				tty_insert_flip_char(tty, ch, stat);
1881 				chars++;
1882 			}
1883 		}
1884 
1885 		if (i < count) {
1886 			/* receive buffer not completed */
1887 			info->rbuf_index += i;
1888 			mod_timer(&info->rx_timer, jiffies + 1);
1889 			break;
1890 		}
1891 
1892 		info->rbuf_index = 0;
1893 		free_rbufs(info, end, end);
1894 
1895 		if (++end == info->rbuf_count)
1896 			end = 0;
1897 
1898 		/* if entire list searched then no frame available */
1899 		if (end == start)
1900 			break;
1901 	}
1902 
1903 	if (tty && chars)
1904 		tty_flip_buffer_push(tty);
1905 }
1906 
1907 /*
1908  * return next bottom half action to perform
1909  */
1910 static int bh_action(struct slgt_info *info)
1911 {
1912 	unsigned long flags;
1913 	int rc;
1914 
1915 	spin_lock_irqsave(&info->lock,flags);
1916 
1917 	if (info->pending_bh & BH_RECEIVE) {
1918 		info->pending_bh &= ~BH_RECEIVE;
1919 		rc = BH_RECEIVE;
1920 	} else if (info->pending_bh & BH_TRANSMIT) {
1921 		info->pending_bh &= ~BH_TRANSMIT;
1922 		rc = BH_TRANSMIT;
1923 	} else if (info->pending_bh & BH_STATUS) {
1924 		info->pending_bh &= ~BH_STATUS;
1925 		rc = BH_STATUS;
1926 	} else {
1927 		/* Mark BH routine as complete */
1928 		info->bh_running = false;
1929 		info->bh_requested = false;
1930 		rc = 0;
1931 	}
1932 
1933 	spin_unlock_irqrestore(&info->lock,flags);
1934 
1935 	return rc;
1936 }
1937 
1938 /*
1939  * perform bottom half processing
1940  */
1941 static void bh_handler(struct work_struct *work)
1942 {
1943 	struct slgt_info *info = container_of(work, struct slgt_info, task);
1944 	int action;
1945 
1946 	if (!info)
1947 		return;
1948 	info->bh_running = true;
1949 
1950 	while((action = bh_action(info))) {
1951 		switch (action) {
1952 		case BH_RECEIVE:
1953 			DBGBH(("%s bh receive\n", info->device_name));
1954 			switch(info->params.mode) {
1955 			case MGSL_MODE_ASYNC:
1956 				rx_async(info);
1957 				break;
1958 			case MGSL_MODE_HDLC:
1959 				while(rx_get_frame(info));
1960 				break;
1961 			case MGSL_MODE_RAW:
1962 			case MGSL_MODE_MONOSYNC:
1963 			case MGSL_MODE_BISYNC:
1964 				while(rx_get_buf(info));
1965 				break;
1966 			}
1967 			/* restart receiver if rx DMA buffers exhausted */
1968 			if (info->rx_restart)
1969 				rx_start(info);
1970 			break;
1971 		case BH_TRANSMIT:
1972 			bh_transmit(info);
1973 			break;
1974 		case BH_STATUS:
1975 			DBGBH(("%s bh status\n", info->device_name));
1976 			info->ri_chkcount = 0;
1977 			info->dsr_chkcount = 0;
1978 			info->dcd_chkcount = 0;
1979 			info->cts_chkcount = 0;
1980 			break;
1981 		default:
1982 			DBGBH(("%s unknown action\n", info->device_name));
1983 			break;
1984 		}
1985 	}
1986 	DBGBH(("%s bh_handler exit\n", info->device_name));
1987 }
1988 
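/*
 * bottom half transmit handler: wake up tty layer to supply more send data
 */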
1989 static void bh_transmit(struct slgt_info *info)
1990 {
1991 	struct tty_struct *tty = info->port.tty;
1992 
1993 	DBGBH(("%s bh_transmit\n", info->device_name));
1994 	if (tty)
1995 		tty_wakeup(tty);
1996 }
1997 
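/*
 * process DSR (data set ready) input status change
 */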
1998 static void dsr_change(struct slgt_info *info, unsigned short status)
1999 {
2000 	if (status & BIT3) {
2001 		info->signals |= SerialSignal_DSR;
2002 		info->input_signal_events.dsr_up++;
2003 	} else {
2004 		info->signals &= ~SerialSignal_DSR;
2005 		info->input_signal_events.dsr_down++;
2006 	}
2007 	DBGISR(("dsr_change %s signals=%04X\n", info->device_name, info->signals));
2008 	if ((info->dsr_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2009 		slgt_irq_off(info, IRQ_DSR);
2010 		return;
2011 	}
2012 	info->icount.dsr++;
2013 	wake_up_interruptible(&info->status_event_wait_q);
2014 	wake_up_interruptible(&info->event_wait_q);
2015 	info->pending_bh |= BH_STATUS;
2016 }
2017 
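/*
 * process CTS (clear to send) input status change and
 * handle CTS flow control if enabled
 */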
2018 static void cts_change(struct slgt_info *info, unsigned short status)
2019 {
2020 	if (status & BIT2) {
2021 		info->signals |= SerialSignal_CTS;
2022 		info->input_signal_events.cts_up++;
2023 	} else {
2024 		info->signals &= ~SerialSignal_CTS;
2025 		info->input_signal_events.cts_down++;
2026 	}
2027 	DBGISR(("cts_change %s signals=%04X\n", info->device_name, info->signals));
2028 	if ((info->cts_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2029 		slgt_irq_off(info, IRQ_CTS);
2030 		return;
2031 	}
2032 	info->icount.cts++;
2033 	wake_up_interruptible(&info->status_event_wait_q);
2034 	wake_up_interruptible(&info->event_wait_q);
2035 	info->pending_bh |= BH_STATUS;
2036 
2037 	if (info->port.flags & ASYNC_CTS_FLOW) {
2038 		if (info->port.tty) {
2039 			if (info->port.tty->hw_stopped) {
2040 				if (info->signals & SerialSignal_CTS) {
2041 		 			info->port.tty->hw_stopped = 0;
2042 					info->pending_bh |= BH_TRANSMIT;
2043 					return;
2044 				}
2045 			} else {
2046 				if (!(info->signals & SerialSignal_CTS))
2047 		 			info->port.tty->hw_stopped = 1;
2048 			}
2049 		}
2050 	}
2051 }
2052 
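/*
 * process DCD (data carrier detect) input status change and
 * update carrier state for tty and network interfaces
 */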
2053 static void dcd_change(struct slgt_info *info, unsigned short status)
2054 {
2055 	if (status & BIT1) {
2056 		info->signals |= SerialSignal_DCD;
2057 		info->input_signal_events.dcd_up++;
2058 	} else {
2059 		info->signals &= ~SerialSignal_DCD;
2060 		info->input_signal_events.dcd_down++;
2061 	}
2062 	DBGISR(("dcd_change %s signals=%04X\n", info->device_name, info->signals));
2063 	if ((info->dcd_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2064 		slgt_irq_off(info, IRQ_DCD);
2065 		return;
2066 	}
2067 	info->icount.dcd++;
2068 #if SYNCLINK_GENERIC_HDLC
2069 	if (info->netcount) {
2070 		if (info->signals & SerialSignal_DCD)
2071 			netif_carrier_on(info->netdev);
2072 		else
2073 			netif_carrier_off(info->netdev);
2074 	}
2075 #endif
2076 	wake_up_interruptible(&info->status_event_wait_q);
2077 	wake_up_interruptible(&info->event_wait_q);
2078 	info->pending_bh |= BH_STATUS;
2079 
2080 	if (info->port.flags & ASYNC_CHECK_CD) {
2081 		if (info->signals & SerialSignal_DCD)
2082 			wake_up_interruptible(&info->port.open_wait);
2083 		else {
2084 			if (info->port.tty)
2085 				tty_hangup(info->port.tty);
2086 		}
2087 	}
2088 }
2089 
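/*
 * process RI (ring indicator) input status change
 */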
2090 static void ri_change(struct slgt_info *info, unsigned short status)
2091 {
2092 	if (status & BIT0) {
2093 		info->signals |= SerialSignal_RI;
2094 		info->input_signal_events.ri_up++;
2095 	} else {
2096 		info->signals &= ~SerialSignal_RI;
2097 		info->input_signal_events.ri_down++;
2098 	}
2099 	DBGISR(("ri_change %s signals=%04X\n", info->device_name, info->signals));
2100 	if ((info->ri_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2101 		slgt_irq_off(info, IRQ_RI);
2102 		return;
2103 	}
2104 	info->icount.rng++;
2105 	wake_up_interruptible(&info->status_event_wait_q);
2106 	wake_up_interruptible(&info->event_wait_q);
2107 	info->pending_bh |= BH_STATUS;
2108 }
2109 
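/*
 * serial port interrupt handler: process pending SSR (serial status) events
 */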
2110 static void isr_serial(struct slgt_info *info)
2111 {
2112 	unsigned short status = rd_reg16(info, SSR);
2113 
2114 	DBGISR(("%s isr_serial status=%04X\n", info->device_name, status));
2115 
2116 	wr_reg16(info, SSR, status); /* clear pending */
2117 
2118 	info->irq_occurred = true;
2119 
2120 	if (info->params.mode == MGSL_MODE_ASYNC) {
2121 		if (status & IRQ_TXIDLE) {
2122 			if (info->tx_count)
2123 				isr_txeom(info, status);
2124 		}
2125 		if ((status & IRQ_RXBREAK) && (status & RXBREAK)) {
2126 			info->icount.brk++;
2127 			/* process break detection if tty control allows */
2128 			if (info->port.tty) {
2129 				if (!(status & info->ignore_status_mask)) {
2130 					if (info->read_status_mask & MASK_BREAK) {
2131 						tty_insert_flip_char(info->port.tty, 0, TTY_BREAK);
2132 						if (info->port.flags & ASYNC_SAK)
2133 							do_SAK(info->port.tty);
2134 					}
2135 				}
2136 			}
2137 		}
2138 	} else {
2139 		if (status & (IRQ_TXIDLE + IRQ_TXUNDER))
2140 			isr_txeom(info, status);
2141 
2142 		if (status & IRQ_RXIDLE) {
2143 			if (status & RXIDLE)
2144 				info->icount.rxidle++;
2145 			else
2146 				info->icount.exithunt++;
2147 			wake_up_interruptible(&info->event_wait_q);
2148 		}
2149 
2150 		if (status & IRQ_RXOVER)
2151 			rx_start(info);
2152 	}
2153 
2154 	if (status & IRQ_DSR)
2155 		dsr_change(info, status);
2156 	if (status & IRQ_CTS)
2157 		cts_change(info, status);
2158 	if (status & IRQ_DCD)
2159 		dcd_change(info, status);
2160 	if (status & IRQ_RI)
2161 		ri_change(info, status);
2162 }
2163 
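/*
 * receive DMA interrupt handler
 */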
2164 static void isr_rdma(struct slgt_info *info)
2165 {
2166 	unsigned int status = rd_reg32(info, RDCSR);
2167 
2168 	DBGISR(("%s isr_rdma status=%08x\n", info->device_name, status));
2169 
2170 	/* RDCSR (rx DMA control/status)
2171 	 *
2172 	 * 31..07  reserved
2173 	 * 06      save status byte to DMA buffer
2174 	 * 05      error
2175 	 * 04      eol (end of list)
2176 	 * 03      eob (end of buffer)
2177 	 * 02      IRQ enable
2178 	 * 01      reset
2179 	 * 00      enable
2180 	 */
2181 	wr_reg32(info, RDCSR, status);	/* clear pending */
2182 
2183 	if (status & (BIT5 + BIT4)) {
2184 		DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name));
2185 		info->rx_restart = true;
2186 	}
2187 	info->pending_bh |= BH_RECEIVE;
2188 }
2189 
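/*
 * transmit DMA interrupt handler
 */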
2190 static void isr_tdma(struct slgt_info *info)
2191 {
2192 	unsigned int status = rd_reg32(info, TDCSR);
2193 
2194 	DBGISR(("%s isr_tdma status=%08x\n", info->device_name, status));
2195 
2196 	/* TDCSR (tx DMA control/status)
2197 	 *
2198 	 * 31..06  reserved
2199 	 * 05      error
2200 	 * 04      eol (end of list)
2201 	 * 03      eob (end of buffer)
2202 	 * 02      IRQ enable
2203 	 * 01      reset
2204 	 * 00      enable
2205 	 */
2206 	wr_reg32(info, TDCSR, status);	/* clear pending */
2207 
2208 	if (status & (BIT5 + BIT4 + BIT3)) {
2209 		// another transmit buffer has completed
2210 		// run bottom half to get more send data from user
2211 		info->pending_bh |= BH_TRANSMIT;
2212 	}
2213 }
2214 
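/*
 * process end of transmit frame (idle or underrun) status
 */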
2215 static void isr_txeom(struct slgt_info *info, unsigned short status)
2216 {
2217 	DBGISR(("%s txeom status=%04x\n", info->device_name, status));
2218 
2219 	slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
2220 	tdma_reset(info);
2221 	reset_tbufs(info);
2222 	if (status & IRQ_TXUNDER) {
2223 		unsigned short val = rd_reg16(info, TCR);
2224 		wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
2225 		wr_reg16(info, TCR, val); /* clear reset bit */
2226 	}
2227 
2228 	if (info->tx_active) {
2229 		if (info->params.mode != MGSL_MODE_ASYNC) {
2230 			if (status & IRQ_TXUNDER)
2231 				info->icount.txunder++;
2232 			else if (status & IRQ_TXIDLE)
2233 				info->icount.txok++;
2234 		}
2235 
2236 		info->tx_active = false;
2237 		info->tx_count = 0;
2238 
2239 		del_timer(&info->tx_timer);
2240 
2241 		if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
2242 			info->signals &= ~SerialSignal_RTS;
2243 			info->drop_rts_on_tx_done = false;
2244 			set_signals(info);
2245 		}
2246 
2247 #if SYNCLINK_GENERIC_HDLC
2248 		if (info->netcount)
2249 			hdlcdev_tx_done(info);
2250 		else
2251 #endif
2252 		{
2253 			if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) {
2254 				tx_stop(info);
2255 				return;
2256 			}
2257 			info->pending_bh |= BH_TRANSMIT;
2258 		}
2259 	}
2260 }
2261 
2262 static void isr_gpio(struct slgt_info *info, unsigned int changed, unsigned int state)
2263 {
2264 	struct cond_wait *w, *prev;
2265 
2266 	/* wake processes waiting for specific transitions */
2267 	for (w = info->gpio_wait_q, prev = NULL ; w != NULL ; w = w->next) {
2268 		if (w->data & changed) {
2269 			w->data = state;
2270 			wake_up_interruptible(&w->q);
2271 			if (prev != NULL)
2272 				prev->next = w->next;
2273 			else
2274 				info->gpio_wait_q = w->next;
2275 		} else
2276 			prev = w;
2277 	}
2278 }
2279 
2280 /* interrupt service routine
2281  *
2282  * 	irq	interrupt number
2283  * 	dev_id	device ID supplied during interrupt registration
2284  */
2285 static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
2286 {
2287 	struct slgt_info *info = dev_id;
2288 	unsigned int gsr;
2289 	unsigned int i;
2290 
2291 	DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));
2292 
2293 	spin_lock(&info->lock);
2294 
2295 	while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
2296 		DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
2297 		info->irq_occurred = true;
2298 		for(i=0; i < info->port_count ; i++) {
2299 			if (info->port_array[i] == NULL)
2300 				continue;
2301 			if (gsr & (BIT8 << i))
2302 				isr_serial(info->port_array[i]);
2303 			if (gsr & (BIT16 << (i*2)))
2304 				isr_rdma(info->port_array[i]);
2305 			if (gsr & (BIT17 << (i*2)))
2306 				isr_tdma(info->port_array[i]);
2307 		}
2308 	}
2309 
2310 	if (info->gpio_present) {
2311 		unsigned int state;
2312 		unsigned int changed;
2313 		while ((changed = rd_reg32(info, IOSR)) != 0) {
2314 			DBGISR(("%s iosr=%08x\n", info->device_name, changed));
2315 			/* read latched state of GPIO signals */
2316 			state = rd_reg32(info, IOVR);
2317 			/* clear pending GPIO interrupt bits */
2318 			wr_reg32(info, IOSR, changed);
2319 			for (i=0 ; i < info->port_count ; i++) {
2320 				if (info->port_array[i] != NULL)
2321 					isr_gpio(info->port_array[i], changed, state);
2322 			}
2323 		}
2324 	}
2325 
2326 	for(i=0; i < info->port_count ; i++) {
2327 		struct slgt_info *port = info->port_array[i];
2328 
2329 		if (port && (port->port.count || port->netcount) &&
2330 		    port->pending_bh && !port->bh_running &&
2331 		    !port->bh_requested) {
2332 			DBGISR(("%s bh queued\n", port->device_name));
2333 			schedule_work(&port->task);
2334 			port->bh_requested = true;
2335 		}
2336 	}
2337 
2338 	spin_unlock(&info->lock);
2339 
2340 	DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
2341 	return IRQ_HANDLED;
2342 }
2343 
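/*
 * allocate resources and program hardware when device is first opened
 */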
2344 static int startup(struct slgt_info *info)
2345 {
2346 	DBGINFO(("%s startup\n", info->device_name));
2347 
2348 	if (info->port.flags & ASYNC_INITIALIZED)
2349 		return 0;
2350 
2351 	if (!info->tx_buf) {
2352 		info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
2353 		if (!info->tx_buf) {
2354 			DBGERR(("%s can't allocate tx buffer\n", info->device_name));
2355 			return -ENOMEM;
2356 		}
2357 	}
2358 
2359 	info->pending_bh = 0;
2360 
2361 	memset(&info->icount, 0, sizeof(info->icount));
2362 
2363 	/* program hardware for current parameters */
2364 	change_params(info);
2365 
2366 	if (info->port.tty)
2367 		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
2368 
2369 	info->port.flags |= ASYNC_INITIALIZED;
2370 
2371 	return 0;
2372 }
2373 
2374 /*
2375  *  called by close() and hangup() to shutdown hardware
2376  */
2377 static void shutdown(struct slgt_info *info)
2378 {
2379 	unsigned long flags;
2380 
2381 	if (!(info->port.flags & ASYNC_INITIALIZED))
2382 		return;
2383 
2384 	DBGINFO(("%s shutdown\n", info->device_name));
2385 
2386 	/* clear status wait queue because status changes */
2387 	/* can't happen after shutting down the hardware */
2388 	wake_up_interruptible(&info->status_event_wait_q);
2389 	wake_up_interruptible(&info->event_wait_q);
2390 
2391 	del_timer_sync(&info->tx_timer);
2392 	del_timer_sync(&info->rx_timer);
2393 
2394 	kfree(info->tx_buf);
2395 	info->tx_buf = NULL;
2396 
2397 	spin_lock_irqsave(&info->lock,flags);
2398 
2399 	tx_stop(info);
2400 	rx_stop(info);
2401 
2402 	slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
2403 
2404  	if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
2405  		info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
2406 		set_signals(info);
2407 	}
2408 
2409 	flush_cond_wait(&info->gpio_wait_q);
2410 
2411 	spin_unlock_irqrestore(&info->lock,flags);
2412 
2413 	if (info->port.tty)
2414 		set_bit(TTY_IO_ERROR, &info->port.tty->flags);
2415 
2416 	info->port.flags &= ~ASYNC_INITIALIZED;
2417 }
2418 
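/*
 * program hardware for current parameters and restart receiver if needed
 */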
2419 static void program_hw(struct slgt_info *info)
2420 {
2421 	unsigned long flags;
2422 
2423 	spin_lock_irqsave(&info->lock,flags);
2424 
2425 	rx_stop(info);
2426 	tx_stop(info);
2427 
2428 	if (info->params.mode != MGSL_MODE_ASYNC ||
2429 	    info->netcount)
2430 		sync_mode(info);
2431 	else
2432 		async_mode(info);
2433 
2434 	set_signals(info);
2435 
2436 	info->dcd_chkcount = 0;
2437 	info->cts_chkcount = 0;
2438 	info->ri_chkcount = 0;
2439 	info->dsr_chkcount = 0;
2440 
2441 	slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
2442 	get_signals(info);
2443 
2444 	if (info->netcount ||
2445 	    (info->port.tty && info->port.tty->termios->c_cflag & CREAD))
2446 		rx_start(info);
2447 
2448 	spin_unlock_irqrestore(&info->lock,flags);
2449 }
2450 
2451 /*
2452  * reconfigure adapter based on new parameters
2453  */
2454 static void change_params(struct slgt_info *info)
2455 {
2456 	unsigned cflag;
2457 	int bits_per_char;
2458 
2459 	if (!info->port.tty || !info->port.tty->termios)
2460 		return;
2461 	DBGINFO(("%s change_params\n", info->device_name));
2462 
2463 	cflag = info->port.tty->termios->c_cflag;
2464 
2465 	/* if B0 rate (hangup) specified then negate DTR and RTS */
2466 	/* otherwise assert DTR and RTS */
2467  	if (cflag & CBAUD)
2468 		info->signals |= SerialSignal_RTS + SerialSignal_DTR;
2469 	else
2470 		info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
2471 
2472 	/* byte size and parity */
2473 
2474 	switch (cflag & CSIZE) {
2475 	case CS5: info->params.data_bits = 5; break;
2476 	case CS6: info->params.data_bits = 6; break;
2477 	case CS7: info->params.data_bits = 7; break;
2478 	case CS8: info->params.data_bits = 8; break;
2479 	default:  info->params.data_bits = 7; break;
2480 	}
2481 
2482 	info->params.stop_bits = (cflag & CSTOPB) ? 2 : 1;
2483 
2484 	if (cflag & PARENB)
2485 		info->params.parity = (cflag & PARODD) ? ASYNC_PARITY_ODD : ASYNC_PARITY_EVEN;
2486 	else
2487 		info->params.parity = ASYNC_PARITY_NONE;
2488 
2489 	/* calculate number of jiffies to transmit a full
2490 	 * FIFO (32 bytes) at specified data rate
2491 	 */
2492 	bits_per_char = info->params.data_bits +
2493 			info->params.stop_bits + 1;
2494 
2495 	info->params.data_rate = tty_get_baud_rate(info->port.tty);
2496 
2497 	if (info->params.data_rate) {
2498 		info->timeout = (32*HZ*bits_per_char) /
2499 				info->params.data_rate;
2500 	}
2501 	info->timeout += HZ/50;		/* Add .02 seconds of slop */
2502 
2503 	if (cflag & CRTSCTS)
2504 		info->port.flags |= ASYNC_CTS_FLOW;
2505 	else
2506 		info->port.flags &= ~ASYNC_CTS_FLOW;
2507 
2508 	if (cflag & CLOCAL)
2509 		info->port.flags &= ~ASYNC_CHECK_CD;
2510 	else
2511 		info->port.flags |= ASYNC_CHECK_CD;
2512 
2513 	/* process tty input control flags */
2514 
2515 	info->read_status_mask = IRQ_RXOVER;
2516 	if (I_INPCK(info->port.tty))
2517 		info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
2518  	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
2519  		info->read_status_mask |= MASK_BREAK;
2520 	if (I_IGNPAR(info->port.tty))
2521 		info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
2522 	if (I_IGNBRK(info->port.tty)) {
2523 		info->ignore_status_mask |= MASK_BREAK;
2524 		/* If ignoring parity and break indicators, ignore
2525 		 * overruns too.  (For real raw support).
2526 		 */
2527 		if (I_IGNPAR(info->port.tty))
2528 			info->ignore_status_mask |= MASK_OVERRUN;
2529 	}
2530 
2531 	program_hw(info);
2532 }
2533 
2534 static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount)
2535 {
2536 	DBGINFO(("%s get_stats\n",  info->device_name));
2537 	if (!user_icount) {
2538 		memset(&info->icount, 0, sizeof(info->icount));
2539 	} else {
2540 		if (copy_to_user(user_icount, &info->icount, sizeof(struct mgsl_icount)))
2541 			return -EFAULT;
2542 	}
2543 	return 0;
2544 }
2545 
2546 static int get_params(struct slgt_info *info, MGSL_PARAMS __user *user_params)
2547 {
2548 	DBGINFO(("%s get_params\n", info->device_name));
2549 	if (copy_to_user(user_params, &info->params, sizeof(MGSL_PARAMS)))
2550 		return -EFAULT;
2551 	return 0;
2552 }
2553 
2554 static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params)
2555 {
2556  	unsigned long flags;
2557 	MGSL_PARAMS tmp_params;
2558 
2559 	DBGINFO(("%s set_params\n", info->device_name));
2560 	if (copy_from_user(&tmp_params, new_params, sizeof(MGSL_PARAMS)))
2561 		return -EFAULT;
2562 
2563 	spin_lock_irqsave(&info->lock, flags);
2564 	memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
2565 	spin_unlock_irqrestore(&info->lock, flags);
2566 
2567  	change_params(info);
2568 
2569 	return 0;
2570 }
2571 
2572 static int get_txidle(struct slgt_info *info, int __user *idle_mode)
2573 {
2574 	DBGINFO(("%s get_txidle=%d\n", info->device_name, info->idle_mode));
2575 	if (put_user(info->idle_mode, idle_mode))
2576 		return -EFAULT;
2577 	return 0;
2578 }
2579 
2580 static int set_txidle(struct slgt_info *info, int idle_mode)
2581 {
2582  	unsigned long flags;
2583 	DBGINFO(("%s set_txidle(%d)\n", info->device_name, idle_mode));
2584 	spin_lock_irqsave(&info->lock,flags);
2585 	info->idle_mode = idle_mode;
2586 	if (info->params.mode != MGSL_MODE_ASYNC)
2587 		tx_set_idle(info);
2588 	spin_unlock_irqrestore(&info->lock,flags);
2589 	return 0;
2590 }
2591 
2592 static int tx_enable(struct slgt_info *info, int enable)
2593 {
2594  	unsigned long flags;
2595 	DBGINFO(("%s tx_enable(%d)\n", info->device_name, enable));
2596 	spin_lock_irqsave(&info->lock,flags);
2597 	if (enable) {
2598 		if (!info->tx_enabled)
2599 			tx_start(info);
2600 	} else {
2601 		if (info->tx_enabled)
2602 			tx_stop(info);
2603 	}
2604 	spin_unlock_irqrestore(&info->lock,flags);
2605 	return 0;
2606 }
2607 
2608 /*
2609  * abort transmit HDLC frame
2610  */
2611 static int tx_abort(struct slgt_info *info)
2612 {
2613  	unsigned long flags;
2614 	DBGINFO(("%s tx_abort\n", info->device_name));
2615 	spin_lock_irqsave(&info->lock,flags);
2616 	tdma_reset(info);
2617 	spin_unlock_irqrestore(&info->lock,flags);
2618 	return 0;
2619 }
2620 
2621 static int rx_enable(struct slgt_info *info, int enable)
2622 {
2623  	unsigned long flags;
2624 	unsigned int rbuf_fill_level;
2625 	DBGINFO(("%s rx_enable(%08x)\n", info->device_name, enable));
2626 	spin_lock_irqsave(&info->lock,flags);
2627 	/*
2628 	 * enable[31..16] = receive DMA buffer fill level
2629 	 * 0 = noop (leave fill level unchanged)
2630 	 * fill level must be multiple of 4 and <= buffer size
2631 	 */
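	/* e.g. enable = 0x00400001 requests a 64 byte fill level and enables the receiver */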
2632 	rbuf_fill_level = ((unsigned int)enable) >> 16;
2633 	if (rbuf_fill_level) {
2634 		if ((rbuf_fill_level > DMABUFSIZE) || (rbuf_fill_level % 4)) {
2635 			spin_unlock_irqrestore(&info->lock, flags);
2636 			return -EINVAL;
2637 		}
2638 		info->rbuf_fill_level = rbuf_fill_level;
2639 		rx_stop(info); /* restart receiver to use new fill level */
2640 	}
2641 
2642 	/*
2643 	 * enable[1..0] = receiver enable command
2644 	 * 0 = disable
2645 	 * 1 = enable
2646 	 * 2 = enable or force hunt mode if already enabled
2647 	 */
2648 	enable &= 3;
2649 	if (enable) {
2650 		if (!info->rx_enabled)
2651 			rx_start(info);
2652 		else if (enable == 2) {
2653 			/* force hunt mode (write 1 to RCR[3]) */
2654 			wr_reg16(info, RCR, rd_reg16(info, RCR) | BIT3);
2655 		}
2656 	} else {
2657 		if (info->rx_enabled)
2658 			rx_stop(info);
2659 	}
2660 	spin_unlock_irqrestore(&info->lock,flags);
2661 	return 0;
2662 }
2663 
2664 /*
2665  *  wait for specified event to occur
2666  */
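/*
 * example: mask = MgslEvent_DcdActive + MgslEvent_DcdInactive
 * waits for the next DCD transition in either direction
 */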
2667 static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
2668 {
2669  	unsigned long flags;
2670 	int s;
2671 	int rc=0;
2672 	struct mgsl_icount cprev, cnow;
2673 	int events;
2674 	int mask;
2675 	struct	_input_signal_events oldsigs, newsigs;
2676 	DECLARE_WAITQUEUE(wait, current);
2677 
2678 	if (get_user(mask, mask_ptr))
2679 		return -EFAULT;
2680 
2681 	DBGINFO(("%s wait_mgsl_event(%d)\n", info->device_name, mask));
2682 
2683 	spin_lock_irqsave(&info->lock,flags);
2684 
2685 	/* return immediately if state matches requested events */
2686 	get_signals(info);
2687 	s = info->signals;
2688 
2689 	events = mask &
2690 		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2691  		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2692 		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2693 		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2694 	if (events) {
2695 		spin_unlock_irqrestore(&info->lock,flags);
2696 		goto exit;
2697 	}
2698 
2699 	/* save current irq counts */
2700 	cprev = info->icount;
2701 	oldsigs = info->input_signal_events;
2702 
2703 	/* enable hunt and idle irqs if needed */
2704 	if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) {
2705 		unsigned short val = rd_reg16(info, SCR);
2706 		if (!(val & IRQ_RXIDLE))
2707 			wr_reg16(info, SCR, (unsigned short)(val | IRQ_RXIDLE));
2708 	}
2709 
2710 	set_current_state(TASK_INTERRUPTIBLE);
2711 	add_wait_queue(&info->event_wait_q, &wait);
2712 
2713 	spin_unlock_irqrestore(&info->lock,flags);
2714 
2715 	for(;;) {
2716 		schedule();
2717 		if (signal_pending(current)) {
2718 			rc = -ERESTARTSYS;
2719 			break;
2720 		}
2721 
2722 		/* get current irq counts */
2723 		spin_lock_irqsave(&info->lock,flags);
2724 		cnow = info->icount;
2725 		newsigs = info->input_signal_events;
2726 		set_current_state(TASK_INTERRUPTIBLE);
2727 		spin_unlock_irqrestore(&info->lock,flags);
2728 
2729 		/* if no change, wait aborted for some reason */
2730 		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
2731 		    newsigs.dsr_down == oldsigs.dsr_down &&
2732 		    newsigs.dcd_up   == oldsigs.dcd_up   &&
2733 		    newsigs.dcd_down == oldsigs.dcd_down &&
2734 		    newsigs.cts_up   == oldsigs.cts_up   &&
2735 		    newsigs.cts_down == oldsigs.cts_down &&
2736 		    newsigs.ri_up    == oldsigs.ri_up    &&
2737 		    newsigs.ri_down  == oldsigs.ri_down  &&
2738 		    cnow.exithunt    == cprev.exithunt   &&
2739 		    cnow.rxidle      == cprev.rxidle) {
2740 			rc = -EIO;
2741 			break;
2742 		}
2743 
2744 		events = mask &
2745 			( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
2746 			  (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2747 			  (newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
2748 			  (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2749 			  (newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
2750 			  (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2751 			  (newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
2752 			  (newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
2753 			  (cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
2754 			  (cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
2755 		if (events)
2756 			break;
2757 
2758 		cprev = cnow;
2759 		oldsigs = newsigs;
2760 	}
2761 
2762 	remove_wait_queue(&info->event_wait_q, &wait);
2763 	set_current_state(TASK_RUNNING);
2764 
2765 
2766 	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2767 		spin_lock_irqsave(&info->lock,flags);
2768 		if (!waitqueue_active(&info->event_wait_q)) {
2769 			/* disable exit hunt mode/idle rcvd IRQs */
2770 			wr_reg16(info, SCR,
2771 				(unsigned short)(rd_reg16(info, SCR) & ~IRQ_RXIDLE));
2772 		}
2773 		spin_unlock_irqrestore(&info->lock,flags);
2774 	}
2775 exit:
2776 	if (rc == 0)
2777 		rc = put_user(events, mask_ptr);
2778 	return rc;
2779 }
2780 
2781 static int get_interface(struct slgt_info *info, int __user *if_mode)
2782 {
2783 	DBGINFO(("%s get_interface=%x\n", info->device_name, info->if_mode));
2784 	if (put_user(info->if_mode, if_mode))
2785 		return -EFAULT;
2786 	return 0;
2787 }
2788 
2789 static int set_interface(struct slgt_info *info, int if_mode)
2790 {
2791  	unsigned long flags;
2792 	unsigned short val;
2793 
2794 	DBGINFO(("%s set_interface=%x\n", info->device_name, if_mode));
2795 	spin_lock_irqsave(&info->lock,flags);
2796 	info->if_mode = if_mode;
2797 
2798 	msc_set_vcr(info);
2799 
2800 	/* TCR (tx control) 07  1=RTS driver control */
2801 	val = rd_reg16(info, TCR);
2802 	if (info->if_mode & MGSL_INTERFACE_RTS_EN)
2803 		val |= BIT7;
2804 	else
2805 		val &= ~BIT7;
2806 	wr_reg16(info, TCR, val);
2807 
2808 	spin_unlock_irqrestore(&info->lock,flags);
2809 	return 0;
2810 }
2811 
2812 /*
2813  * set general purpose IO pin state and direction
2814  *
2815  * user_gpio fields:
2816  * state   each bit indicates a pin state
2817  * smask   set bit indicates pin state to set
2818  * dir     each bit indicates a pin direction (0=input, 1=output)
2819  * dmask   set bit indicates pin direction to set
2820  */
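/*
 * example: dmask = dir = smask = state = BIT0 configures GPIO pin 0
 * as an output and drives it high
 */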
2821 static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2822 {
2823  	unsigned long flags;
2824 	struct gpio_desc gpio;
2825 	__u32 data;
2826 
2827 	if (!info->gpio_present)
2828 		return -EINVAL;
2829 	if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
2830 		return -EFAULT;
2831 	DBGINFO(("%s set_gpio state=%08x smask=%08x dir=%08x dmask=%08x\n",
2832 		 info->device_name, gpio.state, gpio.smask,
2833 		 gpio.dir, gpio.dmask));
2834 
2835 	spin_lock_irqsave(&info->lock,flags);
2836 	if (gpio.dmask) {
2837 		data = rd_reg32(info, IODR);
2838 		data |= gpio.dmask & gpio.dir;
2839 		data &= ~(gpio.dmask & ~gpio.dir);
2840 		wr_reg32(info, IODR, data);
2841 	}
2842 	if (gpio.smask) {
2843 		data = rd_reg32(info, IOVR);
2844 		data |= gpio.smask & gpio.state;
2845 		data &= ~(gpio.smask & ~gpio.state);
2846 		wr_reg32(info, IOVR, data);
2847 	}
2848 	spin_unlock_irqrestore(&info->lock,flags);
2849 
2850 	return 0;
2851 }
2852 
2853 /*
2854  * get general purpose IO pin state and direction
2855  */
2856 static int get_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2857 {
2858 	struct gpio_desc gpio;
2859 	if (!info->gpio_present)
2860 		return -EINVAL;
2861 	gpio.state = rd_reg32(info, IOVR);
2862 	gpio.smask = 0xffffffff;
2863 	gpio.dir   = rd_reg32(info, IODR);
2864 	gpio.dmask = 0xffffffff;
2865 	if (copy_to_user(user_gpio, &gpio, sizeof(gpio)))
2866 		return -EFAULT;
2867 	DBGINFO(("%s get_gpio state=%08x dir=%08x\n",
2868 		 info->device_name, gpio.state, gpio.dir));
2869 	return 0;
2870 }
2871 
2872 /*
2873  * conditional wait facility
2874  */
2875 static void init_cond_wait(struct cond_wait *w, unsigned int data)
2876 {
2877 	init_waitqueue_head(&w->q);
2878 	init_waitqueue_entry(&w->wait, current);
2879 	w->data = data;
2880 }
2881 
2882 static void add_cond_wait(struct cond_wait **head, struct cond_wait *w)
2883 {
2884 	set_current_state(TASK_INTERRUPTIBLE);
2885 	add_wait_queue(&w->q, &w->wait);
2886 	w->next = *head;
2887 	*head = w;
2888 }
2889 
2890 static void remove_cond_wait(struct cond_wait **head, struct cond_wait *cw)
2891 {
2892 	struct cond_wait *w, *prev;
2893 	remove_wait_queue(&cw->q, &cw->wait);
2894 	set_current_state(TASK_RUNNING);
2895 	for (w = *head, prev = NULL ; w != NULL ; prev = w, w = w->next) {
2896 		if (w == cw) {
2897 			if (prev != NULL)
2898 				prev->next = w->next;
2899 			else
2900 				*head = w->next;
2901 			break;
2902 		}
2903 	}
2904 }
2905 
2906 static void flush_cond_wait(struct cond_wait **head)
2907 {
2908 	while (*head != NULL) {
2909 		wake_up_interruptible(&(*head)->q);
2910 		*head = (*head)->next;
2911 	}
2912 }
2913 
2914 /*
2915  * wait for general purpose I/O pin(s) to enter specified state
2916  *
2917  * user_gpio fields:
2918  * state - bit indicates target pin state
2919  * smask - set bit indicates watched pin
2920  *
2921  * The wait ends when at least one watched pin enters the specified
2922  * state. When 0 (no error) is returned, user_gpio->state is set to the
2923  * state of all GPIO pins when the wait ends.
2924  *
2925  * Note: Each pin may be a dedicated input, dedicated output, or
2926  * configurable input/output. The number and configuration of pins
2927  * varies with the specific adapter model. Only input pins (dedicated
2928  * or configured) can be monitored with this function.
2929  */
2930 static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2931 {
2932  	unsigned long flags;
2933 	int rc = 0;
2934 	struct gpio_desc gpio;
2935 	struct cond_wait wait;
2936 	u32 state;
2937 
2938 	if (!info->gpio_present)
2939 		return -EINVAL;
2940 	if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
2941 		return -EFAULT;
2942 	DBGINFO(("%s wait_gpio() state=%08x smask=%08x\n",
2943 		 info->device_name, gpio.state, gpio.smask));
2944 	/* ignore output pins identified by set IODR bit */
2945 	if ((gpio.smask &= ~rd_reg32(info, IODR)) == 0)
2946 		return -EINVAL;
2947 	init_cond_wait(&wait, gpio.smask);
2948 
2949 	spin_lock_irqsave(&info->lock, flags);
2950 	/* enable interrupts for watched pins */
2951 	wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
2952 	/* get current pin states */
2953 	state = rd_reg32(info, IOVR);
2954 
2955 	if (gpio.smask & ~(state ^ gpio.state)) {
2956 		/* already in target state */
2957 		gpio.state = state;
2958 	} else {
2959 		/* wait for target state */
2960 		add_cond_wait(&info->gpio_wait_q, &wait);
2961 		spin_unlock_irqrestore(&info->lock, flags);
2962 		schedule();
2963 		if (signal_pending(current))
2964 			rc = -ERESTARTSYS;
2965 		else
2966 			gpio.state = wait.data;
2967 		spin_lock_irqsave(&info->lock, flags);
2968 		remove_cond_wait(&info->gpio_wait_q, &wait);
2969 	}
2970 
2971 	/* disable all GPIO interrupts if no waiting processes */
2972 	if (info->gpio_wait_q == NULL)
2973 		wr_reg32(info, IOER, 0);
2974 	spin_unlock_irqrestore(&info->lock,flags);
2975 
2976 	if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
2977 		rc = -EFAULT;
2978 	return rc;
2979 }
2980 
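/*
 * wait for a change in the caller specified modem control inputs
 */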
2981 static int modem_input_wait(struct slgt_info *info,int arg)
2982 {
2983  	unsigned long flags;
2984 	int rc;
2985 	struct mgsl_icount cprev, cnow;
2986 	DECLARE_WAITQUEUE(wait, current);
2987 
2988 	/* save current irq counts */
2989 	spin_lock_irqsave(&info->lock,flags);
2990 	cprev = info->icount;
2991 	add_wait_queue(&info->status_event_wait_q, &wait);
2992 	set_current_state(TASK_INTERRUPTIBLE);
2993 	spin_unlock_irqrestore(&info->lock,flags);
2994 
2995 	for(;;) {
2996 		schedule();
2997 		if (signal_pending(current)) {
2998 			rc = -ERESTARTSYS;
2999 			break;
3000 		}
3001 
3002 		/* get new irq counts */
3003 		spin_lock_irqsave(&info->lock,flags);
3004 		cnow = info->icount;
3005 		set_current_state(TASK_INTERRUPTIBLE);
3006 		spin_unlock_irqrestore(&info->lock,flags);
3007 
3008 		/* if no change, wait aborted for some reason */
3009 		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
3010 		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
3011 			rc = -EIO;
3012 			break;
3013 		}
3014 
3015 		/* check for change in caller specified modem input */
3016 		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
3017 		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
3018 		    (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
3019 		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
3020 			rc = 0;
3021 			break;
3022 		}
3023 
3024 		cprev = cnow;
3025 	}
3026 	remove_wait_queue(&info->status_event_wait_q, &wait);
3027 	set_current_state(TASK_RUNNING);
3028 	return rc;
3029 }
3030 
3031 /*
3032  *  return state of serial control and status signals
3033  */
3034 static int tiocmget(struct tty_struct *tty, struct file *file)
3035 {
3036 	struct slgt_info *info = tty->driver_data;
3037 	unsigned int result;
3038  	unsigned long flags;
3039 
3040 	spin_lock_irqsave(&info->lock,flags);
3041  	get_signals(info);
3042 	spin_unlock_irqrestore(&info->lock,flags);
3043 
3044 	result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
3045 		((info->signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
3046 		((info->signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
3047 		((info->signals & SerialSignal_RI)  ? TIOCM_RNG:0) +
3048 		((info->signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
3049 		((info->signals & SerialSignal_CTS) ? TIOCM_CTS:0);
3050 
3051 	DBGINFO(("%s tiocmget value=%08X\n", info->device_name, result));
3052 	return result;
3053 }
3054 
3055 /*
3056  * set modem control signals (DTR/RTS)
3057  *
3058  * 	cmd	signal command: TIOCMBIS = set bit TIOCMBIC = clear bit
3059  *		TIOCMSET = set/clear signal values
3060  * 	value	bit mask for command
3061  */
3062 static int tiocmset(struct tty_struct *tty, struct file *file,
3063 		    unsigned int set, unsigned int clear)
3064 {
3065 	struct slgt_info *info = tty->driver_data;
3066  	unsigned long flags;
3067 
3068 	DBGINFO(("%s tiocmset(%x,%x)\n", info->device_name, set, clear));
3069 
3070 	if (set & TIOCM_RTS)
3071 		info->signals |= SerialSignal_RTS;
3072 	if (set & TIOCM_DTR)
3073 		info->signals |= SerialSignal_DTR;
3074 	if (clear & TIOCM_RTS)
3075 		info->signals &= ~SerialSignal_RTS;
3076 	if (clear & TIOCM_DTR)
3077 		info->signals &= ~SerialSignal_DTR;
3078 
3079 	spin_lock_irqsave(&info->lock,flags);
3080  	set_signals(info);
3081 	spin_unlock_irqrestore(&info->lock,flags);
3082 	return 0;
3083 }
3084 
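/*
 * tty port operation: return 1 if DCD (carrier) is asserted, otherwise 0
 */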
3085 static int carrier_raised(struct tty_port *port)
3086 {
3087 	unsigned long flags;
3088 	struct slgt_info *info = container_of(port, struct slgt_info, port);
3089 
3090 	spin_lock_irqsave(&info->lock,flags);
3091  	get_signals(info);
3092 	spin_unlock_irqrestore(&info->lock,flags);
3093 	return (info->signals & SerialSignal_DCD) ? 1 : 0;
3094 }
3095 
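/*
 * tty port operation: assert DTR and RTS
 */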
3096 static void raise_dtr_rts(struct tty_port *port)
3097 {
3098 	unsigned long flags;
3099 	struct slgt_info *info = container_of(port, struct slgt_info, port);
3100 
3101 	spin_lock_irqsave(&info->lock,flags);
3102 	info->signals |= SerialSignal_RTS + SerialSignal_DTR;
3103  	set_signals(info);
3104 	spin_unlock_irqrestore(&info->lock,flags);
3105 }
3106 
3107 
3108 /*
3109  *  block current process until the device is ready to open
3110  */
3111 static int block_til_ready(struct tty_struct *tty, struct file *filp,
3112 			   struct slgt_info *info)
3113 {
3114 	DECLARE_WAITQUEUE(wait, current);
3115 	int		retval;
3116 	bool		do_clocal = false;
3117 	bool		extra_count = false;
3118 	unsigned long	flags;
3119 	int		cd;
3120 	struct tty_port *port = &info->port;
3121 
3122 	DBGINFO(("%s block_til_ready\n", tty->driver->name));
3123 
3124 	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3125 		/* nonblock mode is set or port is not enabled */
3126 		port->flags |= ASYNC_NORMAL_ACTIVE;
3127 		return 0;
3128 	}
3129 
3130 	if (tty->termios->c_cflag & CLOCAL)
3131 		do_clocal = true;
3132 
3133 	/* Wait for carrier detect and the line to become
3134 	 * free (i.e., not in use by the callout).  While we are in
3135 	 * this loop, port->count is dropped by one, so that
3136 	 * close() knows when to free things.  We restore it upon
3137 	 * exit, either normal or abnormal.
3138 	 */
3139 
3140 	retval = 0;
3141 	add_wait_queue(&port->open_wait, &wait);
3142 
3143 	spin_lock_irqsave(&info->lock, flags);
3144 	if (!tty_hung_up_p(filp)) {
3145 		extra_count = true;
3146 		port->count--;
3147 	}
3148 	spin_unlock_irqrestore(&info->lock, flags);
3149 	port->blocked_open++;
3150 
3151 	while (1) {
3152 		if ((tty->termios->c_cflag & CBAUD))
3153 			tty_port_raise_dtr_rts(port);
3154 
3155 		set_current_state(TASK_INTERRUPTIBLE);
3156 
3157 		if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
3158 			retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3159 					-EAGAIN : -ERESTARTSYS;
3160 			break;
3161 		}
3162 
3163 		cd = tty_port_carrier_raised(port);
3164 
3165  		if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd ))
3166  			break;
3167 
3168 		if (signal_pending(current)) {
3169 			retval = -ERESTARTSYS;
3170 			break;
3171 		}
3172 
3173 		DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
3174 		schedule();
3175 	}
3176 
3177 	set_current_state(TASK_RUNNING);
3178 	remove_wait_queue(&port->open_wait, &wait);
3179 
3180 	if (extra_count)
3181 		port->count++;
3182 	port->blocked_open--;
3183 
3184 	if (!retval)
3185 		port->flags |= ASYNC_NORMAL_ACTIVE;
3186 
3187 	DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval));
3188 	return retval;
3189 }
3190 
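/*
 * allocate temporary buffer used to assemble received frames
 */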
3191 static int alloc_tmp_rbuf(struct slgt_info *info)
3192 {
3193 	info->tmp_rbuf = kmalloc(info->max_frame_size + 5, GFP_KERNEL);
3194 	if (info->tmp_rbuf == NULL)
3195 		return -ENOMEM;
3196 	return 0;
3197 }
3198 
3199 static void free_tmp_rbuf(struct slgt_info *info)
3200 {
3201 	kfree(info->tmp_rbuf);
3202 	info->tmp_rbuf = NULL;
3203 }
3204 
3205 /*
3206  * allocate DMA descriptor lists.
3207  */
3208 static int alloc_desc(struct slgt_info *info)
3209 {
3210 	unsigned int i;
3211 	unsigned int pbufs;
3212 
3213 	/* allocate memory to hold descriptor lists */
3214 	info->bufs = pci_alloc_consistent(info->pdev, DESC_LIST_SIZE, &info->bufs_dma_addr);
3215 	if (info->bufs == NULL)
3216 		return -ENOMEM;
3217 
3218 	memset(info->bufs, 0, DESC_LIST_SIZE);
3219 
3220 	info->rbufs = (struct slgt_desc*)info->bufs;
3221 	info->tbufs = ((struct slgt_desc*)info->bufs) + info->rbuf_count;
3222 
3223 	pbufs = (unsigned int)info->bufs_dma_addr;
3224 
3225 	/*
3226 	 * Build circular lists of descriptors
3227 	 */
3228 
3229 	for (i=0; i < info->rbuf_count; i++) {
3230 		/* physical address of this descriptor */
3231 		info->rbufs[i].pdesc = pbufs + (i * sizeof(struct slgt_desc));
3232 
3233 		/* physical address of next descriptor */
3234 		if (i == info->rbuf_count - 1)
3235 			info->rbufs[i].next = cpu_to_le32(pbufs);
3236 		else
3237 			info->rbufs[i].next = cpu_to_le32(pbufs + ((i+1) * sizeof(struct slgt_desc)));
3238 		set_desc_count(info->rbufs[i], DMABUFSIZE);
3239 	}
3240 
3241 	for (i=0; i < info->tbuf_count; i++) {
3242 		/* physical address of this descriptor */
3243 		info->tbufs[i].pdesc = pbufs + ((info->rbuf_count + i) * sizeof(struct slgt_desc));
3244 
3245 		/* physical address of next descriptor */
3246 		if (i == info->tbuf_count - 1)
3247 			info->tbufs[i].next = cpu_to_le32(pbufs + info->rbuf_count * sizeof(struct slgt_desc));
3248 		else
3249 			info->tbufs[i].next = cpu_to_le32(pbufs + ((info->rbuf_count + i + 1) * sizeof(struct slgt_desc)));
3250 	}
3251 
3252 	return 0;
3253 }
3254 
3255 static void free_desc(struct slgt_info *info)
3256 {
3257 	if (info->bufs != NULL) {
3258 		pci_free_consistent(info->pdev, DESC_LIST_SIZE, info->bufs, info->bufs_dma_addr);
3259 		info->bufs  = NULL;
3260 		info->rbufs = NULL;
3261 		info->tbufs = NULL;
3262 	}
3263 }
3264 
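/*
 * allocate a DMA data buffer for each descriptor in a list
 */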
3265 static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
3266 {
3267 	int i;
3268 	for (i=0; i < count; i++) {
3269 		if ((bufs[i].buf = pci_alloc_consistent(info->pdev, DMABUFSIZE, &bufs[i].buf_dma_addr)) == NULL)
3270 			return -ENOMEM;
3271 		bufs[i].pbuf  = cpu_to_le32((unsigned int)bufs[i].buf_dma_addr);
3272 	}
3273 	return 0;
3274 }
3275 
3276 static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
3277 {
3278 	int i;
3279 	for (i=0; i < count; i++) {
3280 		if (bufs[i].buf == NULL)
3281 			continue;
3282 		pci_free_consistent(info->pdev, DMABUFSIZE, bufs[i].buf, bufs[i].buf_dma_addr);
3283 		bufs[i].buf = NULL;
3284 	}
3285 }
3286 
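/*
 * allocate descriptor lists and DMA buffers for a port
 */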
3287 static int alloc_dma_bufs(struct slgt_info *info)
3288 {
3289 	info->rbuf_count = 32;
3290 	info->tbuf_count = 32;
3291 
3292 	if (alloc_desc(info) < 0 ||
3293 	    alloc_bufs(info, info->rbufs, info->rbuf_count) < 0 ||
3294 	    alloc_bufs(info, info->tbufs, info->tbuf_count) < 0 ||
3295 	    alloc_tmp_rbuf(info) < 0) {
3296 		DBGERR(("%s DMA buffer alloc fail\n", info->device_name));
3297 		return -ENOMEM;
3298 	}
3299 	reset_rbufs(info);
3300 	return 0;
3301 }
3302 
3303 static void free_dma_bufs(struct slgt_info *info)
3304 {
3305 	if (info->bufs) {
3306 		free_bufs(info, info->rbufs, info->rbuf_count);
3307 		free_bufs(info, info->tbufs, info->tbuf_count);
3308 		free_desc(info);
3309 	}
3310 	free_tmp_rbuf(info);
3311 }
3312 
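/*
 * claim and map adapter register resources
 */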
3313 static int claim_resources(struct slgt_info *info)
3314 {
3315 	if (request_mem_region(info->phys_reg_addr, SLGT_REG_SIZE, "synclink_gt") == NULL) {
3316 		DBGERR(("%s reg addr conflict, addr=%08X\n",
3317 			info->device_name, info->phys_reg_addr));
3318 		info->init_error = DiagStatus_AddressConflict;
3319 		goto errout;
3320 	}
3321 	else
3322 		info->reg_addr_requested = true;
3323 
3324 	info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE);
3325 	if (!info->reg_addr) {
3326 		DBGERR(("%s cant map device registers, addr=%08X\n",
3327 			info->device_name, info->phys_reg_addr));
3328 		info->init_error = DiagStatus_CantAssignPciResources;
3329 		goto errout;
3330 	}
3331 	return 0;
3332 
3333 errout:
3334 	release_resources(info);
3335 	return -ENODEV;
3336 }
3337 
3338 static void release_resources(struct slgt_info *info)
3339 {
3340 	if (info->irq_requested) {
3341 		free_irq(info->irq_level, info);
3342 		info->irq_requested = false;
3343 	}
3344 
3345 	if (info->reg_addr_requested) {
3346 		release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE);
3347 		info->reg_addr_requested = false;
3348 	}
3349 
3350 	if (info->reg_addr) {
3351 		iounmap(info->reg_addr);
3352 		info->reg_addr = NULL;
3353 	}
3354 }
3355 
3356 /* Add the specified device instance data structure to the
3357  * global linked list of devices and increment the device count.
3358  */
3359 static void add_device(struct slgt_info *info)
3360 {
3361 	char *devstr;
3362 
3363 	info->next_device = NULL;
3364 	info->line = slgt_device_count;
3365 	sprintf(info->device_name, "%s%d", tty_dev_prefix, info->line);
3366 
3367 	if (info->line < MAX_DEVICES) {
3368 		if (maxframe[info->line])
3369 			info->max_frame_size = maxframe[info->line];
3370 	}
3371 
3372 	slgt_device_count++;
3373 
3374 	if (!slgt_device_list)
3375 		slgt_device_list = info;
3376 	else {
3377 		struct slgt_info *current_dev = slgt_device_list;
3378 		while(current_dev->next_device)
3379 			current_dev = current_dev->next_device;
3380 		current_dev->next_device = info;
3381 	}
3382 
3383 	if (info->max_frame_size < 4096)
3384 		info->max_frame_size = 4096;
3385 	else if (info->max_frame_size > 65535)
3386 		info->max_frame_size = 65535;
3387 
3388 	switch(info->pdev->device) {
3389 	case SYNCLINK_GT_DEVICE_ID:
3390 		devstr = "GT";
3391 		break;
3392 	case SYNCLINK_GT2_DEVICE_ID:
3393 		devstr = "GT2";
3394 		break;
3395 	case SYNCLINK_GT4_DEVICE_ID:
3396 		devstr = "GT4";
3397 		break;
3398 	case SYNCLINK_AC_DEVICE_ID:
3399 		devstr = "AC";
3400 		info->params.mode = MGSL_MODE_ASYNC;
3401 		break;
3402 	default:
3403 		devstr = "(unknown model)";
3404 	}
3405 	printk("SyncLink %s %s IO=%08x IRQ=%d MaxFrameSize=%u\n",
3406 		devstr, info->device_name, info->phys_reg_addr,
3407 		info->irq_level, info->max_frame_size);
3408 
3409 #if SYNCLINK_GENERIC_HDLC
3410 	hdlcdev_init(info);
3411 #endif
3412 }
3413 
3414 static const struct tty_port_operations slgt_port_ops = {
3415 	.carrier_raised = carrier_raised,
3416 	.raise_dtr_rts = raise_dtr_rts,
3417 };
3418 
3419 /*
3420  *  allocate device instance structure, return NULL on failure
3421  */
3422 static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
3423 {
3424 	struct slgt_info *info;
3425 
3426 	info = kzalloc(sizeof(struct slgt_info), GFP_KERNEL);
3427 
3428 	if (!info) {
3429 		DBGERR(("%s device alloc failed adapter=%d port=%d\n",
3430 			driver_name, adapter_num, port_num));
3431 	} else {
3432 		tty_port_init(&info->port);
3433 		info->port.ops = &slgt_port_ops;
3434 		info->magic = MGSL_MAGIC;
3435 		INIT_WORK(&info->task, bh_handler);
3436 		info->max_frame_size = 4096;
3437 		info->rbuf_fill_level = DMABUFSIZE;
3438 		info->port.close_delay = 5*HZ/10;
3439 		info->port.closing_wait = 30*HZ;
3440 		init_waitqueue_head(&info->status_event_wait_q);
3441 		init_waitqueue_head(&info->event_wait_q);
3442 		spin_lock_init(&info->netlock);
3443 		memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
3444 		info->idle_mode = HDLC_TXIDLE_FLAGS;
3445 		info->adapter_num = adapter_num;
3446 		info->port_num = port_num;
3447 
3448 		setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
3449 		setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info);
3450 
3451 		/* Copy configuration info to device instance data */
3452 		info->pdev = pdev;
3453 		info->irq_level = pdev->irq;
3454 		info->phys_reg_addr = pci_resource_start(pdev,0);
3455 
3456 		info->bus_type = MGSL_BUS_TYPE_PCI;
3457 		info->irq_flags = IRQF_SHARED;
3458 
3459 		info->init_error = -1; /* assume error, set to 0 on successful init */
3460 	}
3461 
3462 	return info;
3463 }
3464 
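/*
 * allocate and initialize device instances for all ports of an adapter
 */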
3465 static void device_init(int adapter_num, struct pci_dev *pdev)
3466 {
3467 	struct slgt_info *port_array[SLGT_MAX_PORTS];
3468 	int i;
3469 	int port_count = 1;
3470 
3471 	if (pdev->device == SYNCLINK_GT2_DEVICE_ID)
3472 		port_count = 2;
3473 	else if (pdev->device == SYNCLINK_GT4_DEVICE_ID)
3474 		port_count = 4;
3475 
3476 	/* allocate device instances for all ports */
3477 	for (i=0; i < port_count; ++i) {
3478 		port_array[i] = alloc_dev(adapter_num, i, pdev);
3479 		if (port_array[i] == NULL) {
3480 			for (--i; i >= 0; --i)
3481 				kfree(port_array[i]);
3482 			return;
3483 		}
3484 	}
3485 
3486 	/* give each port a copy of port_array and add it to the device list */
3487 	for (i=0; i < port_count; ++i) {
3488 		memcpy(port_array[i]->port_array, port_array, sizeof(port_array));
3489 		add_device(port_array[i]);
3490 		port_array[i]->port_count = port_count;
3491 		spin_lock_init(&port_array[i]->lock);
3492 	}
3493 
3494 	/* Allocate and claim adapter resources */
3495 	if (!claim_resources(port_array[0])) {
3496 
3497 		alloc_dma_bufs(port_array[0]);
3498 
3499 		/* copy resource information from first port to others */
3500 		for (i = 1; i < port_count; ++i) {
3501 			port_array[i]->lock      = port_array[0]->lock;
3502 			port_array[i]->irq_level = port_array[0]->irq_level;
3503 			port_array[i]->reg_addr  = port_array[0]->reg_addr;
3504 			alloc_dma_bufs(port_array[i]);
3505 		}
3506 
3507 		if (request_irq(port_array[0]->irq_level,
3508 					slgt_interrupt,
3509 					port_array[0]->irq_flags,
3510 					port_array[0]->device_name,
3511 					port_array[0]) < 0) {
3512 			DBGERR(("%s request_irq failed IRQ=%d\n",
3513 				port_array[0]->device_name,
3514 				port_array[0]->irq_level));
3515 		} else {
3516 			port_array[0]->irq_requested = true;
3517 			adapter_test(port_array[0]);
3518 			for (i=1 ; i < port_count ; i++) {
3519 				port_array[i]->init_error = port_array[0]->init_error;
3520 				port_array[i]->gpio_present = port_array[0]->gpio_present;
3521 			}
3522 		}
3523 	}
3524 
3525 	for (i=0; i < port_count; ++i)
3526 		tty_register_device(serial_driver, port_array[i]->line, &(port_array[i]->pdev->dev));
3527 }
3528 
3529 static int __devinit init_one(struct pci_dev *dev,
3530 			      const struct pci_device_id *ent)
3531 {
3532 	if (pci_enable_device(dev)) {
3533 		printk("error enabling pci device %p\n", dev);
3534 		return -EIO;
3535 	}
3536 	pci_set_master(dev);
3537 	device_init(slgt_device_count, dev);
3538 	return 0;
3539 }
3540 
3541 static void __devexit remove_one(struct pci_dev *dev)
3542 {
3543 }
3544 
3545 static const struct tty_operations ops = {
3546 	.open = open,
3547 	.close = close,
3548 	.write = write,
3549 	.put_char = put_char,
3550 	.flush_chars = flush_chars,
3551 	.write_room = write_room,
3552 	.chars_in_buffer = chars_in_buffer,
3553 	.flush_buffer = flush_buffer,
3554 	.ioctl = ioctl,
3555 	.compat_ioctl = slgt_compat_ioctl,
3556 	.throttle = throttle,
3557 	.unthrottle = unthrottle,
3558 	.send_xchar = send_xchar,
3559 	.break_ctl = set_break,
3560 	.wait_until_sent = wait_until_sent,
3561  	.read_proc = read_proc,
3562 	.set_termios = set_termios,
3563 	.stop = tx_hold,
3564 	.start = tx_release,
3565 	.hangup = hangup,
3566 	.tiocmget = tiocmget,
3567 	.tiocmset = tiocmset,
3568 };
3569 
3570 static void slgt_cleanup(void)
3571 {
3572 	int rc;
3573 	struct slgt_info *info;
3574 	struct slgt_info *tmp;
3575 
3576 	printk(KERN_INFO "unload %s\n", driver_name);
3577 
3578 	if (serial_driver) {
3579 		for (info=slgt_device_list ; info != NULL ; info=info->next_device)
3580 			tty_unregister_device(serial_driver, info->line);
3581 		if ((rc = tty_unregister_driver(serial_driver)))
3582 			DBGERR(("tty_unregister_driver error=%d\n", rc));
3583 		put_tty_driver(serial_driver);
3584 	}
3585 
3586 	/* reset devices */
3587 	info = slgt_device_list;
3588 	while(info) {
3589 		reset_port(info);
3590 		info = info->next_device;
3591 	}
3592 
3593 	/* release devices */
3594 	info = slgt_device_list;
3595 	while(info) {
3596 #if SYNCLINK_GENERIC_HDLC
3597 		hdlcdev_exit(info);
3598 #endif
3599 		free_dma_bufs(info);
3600 		free_tmp_rbuf(info);
3601 		if (info->port_num == 0)
3602 			release_resources(info);
3603 		tmp = info;
3604 		info = info->next_device;
3605 		kfree(tmp);
3606 	}
3607 
3608 	if (pci_registered)
3609 		pci_unregister_driver(&pci_driver);
3610 }
3611 
3612 /*
3613  *  Driver initialization entry point.
3614  */
3615 static int __init slgt_init(void)
3616 {
3617 	int rc;
3618 
3619 	printk(KERN_INFO "%s\n", driver_name);
3620 
3621 	serial_driver = alloc_tty_driver(MAX_DEVICES);
3622 	if (!serial_driver) {
3623 		printk("%s can't allocate tty driver\n", driver_name);
3624 		return -ENOMEM;
3625 	}
3626 
3627 	/* Initialize the tty_driver structure */
3628 
3629 	serial_driver->owner = THIS_MODULE;
3630 	serial_driver->driver_name = tty_driver_name;
3631 	serial_driver->name = tty_dev_prefix;
3632 	serial_driver->major = ttymajor;
3633 	serial_driver->minor_start = 64;
3634 	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
3635 	serial_driver->subtype = SERIAL_TYPE_NORMAL;
3636 	serial_driver->init_termios = tty_std_termios;
3637 	serial_driver->init_termios.c_cflag =
3638 		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
3639 	serial_driver->init_termios.c_ispeed = 9600;
3640 	serial_driver->init_termios.c_ospeed = 9600;
3641 	serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
3642 	tty_set_operations(serial_driver, &ops);
3643 	if ((rc = tty_register_driver(serial_driver)) < 0) {
3644 		DBGERR(("%s can't register serial driver\n", driver_name));
3645 		put_tty_driver(serial_driver);
3646 		serial_driver = NULL;
3647 		goto error;
3648 	}
3649 
3650 	printk(KERN_INFO "%s, tty major#%d\n",
3651 	       driver_name, serial_driver->major);
3652 
3653 	slgt_device_count = 0;
3654 	if ((rc = pci_register_driver(&pci_driver)) < 0) {
3655 		printk("%s pci_register_driver error=%d\n", driver_name, rc);
3656 		goto error;
3657 	}
3658 	pci_registered = true;
3659 
3660 	if (!slgt_device_list)
3661 		printk("%s no devices found\n",driver_name);
3662 
3663 	return 0;
3664 
3665 error:
3666 	slgt_cleanup();
3667 	return rc;
3668 }
3669 
3670 static void __exit slgt_exit(void)
3671 {
3672 	slgt_cleanup();
3673 }
3674 
3675 module_init(slgt_init);
3676 module_exit(slgt_exit);
3677 
3678 /*
3679  * register access routines
3680  */
3681 
3682 #define CALC_REGADDR() \
3683 	unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
3684 	if (addr >= 0x80) \
3685 		reg_addr += (info->port_num) * 32;
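/*
 * Illustrative note (an assumption drawn from CALC_REGADDR above, not from
 * the original sources): offsets of 0x80 and above appear to be banked per
 * port in 32-byte strides, so on port_num=2 an access to addr 0x8c resolves
 * to info->reg_addr + 0x8c + (2 * 32) = info->reg_addr + 0xcc, while an
 * access below 0x80 uses the offset unchanged.
 */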
3686 
3687 static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
3688 {
3689 	CALC_REGADDR();
3690 	return readb((void __iomem *)reg_addr);
3691 }
3692 
3693 static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value)
3694 {
3695 	CALC_REGADDR();
3696 	writeb(value, (void __iomem *)reg_addr);
3697 }
3698 
3699 static __u16 rd_reg16(struct slgt_info *info, unsigned int addr)
3700 {
3701 	CALC_REGADDR();
3702 	return readw((void __iomem *)reg_addr);
3703 }
3704 
3705 static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value)
3706 {
3707 	CALC_REGADDR();
3708 	writew(value, (void __iomem *)reg_addr);
3709 }
3710 
3711 static __u32 rd_reg32(struct slgt_info *info, unsigned int addr)
3712 {
3713 	CALC_REGADDR();
3714 	return readl((void __iomem *)reg_addr);
3715 }
3716 
3717 static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value)
3718 {
3719 	CALC_REGADDR();
3720 	writel(value, (void __iomem *)reg_addr);
3721 }
3722 
3723 static void rdma_reset(struct slgt_info *info)
3724 {
3725 	unsigned int i;
3726 
3727 	/* set reset bit */
3728 	wr_reg32(info, RDCSR, BIT1);
3729 
3730 	/* wait for enable bit cleared */
3731 	for(i=0 ; i < 1000 ; i++)
3732 		if (!(rd_reg32(info, RDCSR) & BIT0))
3733 			break;
3734 }
3735 
3736 static void tdma_reset(struct slgt_info *info)
3737 {
3738 	unsigned int i;
3739 
3740 	/* set reset bit */
3741 	wr_reg32(info, TDCSR, BIT1);
3742 
3743 	/* wait for enable bit cleared */
3744 	for(i=0 ; i < 1000 ; i++)
3745 		if (!(rd_reg32(info, TDCSR) & BIT0))
3746 			break;
3747 }
3748 
3749 /*
3750  * enable internal loopback
3751  * TxCLK and RxCLK are generated from BRG
3752  * and TxD is looped back to RxD internally.
3753  */
3754 static void enable_loopback(struct slgt_info *info)
3755 {
3756 	/* SCR (serial control) BIT2=loopback enable */
3757 	wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT2));
3758 
3759 	if (info->params.mode != MGSL_MODE_ASYNC) {
3760 		/* CCR (clock control)
3761 		 * 07..05  tx clock source (010 = BRG)
3762 		 * 04..02  rx clock source (010 = BRG)
3763 		 * 01      auxclk enable   (0 = disable)
3764 		 * 00      BRG enable      (1 = enable)
3765 		 *
3766 		 * 0100 1001
3767 		 */
3768 		wr_reg8(info, CCR, 0x49);
3769 
3770 		/* set speed if available, otherwise use default */
3771 		if (info->params.clock_speed)
3772 			set_rate(info, info->params.clock_speed);
3773 		else
3774 			set_rate(info, 3686400);
3775 	}
3776 }
3777 
3778 /*
3779  *  set baud rate generator to specified rate
3780  */
3781 static void set_rate(struct slgt_info *info, u32 rate)
3782 {
3783 	unsigned int div;
3784 	static unsigned int osc = 14745600;
3785 
3786 	/* div = osc/rate - 1
3787 	 *
3788 	 * Round div up if osc/rate is not integer to
3789 	 * force to next slowest rate.
3790 	 */
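	/* Worked example (illustrative): with osc = 14745600,
	 *   rate = 9600   -> osc/rate = 1536 exactly, so div = 1535
	 *   rate = 250000 -> osc/rate truncates to 58, div stays 58 and the
	 *                    generated rate is 14745600/59 ~= 249925 (next slowest)
	 */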
3791 
3792 	if (rate) {
3793 		div = osc/rate;
3794 		if (!(osc % rate) && div)
3795 			div--;
3796 		wr_reg16(info, BDR, (unsigned short)div);
3797 	}
3798 }
3799 
3800 static void rx_stop(struct slgt_info *info)
3801 {
3802 	unsigned short val;
3803 
3804 	/* disable and reset receiver */
3805 	val = rd_reg16(info, RCR) & ~BIT1;          /* clear enable bit */
3806 	wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
3807 	wr_reg16(info, RCR, val);                  /* clear reset bit */
3808 
3809 	slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA + IRQ_RXIDLE);
3810 
3811 	/* clear pending rx interrupts */
3812 	wr_reg16(info, SSR, IRQ_RXIDLE + IRQ_RXOVER);
3813 
3814 	rdma_reset(info);
3815 
3816 	info->rx_enabled = false;
3817 	info->rx_restart = false;
3818 }
3819 
3820 static void rx_start(struct slgt_info *info)
3821 {
3822 	unsigned short val;
3823 
3824 	slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA);
3825 
3826 	/* clear pending rx overrun IRQ */
3827 	wr_reg16(info, SSR, IRQ_RXOVER);
3828 
3829 	/* reset and disable receiver */
3830 	val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
3831 	wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
3832 	wr_reg16(info, RCR, val);                  /* clear reset bit */
3833 
3834 	rdma_reset(info);
3835 	reset_rbufs(info);
3836 
3837 	/* set 1st descriptor address */
3838 	wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
3839 
3840 	if (info->params.mode != MGSL_MODE_ASYNC) {
3841 		/* enable rx DMA and DMA interrupt */
3842 		wr_reg32(info, RDCSR, (BIT2 + BIT0));
3843 	} else {
3844 		/* enable saving of rx status, rx DMA and DMA interrupt */
3845 		wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
3846 	}
3847 
3848 	slgt_irq_on(info, IRQ_RXOVER);
3849 
3850 	/* enable receiver */
3851 	wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1));
3852 
3853 	info->rx_restart = false;
3854 	info->rx_enabled = true;
3855 }
3856 
3857 static void tx_start(struct slgt_info *info)
3858 {
3859 	if (!info->tx_enabled) {
3860 		wr_reg16(info, TCR,
3861 			 (unsigned short)((rd_reg16(info, TCR) | BIT1) & ~BIT2));
3862 		info->tx_enabled = true;
3863 	}
3864 
3865 	if (info->tx_count) {
3866 		info->drop_rts_on_tx_done = false;
3867 
3868 		if (info->params.mode != MGSL_MODE_ASYNC) {
3869 			if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
3870 				get_signals(info);
3871 				if (!(info->signals & SerialSignal_RTS)) {
3872 					info->signals |= SerialSignal_RTS;
3873 					set_signals(info);
3874 					info->drop_rts_on_tx_done = true;
3875 				}
3876 			}
3877 
3878 			slgt_irq_off(info, IRQ_TXDATA);
3879 			slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
3880 			/* clear tx idle and underrun status bits */
3881 			wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
3882 			if (info->params.mode == MGSL_MODE_HDLC)
3883 				mod_timer(&info->tx_timer, jiffies +
3884 						msecs_to_jiffies(5000));
3885 		} else {
3886 			slgt_irq_off(info, IRQ_TXDATA);
3887 			slgt_irq_on(info, IRQ_TXIDLE);
3888 			/* clear tx idle status bit */
3889 			wr_reg16(info, SSR, IRQ_TXIDLE);
3890 		}
3891 		tdma_start(info);
3892 		info->tx_active = true;
3893 	}
3894 }
3895 
3896 /*
3897  * start transmit DMA if inactive and there are unsent buffers
3898  */
3899 static void tdma_start(struct slgt_info *info)
3900 {
3901 	unsigned int i;
3902 
3903 	if (rd_reg32(info, TDCSR) & BIT0)
3904 		return;
3905 
3906 	/* transmit DMA inactive, check for unsent buffers */
3907 	i = info->tbuf_start;
3908 	while (!desc_count(info->tbufs[i])) {
3909 		if (++i == info->tbuf_count)
3910 			i = 0;
3911 		if (i == info->tbuf_current)
3912 			return;
3913 	}
3914 	info->tbuf_start = i;
3915 
3916 	/* there are unsent buffers, start transmit DMA */
3917 
3918 	/* reset needed if previous error condition */
3919 	tdma_reset(info);
3920 
3921 	/* set 1st descriptor address */
3922 	wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
3923 	wr_reg32(info, TDCSR, BIT2 + BIT0); /* IRQ + DMA enable */
3924 }
3925 
3926 static void tx_stop(struct slgt_info *info)
3927 {
3928 	unsigned short val;
3929 
3930 	del_timer(&info->tx_timer);
3931 
3932 	tdma_reset(info);
3933 
3934 	/* reset and disable transmitter */
3935 	val = rd_reg16(info, TCR) & ~BIT1;          /* clear enable bit */
3936 	wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
3937 
3938 	slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
3939 
3940 	/* clear tx idle and underrun status bit */
3941 	wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
3942 
3943 	reset_tbufs(info);
3944 
3945 	info->tx_enabled = false;
3946 	info->tx_active = false;
3947 }
3948 
3949 static void reset_port(struct slgt_info *info)
3950 {
3951 	if (!info->reg_addr)
3952 		return;
3953 
3954 	tx_stop(info);
3955 	rx_stop(info);
3956 
3957 	info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
3958 	set_signals(info);
3959 
3960 	slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
3961 }
3962 
3963 static void reset_adapter(struct slgt_info *info)
3964 {
3965 	int i;
3966 	for (i=0; i < info->port_count; ++i) {
3967 		if (info->port_array[i])
3968 			reset_port(info->port_array[i]);
3969 	}
3970 }
3971 
3972 static void async_mode(struct slgt_info *info)
3973 {
3974   	unsigned short val;
3975 
3976 	slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
3977 	tx_stop(info);
3978 	rx_stop(info);
3979 
3980 	/* TCR (tx control)
3981 	 *
3982 	 * 15..13  mode, 010=async
3983 	 * 12..10  encoding, 000=NRZ
3984 	 * 09      parity enable
3985 	 * 08      1=odd parity, 0=even parity
3986 	 * 07      1=RTS driver control
3987 	 * 06      1=break enable
3988 	 * 05..04  character length
3989 	 *         00=5 bits
3990 	 *         01=6 bits
3991 	 *         10=7 bits
3992 	 *         11=8 bits
3993 	 * 03      0=1 stop bit, 1=2 stop bits
3994 	 * 02      reset
3995 	 * 01      enable
3996 	 * 00      auto-CTS enable
3997 	 */
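	/* Example (illustrative): 8 data bits, no parity, one stop bit and no
	 * RTS/CTS options leave val = 0x4000 | BIT5 | BIT4 = 0x4030; the
	 * transmitter enable bit (BIT1) is set later by tx_start().
	 */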
3998 	val = 0x4000;
3999 
4000 	if (info->if_mode & MGSL_INTERFACE_RTS_EN)
4001 		val |= BIT7;
4002 
4003 	if (info->params.parity != ASYNC_PARITY_NONE) {
4004 		val |= BIT9;
4005 		if (info->params.parity == ASYNC_PARITY_ODD)
4006 			val |= BIT8;
4007 	}
4008 
4009 	switch (info->params.data_bits)
4010 	{
4011 	case 6: val |= BIT4; break;
4012 	case 7: val |= BIT5; break;
4013 	case 8: val |= BIT5 + BIT4; break;
4014 	}
4015 
4016 	if (info->params.stop_bits != 1)
4017 		val |= BIT3;
4018 
4019 	if (info->params.flags & HDLC_FLAG_AUTO_CTS)
4020 		val |= BIT0;
4021 
4022 	wr_reg16(info, TCR, val);
4023 
4024 	/* RCR (rx control)
4025 	 *
4026 	 * 15..13  mode, 010=async
4027 	 * 12..10  encoding, 000=NRZ
4028 	 * 09      parity enable
4029 	 * 08      1=odd parity, 0=even parity
4030 	 * 07..06  reserved, must be 0
4031 	 * 05..04  character length
4032 	 *         00=5 bits
4033 	 *         01=6 bits
4034 	 *         10=7 bits
4035 	 *         11=8 bits
4036 	 * 03      reserved, must be zero
4037 	 * 02      reset
4038 	 * 01      enable
4039 	 * 00      auto-DCD enable
4040 	 */
4041 	val = 0x4000;
4042 
4043 	if (info->params.parity != ASYNC_PARITY_NONE) {
4044 		val |= BIT9;
4045 		if (info->params.parity == ASYNC_PARITY_ODD)
4046 			val |= BIT8;
4047 	}
4048 
4049 	switch (info->params.data_bits)
4050 	{
4051 	case 6: val |= BIT4; break;
4052 	case 7: val |= BIT5; break;
4053 	case 8: val |= BIT5 + BIT4; break;
4054 	}
4055 
4056 	if (info->params.flags & HDLC_FLAG_AUTO_DCD)
4057 		val |= BIT0;
4058 
4059 	wr_reg16(info, RCR, val);
4060 
4061 	/* CCR (clock control)
4062 	 *
4063 	 * 07..05  011 = tx clock source is BRG/16
4064 	 * 04..02  010 = rx clock source is BRG
4065 	 * 01      0 = auxclk disabled
4066 	 * 00      1 = BRG enabled
4067 	 *
4068 	 * 0110 1001
4069 	 */
4070 	wr_reg8(info, CCR, 0x69);
4071 
4072 	msc_set_vcr(info);
4073 
4074 	/* SCR (serial control)
4075 	 *
4076 	 * 15  1=tx req on FIFO half empty
4077 	 * 14  1=rx req on FIFO half full
4078 	 * 13  tx data  IRQ enable
4079 	 * 12  tx idle  IRQ enable
4080 	 * 11  rx break on IRQ enable
4081 	 * 10  rx data  IRQ enable
4082 	 * 09  rx break off IRQ enable
4083 	 * 08  overrun  IRQ enable
4084 	 * 07  DSR      IRQ enable
4085 	 * 06  CTS      IRQ enable
4086 	 * 05  DCD      IRQ enable
4087 	 * 04  RI       IRQ enable
4088 	 * 03  reserved, must be zero
4089 	 * 02  1=txd->rxd internal loopback enable
4090 	 * 01  reserved, must be zero
4091 	 * 00  1=master IRQ enable
4092 	 */
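	/* Example (illustrative): BIT15 + BIT14 + BIT0 = 0xc001 requests FIFO
	 * service at the half-empty/half-full levels and enables the master
	 * IRQ; individual IRQ enables are set separately via slgt_irq_on().
	 */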
4093 	val = BIT15 + BIT14 + BIT0;
4094 	wr_reg16(info, SCR, val);
4095 
4096 	slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER);
4097 
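	/* Example (illustrative): async data is clocked at 16X, so a 9600 bps
	 * port programs the BRG to 9600 * 16 = 153600 Hz and the CCR tx clock
	 * source of BRG/16 yields the 9600 Hz transmit clock.
	 */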
4098 	set_rate(info, info->params.data_rate * 16);
4099 
4100 	if (info->params.loopback)
4101 		enable_loopback(info);
4102 }
4103 
4104 static void sync_mode(struct slgt_info *info)
4105 {
4106 	unsigned short val;
4107 
4108 	slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
4109 	tx_stop(info);
4110 	rx_stop(info);
4111 
4112 	/* TCR (tx control)
4113 	 *
4114 	 * 15..13  mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
4115 	 * 12..10  encoding
4116 	 * 09      CRC enable
4117 	 * 08      CRC32
4118 	 * 07      1=RTS driver control
4119 	 * 06      preamble enable
4120 	 * 05..04  preamble length
4121 	 * 03      share open/close flag
4122 	 * 02      reset
4123 	 * 01      enable
4124 	 * 00      auto-CTS enable
4125 	 */
4126 	val = BIT2;
4127 
4128 	switch(info->params.mode) {
4129 	case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
4130 	case MGSL_MODE_BISYNC:   val |= BIT15; break;
4131 	case MGSL_MODE_RAW:      val |= BIT13; break;
4132 	}
4133 	if (info->if_mode & MGSL_INTERFACE_RTS_EN)
4134 		val |= BIT7;
4135 
4136 	switch(info->params.encoding)
4137 	{
4138 	case HDLC_ENCODING_NRZB:          val |= BIT10; break;
4139 	case HDLC_ENCODING_NRZI_MARK:     val |= BIT11; break;
4140 	case HDLC_ENCODING_NRZI:          val |= BIT11 + BIT10; break;
4141 	case HDLC_ENCODING_BIPHASE_MARK:  val |= BIT12; break;
4142 	case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
4143 	case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
4144 	case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
4145 	}
4146 
4147 	switch (info->params.crc_type & HDLC_CRC_MASK)
4148 	{
4149 	case HDLC_CRC_16_CCITT: val |= BIT9; break;
4150 	case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
4151 	}
4152 
4153 	if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
4154 		val |= BIT6;
4155 
4156 	switch (info->params.preamble_length)
4157 	{
4158 	case HDLC_PREAMBLE_LENGTH_16BITS: val |= BIT5; break;
4159 	case HDLC_PREAMBLE_LENGTH_32BITS: val |= BIT4; break;
4160 	case HDLC_PREAMBLE_LENGTH_64BITS: val |= BIT5 + BIT4; break;
4161 	}
4162 
4163 	if (info->params.flags & HDLC_FLAG_AUTO_CTS)
4164 		val |= BIT0;
4165 
4166 	wr_reg16(info, TCR, val);
4167 
4168 	/* TPR (transmit preamble) */
4169 
4170 	switch (info->params.preamble)
4171 	{
4172 	case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break;
4173 	case HDLC_PREAMBLE_PATTERN_ONES:  val = 0xff; break;
4174 	case HDLC_PREAMBLE_PATTERN_ZEROS: val = 0x00; break;
4175 	case HDLC_PREAMBLE_PATTERN_10:    val = 0x55; break;
4176 	case HDLC_PREAMBLE_PATTERN_01:    val = 0xaa; break;
4177 	default:                          val = 0x7e; break;
4178 	}
4179 	wr_reg8(info, TPR, (unsigned char)val);
4180 
4181 	/* RCR (rx control)
4182 	 *
4183 	 * 15..13  mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
4184 	 * 12..10  encoding
4185 	 * 09      CRC enable
4186 	 * 08      CRC32
4187 	 * 07..03  reserved, must be 0
4188 	 * 02      reset
4189 	 * 01      enable
4190 	 * 00      auto-DCD enable
4191 	 */
4192 	val = 0;
4193 
4194 	switch(info->params.mode) {
4195 	case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
4196 	case MGSL_MODE_BISYNC:   val |= BIT15; break;
4197 	case MGSL_MODE_RAW:      val |= BIT13; break;
4198 	}
4199 
4200 	switch(info->params.encoding)
4201 	{
4202 	case HDLC_ENCODING_NRZB:          val |= BIT10; break;
4203 	case HDLC_ENCODING_NRZI_MARK:     val |= BIT11; break;
4204 	case HDLC_ENCODING_NRZI:          val |= BIT11 + BIT10; break;
4205 	case HDLC_ENCODING_BIPHASE_MARK:  val |= BIT12; break;
4206 	case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
4207 	case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
4208 	case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
4209 	}
4210 
4211 	switch (info->params.crc_type & HDLC_CRC_MASK)
4212 	{
4213 	case HDLC_CRC_16_CCITT: val |= BIT9; break;
4214 	case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
4215 	}
4216 
4217 	if (info->params.flags & HDLC_FLAG_AUTO_DCD)
4218 		val |= BIT0;
4219 
4220 	wr_reg16(info, RCR, val);
4221 
4222 	/* CCR (clock control)
4223 	 *
4224 	 * 07..05  tx clock source
4225 	 * 04..02  rx clock source
4226 	 * 01      auxclk enable
4227 	 * 00      BRG enable
4228 	 */
4229 	val = 0;
4230 
4231 	if (info->params.flags & HDLC_FLAG_TXC_BRG)
4232 	{
4233 		// when RxC source is DPLL, BRG generates 16X DPLL
4234 		// reference clock, so take TxC from BRG/16 to get
4235 		// transmit clock at actual data rate
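		// Example (illustrative): with clock_speed = 38400 and
		// HDLC_FLAG_RXC_DPLL set, set_rate() below programs the BRG to
		// 38400 * 16 = 614400 Hz as the DPLL reference, and taking TxC
		// from BRG/16 recovers a 38400 Hz transmit clock.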
4236 		if (info->params.flags & HDLC_FLAG_RXC_DPLL)
4237 			val |= BIT6 + BIT5;	/* 011, txclk = BRG/16 */
4238 		else
4239 			val |= BIT6;	/* 010, txclk = BRG */
4240 	}
4241 	else if (info->params.flags & HDLC_FLAG_TXC_DPLL)
4242 		val |= BIT7;	/* 100, txclk = DPLL Input */
4243 	else if (info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4244 		val |= BIT5;	/* 001, txclk = RXC Input */
4245 
4246 	if (info->params.flags & HDLC_FLAG_RXC_BRG)
4247 		val |= BIT3;	/* 010, rxclk = BRG */
4248 	else if (info->params.flags & HDLC_FLAG_RXC_DPLL)
4249 		val |= BIT4;	/* 100, rxclk = DPLL */
4250 	else if (info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4251 		val |= BIT2;	/* 001, rxclk = TXC Input */
4252 
4253 	if (info->params.clock_speed)
4254 		val |= BIT1 + BIT0;
4255 
4256 	wr_reg8(info, CCR, (unsigned char)val);
4257 
4258 	if (info->params.flags & (HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL))
4259 	{
4260 		// program DPLL mode
4261 		switch(info->params.encoding)
4262 		{
4263 		case HDLC_ENCODING_BIPHASE_MARK:
4264 		case HDLC_ENCODING_BIPHASE_SPACE:
4265 			val = BIT7; break;
4266 		case HDLC_ENCODING_BIPHASE_LEVEL:
4267 		case HDLC_ENCODING_DIFF_BIPHASE_LEVEL:
4268 			val = BIT7 + BIT6; break;
4269 		default: val = BIT6;	// NRZ encodings
4270 		}
4271 		wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | val));
4272 
4273 		// DPLL requires a 16X reference clock from BRG
4274 		set_rate(info, info->params.clock_speed * 16);
4275 	}
4276 	else
4277 		set_rate(info, info->params.clock_speed);
4278 
4279 	tx_set_idle(info);
4280 
4281 	msc_set_vcr(info);
4282 
4283 	/* SCR (serial control)
4284 	 *
4285 	 * 15  1=tx req on FIFO half empty
4286 	 * 14  1=rx req on FIFO half full
4287 	 * 13  tx data  IRQ enable
4288 	 * 12  tx idle  IRQ enable
4289 	 * 11  underrun IRQ enable
4290 	 * 10  rx data  IRQ enable
4291 	 * 09  rx idle  IRQ enable
4292 	 * 08  overrun  IRQ enable
4293 	 * 07  DSR      IRQ enable
4294 	 * 06  CTS      IRQ enable
4295 	 * 05  DCD      IRQ enable
4296 	 * 04  RI       IRQ enable
4297 	 * 03  reserved, must be zero
4298 	 * 02  1=txd->rxd internal loopback enable
4299 	 * 01  reserved, must be zero
4300 	 * 00  1=master IRQ enable
4301 	 */
4302 	wr_reg16(info, SCR, BIT15 + BIT14 + BIT0);
4303 
4304 	if (info->params.loopback)
4305 		enable_loopback(info);
4306 }
4307 
4308 /*
4309  *  set transmit idle mode
4310  */
4311 static void tx_set_idle(struct slgt_info *info)
4312 {
4313 	unsigned char val;
4314 	unsigned short tcr;
4315 
4316 	/* if preamble enabled (tcr[6] == 1) then tx idle size = 8 bits
4317 	 * else tcr[5:4] = tx idle size: 00 = 8 bits, 01 = 16 bits
4318 	 */
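	/* Example (illustrative, assuming the HDLC_TXIDLE_CUSTOM_16 flag bit
	 * sits above the 16-bit pattern as in synclink.h): for
	 * idle_mode = HDLC_TXIDLE_CUSTOM_16 | 0x7ee7 the code below writes the
	 * pattern MSB (0x7e) to TPR and the LSB (0xe7) to TIR.
	 */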
4319 	tcr = rd_reg16(info, TCR);
4320 	if (info->idle_mode & HDLC_TXIDLE_CUSTOM_16) {
4321 		/* disable preamble, set idle size to 16 bits */
4322 		tcr = (tcr & ~(BIT6 + BIT5)) | BIT4;
4323 		/* MSB of 16 bit idle specified in tx preamble register (TPR) */
4324 		wr_reg8(info, TPR, (unsigned char)((info->idle_mode >> 8) & 0xff));
4325 	} else if (!(tcr & BIT6)) {
4326 		/* preamble is disabled, set idle size to 8 bits */
4327 		tcr &= ~(BIT5 + BIT4);
4328 	}
4329 	wr_reg16(info, TCR, tcr);
4330 
4331 	if (info->idle_mode & (HDLC_TXIDLE_CUSTOM_8 | HDLC_TXIDLE_CUSTOM_16)) {
4332 		/* LSB of custom tx idle specified in tx idle register */
4333 		val = (unsigned char)(info->idle_mode & 0xff);
4334 	} else {
4335 		/* standard 8 bit idle patterns */
4336 		switch(info->idle_mode)
4337 		{
4338 		case HDLC_TXIDLE_FLAGS:          val = 0x7e; break;
4339 		case HDLC_TXIDLE_ALT_ZEROS_ONES:
4340 		case HDLC_TXIDLE_ALT_MARK_SPACE: val = 0xaa; break;
4341 		case HDLC_TXIDLE_ZEROS:
4342 		case HDLC_TXIDLE_SPACE:          val = 0x00; break;
4343 		default:                         val = 0xff;
4344 		}
4345 	}
4346 
4347 	wr_reg8(info, TIR, val);
4348 }
4349 
4350 /*
4351  * get state of V24 status (input) signals
4352  */
4353 static void get_signals(struct slgt_info *info)
4354 {
4355 	unsigned short status = rd_reg16(info, SSR);
4356 
4357 	/* clear all serial signals except DTR and RTS */
4358 	info->signals &= SerialSignal_DTR + SerialSignal_RTS;
4359 
4360 	if (status & BIT3)
4361 		info->signals |= SerialSignal_DSR;
4362 	if (status & BIT2)
4363 		info->signals |= SerialSignal_CTS;
4364 	if (status & BIT1)
4365 		info->signals |= SerialSignal_DCD;
4366 	if (status & BIT0)
4367 		info->signals |= SerialSignal_RI;
4368 }
4369 
4370 /*
4371  * set V.24 Control Register based on current configuration
4372  */
4373 static void msc_set_vcr(struct slgt_info *info)
4374 {
4375 	unsigned char val = 0;
4376 
4377 	/* VCR (V.24 control)
4378 	 *
4379 	 * 07..04  serial IF select
4380 	 * 03      DTR
4381 	 * 02      RTS
4382 	 * 01      LL
4383 	 * 00      RL
4384 	 */
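	/* Example (illustrative): an RS-232 interface with DTR and RTS asserted
	 * and neither LL nor RL requested yields val = BIT5 | BIT3 | BIT2 = 0x2c.
	 */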
4385 
4386 	switch(info->if_mode & MGSL_INTERFACE_MASK)
4387 	{
4388 	case MGSL_INTERFACE_RS232:
4389 		val |= BIT5; /* 0010 */
4390 		break;
4391 	case MGSL_INTERFACE_V35:
4392 		val |= BIT7 + BIT6 + BIT5; /* 1110 */
4393 		break;
4394 	case MGSL_INTERFACE_RS422:
4395 		val |= BIT6; /* 0100 */
4396 		break;
4397 	}
4398 
4399 	if (info->if_mode & MGSL_INTERFACE_MSB_FIRST)
4400 		val |= BIT4;
4401 	if (info->signals & SerialSignal_DTR)
4402 		val |= BIT3;
4403 	if (info->signals & SerialSignal_RTS)
4404 		val |= BIT2;
4405 	if (info->if_mode & MGSL_INTERFACE_LL)
4406 		val |= BIT1;
4407 	if (info->if_mode & MGSL_INTERFACE_RL)
4408 		val |= BIT0;
4409 	wr_reg8(info, VCR, val);
4410 }
4411 
4412 /*
4413  * set state of V24 control (output) signals
4414  */
4415 static void set_signals(struct slgt_info *info)
4416 {
4417 	unsigned char val = rd_reg8(info, VCR);
4418 	if (info->signals & SerialSignal_DTR)
4419 		val |= BIT3;
4420 	else
4421 		val &= ~BIT3;
4422 	if (info->signals & SerialSignal_RTS)
4423 		val |= BIT2;
4424 	else
4425 		val &= ~BIT2;
4426 	wr_reg8(info, VCR, val);
4427 }
4428 
4429 /*
4430  * free range of receive DMA buffers (i to last)
4431  */
4432 static void free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last)
4433 {
4434 	int done = 0;
4435 
4436 	while(!done) {
4437 		/* reset current buffer for reuse */
4438 		info->rbufs[i].status = 0;
4439 		set_desc_count(info->rbufs[i], info->rbuf_fill_level);
4440 		if (i == last)
4441 			done = 1;
4442 		if (++i == info->rbuf_count)
4443 			i = 0;
4444 	}
4445 	info->rbuf_current = i;
4446 }
4447 
4448 /*
4449  * mark all receive DMA buffers as free
4450  */
4451 static void reset_rbufs(struct slgt_info *info)
4452 {
4453 	free_rbufs(info, 0, info->rbuf_count - 1);
4454 }
4455 
4456 /*
4457  * pass receive HDLC frame to upper layer
4458  *
4459  * return true if frame available, otherwise false
4460  */
4461 static bool rx_get_frame(struct slgt_info *info)
4462 {
4463 	unsigned int start, end;
4464 	unsigned short status;
4465 	unsigned int framesize = 0;
4466 	unsigned long flags;
4467 	struct tty_struct *tty = info->port.tty;
4468 	unsigned char addr_field = 0xff;
4469 	unsigned int crc_size = 0;
4470 
4471 	switch (info->params.crc_type & HDLC_CRC_MASK) {
4472 	case HDLC_CRC_16_CCITT: crc_size = 2; break;
4473 	case HDLC_CRC_32_CCITT: crc_size = 4; break;
4474 	}
4475 
4476 check_again:
4477 
4478 	framesize = 0;
4479 	addr_field = 0xff;
4480 	start = end = info->rbuf_current;
4481 
4482 	for (;;) {
4483 		if (!desc_complete(info->rbufs[end]))
4484 			goto cleanup;
4485 
4486 		if (framesize == 0 && info->params.addr_filter != 0xff)
4487 			addr_field = info->rbufs[end].buf[0];
4488 
4489 		framesize += desc_count(info->rbufs[end]);
4490 
4491 		if (desc_eof(info->rbufs[end]))
4492 			break;
4493 
4494 		if (++end == info->rbuf_count)
4495 			end = 0;
4496 
4497 		if (end == info->rbuf_current) {
4498 			if (info->rx_enabled){
4499 				spin_lock_irqsave(&info->lock,flags);
4500 				rx_start(info);
4501 				spin_unlock_irqrestore(&info->lock,flags);
4502 			}
4503 			goto cleanup;
4504 		}
4505 	}
4506 
4507 	/* status
4508 	 *
4509 	 * 15      buffer complete
4510 	 * 14..06  reserved
4511 	 * 05..04  residue
4512 	 * 02      eof (end of frame)
4513 	 * 01      CRC error
4514 	 * 00      abort
4515 	 */
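	/* Example (illustrative): status = 0x8004 marks a completed buffer that
	 * ends a frame (eof) with no CRC error or abort indication.
	 */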
4516 	status = desc_status(info->rbufs[end]);
4517 
4518 	/* ignore CRC bit if not using CRC (bit is undefined) */
4519 	if ((info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_NONE)
4520 		status &= ~BIT1;
4521 
4522 	if (framesize == 0 ||
4523 		 (addr_field != 0xff && addr_field != info->params.addr_filter)) {
4524 		free_rbufs(info, start, end);
4525 		goto check_again;
4526 	}
4527 
4528 	if (framesize < (2 + crc_size) || status & BIT0) {
4529 		info->icount.rxshort++;
4530 		framesize = 0;
4531 	} else if (status & BIT1) {
4532 		info->icount.rxcrc++;
4533 		if (!(info->params.crc_type & HDLC_CRC_RETURN_EX))
4534 			framesize = 0;
4535 	}
4536 
4537 #if SYNCLINK_GENERIC_HDLC
4538 	if (framesize == 0) {
4539 		info->netdev->stats.rx_errors++;
4540 		info->netdev->stats.rx_frame_errors++;
4541 	}
4542 #endif
4543 
4544 	DBGBH(("%s rx frame status=%04X size=%d\n",
4545 		info->device_name, status, framesize));
4546 	DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, info->rbuf_fill_level), "rx");
4547 
4548 	if (framesize) {
4549 		if (!(info->params.crc_type & HDLC_CRC_RETURN_EX)) {
4550 			framesize -= crc_size;
4551 			crc_size = 0;
4552 		}
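		/* Example (illustrative): a frame received with a 16-bit CRC as
		 * framesize = 106 is passed up as 104 data bytes here; with
		 * HDLC_CRC_RETURN_EX set, all 106 bytes plus one RX_OK or
		 * RX_CRC_ERROR status byte are delivered instead.
		 */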
4553 
4554 		if (framesize > info->max_frame_size + crc_size)
4555 			info->icount.rxlong++;
4556 		else {
4557 			/* copy dma buffer(s) to contiguous temp buffer */
4558 			int copy_count = framesize;
4559 			int i = start;
4560 			unsigned char *p = info->tmp_rbuf;
4561 			info->tmp_rbuf_count = framesize;
4562 
4563 			info->icount.rxok++;
4564 
4565 			while(copy_count) {
4566 				int partial_count = min_t(int, copy_count, info->rbuf_fill_level);
4567 				memcpy(p, info->rbufs[i].buf, partial_count);
4568 				p += partial_count;
4569 				copy_count -= partial_count;
4570 				if (++i == info->rbuf_count)
4571 					i = 0;
4572 			}
4573 
4574 			if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
4575 				*p = (status & BIT1) ? RX_CRC_ERROR : RX_OK;
4576 				framesize++;
4577 			}
4578 
4579 #if SYNCLINK_GENERIC_HDLC
4580 			if (info->netcount)
4581 				hdlcdev_rx(info,info->tmp_rbuf, framesize);
4582 			else
4583 #endif
4584 				ldisc_receive_buf(tty, info->tmp_rbuf, info->flag_buf, framesize);
4585 		}
4586 	}
4587 	free_rbufs(info, start, end);
4588 	return true;
4589 
4590 cleanup:
4591 	return false;
4592 }
4593 
4594 /*
4595  * pass receive buffer (RAW synchronous mode) to tty layer
4596  * return true if buffer available, otherwise false
4597  */
4598 static bool rx_get_buf(struct slgt_info *info)
4599 {
4600 	unsigned int i = info->rbuf_current;
4601 	unsigned int count;
4602 
4603 	if (!desc_complete(info->rbufs[i]))
4604 		return false;
4605 	count = desc_count(info->rbufs[i]);
4606 	switch(info->params.mode) {
4607 	case MGSL_MODE_MONOSYNC:
4608 	case MGSL_MODE_BISYNC:
4609 		/* ignore residue in byte synchronous modes */
4610 		if (desc_residue(info->rbufs[i]))
4611 			count--;
4612 		break;
4613 	}
4614 	DBGDATA(info, info->rbufs[i].buf, count, "rx");
4615 	DBGINFO(("rx_get_buf size=%d\n", count));
4616 	if (count)
4617 		ldisc_receive_buf(info->port.tty, info->rbufs[i].buf,
4618 				  info->flag_buf, count);
4619 	free_rbufs(info, i, i);
4620 	return true;
4621 }
4622 
4623 static void reset_tbufs(struct slgt_info *info)
4624 {
4625 	unsigned int i;
4626 	info->tbuf_current = 0;
4627 	for (i=0 ; i < info->tbuf_count ; i++) {
4628 		info->tbufs[i].status = 0;
4629 		info->tbufs[i].count  = 0;
4630 	}
4631 }
4632 
4633 /*
4634  * return number of free transmit DMA buffers
4635  */
4636 static unsigned int free_tbuf_count(struct slgt_info *info)
4637 {
4638 	unsigned int count = 0;
4639 	unsigned int i = info->tbuf_current;
4640 
4641 	do
4642 	{
4643 		if (desc_count(info->tbufs[i]))
4644 			break; /* buffer in use */
4645 		++count;
4646 		if (++i == info->tbuf_count)
4647 			i=0;
4648 	} while (i != info->tbuf_current);
4649 
4650 	/* if tx DMA active, last zero count buffer is in use */
4651 	if (count && (rd_reg32(info, TDCSR) & BIT0))
4652 		--count;
4653 
4654 	return count;
4655 }
4656 
4657 /*
4658  * return number of bytes in unsent transmit DMA buffers
4659  * and the serial controller tx FIFO
4660  */
4661 static unsigned int tbuf_bytes(struct slgt_info *info)
4662 {
4663 	unsigned int total_count = 0;
4664 	unsigned int i = info->tbuf_current;
4665 	unsigned int reg_value;
4666 	unsigned int count;
4667 	unsigned int active_buf_count = 0;
4668 
4669 	/*
4670 	 * Add descriptor counts for all tx DMA buffers.
4671 	 * If count is zero (cleared by DMA controller after read),
4672 	 * the buffer is complete or is actively being read from.
4673 	 *
4674 	 * Record buf_count of last buffer with zero count starting
4675 	 * from current ring position. buf_count is mirror
4676 	 * copy of count and is not cleared by serial controller.
4677 	 * If DMA controller is active, that buffer is actively
4678 	 * being read so add to total.
4679 	 */
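	/* Worked example (illustrative): an in-progress DMA buffer (count
	 * already cleared, buf_count = 512) followed in the ring by unsent
	 * buffers of 256 and 128 bytes, a tx FIFO level of 5 and an active
	 * transmitter give 512 + 256 + 128 + 5 + 1 = 902 bytes still to send.
	 */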
4680 	do {
4681 		count = desc_count(info->tbufs[i]);
4682 		if (count)
4683 			total_count += count;
4684 		else if (!total_count)
4685 			active_buf_count = info->tbufs[i].buf_count;
4686 		if (++i == info->tbuf_count)
4687 			i = 0;
4688 	} while (i != info->tbuf_current);
4689 
4690 	/* read tx DMA status register */
4691 	reg_value = rd_reg32(info, TDCSR);
4692 
4693 	/* if tx DMA active, last zero count buffer is in use */
4694 	if (reg_value & BIT0)
4695 		total_count += active_buf_count;
4696 
4697 	/* add tx FIFO count = reg_value[15..8] */
4698 	total_count += (reg_value >> 8) & 0xff;
4699 
4700 	/* if transmitter active add one byte for shift register */
4701 	if (info->tx_active)
4702 		total_count++;
4703 
4704 	return total_count;
4705 }
4706 
4707 /*
4708  * load transmit DMA buffer(s) with data
4709  */
4710 static void tx_load(struct slgt_info *info, const char *buf, unsigned int size)
4711 {
4712 	unsigned short count;
4713 	unsigned int i;
4714 	struct slgt_desc *d;
4715 
4716 	if (size == 0)
4717 		return;
4718 
4719 	DBGDATA(info, buf, size, "tx");
4720 
4721 	info->tbuf_start = i = info->tbuf_current;
4722 
4723 	while (size) {
4724 		d = &info->tbufs[i];
4725 		if (++i == info->tbuf_count)
4726 			i = 0;
4727 
4728 		count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
4729 		memcpy(d->buf, buf, count);
4730 
4731 		size -= count;
4732 		buf  += count;
4733 
4734 		/*
4735 		 * set EOF bit for last buffer of HDLC frame or
4736 		 * for every buffer in raw mode
4737 		 */
4738 		if ((!size && info->params.mode == MGSL_MODE_HDLC) ||
4739 		    info->params.mode == MGSL_MODE_RAW)
4740 			set_desc_eof(*d, 1);
4741 		else
4742 			set_desc_eof(*d, 0);
4743 
4744 		set_desc_count(*d, count);
4745 		d->buf_count = count;
4746 	}
4747 
4748 	info->tbuf_current = i;
4749 }
4750 
4751 static int register_test(struct slgt_info *info)
4752 {
4753 	static unsigned short patterns[] =
4754 		{0x0000, 0xffff, 0xaaaa, 0x5555, 0x6969, 0x9696};
4755 	static unsigned int count = sizeof(patterns)/sizeof(patterns[0]);
4756 	unsigned int i;
4757 	int rc = 0;
4758 
4759 	for (i=0 ; i < count ; i++) {
4760 		wr_reg16(info, TIR, patterns[i]);
4761 		wr_reg16(info, BDR, patterns[(i+1)%count]);
4762 		if ((rd_reg16(info, TIR) != patterns[i]) ||
4763 		    (rd_reg16(info, BDR) != patterns[(i+1)%count])) {
4764 			rc = -ENODEV;
4765 			break;
4766 		}
4767 	}
4768 	info->gpio_present = (rd_reg32(info, JCR) & BIT5) ? 1 : 0;
4769 	info->init_error = rc ? DiagStatus_AddressFailure : 0;
4770 	return rc;
4771 }
4772 
4773 static int irq_test(struct slgt_info *info)
4774 {
4775 	unsigned long timeout;
4776 	unsigned long flags;
4777 	struct tty_struct *oldtty = info->port.tty;
4778 	u32 speed = info->params.data_rate;
4779 
4780 	info->params.data_rate = 921600;
4781 	info->port.tty = NULL;
4782 
4783 	spin_lock_irqsave(&info->lock, flags);
4784 	async_mode(info);
4785 	slgt_irq_on(info, IRQ_TXIDLE);
4786 
4787 	/* enable transmitter */
4788 	wr_reg16(info, TCR,
4789 		(unsigned short)(rd_reg16(info, TCR) | BIT1));
4790 
4791 	/* write one byte and wait for tx idle */
4792 	wr_reg16(info, TDR, 0);
4793 
4794 	/* assume failure */
4795 	info->init_error = DiagStatus_IrqFailure;
4796 	info->irq_occurred = false;
4797 
4798 	spin_unlock_irqrestore(&info->lock, flags);
4799 
4800 	timeout=100;
4801 	while(timeout-- && !info->irq_occurred)
4802 		msleep_interruptible(10);
4803 
4804 	spin_lock_irqsave(&info->lock,flags);
4805 	reset_port(info);
4806 	spin_unlock_irqrestore(&info->lock,flags);
4807 
4808 	info->params.data_rate = speed;
4809 	info->port.tty = oldtty;
4810 
4811 	info->init_error = info->irq_occurred ? 0 : DiagStatus_IrqFailure;
4812 	return info->irq_occurred ? 0 : -ENODEV;
4813 }
4814 
4815 static int loopback_test_rx(struct slgt_info *info)
4816 {
4817 	unsigned char *src, *dest;
4818 	int count;
4819 
4820 	if (desc_complete(info->rbufs[0])) {
4821 		count = desc_count(info->rbufs[0]);
4822 		src   = info->rbufs[0].buf;
4823 		dest  = info->tmp_rbuf;
4824 
4825 		for( ; count ; count-=2, src+=2) {
4826 			/* src=data byte (src+1)=status byte */
4827 			if (!(*(src+1) & (BIT9 + BIT8))) {
4828 				*dest = *src;
4829 				dest++;
4830 				info->tmp_rbuf_count++;
4831 			}
4832 		}
4833 		DBGDATA(info, info->tmp_rbuf, info->tmp_rbuf_count, "rx");
4834 		return 1;
4835 	}
4836 	return 0;
4837 }
4838 
4839 static int loopback_test(struct slgt_info *info)
4840 {
4841 #define TESTFRAMESIZE 20
4842 
4843 	unsigned long timeout;
4844 	u16 count = TESTFRAMESIZE;
4845 	unsigned char buf[TESTFRAMESIZE];
4846 	int rc = -ENODEV;
4847 	unsigned long flags;
4848 
4849 	struct tty_struct *oldtty = info->port.tty;
4850 	MGSL_PARAMS params;
4851 
4852 	memcpy(&params, &info->params, sizeof(params));
4853 
4854 	info->params.mode = MGSL_MODE_ASYNC;
4855 	info->params.data_rate = 921600;
4856 	info->params.loopback = 1;
4857 	info->port.tty = NULL;
4858 
4859 	/* build and send transmit frame */
4860 	for (count = 0; count < TESTFRAMESIZE; ++count)
4861 		buf[count] = (unsigned char)count;
4862 
4863 	info->tmp_rbuf_count = 0;
4864 	memset(info->tmp_rbuf, 0, TESTFRAMESIZE);
4865 
4866 	/* program hardware for async mode and enable the receiver */
4867 	spin_lock_irqsave(&info->lock,flags);
4868 	async_mode(info);
4869 	rx_start(info);
4870 	info->tx_count = count;
4871 	tx_load(info, buf, count);
4872 	tx_start(info);
4873 	spin_unlock_irqrestore(&info->lock, flags);
4874 
4875 	/* wait for receive complete */
4876 	for (timeout = 100; timeout; --timeout) {
4877 		msleep_interruptible(10);
4878 		if (loopback_test_rx(info)) {
4879 			rc = 0;
4880 			break;
4881 		}
4882 	}
4883 
4884 	/* verify received frame length and contents */
4885 	if (!rc && (info->tmp_rbuf_count != count ||
4886 		  memcmp(buf, info->tmp_rbuf, count))) {
4887 		rc = -ENODEV;
4888 	}
4889 
4890 	spin_lock_irqsave(&info->lock,flags);
4891 	reset_adapter(info);
4892 	spin_unlock_irqrestore(&info->lock,flags);
4893 
4894 	memcpy(&info->params, &params, sizeof(info->params));
4895 	info->port.tty = oldtty;
4896 
4897 	info->init_error = rc ? DiagStatus_DmaFailure : 0;
4898 	return rc;
4899 }
4900 
4901 static int adapter_test(struct slgt_info *info)
4902 {
4903 	DBGINFO(("testing %s\n", info->device_name));
4904 	if (register_test(info) < 0) {
4905 		printk("register test failure %s addr=%08X\n",
4906 			info->device_name, info->phys_reg_addr);
4907 	} else if (irq_test(info) < 0) {
4908 		printk("IRQ test failure %s IRQ=%d\n",
4909 			info->device_name, info->irq_level);
4910 	} else if (loopback_test(info) < 0) {
4911 		printk("loopback test failure %s\n", info->device_name);
4912 	}
4913 	return info->init_error;
4914 }
4915 
4916 /*
4917  * transmit timeout handler
4918  */
4919 static void tx_timeout(unsigned long context)
4920 {
4921 	struct slgt_info *info = (struct slgt_info*)context;
4922 	unsigned long flags;
4923 
4924 	DBGINFO(("%s tx_timeout\n", info->device_name));
4925 	if(info->tx_active && info->params.mode == MGSL_MODE_HDLC) {
4926 		info->icount.txtimeout++;
4927 	}
4928 	spin_lock_irqsave(&info->lock,flags);
4929 	info->tx_active = false;
4930 	info->tx_count = 0;
4931 	spin_unlock_irqrestore(&info->lock,flags);
4932 
4933 #if SYNCLINK_GENERIC_HDLC
4934 	if (info->netcount)
4935 		hdlcdev_tx_done(info);
4936 	else
4937 #endif
4938 		bh_transmit(info);
4939 }
4940 
4941 /*
4942  * receive buffer polling timer
4943  */
4944 static void rx_timeout(unsigned long context)
4945 {
4946 	struct slgt_info *info = (struct slgt_info*)context;
4947 	unsigned long flags;
4948 
4949 	DBGINFO(("%s rx_timeout\n", info->device_name));
4950 	spin_lock_irqsave(&info->lock, flags);
4951 	info->pending_bh |= BH_RECEIVE;
4952 	spin_unlock_irqrestore(&info->lock, flags);
4953 	bh_handler(&info->task);
4954 }
4955 
4956