/asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/char/
1/*
2 * Device driver for Microgate SyncLink GT serial adapters.
3 *
4 * written by Paul Fulghum for Microgate Corporation
5 * paulkf@microgate.com
6 *
7 * Microgate and SyncLink are trademarks of Microgate Corporation
8 *
9 * This code is released under the GNU General Public License (GPL)
10 *
11 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
12 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
13 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
14 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
15 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
16 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
17 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
18 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
19 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
20 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
21 * OF THE POSSIBILITY OF SUCH DAMAGE.
22 */
23
24/*
25 * DEBUG OUTPUT DEFINITIONS
26 *
27 * uncomment lines below to enable specific types of debug output
28 *
29 * DBGINFO   information - most verbose output
30 * DBGERR    serious errors
31 * DBGBH     bottom half service routine debugging
32 * DBGISR    interrupt service routine debugging
33 * DBGDATA   output receive and transmit data
34 * DBGTBUF   output transmit DMA buffers and registers
35 * DBGRBUF   output receive DMA buffers and registers
36 */
37
38#define DBGINFO(fmt) if (debug_level >= DEBUG_LEVEL_INFO) printk fmt
39#define DBGERR(fmt) if (debug_level >= DEBUG_LEVEL_ERROR) printk fmt
40#define DBGBH(fmt) if (debug_level >= DEBUG_LEVEL_BH) printk fmt
41#define DBGISR(fmt) if (debug_level >= DEBUG_LEVEL_ISR) printk fmt
42#define DBGDATA(info, buf, size, label) if (debug_level >= DEBUG_LEVEL_DATA) trace_block((info), (buf), (size), (label))
43/*#define DBGTBUF(info) dump_tbufs(info)*/
44/*#define DBGRBUF(info) dump_rbufs(info)*/
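/*
 * Note (added for clarity): DBGINFO, DBGERR, DBGBH, DBGISR and DBGDATA are
 * gated at runtime by the debug_level module parameter defined below.  Only
 * DBGTBUF and DBGRBUF are compile-time options; uncommenting the two defines
 * above also enables dump_tbufs()/dump_rbufs() further down in this file.
 */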
45
46
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/signal.h>
50#include <linux/sched.h>
51#include <linux/timer.h>
52#include <linux/interrupt.h>
53#include <linux/pci.h>
54#include <linux/tty.h>
55#include <linux/tty_flip.h>
56#include <linux/serial.h>
57#include <linux/major.h>
58#include <linux/string.h>
59#include <linux/fcntl.h>
60#include <linux/ptrace.h>
61#include <linux/ioport.h>
62#include <linux/mm.h>
63#include <linux/seq_file.h>
64#include <linux/slab.h>
65#include <linux/netdevice.h>
66#include <linux/vmalloc.h>
67#include <linux/init.h>
68#include <linux/delay.h>
69#include <linux/ioctl.h>
70#include <linux/termios.h>
71#include <linux/bitops.h>
72#include <linux/workqueue.h>
73#include <linux/hdlc.h>
74#include <linux/synclink.h>
75
76#include <asm/system.h>
77#include <asm/io.h>
78#include <asm/irq.h>
79#include <asm/dma.h>
80#include <asm/types.h>
81#include <asm/uaccess.h>
82
83#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && \
84	defined(CONFIG_SYNCLINK_GT_MODULE))
85#define SYNCLINK_GENERIC_HDLC 1
86#else
87#define SYNCLINK_GENERIC_HDLC 0
88#endif
89
90/*
91 * module identification
92 */
93static char *driver_name     = "SyncLink GT";
94static char *tty_driver_name = "synclink_gt";
95static char *tty_dev_prefix  = "ttySLG";
96MODULE_LICENSE("GPL");
97#define MGSL_MAGIC 0x5401
98#define MAX_DEVICES 32
99
100static struct pci_device_id pci_table[] = {
101	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
102	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT2_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
103	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
104	{PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
105	{0,}, /* terminate list */
106};
107MODULE_DEVICE_TABLE(pci, pci_table);
108
109static int  init_one(struct pci_dev *dev,const struct pci_device_id *ent);
110static void remove_one(struct pci_dev *dev);
111static struct pci_driver pci_driver = {
112	.name		= "synclink_gt",
113	.id_table	= pci_table,
114	.probe		= init_one,
115	.remove		= __devexit_p(remove_one),
116};
117
118static bool pci_registered;
119
120/*
121 * module configuration and status
122 */
123static struct slgt_info *slgt_device_list;
124static int slgt_device_count;
125
126static int ttymajor;
127static int debug_level;
128static int maxframe[MAX_DEVICES];
129
130module_param(ttymajor, int, 0);
131module_param(debug_level, int, 0);
132module_param_array(maxframe, int, NULL, 0);
133
134MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned");
135MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail");
136MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)");
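/*
 * Usage example (illustrative, not part of the original source): loading the
 * driver with a fixed TTY major, verbose debugging and per-device maximum
 * frame sizes (maxframe takes one comma-separated value per device):
 *
 *   modprobe synclink_gt ttymajor=240 debug_level=5 maxframe=4096,65535
 *
 * The major number 240 is only an example value.
 */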
137
138/*
139 * tty support and callbacks
140 */
141static struct tty_driver *serial_driver;
142
143static int  open(struct tty_struct *tty, struct file * filp);
144static void close(struct tty_struct *tty, struct file * filp);
145static void hangup(struct tty_struct *tty);
146static void set_termios(struct tty_struct *tty, struct ktermios *old_termios);
147
148static int  write(struct tty_struct *tty, const unsigned char *buf, int count);
149static int put_char(struct tty_struct *tty, unsigned char ch);
150static void send_xchar(struct tty_struct *tty, char ch);
151static void wait_until_sent(struct tty_struct *tty, int timeout);
152static int  write_room(struct tty_struct *tty);
153static void flush_chars(struct tty_struct *tty);
154static void flush_buffer(struct tty_struct *tty);
155static void tx_hold(struct tty_struct *tty);
156static void tx_release(struct tty_struct *tty);
157
158static int  ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg);
159static int  chars_in_buffer(struct tty_struct *tty);
160static void throttle(struct tty_struct * tty);
161static void unthrottle(struct tty_struct * tty);
162static int set_break(struct tty_struct *tty, int break_state);
163
164/*
165 * generic HDLC support and callbacks
166 */
167#if SYNCLINK_GENERIC_HDLC
168#define dev_to_port(D) (dev_to_hdlc(D)->priv)
169static void hdlcdev_tx_done(struct slgt_info *info);
170static void hdlcdev_rx(struct slgt_info *info, char *buf, int size);
171static int  hdlcdev_init(struct slgt_info *info);
172static void hdlcdev_exit(struct slgt_info *info);
173#endif
174
175
176/*
177 * device specific structures, macros and functions
178 */
179
180#define SLGT_MAX_PORTS 4
181#define SLGT_REG_SIZE  256
182
183/*
184 * conditional wait facility
185 */
186struct cond_wait {
187	struct cond_wait *next;
188	wait_queue_head_t q;
189	wait_queue_t wait;
190	unsigned int data;
191};
192static void init_cond_wait(struct cond_wait *w, unsigned int data);
193static void add_cond_wait(struct cond_wait **head, struct cond_wait *w);
194static void remove_cond_wait(struct cond_wait **head, struct cond_wait *w);
195static void flush_cond_wait(struct cond_wait **head);
196
197/*
198 * DMA buffer descriptor and access macros
199 */
200struct slgt_desc
201{
202	__le16 count;
203	__le16 status;
204	__le32 pbuf;  /* physical address of data buffer */
205	__le32 next;  /* physical address of next descriptor */
206
207	/* driver book keeping */
208	char *buf;          /* virtual  address of data buffer */
209	unsigned int pdesc; /* physical address of this descriptor */
210	dma_addr_t buf_dma_addr;
211	unsigned short buf_count;
212};
213
214#define set_desc_buffer(a,b) (a).pbuf = cpu_to_le32((unsigned int)(b))
215#define set_desc_next(a,b) (a).next   = cpu_to_le32((unsigned int)(b))
216#define set_desc_count(a,b)(a).count  = cpu_to_le16((unsigned short)(b))
217#define set_desc_eof(a,b)  (a).status = cpu_to_le16((b) ? (le16_to_cpu((a).status) | BIT0) : (le16_to_cpu((a).status) & ~BIT0))
218#define set_desc_status(a, b) (a).status = cpu_to_le16((unsigned short)(b))
219#define desc_count(a)      (le16_to_cpu((a).count))
220#define desc_status(a)     (le16_to_cpu((a).status))
221#define desc_complete(a)   (le16_to_cpu((a).status) & BIT15)
222#define desc_eof(a)        (le16_to_cpu((a).status) & BIT2)
223#define desc_crc_error(a)  (le16_to_cpu((a).status) & BIT1)
224#define desc_abort(a)      (le16_to_cpu((a).status) & BIT0)
225#define desc_residue(a)    ((le16_to_cpu((a).status) & 0x38) >> 3)
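/*
 * Usage sketch (illustrative): checking the current receive descriptor with
 * the accessor macros above; the fields shown come from struct slgt_info
 * defined below.
 *
 *   struct slgt_desc *d = &info->rbufs[info->rbuf_current];
 *   if (desc_complete(*d)) {
 *           unsigned int len = desc_count(*d);
 *           if (desc_crc_error(*d))
 *                   ; /* handle CRC error, otherwise consume len bytes from d->buf */
 *   }
 */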
226
227struct _input_signal_events {
228	int ri_up;
229	int ri_down;
230	int dsr_up;
231	int dsr_down;
232	int dcd_up;
233	int dcd_down;
234	int cts_up;
235	int cts_down;
236};
237
238/*
239 * device instance data structure
240 */
241struct slgt_info {
242	void *if_ptr;		/* General purpose pointer (used by SPPP) */
243	struct tty_port port;
244
245	struct slgt_info *next_device;	/* device list link */
246
247	int magic;
248
249	char device_name[25];
250	struct pci_dev *pdev;
251
252	int port_count;  /* count of ports on adapter */
253	int adapter_num; /* adapter instance number */
254	int port_num;    /* port instance number */
255
256	/* array of pointers to port contexts on this adapter */
257	struct slgt_info *port_array[SLGT_MAX_PORTS];
258
259	int			line;		/* tty line instance number */
260
261	struct mgsl_icount	icount;
262
263	int			timeout;
264	int			x_char;		/* xon/xoff character */
265	unsigned int		read_status_mask;
266	unsigned int 		ignore_status_mask;
267
268	wait_queue_head_t	status_event_wait_q;
269	wait_queue_head_t	event_wait_q;
270	struct timer_list	tx_timer;
271	struct timer_list	rx_timer;
272
273	unsigned int            gpio_present;
274	struct cond_wait        *gpio_wait_q;
275
276	spinlock_t lock;	/* spinlock for synchronizing with ISR */
277
278	struct work_struct task;
279	u32 pending_bh;
280	bool bh_requested;
281	bool bh_running;
282
283	int isr_overflow;
284	bool irq_requested;	/* true if IRQ requested */
285	bool irq_occurred;	/* for diagnostics use */
286
287	/* device configuration */
288
289	unsigned int bus_type;
290	unsigned int irq_level;
291	unsigned long irq_flags;
292
293	unsigned char __iomem * reg_addr;  /* memory mapped registers address */
294	u32 phys_reg_addr;
295	bool reg_addr_requested;
296
297	MGSL_PARAMS params;       /* communications parameters */
298	u32 idle_mode;
299	u32 max_frame_size;       /* as set by device config */
300
301	unsigned int rbuf_fill_level;
302	unsigned int rx_pio;
303	unsigned int if_mode;
304	unsigned int base_clock;
305
306	/* device status */
307
308	bool rx_enabled;
309	bool rx_restart;
310
311	bool tx_enabled;
312	bool tx_active;
313
314	unsigned char signals;    /* serial signal states */
315	int init_error;  /* initialization error */
316
317	unsigned char *tx_buf;
318	int tx_count;
319
320	char flag_buf[MAX_ASYNC_BUFFER_SIZE];
321	char char_buf[MAX_ASYNC_BUFFER_SIZE];
322	bool drop_rts_on_tx_done;
323	struct	_input_signal_events	input_signal_events;
324
325	int dcd_chkcount;	/* check counts to prevent */
326	int cts_chkcount;	/* too many IRQs if a signal */
327	int dsr_chkcount;	/* is floating */
328	int ri_chkcount;
329
330	char *bufs;		/* virtual address of DMA buffer lists */
331	dma_addr_t bufs_dma_addr; /* physical address of buffer descriptors */
332
333	unsigned int rbuf_count;
334	struct slgt_desc *rbufs;
335	unsigned int rbuf_current;
336	unsigned int rbuf_index;
337	unsigned int rbuf_fill_index;
338	unsigned short rbuf_fill_count;
339
340	unsigned int tbuf_count;
341	struct slgt_desc *tbufs;
342	unsigned int tbuf_current;
343	unsigned int tbuf_start;
344
345	unsigned char *tmp_rbuf;
346	unsigned int tmp_rbuf_count;
347
348	/* SPPP/Cisco HDLC device parts */
349
350	int netcount;
351	spinlock_t netlock;
352#if SYNCLINK_GENERIC_HDLC
353	struct net_device *netdev;
354#endif
355
356};
357
358static MGSL_PARAMS default_params = {
359	.mode            = MGSL_MODE_HDLC,
360	.loopback        = 0,
361	.flags           = HDLC_FLAG_UNDERRUN_ABORT15,
362	.encoding        = HDLC_ENCODING_NRZI_SPACE,
363	.clock_speed     = 0,
364	.addr_filter     = 0xff,
365	.crc_type        = HDLC_CRC_16_CCITT,
366	.preamble_length = HDLC_PREAMBLE_LENGTH_8BITS,
367	.preamble        = HDLC_PREAMBLE_PATTERN_NONE,
368	.data_rate       = 9600,
369	.data_bits       = 8,
370	.stop_bits       = 1,
371	.parity          = ASYNC_PARITY_NONE
372};
373
374
375#define BH_RECEIVE  1
376#define BH_TRANSMIT 2
377#define BH_STATUS   4
378#define IO_PIN_SHUTDOWN_LIMIT 100
379
380#define DMABUFSIZE 256
381#define DESC_LIST_SIZE 4096
382
383#define MASK_PARITY  BIT1
384#define MASK_FRAMING BIT0
385#define MASK_BREAK   BIT14
386#define MASK_OVERRUN BIT4
387
388#define GSR   0x00 /* global status */
389#define JCR   0x04 /* JTAG control */
390#define IODR  0x08 /* GPIO direction */
391#define IOER  0x0c /* GPIO interrupt enable */
392#define IOVR  0x10 /* GPIO value */
393#define IOSR  0x14 /* GPIO interrupt status */
394#define TDR   0x80 /* tx data */
395#define RDR   0x80 /* rx data */
396#define TCR   0x82 /* tx control */
397#define TIR   0x84 /* tx idle */
398#define TPR   0x85 /* tx preamble */
399#define RCR   0x86 /* rx control */
400#define VCR   0x88 /* V.24 control */
401#define CCR   0x89 /* clock control */
402#define BDR   0x8a /* baud divisor */
403#define SCR   0x8c /* serial control */
404#define SSR   0x8e /* serial status */
405#define RDCSR 0x90 /* rx DMA control/status */
406#define TDCSR 0x94 /* tx DMA control/status */
407#define RDDAR 0x98 /* rx DMA descriptor address */
408#define TDDAR 0x9c /* tx DMA descriptor address */
409
410#define RXIDLE      BIT14
411#define RXBREAK     BIT14
412#define IRQ_TXDATA  BIT13
413#define IRQ_TXIDLE  BIT12
414#define IRQ_TXUNDER BIT11 /* HDLC */
415#define IRQ_RXDATA  BIT10
416#define IRQ_RXIDLE  BIT9  /* HDLC */
417#define IRQ_RXBREAK BIT9  /* async */
418#define IRQ_RXOVER  BIT8
419#define IRQ_DSR     BIT7
420#define IRQ_CTS     BIT6
421#define IRQ_DCD     BIT5
422#define IRQ_RI      BIT4
423#define IRQ_ALL     0x3ff0
424#define IRQ_MASTER  BIT0
425
426#define slgt_irq_on(info, mask) \
427	wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) | (mask)))
428#define slgt_irq_off(info, mask) \
429	wr_reg16((info), SCR, (unsigned short)(rd_reg16((info), SCR) & ~(mask)))
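/*
 * Usage sketch (illustrative): unmask, then mask again, the modem status
 * interrupts in the serial control register.  Callers would typically do
 * this with info->lock held or from the interrupt service routine.
 *
 *   slgt_irq_on(info, IRQ_DSR | IRQ_CTS | IRQ_DCD | IRQ_RI);
 *   ...
 *   slgt_irq_off(info, IRQ_DSR | IRQ_CTS | IRQ_DCD | IRQ_RI);
 */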
430
431static __u8  rd_reg8(struct slgt_info *info, unsigned int addr);
432static void  wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value);
433static __u16 rd_reg16(struct slgt_info *info, unsigned int addr);
434static void  wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value);
435static __u32 rd_reg32(struct slgt_info *info, unsigned int addr);
436static void  wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value);
437
438static void  msc_set_vcr(struct slgt_info *info);
439
440static int  startup(struct slgt_info *info);
441static int  block_til_ready(struct tty_struct *tty, struct file * filp,struct slgt_info *info);
442static void shutdown(struct slgt_info *info);
443static void program_hw(struct slgt_info *info);
444static void change_params(struct slgt_info *info);
445
446static int  register_test(struct slgt_info *info);
447static int  irq_test(struct slgt_info *info);
448static int  loopback_test(struct slgt_info *info);
449static int  adapter_test(struct slgt_info *info);
450
451static void reset_adapter(struct slgt_info *info);
452static void reset_port(struct slgt_info *info);
453static void async_mode(struct slgt_info *info);
454static void sync_mode(struct slgt_info *info);
455
456static void rx_stop(struct slgt_info *info);
457static void rx_start(struct slgt_info *info);
458static void reset_rbufs(struct slgt_info *info);
459static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
460static void rdma_reset(struct slgt_info *info);
461static bool rx_get_frame(struct slgt_info *info);
462static bool rx_get_buf(struct slgt_info *info);
463
464static void tx_start(struct slgt_info *info);
465static void tx_stop(struct slgt_info *info);
466static void tx_set_idle(struct slgt_info *info);
467static unsigned int free_tbuf_count(struct slgt_info *info);
468static unsigned int tbuf_bytes(struct slgt_info *info);
469static void reset_tbufs(struct slgt_info *info);
470static void tdma_reset(struct slgt_info *info);
471static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
472
473static void get_signals(struct slgt_info *info);
474static void set_signals(struct slgt_info *info);
475static void enable_loopback(struct slgt_info *info);
476static void set_rate(struct slgt_info *info, u32 data_rate);
477
478static int  bh_action(struct slgt_info *info);
479static void bh_handler(struct work_struct *work);
480static void bh_transmit(struct slgt_info *info);
481static void isr_serial(struct slgt_info *info);
482static void isr_rdma(struct slgt_info *info);
483static void isr_txeom(struct slgt_info *info, unsigned short status);
484static void isr_tdma(struct slgt_info *info);
485
486static int  alloc_dma_bufs(struct slgt_info *info);
487static void free_dma_bufs(struct slgt_info *info);
488static int  alloc_desc(struct slgt_info *info);
489static void free_desc(struct slgt_info *info);
490static int  alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
491static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count);
492
493static int  alloc_tmp_rbuf(struct slgt_info *info);
494static void free_tmp_rbuf(struct slgt_info *info);
495
496static void tx_timeout(unsigned long context);
497static void rx_timeout(unsigned long context);
498
499/*
500 * ioctl handlers
501 */
502static int  get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount);
503static int  get_params(struct slgt_info *info, MGSL_PARAMS __user *params);
504static int  set_params(struct slgt_info *info, MGSL_PARAMS __user *params);
505static int  get_txidle(struct slgt_info *info, int __user *idle_mode);
506static int  set_txidle(struct slgt_info *info, int idle_mode);
507static int  tx_enable(struct slgt_info *info, int enable);
508static int  tx_abort(struct slgt_info *info);
509static int  rx_enable(struct slgt_info *info, int enable);
510static int  modem_input_wait(struct slgt_info *info,int arg);
511static int  wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr);
512static int  tiocmget(struct tty_struct *tty, struct file *file);
513static int  tiocmset(struct tty_struct *tty, struct file *file,
514		     unsigned int set, unsigned int clear);
515static int set_break(struct tty_struct *tty, int break_state);
516static int  get_interface(struct slgt_info *info, int __user *if_mode);
517static int  set_interface(struct slgt_info *info, int if_mode);
518static int  set_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
519static int  get_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
520static int  wait_gpio(struct slgt_info *info, struct gpio_desc __user *gpio);
521
522/*
523 * driver functions
524 */
525static void add_device(struct slgt_info *info);
526static void device_init(int adapter_num, struct pci_dev *pdev);
527static int  claim_resources(struct slgt_info *info);
528static void release_resources(struct slgt_info *info);
529
530/*
531 * DEBUG OUTPUT CODE
532 */
533#ifndef DBGINFO
534#define DBGINFO(fmt)
535#endif
536#ifndef DBGERR
537#define DBGERR(fmt)
538#endif
539#ifndef DBGBH
540#define DBGBH(fmt)
541#endif
542#ifndef DBGISR
543#define DBGISR(fmt)
544#endif
545
546#ifdef DBGDATA
547static void trace_block(struct slgt_info *info, const char *data, int count, const char *label)
548{
549	int i;
550	int linecount;
551	printk("%s %s data:\n",info->device_name, label);
552	while(count) {
553		linecount = (count > 16) ? 16 : count;
554		for(i=0; i < linecount; i++)
555			printk("%02X ",(unsigned char)data[i]);
556		for(;i<17;i++)
557			printk("   ");
558		for(i=0;i<linecount;i++) {
559			if (data[i]>=040 && data[i]<=0176)
560				printk("%c",data[i]);
561			else
562				printk(".");
563		}
564		printk("\n");
565		data  += linecount;
566		count -= linecount;
567	}
568}
569#else
570#define DBGDATA(info, buf, size, label)
571#endif
572
573#ifdef DBGTBUF
574static void dump_tbufs(struct slgt_info *info)
575{
576	int i;
577	printk("tbuf_current=%d\n", info->tbuf_current);
578	for (i=0 ; i < info->tbuf_count ; i++) {
579		printk("%d: count=%04X status=%04X\n",
580			i, le16_to_cpu(info->tbufs[i].count), le16_to_cpu(info->tbufs[i].status));
581	}
582}
583#else
584#define DBGTBUF(info)
585#endif
586
587#ifdef DBGRBUF
588static void dump_rbufs(struct slgt_info *info)
589{
590	int i;
591	printk("rbuf_current=%d\n", info->rbuf_current);
592	for (i=0 ; i < info->rbuf_count ; i++) {
593		printk("%d: count=%04X status=%04X\n",
594			i, le16_to_cpu(info->rbufs[i].count), le16_to_cpu(info->rbufs[i].status));
595	}
596}
597#else
598#define DBGRBUF(info)
599#endif
600
601static inline int sanity_check(struct slgt_info *info, char *devname, const char *name)
602{
603#ifdef SANITY_CHECK
604	if (!info) {
605		printk("null struct slgt_info for (%s) in %s\n", devname, name);
606		return 1;
607	}
608	if (info->magic != MGSL_MAGIC) {
609		printk("bad magic number struct slgt_info (%s) in %s\n", devname, name);
610		return 1;
611	}
612#else
613	if (!info)
614		return 1;
615#endif
616	return 0;
617}
618
619/**
620 * line discipline callback wrappers
621 *
622 * The wrappers maintain line discipline references
623 * while calling into the line discipline.
624 *
625 * ldisc_receive_buf  - pass receive data to line discipline
626 */
627static void ldisc_receive_buf(struct tty_struct *tty,
628			      const __u8 *data, char *flags, int count)
629{
630	struct tty_ldisc *ld;
631	if (!tty)
632		return;
633	ld = tty_ldisc_ref(tty);
634	if (ld) {
635		if (ld->ops->receive_buf)
636			ld->ops->receive_buf(tty, data, flags, count);
637		tty_ldisc_deref(ld);
638	}
639}
640
641/* tty callbacks */
642
643static int open(struct tty_struct *tty, struct file *filp)
644{
645	struct slgt_info *info;
646	int retval, line;
647	unsigned long flags;
648
649	line = tty->index;
650	if ((line < 0) || (line >= slgt_device_count)) {
651		DBGERR(("%s: open with invalid line #%d.\n", driver_name, line));
652		return -ENODEV;
653	}
654
655	info = slgt_device_list;
656	while(info && info->line != line)
657		info = info->next_device;
658	if (sanity_check(info, tty->name, "open"))
659		return -ENODEV;
660	if (info->init_error) {
661		DBGERR(("%s init error=%d\n", info->device_name, info->init_error));
662		return -ENODEV;
663	}
664
665	tty->driver_data = info;
666	info->port.tty = tty;
667
668	DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
669
670	/* If port is closing, signal caller to try again */
671	if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
672		if (info->port.flags & ASYNC_CLOSING)
673			interruptible_sleep_on(&info->port.close_wait);
674		retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
675			-EAGAIN : -ERESTARTSYS);
676		goto cleanup;
677	}
678
679	mutex_lock(&info->port.mutex);
680	info->port.tty->low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
681
682	spin_lock_irqsave(&info->netlock, flags);
683	if (info->netcount) {
684		retval = -EBUSY;
685		spin_unlock_irqrestore(&info->netlock, flags);
686		mutex_unlock(&info->port.mutex);
687		goto cleanup;
688	}
689	info->port.count++;
690	spin_unlock_irqrestore(&info->netlock, flags);
691
692	if (info->port.count == 1) {
693		/* 1st open on this device, init hardware */
694		retval = startup(info);
695		if (retval < 0) {
696			mutex_unlock(&info->port.mutex);
697			goto cleanup;
698		}
699	}
700	mutex_unlock(&info->port.mutex);
701	retval = block_til_ready(tty, filp, info);
702	if (retval) {
703		DBGINFO(("%s block_til_ready rc=%d\n", info->device_name, retval));
704		goto cleanup;
705	}
706
707	retval = 0;
708
709cleanup:
710	if (retval) {
711		if (tty->count == 1)
712			info->port.tty = NULL; /* tty layer will release tty struct */
713		if(info->port.count)
714			info->port.count--;
715	}
716
717	DBGINFO(("%s open rc=%d\n", info->device_name, retval));
718	return retval;
719}
720
721static void close(struct tty_struct *tty, struct file *filp)
722{
723	struct slgt_info *info = tty->driver_data;
724
725	if (sanity_check(info, tty->name, "close"))
726		return;
727	DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
728
729	if (tty_port_close_start(&info->port, tty, filp) == 0)
730		goto cleanup;
731
732	mutex_lock(&info->port.mutex);
733 	if (info->port.flags & ASYNC_INITIALIZED)
734 		wait_until_sent(tty, info->timeout);
735	flush_buffer(tty);
736	tty_ldisc_flush(tty);
737
738	shutdown(info);
739	mutex_unlock(&info->port.mutex);
740
741	tty_port_close_end(&info->port, tty);
742	info->port.tty = NULL;
743cleanup:
744	DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
745}
746
747static void hangup(struct tty_struct *tty)
748{
749	struct slgt_info *info = tty->driver_data;
750	unsigned long flags;
751
752	if (sanity_check(info, tty->name, "hangup"))
753		return;
754	DBGINFO(("%s hangup\n", info->device_name));
755
756	flush_buffer(tty);
757
758	mutex_lock(&info->port.mutex);
759	shutdown(info);
760
761	spin_lock_irqsave(&info->port.lock, flags);
762	info->port.count = 0;
763	info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
764	info->port.tty = NULL;
765	spin_unlock_irqrestore(&info->port.lock, flags);
766	mutex_unlock(&info->port.mutex);
767
768	wake_up_interruptible(&info->port.open_wait);
769}
770
771static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
772{
773	struct slgt_info *info = tty->driver_data;
774	unsigned long flags;
775
776	DBGINFO(("%s set_termios\n", tty->driver->name));
777
778	change_params(info);
779
780	/* Handle transition to B0 status */
781	if (old_termios->c_cflag & CBAUD &&
782	    !(tty->termios->c_cflag & CBAUD)) {
783		info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
784		spin_lock_irqsave(&info->lock,flags);
785		set_signals(info);
786		spin_unlock_irqrestore(&info->lock,flags);
787	}
788
789	/* Handle transition away from B0 status */
790	if (!(old_termios->c_cflag & CBAUD) &&
791	    tty->termios->c_cflag & CBAUD) {
792		info->signals |= SerialSignal_DTR;
793 		if (!(tty->termios->c_cflag & CRTSCTS) ||
794 		    !test_bit(TTY_THROTTLED, &tty->flags)) {
795			info->signals |= SerialSignal_RTS;
796 		}
797		spin_lock_irqsave(&info->lock,flags);
798	 	set_signals(info);
799		spin_unlock_irqrestore(&info->lock,flags);
800	}
801
802	/* Handle turning off CRTSCTS */
803	if (old_termios->c_cflag & CRTSCTS &&
804	    !(tty->termios->c_cflag & CRTSCTS)) {
805		tty->hw_stopped = 0;
806		tx_release(tty);
807	}
808}
809
810static void update_tx_timer(struct slgt_info *info)
811{
812	/*
813	 * use worst case speed of 1200bps to calculate transmit timeout
814	 * based on data in buffers (tbuf_bytes) and FIFO (128 bytes)
815	 */
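	/*
	 * Worked example (added for clarity): with 1024 bytes queued the
	 * timeout is 1024 * 7 + 1000 = 8168 ms.  At 1200bps an HDLC byte
	 * takes 8/1200 s = ~6.7 ms (rounded up to 7 ms per byte here), and
	 * the fixed 1000 ms term covers the 128 byte FIFO (~860 ms at that
	 * rate) plus scheduling latency.
	 */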
816	if (info->params.mode == MGSL_MODE_HDLC) {
817		int timeout  = (tbuf_bytes(info) * 7) + 1000;
818		mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(timeout));
819	}
820}
821
822static int write(struct tty_struct *tty,
823		 const unsigned char *buf, int count)
824{
825	int ret = 0;
826	struct slgt_info *info = tty->driver_data;
827	unsigned long flags;
828
829	if (sanity_check(info, tty->name, "write"))
830		return -EIO;
831
832	DBGINFO(("%s write count=%d\n", info->device_name, count));
833
834	if (!info->tx_buf || (count > info->max_frame_size))
835		return -EIO;
836
837	if (!count || tty->stopped || tty->hw_stopped)
838		return 0;
839
840	spin_lock_irqsave(&info->lock, flags);
841
842	if (info->tx_count) {
843		/* send accumulated data from put_char() */
844		if (!tx_load(info, info->tx_buf, info->tx_count))
845			goto cleanup;
846		info->tx_count = 0;
847	}
848
849	if (tx_load(info, buf, count))
850		ret = count;
851
852cleanup:
853	spin_unlock_irqrestore(&info->lock, flags);
854	DBGINFO(("%s write rc=%d\n", info->device_name, ret));
855	return ret;
856}
857
858static int put_char(struct tty_struct *tty, unsigned char ch)
859{
860	struct slgt_info *info = tty->driver_data;
861	unsigned long flags;
862	int ret = 0;
863
864	if (sanity_check(info, tty->name, "put_char"))
865		return 0;
866	DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
867	if (!info->tx_buf)
868		return 0;
869	spin_lock_irqsave(&info->lock,flags);
870	if (info->tx_count < info->max_frame_size) {
871		info->tx_buf[info->tx_count++] = ch;
872		ret = 1;
873	}
874	spin_unlock_irqrestore(&info->lock,flags);
875	return ret;
876}
877
878static void send_xchar(struct tty_struct *tty, char ch)
879{
880	struct slgt_info *info = tty->driver_data;
881	unsigned long flags;
882
883	if (sanity_check(info, tty->name, "send_xchar"))
884		return;
885	DBGINFO(("%s send_xchar(%d)\n", info->device_name, ch));
886	info->x_char = ch;
887	if (ch) {
888		spin_lock_irqsave(&info->lock,flags);
889		if (!info->tx_enabled)
890		 	tx_start(info);
891		spin_unlock_irqrestore(&info->lock,flags);
892	}
893}
894
895static void wait_until_sent(struct tty_struct *tty, int timeout)
896{
897	struct slgt_info *info = tty->driver_data;
898	unsigned long orig_jiffies, char_time;
899
900	if (!info )
901		return;
902	if (sanity_check(info, tty->name, "wait_until_sent"))
903		return;
904	DBGINFO(("%s wait_until_sent entry\n", info->device_name));
905	if (!(info->port.flags & ASYNC_INITIALIZED))
906		goto exit;
907
908	orig_jiffies = jiffies;
909
910	/* Set check interval to 1/5 of estimated time to
911	 * send a character, and make it at least 1. The check
912	 * interval should also be less than the timeout.
913	 * Note: use tight timings here to satisfy the NIST-PCTS.
914	 */
915
916	if (info->params.data_rate) {
917	       	char_time = info->timeout/(32 * 5);
918		if (!char_time)
919			char_time++;
920	} else
921		char_time = 1;
922
923	if (timeout)
924		char_time = min_t(unsigned long, char_time, timeout);
925
926	while (info->tx_active) {
927		msleep_interruptible(jiffies_to_msecs(char_time));
928		if (signal_pending(current))
929			break;
930		if (timeout && time_after(jiffies, orig_jiffies + timeout))
931			break;
932	}
933exit:
934	DBGINFO(("%s wait_until_sent exit\n", info->device_name));
935}
936
937static int write_room(struct tty_struct *tty)
938{
939	struct slgt_info *info = tty->driver_data;
940	int ret;
941
942	if (sanity_check(info, tty->name, "write_room"))
943		return 0;
944	ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
945	DBGINFO(("%s write_room=%d\n", info->device_name, ret));
946	return ret;
947}
948
949static void flush_chars(struct tty_struct *tty)
950{
951	struct slgt_info *info = tty->driver_data;
952	unsigned long flags;
953
954	if (sanity_check(info, tty->name, "flush_chars"))
955		return;
956	DBGINFO(("%s flush_chars entry tx_count=%d\n", info->device_name, info->tx_count));
957
958	if (info->tx_count <= 0 || tty->stopped ||
959	    tty->hw_stopped || !info->tx_buf)
960		return;
961
962	DBGINFO(("%s flush_chars start transmit\n", info->device_name));
963
964	spin_lock_irqsave(&info->lock,flags);
965	if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
966		info->tx_count = 0;
967	spin_unlock_irqrestore(&info->lock,flags);
968}
969
970static void flush_buffer(struct tty_struct *tty)
971{
972	struct slgt_info *info = tty->driver_data;
973	unsigned long flags;
974
975	if (sanity_check(info, tty->name, "flush_buffer"))
976		return;
977	DBGINFO(("%s flush_buffer\n", info->device_name));
978
979	spin_lock_irqsave(&info->lock, flags);
980	info->tx_count = 0;
981	spin_unlock_irqrestore(&info->lock, flags);
982
983	tty_wakeup(tty);
984}
985
986/*
987 * throttle (stop) transmitter
988 */
989static void tx_hold(struct tty_struct *tty)
990{
991	struct slgt_info *info = tty->driver_data;
992	unsigned long flags;
993
994	if (sanity_check(info, tty->name, "tx_hold"))
995		return;
996	DBGINFO(("%s tx_hold\n", info->device_name));
997	spin_lock_irqsave(&info->lock,flags);
998	if (info->tx_enabled && info->params.mode == MGSL_MODE_ASYNC)
999	 	tx_stop(info);
1000	spin_unlock_irqrestore(&info->lock,flags);
1001}
1002
1003/*
1004 * release (start) transmitter
1005 */
1006static void tx_release(struct tty_struct *tty)
1007{
1008	struct slgt_info *info = tty->driver_data;
1009	unsigned long flags;
1010
1011	if (sanity_check(info, tty->name, "tx_release"))
1012		return;
1013	DBGINFO(("%s tx_release\n", info->device_name));
1014	spin_lock_irqsave(&info->lock, flags);
1015	if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
1016		info->tx_count = 0;
1017	spin_unlock_irqrestore(&info->lock, flags);
1018}
1019
1020/*
1021 * Service an IOCTL request
1022 *
1023 * Arguments
1024 *
1025 * 	tty	pointer to tty instance data
1026 * 	file	pointer to associated file object for device
1027 * 	cmd	IOCTL command code
1028 * 	arg	command argument/context
1029 *
1030 * Return 0 if success, otherwise error code
1031 */
1032static int ioctl(struct tty_struct *tty, struct file *file,
1033		 unsigned int cmd, unsigned long arg)
1034{
1035	struct slgt_info *info = tty->driver_data;
1036	struct mgsl_icount cnow;	/* kernel counter temps */
1037	struct serial_icounter_struct __user *p_cuser;	/* user space */
1038	unsigned long flags;
1039	void __user *argp = (void __user *)arg;
1040	int ret;
1041
1042	if (sanity_check(info, tty->name, "ioctl"))
1043		return -ENODEV;
1044	DBGINFO(("%s ioctl() cmd=%08X\n", info->device_name, cmd));
1045
1046	if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
1047	    (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
1048		if (tty->flags & (1 << TTY_IO_ERROR))
1049		    return -EIO;
1050	}
1051
1052	switch (cmd) {
1053	case MGSL_IOCWAITEVENT:
1054		return wait_mgsl_event(info, argp);
1055	case TIOCMIWAIT:
1056		return modem_input_wait(info,(int)arg);
1057	case TIOCGICOUNT:
1058		spin_lock_irqsave(&info->lock,flags);
1059		cnow = info->icount;
1060		spin_unlock_irqrestore(&info->lock,flags);
1061		p_cuser = argp;
1062		if (put_user(cnow.cts, &p_cuser->cts) ||
1063		    put_user(cnow.dsr, &p_cuser->dsr) ||
1064		    put_user(cnow.rng, &p_cuser->rng) ||
1065		    put_user(cnow.dcd, &p_cuser->dcd) ||
1066		    put_user(cnow.rx, &p_cuser->rx) ||
1067		    put_user(cnow.tx, &p_cuser->tx) ||
1068		    put_user(cnow.frame, &p_cuser->frame) ||
1069		    put_user(cnow.overrun, &p_cuser->overrun) ||
1070		    put_user(cnow.parity, &p_cuser->parity) ||
1071		    put_user(cnow.brk, &p_cuser->brk) ||
1072		    put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
1073			return -EFAULT;
1074		return 0;
1075	case MGSL_IOCSGPIO:
1076		return set_gpio(info, argp);
1077	case MGSL_IOCGGPIO:
1078		return get_gpio(info, argp);
1079	case MGSL_IOCWAITGPIO:
1080		return wait_gpio(info, argp);
1081	}
1082	mutex_lock(&info->port.mutex);
1083	switch (cmd) {
1084	case MGSL_IOCGPARAMS:
1085		ret = get_params(info, argp);
1086		break;
1087	case MGSL_IOCSPARAMS:
1088		ret = set_params(info, argp);
1089		break;
1090	case MGSL_IOCGTXIDLE:
1091		ret = get_txidle(info, argp);
1092		break;
1093	case MGSL_IOCSTXIDLE:
1094		ret = set_txidle(info, (int)arg);
1095		break;
1096	case MGSL_IOCTXENABLE:
1097		ret = tx_enable(info, (int)arg);
1098		break;
1099	case MGSL_IOCRXENABLE:
1100		ret = rx_enable(info, (int)arg);
1101		break;
1102	case MGSL_IOCTXABORT:
1103		ret = tx_abort(info);
1104		break;
1105	case MGSL_IOCGSTATS:
1106		ret = get_stats(info, argp);
1107		break;
1108	case MGSL_IOCGIF:
1109		ret = get_interface(info, argp);
1110		break;
1111	case MGSL_IOCSIF:
1112		ret = set_interface(info,(int)arg);
1113		break;
1114	default:
1115		ret = -ENOIOCTLCMD;
1116	}
1117	mutex_unlock(&info->port.mutex);
1118	return ret;
1119}
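/*
 * User-space usage sketch (illustrative, not part of the driver): reading and
 * updating the synchronous parameters through the ioctl interface, assuming
 * the default "ttySLG" device naming.
 *
 *   int fd = open("/dev/ttySLG0", O_RDWR | O_NONBLOCK);
 *   MGSL_PARAMS params;
 *   if (fd >= 0 && ioctl(fd, MGSL_IOCGPARAMS, &params) == 0) {
 *           params.mode     = MGSL_MODE_HDLC;
 *           params.crc_type = HDLC_CRC_16_CCITT;
 *           ioctl(fd, MGSL_IOCSPARAMS, &params);
 *   }
 */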
1120
1121/*
1122 * support for 32 bit ioctl calls on 64 bit systems
1123 */
1124#ifdef CONFIG_COMPAT
1125static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *user_params)
1126{
1127	struct MGSL_PARAMS32 tmp_params;
1128
1129	DBGINFO(("%s get_params32\n", info->device_name));
1130	tmp_params.mode            = (compat_ulong_t)info->params.mode;
1131	tmp_params.loopback        = info->params.loopback;
1132	tmp_params.flags           = info->params.flags;
1133	tmp_params.encoding        = info->params.encoding;
1134	tmp_params.clock_speed     = (compat_ulong_t)info->params.clock_speed;
1135	tmp_params.addr_filter     = info->params.addr_filter;
1136	tmp_params.crc_type        = info->params.crc_type;
1137	tmp_params.preamble_length = info->params.preamble_length;
1138	tmp_params.preamble        = info->params.preamble;
1139	tmp_params.data_rate       = (compat_ulong_t)info->params.data_rate;
1140	tmp_params.data_bits       = info->params.data_bits;
1141	tmp_params.stop_bits       = info->params.stop_bits;
1142	tmp_params.parity          = info->params.parity;
1143	if (copy_to_user(user_params, &tmp_params, sizeof(struct MGSL_PARAMS32)))
1144		return -EFAULT;
1145	return 0;
1146}
1147
1148static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *new_params)
1149{
1150	struct MGSL_PARAMS32 tmp_params;
1151
1152	DBGINFO(("%s set_params32\n", info->device_name));
1153	if (copy_from_user(&tmp_params, new_params, sizeof(struct MGSL_PARAMS32)))
1154		return -EFAULT;
1155
1156	spin_lock(&info->lock);
1157	if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) {
1158		info->base_clock = tmp_params.clock_speed;
1159	} else {
1160		info->params.mode            = tmp_params.mode;
1161		info->params.loopback        = tmp_params.loopback;
1162		info->params.flags           = tmp_params.flags;
1163		info->params.encoding        = tmp_params.encoding;
1164		info->params.clock_speed     = tmp_params.clock_speed;
1165		info->params.addr_filter     = tmp_params.addr_filter;
1166		info->params.crc_type        = tmp_params.crc_type;
1167		info->params.preamble_length = tmp_params.preamble_length;
1168		info->params.preamble        = tmp_params.preamble;
1169		info->params.data_rate       = tmp_params.data_rate;
1170		info->params.data_bits       = tmp_params.data_bits;
1171		info->params.stop_bits       = tmp_params.stop_bits;
1172		info->params.parity          = tmp_params.parity;
1173	}
1174	spin_unlock(&info->lock);
1175
1176	program_hw(info);
1177
1178	return 0;
1179}
1180
1181static long slgt_compat_ioctl(struct tty_struct *tty, struct file *file,
1182			 unsigned int cmd, unsigned long arg)
1183{
1184	struct slgt_info *info = tty->driver_data;
1185	int rc = -ENOIOCTLCMD;
1186
1187	if (sanity_check(info, tty->name, "compat_ioctl"))
1188		return -ENODEV;
1189	DBGINFO(("%s compat_ioctl() cmd=%08X\n", info->device_name, cmd));
1190
1191	switch (cmd) {
1192
1193	case MGSL_IOCSPARAMS32:
1194		rc = set_params32(info, compat_ptr(arg));
1195		break;
1196
1197	case MGSL_IOCGPARAMS32:
1198		rc = get_params32(info, compat_ptr(arg));
1199		break;
1200
1201	case MGSL_IOCGPARAMS:
1202	case MGSL_IOCSPARAMS:
1203	case MGSL_IOCGTXIDLE:
1204	case MGSL_IOCGSTATS:
1205	case MGSL_IOCWAITEVENT:
1206	case MGSL_IOCGIF:
1207	case MGSL_IOCSGPIO:
1208	case MGSL_IOCGGPIO:
1209	case MGSL_IOCWAITGPIO:
1210	case TIOCGICOUNT:
1211		rc = ioctl(tty, file, cmd, (unsigned long)(compat_ptr(arg)));
1212		break;
1213
1214	case MGSL_IOCSTXIDLE:
1215	case MGSL_IOCTXENABLE:
1216	case MGSL_IOCRXENABLE:
1217	case MGSL_IOCTXABORT:
1218	case TIOCMIWAIT:
1219	case MGSL_IOCSIF:
1220		rc = ioctl(tty, file, cmd, arg);
1221		break;
1222	}
1223
1224	DBGINFO(("%s compat_ioctl() cmd=%08X rc=%d\n", info->device_name, cmd, rc));
1225	return rc;
1226}
1227#else
1228#define slgt_compat_ioctl NULL
1229#endif /* ifdef CONFIG_COMPAT */
1230
1231/*
1232 * proc fs support
1233 */
1234static inline void line_info(struct seq_file *m, struct slgt_info *info)
1235{
1236	char stat_buf[30];
1237	unsigned long flags;
1238
1239	seq_printf(m, "%s: IO=%08X IRQ=%d MaxFrameSize=%u\n",
1240		      info->device_name, info->phys_reg_addr,
1241		      info->irq_level, info->max_frame_size);
1242
1243	/* output current serial signal states */
1244	spin_lock_irqsave(&info->lock,flags);
1245	get_signals(info);
1246	spin_unlock_irqrestore(&info->lock,flags);
1247
1248	stat_buf[0] = 0;
1249	stat_buf[1] = 0;
1250	if (info->signals & SerialSignal_RTS)
1251		strcat(stat_buf, "|RTS");
1252	if (info->signals & SerialSignal_CTS)
1253		strcat(stat_buf, "|CTS");
1254	if (info->signals & SerialSignal_DTR)
1255		strcat(stat_buf, "|DTR");
1256	if (info->signals & SerialSignal_DSR)
1257		strcat(stat_buf, "|DSR");
1258	if (info->signals & SerialSignal_DCD)
1259		strcat(stat_buf, "|CD");
1260	if (info->signals & SerialSignal_RI)
1261		strcat(stat_buf, "|RI");
1262
1263	if (info->params.mode != MGSL_MODE_ASYNC) {
1264		seq_printf(m, "\tHDLC txok:%d rxok:%d",
1265			       info->icount.txok, info->icount.rxok);
1266		if (info->icount.txunder)
1267			seq_printf(m, " txunder:%d", info->icount.txunder);
1268		if (info->icount.txabort)
1269			seq_printf(m, " txabort:%d", info->icount.txabort);
1270		if (info->icount.rxshort)
1271			seq_printf(m, " rxshort:%d", info->icount.rxshort);
1272		if (info->icount.rxlong)
1273			seq_printf(m, " rxlong:%d", info->icount.rxlong);
1274		if (info->icount.rxover)
1275			seq_printf(m, " rxover:%d", info->icount.rxover);
1276		if (info->icount.rxcrc)
1277			seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
1278	} else {
1279		seq_printf(m, "\tASYNC tx:%d rx:%d",
1280			       info->icount.tx, info->icount.rx);
1281		if (info->icount.frame)
1282			seq_printf(m, " fe:%d", info->icount.frame);
1283		if (info->icount.parity)
1284			seq_printf(m, " pe:%d", info->icount.parity);
1285		if (info->icount.brk)
1286			seq_printf(m, " brk:%d", info->icount.brk);
1287		if (info->icount.overrun)
1288			seq_printf(m, " oe:%d", info->icount.overrun);
1289	}
1290
1291	/* Append serial signal status to end */
1292	seq_printf(m, " %s\n", stat_buf+1);
1293
1294	seq_printf(m, "\ttxactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
1295		       info->tx_active,info->bh_requested,info->bh_running,
1296		       info->pending_bh);
1297}
1298
1299/* Called to print information about devices
1300 */
1301static int synclink_gt_proc_show(struct seq_file *m, void *v)
1302{
1303	struct slgt_info *info;
1304
1305	seq_puts(m, "synclink_gt driver\n");
1306
1307	info = slgt_device_list;
1308	while( info ) {
1309		line_info(m, info);
1310		info = info->next_device;
1311	}
1312	return 0;
1313}
1314
1315static int synclink_gt_proc_open(struct inode *inode, struct file *file)
1316{
1317	return single_open(file, synclink_gt_proc_show, NULL);
1318}
1319
1320static const struct file_operations synclink_gt_proc_fops = {
1321	.owner		= THIS_MODULE,
1322	.open		= synclink_gt_proc_open,
1323	.read		= seq_read,
1324	.llseek		= seq_lseek,
1325	.release	= single_release,
1326};
1327
1328/*
1329 * return count of bytes in transmit buffer
1330 */
1331static int chars_in_buffer(struct tty_struct *tty)
1332{
1333	struct slgt_info *info = tty->driver_data;
1334	int count;
1335	if (sanity_check(info, tty->name, "chars_in_buffer"))
1336		return 0;
1337	count = tbuf_bytes(info);
1338	DBGINFO(("%s chars_in_buffer()=%d\n", info->device_name, count));
1339	return count;
1340}
1341
1342/*
1343 * signal remote device to throttle send data (our receive data)
1344 */
1345static void throttle(struct tty_struct * tty)
1346{
1347	struct slgt_info *info = tty->driver_data;
1348	unsigned long flags;
1349
1350	if (sanity_check(info, tty->name, "throttle"))
1351		return;
1352	DBGINFO(("%s throttle\n", info->device_name));
1353	if (I_IXOFF(tty))
1354		send_xchar(tty, STOP_CHAR(tty));
1355 	if (tty->termios->c_cflag & CRTSCTS) {
1356		spin_lock_irqsave(&info->lock,flags);
1357		info->signals &= ~SerialSignal_RTS;
1358	 	set_signals(info);
1359		spin_unlock_irqrestore(&info->lock,flags);
1360	}
1361}
1362
1363/*
1364 * signal remote device to stop throttling send data (our receive data)
1365 */
1366static void unthrottle(struct tty_struct * tty)
1367{
1368	struct slgt_info *info = tty->driver_data;
1369	unsigned long flags;
1370
1371	if (sanity_check(info, tty->name, "unthrottle"))
1372		return;
1373	DBGINFO(("%s unthrottle\n", info->device_name));
1374	if (I_IXOFF(tty)) {
1375		if (info->x_char)
1376			info->x_char = 0;
1377		else
1378			send_xchar(tty, START_CHAR(tty));
1379	}
1380 	if (tty->termios->c_cflag & CRTSCTS) {
1381		spin_lock_irqsave(&info->lock,flags);
1382		info->signals |= SerialSignal_RTS;
1383	 	set_signals(info);
1384		spin_unlock_irqrestore(&info->lock,flags);
1385	}
1386}
1387
1388/*
1389 * set or clear transmit break condition
1390 * break_state	-1=set break condition, 0=clear
1391 */
1392static int set_break(struct tty_struct *tty, int break_state)
1393{
1394	struct slgt_info *info = tty->driver_data;
1395	unsigned short value;
1396	unsigned long flags;
1397
1398	if (sanity_check(info, tty->name, "set_break"))
1399		return -EINVAL;
1400	DBGINFO(("%s set_break(%d)\n", info->device_name, break_state));
1401
1402	spin_lock_irqsave(&info->lock,flags);
1403	value = rd_reg16(info, TCR);
1404 	if (break_state == -1)
1405		value |= BIT6;
1406	else
1407		value &= ~BIT6;
1408	wr_reg16(info, TCR, value);
1409	spin_unlock_irqrestore(&info->lock,flags);
1410	return 0;
1411}
1412
1413#if SYNCLINK_GENERIC_HDLC
1414
1415/**
1416 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
1417 * set encoding and frame check sequence (FCS) options
1418 *
1419 * dev       pointer to network device structure
1420 * encoding  serial encoding setting
1421 * parity    FCS setting
1422 *
1423 * returns 0 if success, otherwise error code
1424 */
1425static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
1426			  unsigned short parity)
1427{
1428	struct slgt_info *info = dev_to_port(dev);
1429	unsigned char  new_encoding;
1430	unsigned short new_crctype;
1431
1432	/* return error if TTY interface open */
1433	if (info->port.count)
1434		return -EBUSY;
1435
1436	DBGINFO(("%s hdlcdev_attach\n", info->device_name));
1437
1438	switch (encoding)
1439	{
1440	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
1441	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
1442	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
1443	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
1444	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
1445	default: return -EINVAL;
1446	}
1447
1448	switch (parity)
1449	{
1450	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
1451	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
1452	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
1453	default: return -EINVAL;
1454	}
1455
1456	info->params.encoding = new_encoding;
1457	info->params.crc_type = new_crctype;
1458
1459	/* if network interface up, reprogram hardware */
1460	if (info->netcount)
1461		program_hw(info);
1462
1463	return 0;
1464}
1465
1466/**
1467 * called by generic HDLC layer to send frame
1468 *
1469 * skb  socket buffer containing HDLC frame
1470 * dev  pointer to network device structure
1471 */
1472static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
1473				      struct net_device *dev)
1474{
1475	struct slgt_info *info = dev_to_port(dev);
1476	unsigned long flags;
1477
1478	DBGINFO(("%s hdlc_xmit\n", dev->name));
1479
1480	if (!skb->len)
1481		return NETDEV_TX_OK;
1482
1483	/* stop sending until this frame completes */
1484	netif_stop_queue(dev);
1485
1486	/* update network statistics */
1487	dev->stats.tx_packets++;
1488	dev->stats.tx_bytes += skb->len;
1489
1490	/* save start time for transmit timeout detection */
1491	dev->trans_start = jiffies;
1492
1493	spin_lock_irqsave(&info->lock, flags);
1494	tx_load(info, skb->data, skb->len);
1495	spin_unlock_irqrestore(&info->lock, flags);
1496
1497	/* done with socket buffer, so free it */
1498	dev_kfree_skb(skb);
1499
1500	return NETDEV_TX_OK;
1501}
1502
1503/**
1504 * called by network layer when interface enabled
1505 * claim resources and initialize hardware
1506 *
1507 * dev  pointer to network device structure
1508 *
1509 * returns 0 if success, otherwise error code
1510 */
1511static int hdlcdev_open(struct net_device *dev)
1512{
1513	struct slgt_info *info = dev_to_port(dev);
1514	int rc;
1515	unsigned long flags;
1516
1517	if (!try_module_get(THIS_MODULE))
1518		return -EBUSY;
1519
1520	DBGINFO(("%s hdlcdev_open\n", dev->name));
1521
1522	/* generic HDLC layer open processing */
1523	if ((rc = hdlc_open(dev)))
1524		return rc;
1525
1526	/* arbitrate between network and tty opens */
1527	spin_lock_irqsave(&info->netlock, flags);
1528	if (info->port.count != 0 || info->netcount != 0) {
1529		DBGINFO(("%s hdlc_open busy\n", dev->name));
1530		spin_unlock_irqrestore(&info->netlock, flags);
1531		return -EBUSY;
1532	}
1533	info->netcount=1;
1534	spin_unlock_irqrestore(&info->netlock, flags);
1535
1536	/* claim resources and init adapter */
1537	if ((rc = startup(info)) != 0) {
1538		spin_lock_irqsave(&info->netlock, flags);
1539		info->netcount=0;
1540		spin_unlock_irqrestore(&info->netlock, flags);
1541		return rc;
1542	}
1543
1544	/* assert DTR and RTS, apply hardware settings */
1545	info->signals |= SerialSignal_RTS + SerialSignal_DTR;
1546	program_hw(info);
1547
1548	/* enable network layer transmit */
1549	dev->trans_start = jiffies;
1550	netif_start_queue(dev);
1551
1552	/* inform generic HDLC layer of current DCD status */
1553	spin_lock_irqsave(&info->lock, flags);
1554	get_signals(info);
1555	spin_unlock_irqrestore(&info->lock, flags);
1556	if (info->signals & SerialSignal_DCD)
1557		netif_carrier_on(dev);
1558	else
1559		netif_carrier_off(dev);
1560	return 0;
1561}
1562
1563/**
1564 * called by network layer when interface is disabled
1565 * shutdown hardware and release resources
1566 *
1567 * dev  pointer to network device structure
1568 *
1569 * returns 0 if success, otherwise error code
1570 */
1571static int hdlcdev_close(struct net_device *dev)
1572{
1573	struct slgt_info *info = dev_to_port(dev);
1574	unsigned long flags;
1575
1576	DBGINFO(("%s hdlcdev_close\n", dev->name));
1577
1578	netif_stop_queue(dev);
1579
1580	/* shutdown adapter and release resources */
1581	shutdown(info);
1582
1583	hdlc_close(dev);
1584
1585	spin_lock_irqsave(&info->netlock, flags);
1586	info->netcount=0;
1587	spin_unlock_irqrestore(&info->netlock, flags);
1588
1589	module_put(THIS_MODULE);
1590	return 0;
1591}
1592
1593/**
1594 * called by network layer to process IOCTL call to network device
1595 *
1596 * dev  pointer to network device structure
1597 * ifr  pointer to network interface request structure
1598 * cmd  IOCTL command code
1599 *
1600 * returns 0 if success, otherwise error code
1601 */
1602static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1603{
1604	const size_t size = sizeof(sync_serial_settings);
1605	sync_serial_settings new_line;
1606	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
1607	struct slgt_info *info = dev_to_port(dev);
1608	unsigned int flags;
1609
1610	DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
1611
1612	/* return error if TTY interface open */
1613	if (info->port.count)
1614		return -EBUSY;
1615
1616	if (cmd != SIOCWANDEV)
1617		return hdlc_ioctl(dev, ifr, cmd);
1618
1619	switch(ifr->ifr_settings.type) {
1620	case IF_GET_IFACE: /* return current sync_serial_settings */
1621
1622		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
1623		if (ifr->ifr_settings.size < size) {
1624			ifr->ifr_settings.size = size; /* data size wanted */
1625			return -ENOBUFS;
1626		}
1627
1628		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1629					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
1630					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1631					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
1632
1633		switch (flags){
1634		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
1635		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
1636		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
1637		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
1638		default: new_line.clock_type = CLOCK_DEFAULT;
1639		}
1640
1641		new_line.clock_rate = info->params.clock_speed;
1642		new_line.loopback   = info->params.loopback ? 1:0;
1643
1644		if (copy_to_user(line, &new_line, size))
1645			return -EFAULT;
1646		return 0;
1647
1648	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
1649
1650		if(!capable(CAP_NET_ADMIN))
1651			return -EPERM;
1652		if (copy_from_user(&new_line, line, size))
1653			return -EFAULT;
1654
1655		switch (new_line.clock_type)
1656		{
1657		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
1658		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
1659		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
1660		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
1661		case CLOCK_DEFAULT:  flags = info->params.flags &
1662					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1663					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
1664					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1665					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
1666		default: return -EINVAL;
1667		}
1668
1669		if (new_line.loopback != 0 && new_line.loopback != 1)
1670			return -EINVAL;
1671
1672		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
1673					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
1674					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
1675					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
1676		info->params.flags |= flags;
1677
1678		info->params.loopback = new_line.loopback;
1679
1680		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
1681			info->params.clock_speed = new_line.clock_rate;
1682		else
1683			info->params.clock_speed = 0;
1684
1685		/* if network interface up, reprogram hardware */
1686		if (info->netcount)
1687			program_hw(info);
1688		return 0;
1689
1690	default:
1691		return hdlc_ioctl(dev, ifr, cmd);
1692	}
1693}
1694
1695/**
1696 * called by network layer when transmit timeout is detected
1697 *
1698 * dev  pointer to network device structure
1699 */
1700static void hdlcdev_tx_timeout(struct net_device *dev)
1701{
1702	struct slgt_info *info = dev_to_port(dev);
1703	unsigned long flags;
1704
1705	DBGINFO(("%s hdlcdev_tx_timeout\n", dev->name));
1706
1707	dev->stats.tx_errors++;
1708	dev->stats.tx_aborted_errors++;
1709
1710	spin_lock_irqsave(&info->lock,flags);
1711	tx_stop(info);
1712	spin_unlock_irqrestore(&info->lock,flags);
1713
1714	netif_wake_queue(dev);
1715}
1716
1717/**
1718 * called by device driver when transmit completes
1719 * reenable network layer transmit if stopped
1720 *
1721 * info  pointer to device instance information
1722 */
1723static void hdlcdev_tx_done(struct slgt_info *info)
1724{
1725	if (netif_queue_stopped(info->netdev))
1726		netif_wake_queue(info->netdev);
1727}
1728
1729/**
1730 * called by device driver when frame received
1731 * pass frame to network layer
1732 *
1733 * info  pointer to device instance information
1734 * buf   pointer to buffer containing frame data
1735 * size  count of data bytes in buf
1736 */
1737static void hdlcdev_rx(struct slgt_info *info, char *buf, int size)
1738{
1739	struct sk_buff *skb = dev_alloc_skb(size);
1740	struct net_device *dev = info->netdev;
1741
1742	DBGINFO(("%s hdlcdev_rx\n", dev->name));
1743
1744	if (skb == NULL) {
1745		DBGERR(("%s: can't alloc skb, drop packet\n", dev->name));
1746		dev->stats.rx_dropped++;
1747		return;
1748	}
1749
1750	memcpy(skb_put(skb, size), buf, size);
1751
1752	skb->protocol = hdlc_type_trans(skb, dev);
1753
1754	dev->stats.rx_packets++;
1755	dev->stats.rx_bytes += size;
1756
1757	netif_rx(skb);
1758}
1759
1760static const struct net_device_ops hdlcdev_ops = {
1761	.ndo_open       = hdlcdev_open,
1762	.ndo_stop       = hdlcdev_close,
1763	.ndo_change_mtu = hdlc_change_mtu,
1764	.ndo_start_xmit = hdlc_start_xmit,
1765	.ndo_do_ioctl   = hdlcdev_ioctl,
1766	.ndo_tx_timeout = hdlcdev_tx_timeout,
1767};
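/*
 * Note (added for clarity): .ndo_start_xmit points at the generic HDLC
 * layer's hdlc_start_xmit(); the attached protocol eventually hands frames
 * back to this driver through hdlc->xmit, which hdlcdev_init() below sets to
 * hdlcdev_xmit().
 */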
1768
1769/**
1770 * called by device driver when adding device instance
1771 * do generic HDLC initialization
1772 *
1773 * info  pointer to device instance information
1774 *
1775 * returns 0 if success, otherwise error code
1776 */
1777static int hdlcdev_init(struct slgt_info *info)
1778{
1779	int rc;
1780	struct net_device *dev;
1781	hdlc_device *hdlc;
1782
1783	/* allocate and initialize network and HDLC layer objects */
1784
1785	if (!(dev = alloc_hdlcdev(info))) {
1786		printk(KERN_ERR "%s hdlc device alloc failure\n", info->device_name);
1787		return -ENOMEM;
1788	}
1789
1790	/* for network layer reporting purposes only */
1791	dev->mem_start = info->phys_reg_addr;
1792	dev->mem_end   = info->phys_reg_addr + SLGT_REG_SIZE - 1;
1793	dev->irq       = info->irq_level;
1794
1795	/* network layer callbacks and settings */
1796	dev->netdev_ops	    = &hdlcdev_ops;
1797	dev->watchdog_timeo = 10 * HZ;
1798	dev->tx_queue_len   = 50;
1799
1800	/* generic HDLC layer callbacks and settings */
1801	hdlc         = dev_to_hdlc(dev);
1802	hdlc->attach = hdlcdev_attach;
1803	hdlc->xmit   = hdlcdev_xmit;
1804
1805	/* register objects with HDLC layer */
1806	if ((rc = register_hdlc_device(dev))) {
1807		printk(KERN_WARNING "%s: unable to register hdlc device\n", __FILE__);
1808		free_netdev(dev);
1809		return rc;
1810	}
1811
1812	info->netdev = dev;
1813	return 0;
1814}
1815
1816/**
1817 * called by device driver when removing device instance
1818 * do generic HDLC cleanup
1819 *
1820 * info  pointer to device instance information
1821 */
1822static void hdlcdev_exit(struct slgt_info *info)
1823{
1824	unregister_hdlc_device(info->netdev);
1825	free_netdev(info->netdev);
1826	info->netdev = NULL;
1827}
1828
1829#endif /* if SYNCLINK_GENERIC_HDLC */
1830
1831/*
1832 * get async data from rx DMA buffers
1833 */
1834static void rx_async(struct slgt_info *info)
1835{
1836 	struct tty_struct *tty = info->port.tty;
1837 	struct mgsl_icount *icount = &info->icount;
1838	unsigned int start, end;
1839	unsigned char *p;
1840	unsigned char status;
1841	struct slgt_desc *bufs = info->rbufs;
1842	int i, count;
1843	int chars = 0;
1844	int stat;
1845	unsigned char ch;
1846
1847	start = end = info->rbuf_current;
1848
1849	while(desc_complete(bufs[end])) {
1850		count = desc_count(bufs[end]) - info->rbuf_index;
1851		p     = bufs[end].buf + info->rbuf_index;
1852
1853		DBGISR(("%s rx_async count=%d\n", info->device_name, count));
1854		DBGDATA(info, p, count, "rx");
1855
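		/* in async mode the hardware stores a status byte after each
		 * data byte, so step through the buffer two bytes at a time
		 */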
1856		for(i=0 ; i < count; i+=2, p+=2) {
1857			ch = *p;
1858			icount->rx++;
1859
1860			stat = 0;
1861
1862			if ((status = *(p+1) & (BIT1 + BIT0))) {
1863				if (status & BIT1)
1864					icount->parity++;
1865				else if (status & BIT0)
1866					icount->frame++;
1867				/* discard char if tty control flags say so */
1868				if (status & info->ignore_status_mask)
1869					continue;
1870				if (status & BIT1)
1871					stat = TTY_PARITY;
1872				else if (status & BIT0)
1873					stat = TTY_FRAME;
1874			}
1875			if (tty) {
1876				tty_insert_flip_char(tty, ch, stat);
1877				chars++;
1878			}
1879		}
1880
1881		if (i < count) {
1882			/* receive buffer not completed */
1883			info->rbuf_index += i;
1884			mod_timer(&info->rx_timer, jiffies + 1);
1885			break;
1886		}
1887
1888		info->rbuf_index = 0;
1889		free_rbufs(info, end, end);
1890
1891		if (++end == info->rbuf_count)
1892			end = 0;
1893
1894		/* if entire list searched then no frame available */
1895		if (end == start)
1896			break;
1897	}
1898
1899	if (tty && chars)
1900		tty_flip_buffer_push(tty);
1901}
1902
1903/*
1904 * return next bottom half action to perform
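 * pending work is serviced in priority order: receive, transmit, status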
1905 */
1906static int bh_action(struct slgt_info *info)
1907{
1908	unsigned long flags;
1909	int rc;
1910
1911	spin_lock_irqsave(&info->lock,flags);
1912
1913	if (info->pending_bh & BH_RECEIVE) {
1914		info->pending_bh &= ~BH_RECEIVE;
1915		rc = BH_RECEIVE;
1916	} else if (info->pending_bh & BH_TRANSMIT) {
1917		info->pending_bh &= ~BH_TRANSMIT;
1918		rc = BH_TRANSMIT;
1919	} else if (info->pending_bh & BH_STATUS) {
1920		info->pending_bh &= ~BH_STATUS;
1921		rc = BH_STATUS;
1922	} else {
1923		/* Mark BH routine as complete */
1924		info->bh_running = false;
1925		info->bh_requested = false;
1926		rc = 0;
1927	}
1928
1929	spin_unlock_irqrestore(&info->lock,flags);
1930
1931	return rc;
1932}
1933
1934/*
1935 * perform bottom half processing
1936 */
1937static void bh_handler(struct work_struct *work)
1938{
1939	struct slgt_info *info = container_of(work, struct slgt_info, task);
1940	int action;
1941
1942	if (!info)
1943		return;
1944	info->bh_running = true;
1945
1946	while((action = bh_action(info))) {
1947		switch (action) {
1948		case BH_RECEIVE:
1949			DBGBH(("%s bh receive\n", info->device_name));
1950			switch(info->params.mode) {
1951			case MGSL_MODE_ASYNC:
1952				rx_async(info);
1953				break;
1954			case MGSL_MODE_HDLC:
1955				while(rx_get_frame(info));
1956				break;
1957			case MGSL_MODE_RAW:
1958			case MGSL_MODE_MONOSYNC:
1959			case MGSL_MODE_BISYNC:
1960				while(rx_get_buf(info));
1961				break;
1962			}
1963			/* restart receiver if rx DMA buffers exhausted */
1964			if (info->rx_restart)
1965				rx_start(info);
1966			break;
1967		case BH_TRANSMIT:
1968			bh_transmit(info);
1969			break;
1970		case BH_STATUS:
1971			DBGBH(("%s bh status\n", info->device_name));
1972			info->ri_chkcount = 0;
1973			info->dsr_chkcount = 0;
1974			info->dcd_chkcount = 0;
1975			info->cts_chkcount = 0;
1976			break;
1977		default:
1978			DBGBH(("%s unknown action\n", info->device_name));
1979			break;
1980		}
1981	}
1982	DBGBH(("%s bh_handler exit\n", info->device_name));
1983}
1984
1985static void bh_transmit(struct slgt_info *info)
1986{
1987	struct tty_struct *tty = info->port.tty;
1988
1989	DBGBH(("%s bh_transmit\n", info->device_name));
1990	if (tty)
1991		tty_wakeup(tty);
1992}
1993
1994static void dsr_change(struct slgt_info *info, unsigned short status)
1995{
1996	if (status & BIT3) {
1997		info->signals |= SerialSignal_DSR;
1998		info->input_signal_events.dsr_up++;
1999	} else {
2000		info->signals &= ~SerialSignal_DSR;
2001		info->input_signal_events.dsr_down++;
2002	}
2003	DBGISR(("dsr_change %s signals=%04X\n", info->device_name, info->signals));
2004	if ((info->dsr_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2005		slgt_irq_off(info, IRQ_DSR);
2006		return;
2007	}
2008	info->icount.dsr++;
2009	wake_up_interruptible(&info->status_event_wait_q);
2010	wake_up_interruptible(&info->event_wait_q);
2011	info->pending_bh |= BH_STATUS;
2012}
2013
2014static void cts_change(struct slgt_info *info, unsigned short status)
2015{
2016	if (status & BIT2) {
2017		info->signals |= SerialSignal_CTS;
2018		info->input_signal_events.cts_up++;
2019	} else {
2020		info->signals &= ~SerialSignal_CTS;
2021		info->input_signal_events.cts_down++;
2022	}
2023	DBGISR(("cts_change %s signals=%04X\n", info->device_name, info->signals));
2024	if ((info->cts_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2025		slgt_irq_off(info, IRQ_CTS);
2026		return;
2027	}
2028	info->icount.cts++;
2029	wake_up_interruptible(&info->status_event_wait_q);
2030	wake_up_interruptible(&info->event_wait_q);
2031	info->pending_bh |= BH_STATUS;
2032
2033	if (info->port.flags & ASYNC_CTS_FLOW) {
2034		if (info->port.tty) {
2035			if (info->port.tty->hw_stopped) {
2036				if (info->signals & SerialSignal_CTS) {
2037		 			info->port.tty->hw_stopped = 0;
2038					info->pending_bh |= BH_TRANSMIT;
2039					return;
2040				}
2041			} else {
2042				if (!(info->signals & SerialSignal_CTS))
2043		 			info->port.tty->hw_stopped = 1;
2044			}
2045		}
2046	}
2047}
2048
2049static void dcd_change(struct slgt_info *info, unsigned short status)
2050{
2051	if (status & BIT1) {
2052		info->signals |= SerialSignal_DCD;
2053		info->input_signal_events.dcd_up++;
2054	} else {
2055		info->signals &= ~SerialSignal_DCD;
2056		info->input_signal_events.dcd_down++;
2057	}
2058	DBGISR(("dcd_change %s signals=%04X\n", info->device_name, info->signals));
2059	if ((info->dcd_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2060		slgt_irq_off(info, IRQ_DCD);
2061		return;
2062	}
2063	info->icount.dcd++;
2064#if SYNCLINK_GENERIC_HDLC
2065	if (info->netcount) {
2066		if (info->signals & SerialSignal_DCD)
2067			netif_carrier_on(info->netdev);
2068		else
2069			netif_carrier_off(info->netdev);
2070	}
2071#endif
2072	wake_up_interruptible(&info->status_event_wait_q);
2073	wake_up_interruptible(&info->event_wait_q);
2074	info->pending_bh |= BH_STATUS;
2075
2076	if (info->port.flags & ASYNC_CHECK_CD) {
2077		if (info->signals & SerialSignal_DCD)
2078			wake_up_interruptible(&info->port.open_wait);
2079		else {
2080			if (info->port.tty)
2081				tty_hangup(info->port.tty);
2082		}
2083	}
2084}
2085
2086static void ri_change(struct slgt_info *info, unsigned short status)
2087{
2088	if (status & BIT0) {
2089		info->signals |= SerialSignal_RI;
2090		info->input_signal_events.ri_up++;
2091	} else {
2092		info->signals &= ~SerialSignal_RI;
2093		info->input_signal_events.ri_down++;
2094	}
2095	DBGISR(("ri_change %s signals=%04X\n", info->device_name, info->signals));
2096	if ((info->ri_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
2097		slgt_irq_off(info, IRQ_RI);
2098		return;
2099	}
2100	info->icount.rng++;
2101	wake_up_interruptible(&info->status_event_wait_q);
2102	wake_up_interruptible(&info->event_wait_q);
2103	info->pending_bh |= BH_STATUS;
2104}
2105
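/*
 * receive data available in PIO mode (rx_pio set)
 * read data from the receive data register into the current
 * receive buffer while the receive data available status is set
 */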
2106static void isr_rxdata(struct slgt_info *info)
2107{
2108	unsigned int count = info->rbuf_fill_count;
2109	unsigned int i = info->rbuf_fill_index;
2110	unsigned short reg;
2111
2112	while (rd_reg16(info, SSR) & IRQ_RXDATA) {
2113		reg = rd_reg16(info, RDR);
2114		DBGISR(("isr_rxdata %s RDR=%04X\n", info->device_name, reg));
2115		if (desc_complete(info->rbufs[i])) {
2116			/* all buffers full */
2117			rx_stop(info);
2118			info->rx_restart = true;
2119			continue;
2120		}
2121		info->rbufs[i].buf[count++] = (unsigned char)reg;
2122		/* async mode saves status byte to buffer for each data byte */
2123		if (info->params.mode == MGSL_MODE_ASYNC)
2124			info->rbufs[i].buf[count++] = (unsigned char)(reg >> 8);
2125		if (count == info->rbuf_fill_level || (reg & BIT10)) {
2126			/* buffer full or end of frame */
2127			set_desc_count(info->rbufs[i], count);
2128			set_desc_status(info->rbufs[i], BIT15 | (reg >> 8));
2129			info->rbuf_fill_count = count = 0;
2130			if (++i == info->rbuf_count)
2131				i = 0;
2132			info->pending_bh |= BH_RECEIVE;
2133		}
2134	}
2135
2136	info->rbuf_fill_index = i;
2137	info->rbuf_fill_count = count;
2138}
2139
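/*
 * service serial controller status interrupts (SSR) for one port
 */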
2140static void isr_serial(struct slgt_info *info)
2141{
2142	unsigned short status = rd_reg16(info, SSR);
2143
2144	DBGISR(("%s isr_serial status=%04X\n", info->device_name, status));
2145
2146	wr_reg16(info, SSR, status); /* clear pending */
2147
2148	info->irq_occurred = true;
2149
2150	if (info->params.mode == MGSL_MODE_ASYNC) {
2151		if (status & IRQ_TXIDLE) {
2152			if (info->tx_active)
2153				isr_txeom(info, status);
2154		}
2155		if (info->rx_pio && (status & IRQ_RXDATA))
2156			isr_rxdata(info);
2157		if ((status & IRQ_RXBREAK) && (status & RXBREAK)) {
2158			info->icount.brk++;
2159			/* process break detection if tty control allows */
2160			if (info->port.tty) {
2161				if (!(status & info->ignore_status_mask)) {
2162					if (info->read_status_mask & MASK_BREAK) {
2163						tty_insert_flip_char(info->port.tty, 0, TTY_BREAK);
2164						if (info->port.flags & ASYNC_SAK)
2165							do_SAK(info->port.tty);
2166					}
2167				}
2168			}
2169		}
2170	} else {
2171		if (status & (IRQ_TXIDLE + IRQ_TXUNDER))
2172			isr_txeom(info, status);
2173		if (info->rx_pio && (status & IRQ_RXDATA))
2174			isr_rxdata(info);
2175		if (status & IRQ_RXIDLE) {
2176			if (status & RXIDLE)
2177				info->icount.rxidle++;
2178			else
2179				info->icount.exithunt++;
2180			wake_up_interruptible(&info->event_wait_q);
2181		}
2182
2183		if (status & IRQ_RXOVER)
2184			rx_start(info);
2185	}
2186
2187	if (status & IRQ_DSR)
2188		dsr_change(info, status);
2189	if (status & IRQ_CTS)
2190		cts_change(info, status);
2191	if (status & IRQ_DCD)
2192		dcd_change(info, status);
2193	if (status & IRQ_RI)
2194		ri_change(info, status);
2195}
2196
2197static void isr_rdma(struct slgt_info *info)
2198{
2199	unsigned int status = rd_reg32(info, RDCSR);
2200
2201	DBGISR(("%s isr_rdma status=%08x\n", info->device_name, status));
2202
2203	/* RDCSR (rx DMA control/status)
2204	 *
2205	 * 31..07  reserved
2206	 * 06      save status byte to DMA buffer
2207	 * 05      error
2208	 * 04      eol (end of list)
2209	 * 03      eob (end of buffer)
2210	 * 02      IRQ enable
2211	 * 01      reset
2212	 * 00      enable
2213	 */
2214	wr_reg32(info, RDCSR, status);	/* clear pending */
2215
2216	if (status & (BIT5 + BIT4)) {
2217		DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name));
2218		info->rx_restart = true;
2219	}
2220	info->pending_bh |= BH_RECEIVE;
2221}
2222
2223static void isr_tdma(struct slgt_info *info)
2224{
2225	unsigned int status = rd_reg32(info, TDCSR);
2226
2227	DBGISR(("%s isr_tdma status=%08x\n", info->device_name, status));
2228
2229	/* TDCSR (tx DMA control/status)
2230	 *
2231	 * 31..06  reserved
2232	 * 05      error
2233	 * 04      eol (end of list)
2234	 * 03      eob (end of buffer)
2235	 * 02      IRQ enable
2236	 * 01      reset
2237	 * 00      enable
2238	 */
2239	wr_reg32(info, TDCSR, status);	/* clear pending */
2240
2241	if (status & (BIT5 + BIT4 + BIT3)) {
2242		// another transmit buffer has completed
2243		// run bottom half to get more send data from user
2244		info->pending_bh |= BH_TRANSMIT;
2245	}
2246}
2247
2248/*
2249 * return true if there are unsent tx DMA buffers, otherwise false
2250 *
2251 * if there are unsent buffers then info->tbuf_start
2252 * is set to index of first unsent buffer
2253 */
2254static bool unsent_tbufs(struct slgt_info *info)
2255{
2256	unsigned int i = info->tbuf_current;
2257	bool rc = false;
2258
2259	/*
2260	 * search backwards from last loaded buffer (precedes tbuf_current)
2261	 * for first unsent buffer (desc_count > 0)
2262	 */
2263
2264	do {
2265		if (i)
2266			i--;
2267		else
2268			i = info->tbuf_count - 1;
2269		if (!desc_count(info->tbufs[i]))
2270			break;
2271		info->tbuf_start = i;
2272		rc = true;
2273	} while (i != info->tbuf_current);
2274
2275	return rc;
2276}
2277
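/*
 * transmit end of message: handle transmitter idle and underrun
 */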
2278static void isr_txeom(struct slgt_info *info, unsigned short status)
2279{
2280	DBGISR(("%s txeom status=%04x\n", info->device_name, status));
2281
2282	slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
2283	tdma_reset(info);
2284	if (status & IRQ_TXUNDER) {
2285		unsigned short val = rd_reg16(info, TCR);
2286		wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
2287		wr_reg16(info, TCR, val); /* clear reset bit */
2288	}
2289
2290	if (info->tx_active) {
2291		if (info->params.mode != MGSL_MODE_ASYNC) {
2292			if (status & IRQ_TXUNDER)
2293				info->icount.txunder++;
2294			else if (status & IRQ_TXIDLE)
2295				info->icount.txok++;
2296		}
2297
2298		if (unsent_tbufs(info)) {
2299			tx_start(info);
2300			update_tx_timer(info);
2301			return;
2302		}
2303		info->tx_active = false;
2304
2305		del_timer(&info->tx_timer);
2306
2307		if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
2308			info->signals &= ~SerialSignal_RTS;
2309			info->drop_rts_on_tx_done = false;
2310			set_signals(info);
2311		}
2312
2313#if SYNCLINK_GENERIC_HDLC
2314		if (info->netcount)
2315			hdlcdev_tx_done(info);
2316		else
2317#endif
2318		{
2319			if (info->port.tty && (info->port.tty->stopped || info->port.tty->hw_stopped)) {
2320				tx_stop(info);
2321				return;
2322			}
2323			info->pending_bh |= BH_TRANSMIT;
2324		}
2325	}
2326}
2327
2328static void isr_gpio(struct slgt_info *info, unsigned int changed, unsigned int state)
2329{
2330	struct cond_wait *w, *prev;
2331
2332	/* wake processes waiting for specific transitions */
2333	for (w = info->gpio_wait_q, prev = NULL ; w != NULL ; w = w->next) {
2334		if (w->data & changed) {
2335			w->data = state;
2336			wake_up_interruptible(&w->q);
2337			if (prev != NULL)
2338				prev->next = w->next;
2339			else
2340				info->gpio_wait_q = w->next;
2341		} else
2342			prev = w;
2343	}
2344}
2345
2346/* interrupt service routine
2347 *
2348 * 	dummy	interrupt number (not used)
2349 * 	dev_id	device ID supplied during interrupt registration
2350 */
2351static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
2352{
2353	struct slgt_info *info = dev_id;
2354	unsigned int gsr;
2355	unsigned int i;
2356
2357	DBGISR(("slgt_interrupt irq=%d entry\n", info->irq_level));
2358
2359	spin_lock(&info->lock);
2360
2361	while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
2362		DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
2363		info->irq_occurred = true;
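		/* per-port interrupt sources in GSR: bit 8+i = serial,
		 * bits 16+2*i and 17+2*i = rx and tx DMA for port i
		 */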
2364		for(i=0; i < info->port_count ; i++) {
2365			if (info->port_array[i] == NULL)
2366				continue;
2367			if (gsr & (BIT8 << i))
2368				isr_serial(info->port_array[i]);
2369			if (gsr & (BIT16 << (i*2)))
2370				isr_rdma(info->port_array[i]);
2371			if (gsr & (BIT17 << (i*2)))
2372				isr_tdma(info->port_array[i]);
2373		}
2374	}
2375
2376	if (info->gpio_present) {
2377		unsigned int state;
2378		unsigned int changed;
2379		while ((changed = rd_reg32(info, IOSR)) != 0) {
2380			DBGISR(("%s iosr=%08x\n", info->device_name, changed));
2381			/* read latched state of GPIO signals */
2382			state = rd_reg32(info, IOVR);
2383			/* clear pending GPIO interrupt bits */
2384			wr_reg32(info, IOSR, changed);
2385			for (i=0 ; i < info->port_count ; i++) {
2386				if (info->port_array[i] != NULL)
2387					isr_gpio(info->port_array[i], changed, state);
2388			}
2389		}
2390	}
2391
2392	for(i=0; i < info->port_count ; i++) {
2393		struct slgt_info *port = info->port_array[i];
2394
2395		if (port && (port->port.count || port->netcount) &&
2396		    port->pending_bh && !port->bh_running &&
2397		    !port->bh_requested) {
2398			DBGISR(("%s bh queued\n", port->device_name));
2399			schedule_work(&port->task);
2400			port->bh_requested = true;
2401		}
2402	}
2403
2404	spin_unlock(&info->lock);
2405
2406	DBGISR(("slgt_interrupt irq=%d exit\n", info->irq_level));
2407	return IRQ_HANDLED;
2408}
2409
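/*
 * initialize device for use: allocate transmit buffer and program
 * hardware with current parameters (no-op if already initialized)
 */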
2410static int startup(struct slgt_info *info)
2411{
2412	DBGINFO(("%s startup\n", info->device_name));
2413
2414	if (info->port.flags & ASYNC_INITIALIZED)
2415		return 0;
2416
2417	if (!info->tx_buf) {
2418		info->tx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
2419		if (!info->tx_buf) {
2420			DBGERR(("%s can't allocate tx buffer\n", info->device_name));
2421			return -ENOMEM;
2422		}
2423	}
2424
2425	info->pending_bh = 0;
2426
2427	memset(&info->icount, 0, sizeof(info->icount));
2428
2429	/* program hardware for current parameters */
2430	change_params(info);
2431
2432	if (info->port.tty)
2433		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
2434
2435	info->port.flags |= ASYNC_INITIALIZED;
2436
2437	return 0;
2438}
2439
2440/*
2441 *  called by close() and hangup() to shutdown hardware
2442 */
2443static void shutdown(struct slgt_info *info)
2444{
2445	unsigned long flags;
2446
2447	if (!(info->port.flags & ASYNC_INITIALIZED))
2448		return;
2449
2450	DBGINFO(("%s shutdown\n", info->device_name));
2451
2452	/* clear status wait queue because status changes */
2453	/* can't happen after shutting down the hardware */
2454	wake_up_interruptible(&info->status_event_wait_q);
2455	wake_up_interruptible(&info->event_wait_q);
2456
2457	del_timer_sync(&info->tx_timer);
2458	del_timer_sync(&info->rx_timer);
2459
2460	kfree(info->tx_buf);
2461	info->tx_buf = NULL;
2462
2463	spin_lock_irqsave(&info->lock,flags);
2464
2465	tx_stop(info);
2466	rx_stop(info);
2467
2468	slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
2469
2470 	if (!info->port.tty || info->port.tty->termios->c_cflag & HUPCL) {
2471 		info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
2472		set_signals(info);
2473	}
2474
2475	flush_cond_wait(&info->gpio_wait_q);
2476
2477	spin_unlock_irqrestore(&info->lock,flags);
2478
2479	if (info->port.tty)
2480		set_bit(TTY_IO_ERROR, &info->port.tty->flags);
2481
2482	info->port.flags &= ~ASYNC_INITIALIZED;
2483}
2484
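/*
 * program hardware for current parameters (sync or async mode)
 * and restart the receiver if the net interface or tty needs it
 */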
2485static void program_hw(struct slgt_info *info)
2486{
2487	unsigned long flags;
2488
2489	spin_lock_irqsave(&info->lock,flags);
2490
2491	rx_stop(info);
2492	tx_stop(info);
2493
2494	if (info->params.mode != MGSL_MODE_ASYNC ||
2495	    info->netcount)
2496		sync_mode(info);
2497	else
2498		async_mode(info);
2499
2500	set_signals(info);
2501
2502	info->dcd_chkcount = 0;
2503	info->cts_chkcount = 0;
2504	info->ri_chkcount = 0;
2505	info->dsr_chkcount = 0;
2506
2507	slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
2508	get_signals(info);
2509
2510	if (info->netcount ||
2511	    (info->port.tty && info->port.tty->termios->c_cflag & CREAD))
2512		rx_start(info);
2513
2514	spin_unlock_irqrestore(&info->lock,flags);
2515}
2516
2517/*
2518 * reconfigure adapter based on new parameters
2519 */
2520static void change_params(struct slgt_info *info)
2521{
2522	unsigned cflag;
2523	int bits_per_char;
2524
2525	if (!info->port.tty || !info->port.tty->termios)
2526		return;
2527	DBGINFO(("%s change_params\n", info->device_name));
2528
2529	cflag = info->port.tty->termios->c_cflag;
2530
2531	/* if B0 rate (hangup) specified then negate DTR and RTS */
2532	/* otherwise assert DTR and RTS */
2533 	if (cflag & CBAUD)
2534		info->signals |= SerialSignal_RTS + SerialSignal_DTR;
2535	else
2536		info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
2537
2538	/* byte size and parity */
2539
2540	switch (cflag & CSIZE) {
2541	case CS5: info->params.data_bits = 5; break;
2542	case CS6: info->params.data_bits = 6; break;
2543	case CS7: info->params.data_bits = 7; break;
2544	case CS8: info->params.data_bits = 8; break;
2545	default:  info->params.data_bits = 7; break;
2546	}
2547
2548	info->params.stop_bits = (cflag & CSTOPB) ? 2 : 1;
2549
2550	if (cflag & PARENB)
2551		info->params.parity = (cflag & PARODD) ? ASYNC_PARITY_ODD : ASYNC_PARITY_EVEN;
2552	else
2553		info->params.parity = ASYNC_PARITY_NONE;
2554
2555	/* calculate number of jiffies to transmit a full
2556	 * FIFO (32 bytes) at specified data rate
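	 * (for example, at 9600 bps with 8 data bits and 1 stop bit
	 * bits_per_char is 10, giving roughly 32*10/9600 seconds, about
	 * 33 ms, before the slop added below)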
2557	 */
2558	bits_per_char = info->params.data_bits +
2559			info->params.stop_bits + 1;
2560
2561	info->params.data_rate = tty_get_baud_rate(info->port.tty);
2562
2563	if (info->params.data_rate) {
2564		info->timeout = (32*HZ*bits_per_char) /
2565				info->params.data_rate;
2566	}
2567	info->timeout += HZ/50;		/* Add .02 seconds of slop */
2568
2569	if (cflag & CRTSCTS)
2570		info->port.flags |= ASYNC_CTS_FLOW;
2571	else
2572		info->port.flags &= ~ASYNC_CTS_FLOW;
2573
2574	if (cflag & CLOCAL)
2575		info->port.flags &= ~ASYNC_CHECK_CD;
2576	else
2577		info->port.flags |= ASYNC_CHECK_CD;
2578
2579	/* process tty input control flags */
2580
2581	info->read_status_mask = IRQ_RXOVER;
2582	if (I_INPCK(info->port.tty))
2583		info->read_status_mask |= MASK_PARITY | MASK_FRAMING;
2584 	if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
2585 		info->read_status_mask |= MASK_BREAK;
2586	if (I_IGNPAR(info->port.tty))
2587		info->ignore_status_mask |= MASK_PARITY | MASK_FRAMING;
2588	if (I_IGNBRK(info->port.tty)) {
2589		info->ignore_status_mask |= MASK_BREAK;
2590		/* If ignoring parity and break indicators, ignore
2591		 * overruns too.  (For real raw support).
2592		 */
2593		if (I_IGNPAR(info->port.tty))
2594			info->ignore_status_mask |= MASK_OVERRUN;
2595	}
2596
2597	program_hw(info);
2598}
2599
2600static int get_stats(struct slgt_info *info, struct mgsl_icount __user *user_icount)
2601{
2602	DBGINFO(("%s get_stats\n",  info->device_name));
2603	if (!user_icount) {
2604		memset(&info->icount, 0, sizeof(info->icount));
2605	} else {
2606		if (copy_to_user(user_icount, &info->icount, sizeof(struct mgsl_icount)))
2607			return -EFAULT;
2608	}
2609	return 0;
2610}
2611
2612static int get_params(struct slgt_info *info, MGSL_PARAMS __user *user_params)
2613{
2614	DBGINFO(("%s get_params\n", info->device_name));
2615	if (copy_to_user(user_params, &info->params, sizeof(MGSL_PARAMS)))
2616		return -EFAULT;
2617	return 0;
2618}
2619
2620static int set_params(struct slgt_info *info, MGSL_PARAMS __user *new_params)
2621{
2622 	unsigned long flags;
2623	MGSL_PARAMS tmp_params;
2624
2625	DBGINFO(("%s set_params\n", info->device_name));
2626	if (copy_from_user(&tmp_params, new_params, sizeof(MGSL_PARAMS)))
2627		return -EFAULT;
2628
2629	spin_lock_irqsave(&info->lock, flags);
2630	if (tmp_params.mode == MGSL_MODE_BASE_CLOCK)
2631		info->base_clock = tmp_params.clock_speed;
2632	else
2633		memcpy(&info->params, &tmp_params, sizeof(MGSL_PARAMS));
2634	spin_unlock_irqrestore(&info->lock, flags);
2635
2636	program_hw(info);
2637
2638	return 0;
2639}
2640
2641static int get_txidle(struct slgt_info *info, int __user *idle_mode)
2642{
2643	DBGINFO(("%s get_txidle=%d\n", info->device_name, info->idle_mode));
2644	if (put_user(info->idle_mode, idle_mode))
2645		return -EFAULT;
2646	return 0;
2647}
2648
2649static int set_txidle(struct slgt_info *info, int idle_mode)
2650{
2651 	unsigned long flags;
2652	DBGINFO(("%s set_txidle(%d)\n", info->device_name, idle_mode));
2653	spin_lock_irqsave(&info->lock,flags);
2654	info->idle_mode = idle_mode;
2655	if (info->params.mode != MGSL_MODE_ASYNC)
2656		tx_set_idle(info);
2657	spin_unlock_irqrestore(&info->lock,flags);
2658	return 0;
2659}
2660
2661static int tx_enable(struct slgt_info *info, int enable)
2662{
2663 	unsigned long flags;
2664	DBGINFO(("%s tx_enable(%d)\n", info->device_name, enable));
2665	spin_lock_irqsave(&info->lock,flags);
2666	if (enable) {
2667		if (!info->tx_enabled)
2668			tx_start(info);
2669	} else {
2670		if (info->tx_enabled)
2671			tx_stop(info);
2672	}
2673	spin_unlock_irqrestore(&info->lock,flags);
2674	return 0;
2675}
2676
2677/*
2678 * abort transmit HDLC frame
2679 */
2680static int tx_abort(struct slgt_info *info)
2681{
2682 	unsigned long flags;
2683	DBGINFO(("%s tx_abort\n", info->device_name));
2684	spin_lock_irqsave(&info->lock,flags);
2685	tdma_reset(info);
2686	spin_unlock_irqrestore(&info->lock,flags);
2687	return 0;
2688}
2689
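/*
 * enable or disable receiver
 *
 * the enable argument packs two fields (see comments in the body):
 *   bits 31..16  receive DMA buffer fill level, 0 = leave unchanged
 *   bits 1..0    command: 0=disable, 1=enable, 2=enable/force hunt mode
 *
 * illustrative example: to request a 256 byte fill level (assuming it
 * does not exceed DMABUFSIZE) with the receiver enabled, a caller
 * would pass (256 << 16) | 1
 */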
2690static int rx_enable(struct slgt_info *info, int enable)
2691{
2692 	unsigned long flags;
2693	unsigned int rbuf_fill_level;
2694	DBGINFO(("%s rx_enable(%08x)\n", info->device_name, enable));
2695	spin_lock_irqsave(&info->lock,flags);
2696	/*
2697	 * enable[31..16] = receive DMA buffer fill level
2698	 * 0 = noop (leave fill level unchanged)
2699	 * fill level must be multiple of 4 and <= buffer size
2700	 */
2701	rbuf_fill_level = ((unsigned int)enable) >> 16;
2702	if (rbuf_fill_level) {
2703		if ((rbuf_fill_level > DMABUFSIZE) || (rbuf_fill_level % 4)) {
2704			spin_unlock_irqrestore(&info->lock, flags);
2705			return -EINVAL;
2706		}
2707		info->rbuf_fill_level = rbuf_fill_level;
2708		if (rbuf_fill_level < 128)
2709			info->rx_pio = 1; /* PIO mode */
2710		else
2711			info->rx_pio = 0; /* DMA mode */
2712		rx_stop(info); /* restart receiver to use new fill level */
2713	}
2714
2715	/*
2716	 * enable[1..0] = receiver enable command
2717	 * 0 = disable
2718	 * 1 = enable
2719	 * 2 = enable or force hunt mode if already enabled
2720	 */
2721	enable &= 3;
2722	if (enable) {
2723		if (!info->rx_enabled)
2724			rx_start(info);
2725		else if (enable == 2) {
2726			/* force hunt mode (write 1 to RCR[3]) */
2727			wr_reg16(info, RCR, rd_reg16(info, RCR) | BIT3);
2728		}
2729	} else {
2730		if (info->rx_enabled)
2731			rx_stop(info);
2732	}
2733	spin_unlock_irqrestore(&info->lock,flags);
2734	return 0;
2735}
2736
2737/*
2738 *  wait for specified event to occur
2739 */
2740static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
2741{
2742 	unsigned long flags;
2743	int s;
2744	int rc=0;
2745	struct mgsl_icount cprev, cnow;
2746	int events;
2747	int mask;
2748	struct	_input_signal_events oldsigs, newsigs;
2749	DECLARE_WAITQUEUE(wait, current);
2750
2751	if (get_user(mask, mask_ptr))
2752		return -EFAULT;
2753
2754	DBGINFO(("%s wait_mgsl_event(%d)\n", info->device_name, mask));
2755
2756	spin_lock_irqsave(&info->lock,flags);
2757
2758	/* return immediately if state matches requested events */
2759	get_signals(info);
2760	s = info->signals;
2761
2762	events = mask &
2763		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2764 		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2765		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2766		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2767	if (events) {
2768		spin_unlock_irqrestore(&info->lock,flags);
2769		goto exit;
2770	}
2771
2772	/* save current irq counts */
2773	cprev = info->icount;
2774	oldsigs = info->input_signal_events;
2775
2776	/* enable hunt and idle irqs if needed */
2777	if (mask & (MgslEvent_ExitHuntMode+MgslEvent_IdleReceived)) {
2778		unsigned short val = rd_reg16(info, SCR);
2779		if (!(val & IRQ_RXIDLE))
2780			wr_reg16(info, SCR, (unsigned short)(val | IRQ_RXIDLE));
2781	}
2782
2783	set_current_state(TASK_INTERRUPTIBLE);
2784	add_wait_queue(&info->event_wait_q, &wait);
2785
2786	spin_unlock_irqrestore(&info->lock,flags);
2787
2788	for(;;) {
2789		schedule();
2790		if (signal_pending(current)) {
2791			rc = -ERESTARTSYS;
2792			break;
2793		}
2794
2795		/* get current irq counts */
2796		spin_lock_irqsave(&info->lock,flags);
2797		cnow = info->icount;
2798		newsigs = info->input_signal_events;
2799		set_current_state(TASK_INTERRUPTIBLE);
2800		spin_unlock_irqrestore(&info->lock,flags);
2801
2802		/* if no change, wait aborted for some reason */
2803		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
2804		    newsigs.dsr_down == oldsigs.dsr_down &&
2805		    newsigs.dcd_up   == oldsigs.dcd_up   &&
2806		    newsigs.dcd_down == oldsigs.dcd_down &&
2807		    newsigs.cts_up   == oldsigs.cts_up   &&
2808		    newsigs.cts_down == oldsigs.cts_down &&
2809		    newsigs.ri_up    == oldsigs.ri_up    &&
2810		    newsigs.ri_down  == oldsigs.ri_down  &&
2811		    cnow.exithunt    == cprev.exithunt   &&
2812		    cnow.rxidle      == cprev.rxidle) {
2813			rc = -EIO;
2814			break;
2815		}
2816
2817		events = mask &
2818			( (newsigs.dsr_up   != oldsigs.dsr_up   ? MgslEvent_DsrActive:0)   +
2819			  (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2820			  (newsigs.dcd_up   != oldsigs.dcd_up   ? MgslEvent_DcdActive:0)   +
2821			  (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2822			  (newsigs.cts_up   != oldsigs.cts_up   ? MgslEvent_CtsActive:0)   +
2823			  (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2824			  (newsigs.ri_up    != oldsigs.ri_up    ? MgslEvent_RiActive:0)    +
2825			  (newsigs.ri_down  != oldsigs.ri_down  ? MgslEvent_RiInactive:0)  +
2826			  (cnow.exithunt    != cprev.exithunt   ? MgslEvent_ExitHuntMode:0) +
2827			  (cnow.rxidle      != cprev.rxidle     ? MgslEvent_IdleReceived:0) );
2828		if (events)
2829			break;
2830
2831		cprev = cnow;
2832		oldsigs = newsigs;
2833	}
2834
2835	remove_wait_queue(&info->event_wait_q, &wait);
2836	set_current_state(TASK_RUNNING);
2837
2838
2839	if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2840		spin_lock_irqsave(&info->lock,flags);
2841		if (!waitqueue_active(&info->event_wait_q)) {
2842			/* disable exit hunt mode/idle rcvd IRQs */
2843			wr_reg16(info, SCR,
2844				(unsigned short)(rd_reg16(info, SCR) & ~IRQ_RXIDLE));
2845		}
2846		spin_unlock_irqrestore(&info->lock,flags);
2847	}
2848exit:
2849	if (rc == 0)
2850		rc = put_user(events, mask_ptr);
2851	return rc;
2852}
2853
2854static int get_interface(struct slgt_info *info, int __user *if_mode)
2855{
2856	DBGINFO(("%s get_interface=%x\n", info->device_name, info->if_mode));
2857	if (put_user(info->if_mode, if_mode))
2858		return -EFAULT;
2859	return 0;
2860}
2861
2862static int set_interface(struct slgt_info *info, int if_mode)
2863{
2864 	unsigned long flags;
2865	unsigned short val;
2866
2867	DBGINFO(("%s set_interface=%x\n", info->device_name, if_mode));
2868	spin_lock_irqsave(&info->lock,flags);
2869	info->if_mode = if_mode;
2870
2871	msc_set_vcr(info);
2872
2873	/* TCR (tx control) 07  1=RTS driver control */
2874	val = rd_reg16(info, TCR);
2875	if (info->if_mode & MGSL_INTERFACE_RTS_EN)
2876		val |= BIT7;
2877	else
2878		val &= ~BIT7;
2879	wr_reg16(info, TCR, val);
2880
2881	spin_unlock_irqrestore(&info->lock,flags);
2882	return 0;
2883}
2884
2885/*
2886 * set general purpose IO pin state and direction
2887 *
2888 * user_gpio fields:
2889 * state   each bit indicates a pin state
2890 * smask   set bit indicates pin state to set
2891 * dir     each bit indicates a pin direction (0=input, 1=output)
2892 * dmask   set bit indicates pin direction to set
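 *
 * for example (illustrative values): to configure pin 0 as an output
 * driven high, set dmask = smask = 0x01 and dir = state = 0x01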
2893 */
2894static int set_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2895{
2896 	unsigned long flags;
2897	struct gpio_desc gpio;
2898	__u32 data;
2899
2900	if (!info->gpio_present)
2901		return -EINVAL;
2902	if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
2903		return -EFAULT;
2904	DBGINFO(("%s set_gpio state=%08x smask=%08x dir=%08x dmask=%08x\n",
2905		 info->device_name, gpio.state, gpio.smask,
2906		 gpio.dir, gpio.dmask));
2907
2908	spin_lock_irqsave(&info->lock,flags);
2909	if (gpio.dmask) {
2910		data = rd_reg32(info, IODR);
2911		data |= gpio.dmask & gpio.dir;
2912		data &= ~(gpio.dmask & ~gpio.dir);
2913		wr_reg32(info, IODR, data);
2914	}
2915	if (gpio.smask) {
2916		data = rd_reg32(info, IOVR);
2917		data |= gpio.smask & gpio.state;
2918		data &= ~(gpio.smask & ~gpio.state);
2919		wr_reg32(info, IOVR, data);
2920	}
2921	spin_unlock_irqrestore(&info->lock,flags);
2922
2923	return 0;
2924}
2925
2926/*
2927 * get general purpose IO pin state and direction
2928 */
2929static int get_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
2930{
2931	struct gpio_desc gpio;
2932	if (!info->gpio_present)
2933		return -EINVAL;
2934	gpio.state = rd_reg32(info, IOVR);
2935	gpio.smask = 0xffffffff;
2936	gpio.dir   = rd_reg32(info, IODR);
2937	gpio.dmask = 0xffffffff;
2938	if (copy_to_user(user_gpio, &gpio, sizeof(gpio)))
2939		return -EFAULT;
2940	DBGINFO(("%s get_gpio state=%08x dir=%08x\n",
2941		 info->device_name, gpio.state, gpio.dir));
2942	return 0;
2943}
2944
2945/*
2946 * conditional wait facility
2947 */
2948static void init_cond_wait(struct cond_wait *w, unsigned int data)
2949{
2950	init_waitqueue_head(&w->q);
2951	init_waitqueue_entry(&w->wait, current);
2952	w->data = data;
2953}
2954
2955static void add_cond_wait(struct cond_wait **head, struct cond_wait *w)
2956{
2957	set_current_state(TASK_INTERRUPTIBLE);
2958	add_wait_queue(&w->q, &w->wait);
2959	w->next = *head;
2960	*head = w;
2961}
2962
2963static void remove_cond_wait(struct cond_wait **head, struct cond_wait *cw)
2964{
2965	struct cond_wait *w, *prev;
2966	remove_wait_queue(&cw->q, &cw->wait);
2967	set_current_state(TASK_RUNNING);
2968	for (w = *head, prev = NULL ; w != NULL ; prev = w, w = w->next) {
2969		if (w == cw) {
2970			if (prev != NULL)
2971				prev->next = w->next;
2972			else
2973				*head = w->next;
2974			break;
2975		}
2976	}
2977}
2978
2979static void flush_cond_wait(struct cond_wait **head)
2980{
2981	while (*head != NULL) {
2982		wake_up_interruptible(&(*head)->q);
2983		*head = (*head)->next;
2984	}
2985}
2986
2987/*
2988 * wait for general purpose I/O pin(s) to enter specified state
2989 *
2990 * user_gpio fields:
2991 * state - bit indicates target pin state
2992 * smask - set bit indicates watched pin
2993 *
2994 * The wait ends when at least one watched pin enters the specified
2995 * state. When 0 (no error) is returned, user_gpio->state is set to the
2996 * state of all GPIO pins when the wait ends.
2997 *
2998 * Note: Each pin may be a dedicated input, dedicated output, or
2999 * configurable input/output. The number and configuration of pins
3000 * varies with the specific adapter model. Only input pins (dedicated
3001 * or configured) can be monitored with this function.
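 *
 * For example (illustrative values), to wait for input pin 3 to go
 * high use smask = 0x08 and state = 0x08.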
3002 */
3003static int wait_gpio(struct slgt_info *info, struct gpio_desc __user *user_gpio)
3004{
3005 	unsigned long flags;
3006	int rc = 0;
3007	struct gpio_desc gpio;
3008	struct cond_wait wait;
3009	u32 state;
3010
3011	if (!info->gpio_present)
3012		return -EINVAL;
3013	if (copy_from_user(&gpio, user_gpio, sizeof(gpio)))
3014		return -EFAULT;
3015	DBGINFO(("%s wait_gpio() state=%08x smask=%08x\n",
3016		 info->device_name, gpio.state, gpio.smask));
3017	/* ignore output pins identified by set IODR bit */
3018	if ((gpio.smask &= ~rd_reg32(info, IODR)) == 0)
3019		return -EINVAL;
3020	init_cond_wait(&wait, gpio.smask);
3021
3022	spin_lock_irqsave(&info->lock, flags);
3023	/* enable interrupts for watched pins */
3024	wr_reg32(info, IOER, rd_reg32(info, IOER) | gpio.smask);
3025	/* get current pin states */
3026	state = rd_reg32(info, IOVR);
3027
3028	if (gpio.smask & ~(state ^ gpio.state)) {
3029		/* already in target state */
3030		gpio.state = state;
3031	} else {
3032		/* wait for target state */
3033		add_cond_wait(&info->gpio_wait_q, &wait);
3034		spin_unlock_irqrestore(&info->lock, flags);
3035		schedule();
3036		if (signal_pending(current))
3037			rc = -ERESTARTSYS;
3038		else
3039			gpio.state = wait.data;
3040		spin_lock_irqsave(&info->lock, flags);
3041		remove_cond_wait(&info->gpio_wait_q, &wait);
3042	}
3043
3044	/* disable all GPIO interrupts if no waiting processes */
3045	if (info->gpio_wait_q == NULL)
3046		wr_reg32(info, IOER, 0);
3047	spin_unlock_irqrestore(&info->lock,flags);
3048
3049	if ((rc == 0) && copy_to_user(user_gpio, &gpio, sizeof(gpio)))
3050		rc = -EFAULT;
3051	return rc;
3052}
3053
3054static int modem_input_wait(struct slgt_info *info,int arg)
3055{
3056 	unsigned long flags;
3057	int rc;
3058	struct mgsl_icount cprev, cnow;
3059	DECLARE_WAITQUEUE(wait, current);
3060
3061	/* save current irq counts */
3062	spin_lock_irqsave(&info->lock,flags);
3063	cprev = info->icount;
3064	add_wait_queue(&info->status_event_wait_q, &wait);
3065	set_current_state(TASK_INTERRUPTIBLE);
3066	spin_unlock_irqrestore(&info->lock,flags);
3067
3068	for(;;) {
3069		schedule();
3070		if (signal_pending(current)) {
3071			rc = -ERESTARTSYS;
3072			break;
3073		}
3074
3075		/* get new irq counts */
3076		spin_lock_irqsave(&info->lock,flags);
3077		cnow = info->icount;
3078		set_current_state(TASK_INTERRUPTIBLE);
3079		spin_unlock_irqrestore(&info->lock,flags);
3080
3081		/* if no change, wait aborted for some reason */
3082		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
3083		    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
3084			rc = -EIO;
3085			break;
3086		}
3087
3088		/* check for change in caller specified modem input */
3089		if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
3090		    (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
3091		    (arg & TIOCM_CD  && cnow.dcd != cprev.dcd) ||
3092		    (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
3093			rc = 0;
3094			break;
3095		}
3096
3097		cprev = cnow;
3098	}
3099	remove_wait_queue(&info->status_event_wait_q, &wait);
3100	set_current_state(TASK_RUNNING);
3101	return rc;
3102}
3103
3104/*
3105 *  return state of serial control and status signals
3106 */
3107static int tiocmget(struct tty_struct *tty, struct file *file)
3108{
3109	struct slgt_info *info = tty->driver_data;
3110	unsigned int result;
3111 	unsigned long flags;
3112
3113	spin_lock_irqsave(&info->lock,flags);
3114 	get_signals(info);
3115	spin_unlock_irqrestore(&info->lock,flags);
3116
3117	result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
3118		((info->signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
3119		((info->signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
3120		((info->signals & SerialSignal_RI)  ? TIOCM_RNG:0) +
3121		((info->signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
3122		((info->signals & SerialSignal_CTS) ? TIOCM_CTS:0);
3123
3124	DBGINFO(("%s tiocmget value=%08X\n", info->device_name, result));
3125	return result;
3126}
3127
3128/*
3129 * set modem control signals (DTR/RTS)
3130 *
3131 * 	set	bit mask of signals to assert (TIOCM_RTS, TIOCM_DTR)
3132 * 	clear	bit mask of signals to negate
3133 *		(the tty layer translates TIOCMBIS/TIOCMBIC/TIOCMSET into these masks)
3134 */
3135static int tiocmset(struct tty_struct *tty, struct file *file,
3136		    unsigned int set, unsigned int clear)
3137{
3138	struct slgt_info *info = tty->driver_data;
3139 	unsigned long flags;
3140
3141	DBGINFO(("%s tiocmset(%x,%x)\n", info->device_name, set, clear));
3142
3143	if (set & TIOCM_RTS)
3144		info->signals |= SerialSignal_RTS;
3145	if (set & TIOCM_DTR)
3146		info->signals |= SerialSignal_DTR;
3147	if (clear & TIOCM_RTS)
3148		info->signals &= ~SerialSignal_RTS;
3149	if (clear & TIOCM_DTR)
3150		info->signals &= ~SerialSignal_DTR;
3151
3152	spin_lock_irqsave(&info->lock,flags);
3153 	set_signals(info);
3154	spin_unlock_irqrestore(&info->lock,flags);
3155	return 0;
3156}
3157
3158static int carrier_raised(struct tty_port *port)
3159{
3160	unsigned long flags;
3161	struct slgt_info *info = container_of(port, struct slgt_info, port);
3162
3163	spin_lock_irqsave(&info->lock,flags);
3164 	get_signals(info);
3165	spin_unlock_irqrestore(&info->lock,flags);
3166	return (info->signals & SerialSignal_DCD) ? 1 : 0;
3167}
3168
3169static void dtr_rts(struct tty_port *port, int on)
3170{
3171	unsigned long flags;
3172	struct slgt_info *info = container_of(port, struct slgt_info, port);
3173
3174	spin_lock_irqsave(&info->lock,flags);
3175	if (on)
3176		info->signals |= SerialSignal_RTS + SerialSignal_DTR;
3177	else
3178		info->signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3179 	set_signals(info);
3180	spin_unlock_irqrestore(&info->lock,flags);
3181}
3182
3183
3184/*
3185 *  block current process until the device is ready to open
3186 */
3187static int block_til_ready(struct tty_struct *tty, struct file *filp,
3188			   struct slgt_info *info)
3189{
3190	DECLARE_WAITQUEUE(wait, current);
3191	int		retval;
3192	bool		do_clocal = false;
3193	bool		extra_count = false;
3194	unsigned long	flags;
3195	int		cd;
3196	struct tty_port *port = &info->port;
3197
3198	DBGINFO(("%s block_til_ready\n", tty->driver->name));
3199
3200	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3201		/* nonblock mode is set or port is not enabled */
3202		port->flags |= ASYNC_NORMAL_ACTIVE;
3203		return 0;
3204	}
3205
3206	if (tty->termios->c_cflag & CLOCAL)
3207		do_clocal = true;
3208
3209	/* Wait for carrier detect and the line to become
3210	 * free (i.e., not in use by the callout).  While we are in
3211	 * this loop, port->count is dropped by one, so that
3212	 * close() knows when to free things.  We restore it upon
3213	 * exit, either normal or abnormal.
3214	 */
3215
3216	retval = 0;
3217	add_wait_queue(&port->open_wait, &wait);
3218
3219	spin_lock_irqsave(&info->lock, flags);
3220	if (!tty_hung_up_p(filp)) {
3221		extra_count = true;
3222		port->count--;
3223	}
3224	spin_unlock_irqrestore(&info->lock, flags);
3225	port->blocked_open++;
3226
3227	while (1) {
3228		if ((tty->termios->c_cflag & CBAUD))
3229			tty_port_raise_dtr_rts(port);
3230
3231		set_current_state(TASK_INTERRUPTIBLE);
3232
3233		if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
3234			retval = (port->flags & ASYNC_HUP_NOTIFY) ?
3235					-EAGAIN : -ERESTARTSYS;
3236			break;
3237		}
3238
3239		cd = tty_port_carrier_raised(port);
3240
3241 		if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd ))
3242 			break;
3243
3244		if (signal_pending(current)) {
3245			retval = -ERESTARTSYS;
3246			break;
3247		}
3248
3249		DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
3250		tty_unlock();
3251		schedule();
3252		tty_lock();
3253	}
3254
3255	set_current_state(TASK_RUNNING);
3256	remove_wait_queue(&port->open_wait, &wait);
3257
3258	if (extra_count)
3259		port->count++;
3260	port->blocked_open--;
3261
3262	if (!retval)
3263		port->flags |= ASYNC_NORMAL_ACTIVE;
3264
3265	DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval));
3266	return retval;
3267}
3268
3269static int alloc_tmp_rbuf(struct slgt_info *info)
3270{
3271	info->tmp_rbuf = kmalloc(info->max_frame_size + 5, GFP_KERNEL);
3272	if (info->tmp_rbuf == NULL)
3273		return -ENOMEM;
3274	return 0;
3275}
3276
3277static void free_tmp_rbuf(struct slgt_info *info)
3278{
3279	kfree(info->tmp_rbuf);
3280	info->tmp_rbuf = NULL;
3281}
3282
3283/*
3284 * allocate DMA descriptor lists.
3285 */
3286static int alloc_desc(struct slgt_info *info)
3287{
3288	unsigned int i;
3289	unsigned int pbufs;
3290
3291	/* allocate memory to hold descriptor lists */
3292	info->bufs = pci_alloc_consistent(info->pdev, DESC_LIST_SIZE, &info->bufs_dma_addr);
3293	if (info->bufs == NULL)
3294		return -ENOMEM;
3295
3296	memset(info->bufs, 0, DESC_LIST_SIZE);
3297
3298	info->rbufs = (struct slgt_desc*)info->bufs;
3299	info->tbufs = ((struct slgt_desc*)info->bufs) + info->rbuf_count;
3300
3301	pbufs = (unsigned int)info->bufs_dma_addr;
3302
3303	/*
3304	 * Build circular lists of descriptors
3305	 */
3306
3307	for (i=0; i < info->rbuf_count; i++) {
3308		/* physical address of this descriptor */
3309		info->rbufs[i].pdesc = pbufs + (i * sizeof(struct slgt_desc));
3310
3311		/* physical address of next descriptor */
3312		if (i == info->rbuf_count - 1)
3313			info->rbufs[i].next = cpu_to_le32(pbufs);
3314		else
3315			info->rbufs[i].next = cpu_to_le32(pbufs + ((i+1) * sizeof(struct slgt_desc)));
3316		set_desc_count(info->rbufs[i], DMABUFSIZE);
3317	}
3318
3319	for (i=0; i < info->tbuf_count; i++) {
3320		/* physical address of this descriptor */
3321		info->tbufs[i].pdesc = pbufs + ((info->rbuf_count + i) * sizeof(struct slgt_desc));
3322
3323		/* physical address of next descriptor */
3324		if (i == info->tbuf_count - 1)
3325			info->tbufs[i].next = cpu_to_le32(pbufs + info->rbuf_count * sizeof(struct slgt_desc));
3326		else
3327			info->tbufs[i].next = cpu_to_le32(pbufs + ((info->rbuf_count + i + 1) * sizeof(struct slgt_desc)));
3328	}
3329
3330	return 0;
3331}
3332
3333static void free_desc(struct slgt_info *info)
3334{
3335	if (info->bufs != NULL) {
3336		pci_free_consistent(info->pdev, DESC_LIST_SIZE, info->bufs, info->bufs_dma_addr);
3337		info->bufs  = NULL;
3338		info->rbufs = NULL;
3339		info->tbufs = NULL;
3340	}
3341}
3342
3343static int alloc_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
3344{
3345	int i;
3346	for (i=0; i < count; i++) {
3347		if ((bufs[i].buf = pci_alloc_consistent(info->pdev, DMABUFSIZE, &bufs[i].buf_dma_addr)) == NULL)
3348			return -ENOMEM;
3349		bufs[i].pbuf  = cpu_to_le32((unsigned int)bufs[i].buf_dma_addr);
3350	}
3351	return 0;
3352}
3353
3354static void free_bufs(struct slgt_info *info, struct slgt_desc *bufs, int count)
3355{
3356	int i;
3357	for (i=0; i < count; i++) {
3358		if (bufs[i].buf == NULL)
3359			continue;
3360		pci_free_consistent(info->pdev, DMABUFSIZE, bufs[i].buf, bufs[i].buf_dma_addr);
3361		bufs[i].buf = NULL;
3362	}
3363}
3364
3365static int alloc_dma_bufs(struct slgt_info *info)
3366{
3367	info->rbuf_count = 32;
3368	info->tbuf_count = 32;
3369
3370	if (alloc_desc(info) < 0 ||
3371	    alloc_bufs(info, info->rbufs, info->rbuf_count) < 0 ||
3372	    alloc_bufs(info, info->tbufs, info->tbuf_count) < 0 ||
3373	    alloc_tmp_rbuf(info) < 0) {
3374		DBGERR(("%s DMA buffer alloc fail\n", info->device_name));
3375		return -ENOMEM;
3376	}
3377	reset_rbufs(info);
3378	return 0;
3379}
3380
3381static void free_dma_bufs(struct slgt_info *info)
3382{
3383	if (info->bufs) {
3384		free_bufs(info, info->rbufs, info->rbuf_count);
3385		free_bufs(info, info->tbufs, info->tbuf_count);
3386		free_desc(info);
3387	}
3388	free_tmp_rbuf(info);
3389}
3390
3391static int claim_resources(struct slgt_info *info)
3392{
3393	if (request_mem_region(info->phys_reg_addr, SLGT_REG_SIZE, "synclink_gt") == NULL) {
3394		DBGERR(("%s reg addr conflict, addr=%08X\n",
3395			info->device_name, info->phys_reg_addr));
3396		info->init_error = DiagStatus_AddressConflict;
3397		goto errout;
3398	}
3399	else
3400		info->reg_addr_requested = true;
3401
3402	info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE);
3403	if (!info->reg_addr) {
3404		DBGERR(("%s can't map device registers, addr=%08X\n",
3405			info->device_name, info->phys_reg_addr));
3406		info->init_error = DiagStatus_CantAssignPciResources;
3407		goto errout;
3408	}
3409	return 0;
3410
3411errout:
3412	release_resources(info);
3413	return -ENODEV;
3414}
3415
3416static void release_resources(struct slgt_info *info)
3417{
3418	if (info->irq_requested) {
3419		free_irq(info->irq_level, info);
3420		info->irq_requested = false;
3421	}
3422
3423	if (info->reg_addr_requested) {
3424		release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE);
3425		info->reg_addr_requested = false;
3426	}
3427
3428	if (info->reg_addr) {
3429		iounmap(info->reg_addr);
3430		info->reg_addr = NULL;
3431	}
3432}
3433
3434/* Add the specified device instance data structure to the
3435 * global linked list of devices and increment the device count.
3436 */
3437static void add_device(struct slgt_info *info)
3438{
3439	char *devstr;
3440
3441	info->next_device = NULL;
3442	info->line = slgt_device_count;
3443	sprintf(info->device_name, "%s%d", tty_dev_prefix, info->line);
3444
3445	if (info->line < MAX_DEVICES) {
3446		if (maxframe[info->line])
3447			info->max_frame_size = maxframe[info->line];
3448	}
3449
3450	slgt_device_count++;
3451
3452	if (!slgt_device_list)
3453		slgt_device_list = info;
3454	else {
3455		struct slgt_info *current_dev = slgt_device_list;
3456		while(current_dev->next_device)
3457			current_dev = current_dev->next_device;
3458		current_dev->next_device = info;
3459	}
3460
3461	if (info->max_frame_size < 4096)
3462		info->max_frame_size = 4096;
3463	else if (info->max_frame_size > 65535)
3464		info->max_frame_size = 65535;
3465
3466	switch(info->pdev->device) {
3467	case SYNCLINK_GT_DEVICE_ID:
3468		devstr = "GT";
3469		break;
3470	case SYNCLINK_GT2_DEVICE_ID:
3471		devstr = "GT2";
3472		break;
3473	case SYNCLINK_GT4_DEVICE_ID:
3474		devstr = "GT4";
3475		break;
3476	case SYNCLINK_AC_DEVICE_ID:
3477		devstr = "AC";
3478		info->params.mode = MGSL_MODE_ASYNC;
3479		break;
3480	default:
3481		devstr = "(unknown model)";
3482	}
3483	printk(KERN_INFO "SyncLink %s %s IO=%08x IRQ=%d MaxFrameSize=%u\n",
3484		devstr, info->device_name, info->phys_reg_addr,
3485		info->irq_level, info->max_frame_size);
3486
3487#if SYNCLINK_GENERIC_HDLC
3488	hdlcdev_init(info);
3489#endif
3490}
3491
3492static const struct tty_port_operations slgt_port_ops = {
3493	.carrier_raised = carrier_raised,
3494	.dtr_rts = dtr_rts,
3495};
3496
3497/*
3498 *  allocate device instance structure, return NULL on failure
3499 */
3500static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
3501{
3502	struct slgt_info *info;
3503
3504	info = kzalloc(sizeof(struct slgt_info), GFP_KERNEL);
3505
3506	if (!info) {
3507		DBGERR(("%s device alloc failed adapter=%d port=%d\n",
3508			driver_name, adapter_num, port_num));
3509	} else {
3510		tty_port_init(&info->port);
3511		info->port.ops = &slgt_port_ops;
3512		info->magic = MGSL_MAGIC;
3513		INIT_WORK(&info->task, bh_handler);
3514		info->max_frame_size = 4096;
3515		info->base_clock = 14745600;
3516		info->rbuf_fill_level = DMABUFSIZE;
3517		info->port.close_delay = 5*HZ/10;
3518		info->port.closing_wait = 30*HZ;
3519		init_waitqueue_head(&info->status_event_wait_q);
3520		init_waitqueue_head(&info->event_wait_q);
3521		spin_lock_init(&info->netlock);
3522		memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
3523		info->idle_mode = HDLC_TXIDLE_FLAGS;
3524		info->adapter_num = adapter_num;
3525		info->port_num = port_num;
3526
3527		setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
3528		setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info);
3529
3530		/* Copy configuration info to device instance data */
3531		info->pdev = pdev;
3532		info->irq_level = pdev->irq;
3533		info->phys_reg_addr = pci_resource_start(pdev,0);
3534
3535		info->bus_type = MGSL_BUS_TYPE_PCI;
3536		info->irq_flags = IRQF_SHARED;
3537
3538		info->init_error = -1; /* assume error, set to 0 on successful init */
3539	}
3540
3541	return info;
3542}
3543
3544static void device_init(int adapter_num, struct pci_dev *pdev)
3545{
3546	struct slgt_info *port_array[SLGT_MAX_PORTS];
3547	int i;
3548	int port_count = 1;
3549
3550	if (pdev->device == SYNCLINK_GT2_DEVICE_ID)
3551		port_count = 2;
3552	else if (pdev->device == SYNCLINK_GT4_DEVICE_ID)
3553		port_count = 4;
3554
3555	/* allocate device instances for all ports */
3556	for (i=0; i < port_count; ++i) {
3557		port_array[i] = alloc_dev(adapter_num, i, pdev);
3558		if (port_array[i] == NULL) {
3559			for (--i; i >= 0; --i)
3560				kfree(port_array[i]);
3561			return;
3562		}
3563	}
3564
3565	/* give copy of port_array to all ports and add to device list  */
3566	for (i=0; i < port_count; ++i) {
3567		memcpy(port_array[i]->port_array, port_array, sizeof(port_array));
3568		add_device(port_array[i]);
3569		port_array[i]->port_count = port_count;
3570		spin_lock_init(&port_array[i]->lock);
3571	}
3572
3573	/* Allocate and claim adapter resources */
3574	if (!claim_resources(port_array[0])) {
3575
3576		alloc_dma_bufs(port_array[0]);
3577
3578		/* copy resource information from first port to others */
3579		for (i = 1; i < port_count; ++i) {
3580			port_array[i]->lock      = port_array[0]->lock;
3581			port_array[i]->irq_level = port_array[0]->irq_level;
3582			port_array[i]->reg_addr  = port_array[0]->reg_addr;
3583			alloc_dma_bufs(port_array[i]);
3584		}
3585
3586		if (request_irq(port_array[0]->irq_level,
3587					slgt_interrupt,
3588					port_array[0]->irq_flags,
3589					port_array[0]->device_name,
3590					port_array[0]) < 0) {
3591			DBGERR(("%s request_irq failed IRQ=%d\n",
3592				port_array[0]->device_name,
3593				port_array[0]->irq_level));
3594		} else {
3595			port_array[0]->irq_requested = true;
3596			adapter_test(port_array[0]);
3597			for (i=1 ; i < port_count ; i++) {
3598				port_array[i]->init_error = port_array[0]->init_error;
3599				port_array[i]->gpio_present = port_array[0]->gpio_present;
3600			}
3601		}
3602	}
3603
3604	for (i=0; i < port_count; ++i)
3605		tty_register_device(serial_driver, port_array[i]->line, &(port_array[i]->pdev->dev));
3606}
3607
3608static int __devinit init_one(struct pci_dev *dev,
3609			      const struct pci_device_id *ent)
3610{
3611	if (pci_enable_device(dev)) {
3612		printk(KERN_ERR "error enabling pci device %p\n", dev);
3613		return -EIO;
3614	}
3615	pci_set_master(dev);
3616	device_init(slgt_device_count, dev);
3617	return 0;
3618}
3619
3620static void __devexit remove_one(struct pci_dev *dev)
3621{
3622}
3623
3624static const struct tty_operations ops = {
3625	.open = open,
3626	.close = close,
3627	.write = write,
3628	.put_char = put_char,
3629	.flush_chars = flush_chars,
3630	.write_room = write_room,
3631	.chars_in_buffer = chars_in_buffer,
3632	.flush_buffer = flush_buffer,
3633	.ioctl = ioctl,
3634	.compat_ioctl = slgt_compat_ioctl,
3635	.throttle = throttle,
3636	.unthrottle = unthrottle,
3637	.send_xchar = send_xchar,
3638	.break_ctl = set_break,
3639	.wait_until_sent = wait_until_sent,
3640	.set_termios = set_termios,
3641	.stop = tx_hold,
3642	.start = tx_release,
3643	.hangup = hangup,
3644	.tiocmget = tiocmget,
3645	.tiocmset = tiocmset,
3646	.proc_fops = &synclink_gt_proc_fops,
3647};
3648
3649static void slgt_cleanup(void)
3650{
3651	int rc;
3652	struct slgt_info *info;
3653	struct slgt_info *tmp;
3654
3655	printk(KERN_INFO "unload %s\n", driver_name);
3656
3657	if (serial_driver) {
3658		for (info=slgt_device_list ; info != NULL ; info=info->next_device)
3659			tty_unregister_device(serial_driver, info->line);
3660		if ((rc = tty_unregister_driver(serial_driver)))
3661			DBGERR(("tty_unregister_driver error=%d\n", rc));
3662		put_tty_driver(serial_driver);
3663	}
3664
3665	/* reset devices */
3666	info = slgt_device_list;
3667	while(info) {
3668		reset_port(info);
3669		info = info->next_device;
3670	}
3671
3672	/* release devices */
3673	info = slgt_device_list;
3674	while(info) {
3675#if SYNCLINK_GENERIC_HDLC
3676		hdlcdev_exit(info);
3677#endif
3678		free_dma_bufs(info);
3679		free_tmp_rbuf(info);
3680		if (info->port_num == 0)
3681			release_resources(info);
3682		tmp = info;
3683		info = info->next_device;
3684		kfree(tmp);
3685	}
3686
3687	if (pci_registered)
3688		pci_unregister_driver(&pci_driver);
3689}
3690
3691/*
3692 *  Driver initialization entry point.
3693 */
3694static int __init slgt_init(void)
3695{
3696	int rc;
3697
3698	printk(KERN_INFO "%s\n", driver_name);
3699
3700	serial_driver = alloc_tty_driver(MAX_DEVICES);
3701	if (!serial_driver) {
3702		printk(KERN_ERR "%s can't allocate tty driver\n", driver_name);
3703		return -ENOMEM;
3704	}
3705
3706	/* Initialize the tty_driver structure */
3707
3708	serial_driver->owner = THIS_MODULE;
3709	serial_driver->driver_name = tty_driver_name;
3710	serial_driver->name = tty_dev_prefix;
3711	serial_driver->major = ttymajor;
3712	serial_driver->minor_start = 64;
3713	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
3714	serial_driver->subtype = SERIAL_TYPE_NORMAL;
3715	serial_driver->init_termios = tty_std_termios;
3716	serial_driver->init_termios.c_cflag =
3717		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
3718	serial_driver->init_termios.c_ispeed = 9600;
3719	serial_driver->init_termios.c_ospeed = 9600;
3720	serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
3721	tty_set_operations(serial_driver, &ops);
3722	if ((rc = tty_register_driver(serial_driver)) < 0) {
3723		DBGERR(("%s can't register serial driver\n", driver_name));
3724		put_tty_driver(serial_driver);
3725		serial_driver = NULL;
3726		goto error;
3727	}
3728
3729	printk(KERN_INFO "%s, tty major#%d\n",
3730	       driver_name, serial_driver->major);
3731
3732	slgt_device_count = 0;
3733	if ((rc = pci_register_driver(&pci_driver)) < 0) {
3734		printk(KERN_ERR "%s pci_register_driver error=%d\n", driver_name, rc);
3735		goto error;
3736	}
3737	pci_registered = true;
3738
3739	if (!slgt_device_list)
3740		printk(KERN_INFO "%s no devices found\n", driver_name);
3741
3742	return 0;
3743
3744error:
3745	slgt_cleanup();
3746	return rc;
3747}
3748
3749static void __exit slgt_exit(void)
3750{
3751	slgt_cleanup();
3752}
3753
3754module_init(slgt_init);
3755module_exit(slgt_exit);
3756
3757/*
3758 * register access routines
3759 */
3760
3761#define CALC_REGADDR() \
3762	unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
3763	if (addr >= 0x80) \
3764		reg_addr += (info->port_num) * 32;
3765
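/*
 * Address calculation example (illustrative): offsets below 0x80 are common
 * to the adapter, while offsets at or above 0x80 select a 32-byte per-port
 * register block, as the adjustment in CALC_REGADDR implies. For port_num = 2:
 *   addr 0x14 -> reg_addr = info->reg_addr + 0x14
 *   addr 0x8c -> reg_addr = info->reg_addr + 0x8c + (2 * 32) = info->reg_addr + 0xcc
 */
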
3766static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
3767{
3768	CALC_REGADDR();
3769	return readb((void __iomem *)reg_addr);
3770}
3771
3772static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value)
3773{
3774	CALC_REGADDR();
3775	writeb(value, (void __iomem *)reg_addr);
3776}
3777
3778static __u16 rd_reg16(struct slgt_info *info, unsigned int addr)
3779{
3780	CALC_REGADDR();
3781	return readw((void __iomem *)reg_addr);
3782}
3783
3784static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value)
3785{
3786	CALC_REGADDR();
3787	writew(value, (void __iomem *)reg_addr);
3788}
3789
3790static __u32 rd_reg32(struct slgt_info *info, unsigned int addr)
3791{
3792	CALC_REGADDR();
3793	return readl((void __iomem *)reg_addr);
3794}
3795
3796static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value)
3797{
3798	CALC_REGADDR();
3799	writel(value, (void __iomem *)reg_addr);
3800}
3801
3802static void rdma_reset(struct slgt_info *info)
3803{
3804	unsigned int i;
3805
3806	/* set reset bit */
3807	wr_reg32(info, RDCSR, BIT1);
3808
3809	/* wait for enable bit cleared */
3810	for(i=0 ; i < 1000 ; i++)
3811		if (!(rd_reg32(info, RDCSR) & BIT0))
3812			break;
3813}
3814
3815static void tdma_reset(struct slgt_info *info)
3816{
3817	unsigned int i;
3818
3819	/* set reset bit */
3820	wr_reg32(info, TDCSR, BIT1);
3821
3822	/* wait for enable bit cleared */
3823	for(i=0 ; i < 1000 ; i++)
3824		if (!(rd_reg32(info, TDCSR) & BIT0))
3825			break;
3826}
3827
3828/*
3829 * enable internal loopback
3830 * TxCLK and RxCLK are generated from BRG
3831 * and TxD is looped back to RxD internally.
3832 */
3833static void enable_loopback(struct slgt_info *info)
3834{
3835	/* SCR (serial control) BIT2=loopback enable */
3836	wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT2));
3837
3838	if (info->params.mode != MGSL_MODE_ASYNC) {
3839		/* CCR (clock control)
3840		 * 07..05  tx clock source (010 = BRG)
3841		 * 04..02  rx clock source (010 = BRG)
3842		 * 01      auxclk enable   (0 = disable)
3843		 * 00      BRG enable      (1 = enable)
3844		 *
3845		 * 0100 1001
3846		 */
3847		wr_reg8(info, CCR, 0x49);
3848
3849		/* set speed if available, otherwise use default */
3850		if (info->params.clock_speed)
3851			set_rate(info, info->params.clock_speed);
3852		else
3853			set_rate(info, 3686400);
3854	}
3855}
3856
3857/*
3858 *  set baud rate generator to specified rate
3859 */
3860static void set_rate(struct slgt_info *info, u32 rate)
3861{
3862	unsigned int div;
3863	unsigned int osc = info->base_clock;
3864
3865	/* div = osc/rate - 1
3866	 *
3867	 * If osc/rate is not an integer, the quotient is effectively
3868	 * rounded up, forcing the next slowest (never faster) rate.
3869	 */
3870
3871	if (rate) {
3872		div = osc/rate;
3873		if (!(osc % rate) && div)
3874			div--;
3875		wr_reg16(info, BDR, (unsigned short)div);
3876	}
3877}
3878
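/*
 * Example divisor values (illustrative, assuming the usual 14745600 Hz
 * base clock; base_clock may differ on some hardware/configurations):
 *   rate 9600   -> 14745600/9600   = 1536 exact     -> div = 1535
 *   rate 115200 -> 14745600/115200 = 128  exact     -> div = 127
 *   rate 250000 -> 14745600/250000 = 58 rem 245600  -> div = 58,
 *                  actual output 14745600/59 ~= 249925 (next slowest rate)
 */
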
3879static void rx_stop(struct slgt_info *info)
3880{
3881	unsigned short val;
3882
3883	/* disable and reset receiver */
3884	val = rd_reg16(info, RCR) & ~BIT1;          /* clear enable bit */
3885	wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
3886	wr_reg16(info, RCR, val);                  /* clear reset bit */
3887
3888	slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA + IRQ_RXIDLE);
3889
3890	/* clear pending rx interrupts */
3891	wr_reg16(info, SSR, IRQ_RXIDLE + IRQ_RXOVER);
3892
3893	rdma_reset(info);
3894
3895	info->rx_enabled = false;
3896	info->rx_restart = false;
3897}
3898
3899static void rx_start(struct slgt_info *info)
3900{
3901	unsigned short val;
3902
3903	slgt_irq_off(info, IRQ_RXOVER + IRQ_RXDATA);
3904
3905	/* clear pending rx overrun IRQ */
3906	wr_reg16(info, SSR, IRQ_RXOVER);
3907
3908	/* reset and disable receiver */
3909	val = rd_reg16(info, RCR) & ~BIT1; /* clear enable bit */
3910	wr_reg16(info, RCR, (unsigned short)(val | BIT2)); /* set reset bit */
3911	wr_reg16(info, RCR, val);                  /* clear reset bit */
3912
3913	rdma_reset(info);
3914	reset_rbufs(info);
3915
3916	if (info->rx_pio) {
3917		/* rx request when rx FIFO not empty */
3918		wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) & ~BIT14));
3919		slgt_irq_on(info, IRQ_RXDATA);
3920		if (info->params.mode == MGSL_MODE_ASYNC) {
3921			/* enable saving of rx status */
3922			wr_reg32(info, RDCSR, BIT6);
3923		}
3924	} else {
3925		/* rx request when rx FIFO half full */
3926		wr_reg16(info, SCR, (unsigned short)(rd_reg16(info, SCR) | BIT14));
3927		/* set 1st descriptor address */
3928		wr_reg32(info, RDDAR, info->rbufs[0].pdesc);
3929
3930		if (info->params.mode != MGSL_MODE_ASYNC) {
3931			/* enable rx DMA and DMA interrupt */
3932			wr_reg32(info, RDCSR, (BIT2 + BIT0));
3933		} else {
3934			/* enable saving of rx status, rx DMA and DMA interrupt */
3935			wr_reg32(info, RDCSR, (BIT6 + BIT2 + BIT0));
3936		}
3937	}
3938
3939	slgt_irq_on(info, IRQ_RXOVER);
3940
3941	/* enable receiver */
3942	wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1));
3943
3944	info->rx_restart = false;
3945	info->rx_enabled = true;
3946}
3947
3948static void tx_start(struct slgt_info *info)
3949{
3950	if (!info->tx_enabled) {
3951		wr_reg16(info, TCR,
3952			 (unsigned short)((rd_reg16(info, TCR) | BIT1) & ~BIT2));
3953		info->tx_enabled = true;
3954	}
3955
3956	if (desc_count(info->tbufs[info->tbuf_start])) {
3957		info->drop_rts_on_tx_done = false;
3958
3959		if (info->params.mode != MGSL_MODE_ASYNC) {
3960			if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
3961				get_signals(info);
3962				if (!(info->signals & SerialSignal_RTS)) {
3963					info->signals |= SerialSignal_RTS;
3964					set_signals(info);
3965					info->drop_rts_on_tx_done = true;
3966				}
3967			}
3968
3969			slgt_irq_off(info, IRQ_TXDATA);
3970			slgt_irq_on(info, IRQ_TXUNDER + IRQ_TXIDLE);
3971			/* clear tx idle and underrun status bits */
3972			wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
3973		} else {
3974			slgt_irq_off(info, IRQ_TXDATA);
3975			slgt_irq_on(info, IRQ_TXIDLE);
3976			/* clear tx idle status bit */
3977			wr_reg16(info, SSR, IRQ_TXIDLE);
3978		}
3979		/* set 1st descriptor address and start DMA */
3980		wr_reg32(info, TDDAR, info->tbufs[info->tbuf_start].pdesc);
3981		wr_reg32(info, TDCSR, BIT2 + BIT0);
3982		info->tx_active = true;
3983	}
3984}
3985
3986static void tx_stop(struct slgt_info *info)
3987{
3988	unsigned short val;
3989
3990	del_timer(&info->tx_timer);
3991
3992	tdma_reset(info);
3993
3994	/* reset and disable transmitter */
3995	val = rd_reg16(info, TCR) & ~BIT1;          /* clear enable bit */
3996	wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
3997
3998	slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
3999
4000	/* clear tx idle and underrun status bit */
4001	wr_reg16(info, SSR, (unsigned short)(IRQ_TXIDLE + IRQ_TXUNDER));
4002
4003	reset_tbufs(info);
4004
4005	info->tx_enabled = false;
4006	info->tx_active = false;
4007}
4008
4009static void reset_port(struct slgt_info *info)
4010{
4011	if (!info->reg_addr)
4012		return;
4013
4014	tx_stop(info);
4015	rx_stop(info);
4016
4017	info->signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
4018	set_signals(info);
4019
4020	slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
4021}
4022
4023static void reset_adapter(struct slgt_info *info)
4024{
4025	int i;
4026	for (i=0; i < info->port_count; ++i) {
4027		if (info->port_array[i])
4028			reset_port(info->port_array[i]);
4029	}
4030}
4031
4032static void async_mode(struct slgt_info *info)
4033{
4034	unsigned short val;
4035
4036	slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
4037	tx_stop(info);
4038	rx_stop(info);
4039
4040	/* TCR (tx control)
4041	 *
4042	 * 15..13  mode, 010=async
4043	 * 12..10  encoding, 000=NRZ
4044	 * 09      parity enable
4045	 * 08      1=odd parity, 0=even parity
4046	 * 07      1=RTS driver control
4047	 * 06      1=break enable
4048	 * 05..04  character length
4049	 *         00=5 bits
4050	 *         01=6 bits
4051	 *         10=7 bits
4052	 *         11=8 bits
4053	 * 03      0=1 stop bit, 1=2 stop bits
4054	 * 02      reset
4055	 * 01      enable
4056	 * 00      auto-CTS enable
4057	 */
4058	val = 0x4000;
4059
4060	if (info->if_mode & MGSL_INTERFACE_RTS_EN)
4061		val |= BIT7;
4062
4063	if (info->params.parity != ASYNC_PARITY_NONE) {
4064		val |= BIT9;
4065		if (info->params.parity == ASYNC_PARITY_ODD)
4066			val |= BIT8;
4067	}
4068
4069	switch (info->params.data_bits)
4070	{
4071	case 6: val |= BIT4; break;
4072	case 7: val |= BIT5; break;
4073	case 8: val |= BIT5 + BIT4; break;
4074	}
4075
4076	if (info->params.stop_bits != 1)
4077		val |= BIT3;
4078
4079	if (info->params.flags & HDLC_FLAG_AUTO_CTS)
4080		val |= BIT0;
4081
4082	wr_reg16(info, TCR, val);
4083
4084	/* RCR (rx control)
4085	 *
4086	 * 15..13  mode, 010=async
4087	 * 12..10  encoding, 000=NRZ
4088	 * 09      parity enable
4089	 * 08      1=odd parity, 0=even parity
4090	 * 07..06  reserved, must be 0
4091	 * 05..04  character length
4092	 *         00=5 bits
4093	 *         01=6 bits
4094	 *         10=7 bits
4095	 *         11=8 bits
4096	 * 03      reserved, must be zero
4097	 * 02      reset
4098	 * 01      enable
4099	 * 00      auto-DCD enable
4100	 */
4101	val = 0x4000;
4102
4103	if (info->params.parity != ASYNC_PARITY_NONE) {
4104		val |= BIT9;
4105		if (info->params.parity == ASYNC_PARITY_ODD)
4106			val |= BIT8;
4107	}
4108
4109	switch (info->params.data_bits)
4110	{
4111	case 6: val |= BIT4; break;
4112	case 7: val |= BIT5; break;
4113	case 8: val |= BIT5 + BIT4; break;
4114	}
4115
4116	if (info->params.flags & HDLC_FLAG_AUTO_DCD)
4117		val |= BIT0;
4118
4119	wr_reg16(info, RCR, val);
4120
4121	/* CCR (clock control)
4122	 *
4123	 * 07..05  011 = tx clock source is BRG/16
4124	 * 04..02  010 = rx clock source is BRG
4125	 * 01      0 = auxclk disabled
4126	 * 00      1 = BRG enabled
4127	 *
4128	 * 0110 1001
4129	 */
4130	wr_reg8(info, CCR, 0x69);
4131
4132	msc_set_vcr(info);
4133
4134	/* SCR (serial control)
4135	 *
4136	 * 15  1=tx req on FIFO half empty
4137	 * 14  1=rx req on FIFO half full
4138	 * 13  tx data  IRQ enable
4139	 * 12  tx idle  IRQ enable
4140	 * 11  rx break on IRQ enable
4141	 * 10  rx data  IRQ enable
4142	 * 09  rx break off IRQ enable
4143	 * 08  overrun  IRQ enable
4144	 * 07  DSR      IRQ enable
4145	 * 06  CTS      IRQ enable
4146	 * 05  DCD      IRQ enable
4147	 * 04  RI       IRQ enable
4148	 * 03  0=16x sampling, 1=8x sampling
4149	 * 02  1=txd->rxd internal loopback enable
4150	 * 01  reserved, must be zero
4151	 * 00  1=master IRQ enable
4152	 */
4153	val = BIT15 + BIT14 + BIT0;
4154	/* JCR[8] : 1 = x8 async mode feature available */
4155	if ((rd_reg32(info, JCR) & BIT8) && info->params.data_rate &&
4156	    ((info->base_clock < (info->params.data_rate * 16)) ||
4157	     (info->base_clock % (info->params.data_rate * 16)))) {
4158		/* use 8x sampling */
4159		val |= BIT3;
4160		set_rate(info, info->params.data_rate * 8);
4161	} else {
4162		/* use 16x sampling */
4163		set_rate(info, info->params.data_rate * 16);
4164	}
4165	wr_reg16(info, SCR, val);
4166
4167	slgt_irq_on(info, IRQ_RXBREAK | IRQ_RXOVER);
4168
4169	if (info->params.loopback)
4170		enable_loopback(info);
4171}
4172
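/*
 * Illustrative register summary for a plain 9600 bps 8N1 async setup with
 * no flow control options, assuming a 14745600 Hz base clock (the usual
 * default; actual values depend on params and base_clock):
 *   TCR = 0x4000 + BIT5 + BIT4   = 0x4030
 *   RCR = 0x4000 + BIT5 + BIT4   = 0x4030
 *   CCR = 0x69                     (txclk = BRG/16, rxclk = BRG, BRG on)
 *   SCR = BIT15 + BIT14 + BIT0   = 0xc001  (16x sampling)
 *   BDR = 14745600/(9600*16) - 1 = 95
 * 8x sampling (SCR BIT3) is selected only when JCR[8] is set and the base
 * clock is too slow for, or not an integer multiple of, 16x the data rate;
 * e.g. 921600 bps still uses 16x sampling because 921600*16 == 14745600.
 */
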
4173static void sync_mode(struct slgt_info *info)
4174{
4175	unsigned short val;
4176
4177	slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
4178	tx_stop(info);
4179	rx_stop(info);
4180
4181	/* TCR (tx control)
4182	 *
4183	 * 15..13  mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
4184	 * 12..10  encoding
4185	 * 09      CRC enable
4186	 * 08      CRC32
4187	 * 07      1=RTS driver control
4188	 * 06      preamble enable
4189	 * 05..04  preamble length
4190	 * 03      share open/close flag
4191	 * 02      reset
4192	 * 01      enable
4193	 * 00      auto-CTS enable
4194	 */
4195	val = BIT2;
4196
4197	switch(info->params.mode) {
4198	case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
4199	case MGSL_MODE_BISYNC:   val |= BIT15; break;
4200	case MGSL_MODE_RAW:      val |= BIT13; break;
4201	}
4202	if (info->if_mode & MGSL_INTERFACE_RTS_EN)
4203		val |= BIT7;
4204
4205	switch(info->params.encoding)
4206	{
4207	case HDLC_ENCODING_NRZB:          val |= BIT10; break;
4208	case HDLC_ENCODING_NRZI_MARK:     val |= BIT11; break;
4209	case HDLC_ENCODING_NRZI:          val |= BIT11 + BIT10; break;
4210	case HDLC_ENCODING_BIPHASE_MARK:  val |= BIT12; break;
4211	case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
4212	case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
4213	case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
4214	}
4215
4216	switch (info->params.crc_type & HDLC_CRC_MASK)
4217	{
4218	case HDLC_CRC_16_CCITT: val |= BIT9; break;
4219	case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
4220	}
4221
4222	if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
4223		val |= BIT6;
4224
4225	switch (info->params.preamble_length)
4226	{
4227	case HDLC_PREAMBLE_LENGTH_16BITS: val |= BIT5; break;
4228	case HDLC_PREAMBLE_LENGTH_32BITS: val |= BIT4; break;
4229	case HDLC_PREAMBLE_LENGTH_64BITS: val |= BIT5 + BIT4; break;
4230	}
4231
4232	if (info->params.flags & HDLC_FLAG_AUTO_CTS)
4233		val |= BIT0;
4234
4235	wr_reg16(info, TCR, val);
4236
4237	/* TPR (transmit preamble) */
4238
4239	switch (info->params.preamble)
4240	{
4241	case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break;
4242	case HDLC_PREAMBLE_PATTERN_ONES:  val = 0xff; break;
4243	case HDLC_PREAMBLE_PATTERN_ZEROS: val = 0x00; break;
4244	case HDLC_PREAMBLE_PATTERN_10:    val = 0x55; break;
4245	case HDLC_PREAMBLE_PATTERN_01:    val = 0xaa; break;
4246	default:                          val = 0x7e; break;
4247	}
4248	wr_reg8(info, TPR, (unsigned char)val);
4249
4250	/* RCR (rx control)
4251	 *
4252	 * 15..13  mode, 000=HDLC 001=raw 010=async 011=monosync 100=bisync
4253	 * 12..10  encoding
4254	 * 09      CRC enable
4255	 * 08      CRC32
4256	 * 07..03  reserved, must be 0
4257	 * 02      reset
4258	 * 01      enable
4259	 * 00      auto-DCD enable
4260	 */
4261	val = 0;
4262
4263	switch(info->params.mode) {
4264	case MGSL_MODE_MONOSYNC: val |= BIT14 + BIT13; break;
4265	case MGSL_MODE_BISYNC:   val |= BIT15; break;
4266	case MGSL_MODE_RAW:      val |= BIT13; break;
4267	}
4268
4269	switch(info->params.encoding)
4270	{
4271	case HDLC_ENCODING_NRZB:          val |= BIT10; break;
4272	case HDLC_ENCODING_NRZI_MARK:     val |= BIT11; break;
4273	case HDLC_ENCODING_NRZI:          val |= BIT11 + BIT10; break;
4274	case HDLC_ENCODING_BIPHASE_MARK:  val |= BIT12; break;
4275	case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT12 + BIT10; break;
4276	case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT12 + BIT11; break;
4277	case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: val |= BIT12 + BIT11 + BIT10; break;
4278	}
4279
4280	switch (info->params.crc_type & HDLC_CRC_MASK)
4281	{
4282	case HDLC_CRC_16_CCITT: val |= BIT9; break;
4283	case HDLC_CRC_32_CCITT: val |= BIT9 + BIT8; break;
4284	}
4285
4286	if (info->params.flags & HDLC_FLAG_AUTO_DCD)
4287		val |= BIT0;
4288
4289	wr_reg16(info, RCR, val);
4290
4291	/* CCR (clock control)
4292	 *
4293	 * 07..05  tx clock source
4294	 * 04..02  rx clock source
4295	 * 01      auxclk enable
4296	 * 00      BRG enable
4297	 */
4298	val = 0;
4299
4300	if (info->params.flags & HDLC_FLAG_TXC_BRG)
4301	{
4302		// when RxC source is DPLL, BRG generates 16X DPLL
4303		// reference clock, so take TxC from BRG/16 to get
4304		// transmit clock at actual data rate
4305		if (info->params.flags & HDLC_FLAG_RXC_DPLL)
4306			val |= BIT6 + BIT5;	/* 011, txclk = BRG/16 */
4307		else
4308			val |= BIT6;	/* 010, txclk = BRG */
4309	}
4310	else if (info->params.flags & HDLC_FLAG_TXC_DPLL)
4311		val |= BIT7;	/* 100, txclk = DPLL Input */
4312	else if (info->params.flags & HDLC_FLAG_TXC_RXCPIN)
4313		val |= BIT5;	/* 001, txclk = RXC Input */
4314
4315	if (info->params.flags & HDLC_FLAG_RXC_BRG)
4316		val |= BIT3;	/* 010, rxclk = BRG */
4317	else if (info->params.flags & HDLC_FLAG_RXC_DPLL)
4318		val |= BIT4;	/* 100, rxclk = DPLL */
4319	else if (info->params.flags & HDLC_FLAG_RXC_TXCPIN)
4320		val |= BIT2;	/* 001, rxclk = TXC Input */
4321
4322	if (info->params.clock_speed)
4323		val |= BIT1 + BIT0;
4324
4325	wr_reg8(info, CCR, (unsigned char)val);
4326
4327	if (info->params.flags & (HDLC_FLAG_TXC_DPLL + HDLC_FLAG_RXC_DPLL))
4328	{
4329		// program DPLL mode
4330		switch(info->params.encoding)
4331		{
4332		case HDLC_ENCODING_BIPHASE_MARK:
4333		case HDLC_ENCODING_BIPHASE_SPACE:
4334			val = BIT7; break;
4335		case HDLC_ENCODING_BIPHASE_LEVEL:
4336		case HDLC_ENCODING_DIFF_BIPHASE_LEVEL:
4337			val = BIT7 + BIT6; break;
4338		default: val = BIT6;	// NRZ encodings
4339		}
4340		wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | val));
4341
4342		// DPLL requires a 16X reference clock from BRG
4343		set_rate(info, info->params.clock_speed * 16);
4344	}
4345	else
4346		set_rate(info, info->params.clock_speed);
4347
4348	tx_set_idle(info);
4349
4350	msc_set_vcr(info);
4351
4352	/* SCR (serial control)
4353	 *
4354	 * 15  1=tx req on FIFO half empty
4355	 * 14  1=rx req on FIFO half full
4356	 * 13  tx data  IRQ enable
4357	 * 12  tx idle  IRQ enable
4358	 * 11  underrun IRQ enable
4359	 * 10  rx data  IRQ enable
4360	 * 09  rx idle  IRQ enable
4361	 * 08  overrun  IRQ enable
4362	 * 07  DSR      IRQ enable
4363	 * 06  CTS      IRQ enable
4364	 * 05  DCD      IRQ enable
4365	 * 04  RI       IRQ enable
4366	 * 03  reserved, must be zero
4367	 * 02  1=txd->rxd internal loopback enable
4368	 * 01  reserved, must be zero
4369	 * 00  1=master IRQ enable
4370	 */
4371	wr_reg16(info, SCR, BIT15 + BIT14 + BIT0);
4372
4373	if (info->params.loopback)
4374		enable_loopback(info);
4375}
4376
4377/*
4378 *  set transmit idle mode
4379 */
4380static void tx_set_idle(struct slgt_info *info)
4381{
4382	unsigned char val;
4383	unsigned short tcr;
4384
4385	/* if preamble enabled (tcr[6] == 1) then tx idle size = 8 bits
4386	 * else tcr[5:4] = tx idle size: 00 = 8 bits, 01 = 16 bits
4387	 */
4388	tcr = rd_reg16(info, TCR);
4389	if (info->idle_mode & HDLC_TXIDLE_CUSTOM_16) {
4390		/* disable preamble, set idle size to 16 bits */
4391		tcr = (tcr & ~(BIT6 + BIT5)) | BIT4;
4392		/* MSB of 16 bit idle specified in tx preamble register (TPR) */
4393		wr_reg8(info, TPR, (unsigned char)((info->idle_mode >> 8) & 0xff));
4394	} else if (!(tcr & BIT6)) {
4395		/* preamble is disabled, set idle size to 8 bits */
4396		tcr &= ~(BIT5 + BIT4);
4397	}
4398	wr_reg16(info, TCR, tcr);
4399
4400	if (info->idle_mode & (HDLC_TXIDLE_CUSTOM_8 | HDLC_TXIDLE_CUSTOM_16)) {
4401		/* LSB of custom tx idle specified in tx idle register */
4402		val = (unsigned char)(info->idle_mode & 0xff);
4403	} else {
4404		/* standard 8 bit idle patterns */
4405		switch(info->idle_mode)
4406		{
4407		case HDLC_TXIDLE_FLAGS:          val = 0x7e; break;
4408		case HDLC_TXIDLE_ALT_ZEROS_ONES:
4409		case HDLC_TXIDLE_ALT_MARK_SPACE: val = 0xaa; break;
4410		case HDLC_TXIDLE_ZEROS:
4411		case HDLC_TXIDLE_SPACE:          val = 0x00; break;
4412		default:                         val = 0xff;
4413		}
4414	}
4415
4416	wr_reg8(info, TIR, val);
4417}
4418
4419/*
4420 * get state of V24 status (input) signals
4421 */
4422static void get_signals(struct slgt_info *info)
4423{
4424	unsigned short status = rd_reg16(info, SSR);
4425
4426	/* clear all serial signals except DTR and RTS */
4427	info->signals &= SerialSignal_DTR + SerialSignal_RTS;
4428
4429	if (status & BIT3)
4430		info->signals |= SerialSignal_DSR;
4431	if (status & BIT2)
4432		info->signals |= SerialSignal_CTS;
4433	if (status & BIT1)
4434		info->signals |= SerialSignal_DCD;
4435	if (status & BIT0)
4436		info->signals |= SerialSignal_RI;
4437}
4438
4439/*
4440 * set V.24 Control Register based on current configuration
4441 */
4442static void msc_set_vcr(struct slgt_info *info)
4443{
4444	unsigned char val = 0;
4445
4446	/* VCR (V.24 control)
4447	 *
4448	 * 07..04  serial IF select
4449	 * 03      DTR
4450	 * 02      RTS
4451	 * 01      LL
4452	 * 00      RL
4453	 */
4454
4455	switch(info->if_mode & MGSL_INTERFACE_MASK)
4456	{
4457	case MGSL_INTERFACE_RS232:
4458		val |= BIT5; /* 0010 */
4459		break;
4460	case MGSL_INTERFACE_V35:
4461		val |= BIT7 + BIT6 + BIT5; /* 1110 */
4462		break;
4463	case MGSL_INTERFACE_RS422:
4464		val |= BIT6; /* 0100 */
4465		break;
4466	}
4467
4468	if (info->if_mode & MGSL_INTERFACE_MSB_FIRST)
4469		val |= BIT4;
4470	if (info->signals & SerialSignal_DTR)
4471		val |= BIT3;
4472	if (info->signals & SerialSignal_RTS)
4473		val |= BIT2;
4474	if (info->if_mode & MGSL_INTERFACE_LL)
4475		val |= BIT1;
4476	if (info->if_mode & MGSL_INTERFACE_RL)
4477		val |= BIT0;
4478	wr_reg8(info, VCR, val);
4479}
4480
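/*
 * Example (illustrative): RS-232 interface with DTR and RTS asserted and
 * MSB_FIRST/LL/RL clear yields VCR = BIT5 + BIT3 + BIT2 = 0x2c.
 */
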
4481/*
4482 * set state of V24 control (output) signals
4483 */
4484static void set_signals(struct slgt_info *info)
4485{
4486	unsigned char val = rd_reg8(info, VCR);
4487	if (info->signals & SerialSignal_DTR)
4488		val |= BIT3;
4489	else
4490		val &= ~BIT3;
4491	if (info->signals & SerialSignal_RTS)
4492		val |= BIT2;
4493	else
4494		val &= ~BIT2;
4495	wr_reg8(info, VCR, val);
4496}
4497
4498/*
4499 * free range of receive DMA buffers (i to last)
4500 */
4501static void free_rbufs(struct slgt_info *info, unsigned int i, unsigned int last)
4502{
4503	int done = 0;
4504
4505	while(!done) {
4506		/* reset current buffer for reuse */
4507		info->rbufs[i].status = 0;
4508		set_desc_count(info->rbufs[i], info->rbuf_fill_level);
4509		if (i == last)
4510			done = 1;
4511		if (++i == info->rbuf_count)
4512			i = 0;
4513	}
4514	info->rbuf_current = i;
4515}
4516
4517/*
4518 * mark all receive DMA buffers as free
4519 */
4520static void reset_rbufs(struct slgt_info *info)
4521{
4522	free_rbufs(info, 0, info->rbuf_count - 1);
4523	info->rbuf_fill_index = 0;
4524	info->rbuf_fill_count = 0;
4525}
4526
4527/*
4528 * pass receive HDLC frame to upper layer
4529 *
4530 * return true if frame available, otherwise false
4531 */
4532static bool rx_get_frame(struct slgt_info *info)
4533{
4534	unsigned int start, end;
4535	unsigned short status;
4536	unsigned int framesize = 0;
4537	unsigned long flags;
4538	struct tty_struct *tty = info->port.tty;
4539	unsigned char addr_field = 0xff;
4540	unsigned int crc_size = 0;
4541
4542	switch (info->params.crc_type & HDLC_CRC_MASK) {
4543	case HDLC_CRC_16_CCITT: crc_size = 2; break;
4544	case HDLC_CRC_32_CCITT: crc_size = 4; break;
4545	}
4546
4547check_again:
4548
4549	framesize = 0;
4550	addr_field = 0xff;
4551	start = end = info->rbuf_current;
4552
4553	for (;;) {
4554		if (!desc_complete(info->rbufs[end]))
4555			goto cleanup;
4556
4557		if (framesize == 0 && info->params.addr_filter != 0xff)
4558			addr_field = info->rbufs[end].buf[0];
4559
4560		framesize += desc_count(info->rbufs[end]);
4561
4562		if (desc_eof(info->rbufs[end]))
4563			break;
4564
4565		if (++end == info->rbuf_count)
4566			end = 0;
4567
4568		if (end == info->rbuf_current) {
4569			if (info->rx_enabled){
4570				spin_lock_irqsave(&info->lock,flags);
4571				rx_start(info);
4572				spin_unlock_irqrestore(&info->lock,flags);
4573			}
4574			goto cleanup;
4575		}
4576	}
4577
4578	/* status
4579	 *
4580	 * 15      buffer complete
4581	 * 14..06  reserved
4582	 * 05..04  residue
4583	 * 02      eof (end of frame)
4584	 * 01      CRC error
4585	 * 00      abort
4586	 */
4587	status = desc_status(info->rbufs[end]);
4588
4589	/* ignore CRC bit if not using CRC (bit is undefined) */
4590	if ((info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_NONE)
4591		status &= ~BIT1;
4592
4593	if (framesize == 0 ||
4594		 (addr_field != 0xff && addr_field != info->params.addr_filter)) {
4595		free_rbufs(info, start, end);
4596		goto check_again;
4597	}
4598
4599	if (framesize < (2 + crc_size) || status & BIT0) {
4600		info->icount.rxshort++;
4601		framesize = 0;
4602	} else if (status & BIT1) {
4603		info->icount.rxcrc++;
4604		if (!(info->params.crc_type & HDLC_CRC_RETURN_EX))
4605			framesize = 0;
4606	}
4607
4608#if SYNCLINK_GENERIC_HDLC
4609	if (framesize == 0) {
4610		info->netdev->stats.rx_errors++;
4611		info->netdev->stats.rx_frame_errors++;
4612	}
4613#endif
4614
4615	DBGBH(("%s rx frame status=%04X size=%d\n",
4616		info->device_name, status, framesize));
4617	DBGDATA(info, info->rbufs[start].buf, min_t(int, framesize, info->rbuf_fill_level), "rx");
4618
4619	if (framesize) {
4620		if (!(info->params.crc_type & HDLC_CRC_RETURN_EX)) {
4621			framesize -= crc_size;
4622			crc_size = 0;
4623		}
4624
4625		if (framesize > info->max_frame_size + crc_size)
4626			info->icount.rxlong++;
4627		else {
4628			/* copy dma buffer(s) to contiguous temp buffer */
4629			int copy_count = framesize;
4630			int i = start;
4631			unsigned char *p = info->tmp_rbuf;
4632			info->tmp_rbuf_count = framesize;
4633
4634			info->icount.rxok++;
4635
4636			while(copy_count) {
4637				int partial_count = min_t(int, copy_count, info->rbuf_fill_level);
4638				memcpy(p, info->rbufs[i].buf, partial_count);
4639				p += partial_count;
4640				copy_count -= partial_count;
4641				if (++i == info->rbuf_count)
4642					i = 0;
4643			}
4644
4645			if (info->params.crc_type & HDLC_CRC_RETURN_EX) {
4646				*p = (status & BIT1) ? RX_CRC_ERROR : RX_OK;
4647				framesize++;
4648			}
4649
4650#if SYNCLINK_GENERIC_HDLC
4651			if (info->netcount)
4652				hdlcdev_rx(info,info->tmp_rbuf, framesize);
4653			else
4654#endif
4655				ldisc_receive_buf(tty, info->tmp_rbuf, info->flag_buf, framesize);
4656		}
4657	}
4658	free_rbufs(info, start, end);
4659	return true;
4660
4661cleanup:
4662	return false;
4663}
4664
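/*
 * Worked example (sketch, assuming rbuf_fill_level = 256 and CRC-16): a
 * received HDLC frame of 600 bytes including the 2-byte FCS spans three
 * descriptors with counts 256 + 256 + 88 and EOF set on the third, so
 * framesize = 600. Without HDLC_CRC_RETURN_EX the 2 CRC bytes are trimmed
 * and 598 bytes are copied to tmp_rbuf and passed to the line discipline.
 */
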
4665/*
4666 * pass receive buffer (RAW synchronous mode) to tty layer
4667 * return true if buffer available, otherwise false
4668 */
4669static bool rx_get_buf(struct slgt_info *info)
4670{
4671	unsigned int i = info->rbuf_current;
4672	unsigned int count;
4673
4674	if (!desc_complete(info->rbufs[i]))
4675		return false;
4676	count = desc_count(info->rbufs[i]);
4677	switch(info->params.mode) {
4678	case MGSL_MODE_MONOSYNC:
4679	case MGSL_MODE_BISYNC:
4680		/* ignore residue in byte synchronous modes */
4681		if (desc_residue(info->rbufs[i]))
4682			count--;
4683		break;
4684	}
4685	DBGDATA(info, info->rbufs[i].buf, count, "rx");
4686	DBGINFO(("rx_get_buf size=%d\n", count));
4687	if (count)
4688		ldisc_receive_buf(info->port.tty, info->rbufs[i].buf,
4689				  info->flag_buf, count);
4690	free_rbufs(info, i, i);
4691	return true;
4692}
4693
4694static void reset_tbufs(struct slgt_info *info)
4695{
4696	unsigned int i;
4697	info->tbuf_current = 0;
4698	for (i=0 ; i < info->tbuf_count ; i++) {
4699		info->tbufs[i].status = 0;
4700		info->tbufs[i].count  = 0;
4701	}
4702}
4703
4704/*
4705 * return number of free transmit DMA buffers
4706 */
4707static unsigned int free_tbuf_count(struct slgt_info *info)
4708{
4709	unsigned int count = 0;
4710	unsigned int i = info->tbuf_current;
4711
4712	do
4713	{
4714		if (desc_count(info->tbufs[i]))
4715			break; /* buffer in use */
4716		++count;
4717		if (++i == info->tbuf_count)
4718			i=0;
4719	} while (i != info->tbuf_current);
4720
4721	/* if tx DMA active, last zero count buffer is in use */
4722	if (count && (rd_reg32(info, TDCSR) & BIT0))
4723		--count;
4724
4725	return count;
4726}
4727
4728/*
4729 * return number of bytes in unsent transmit DMA buffers
4730 * and the serial controller tx FIFO
4731 */
4732static unsigned int tbuf_bytes(struct slgt_info *info)
4733{
4734	unsigned int total_count = 0;
4735	unsigned int i = info->tbuf_current;
4736	unsigned int reg_value;
4737	unsigned int count;
4738	unsigned int active_buf_count = 0;
4739
4740	/*
4741	 * Add descriptor counts for all tx DMA buffers.
4742	 * If count is zero (cleared by DMA controller after read),
4743	 * the buffer is complete or is actively being read from.
4744	 *
4745	 * Record buf_count of last buffer with zero count starting
4746	 * from current ring position. buf_count is mirror
4747	 * copy of count and is not cleared by serial controller.
4748	 * If DMA controller is active, that buffer is actively
4749	 * being read so add to total.
4750	 */
4751	do {
4752		count = desc_count(info->tbufs[i]);
4753		if (count)
4754			total_count += count;
4755		else if (!total_count)
4756			active_buf_count = info->tbufs[i].buf_count;
4757		if (++i == info->tbuf_count)
4758			i = 0;
4759	} while (i != info->tbuf_current);
4760
4761	/* read tx DMA status register */
4762	reg_value = rd_reg32(info, TDCSR);
4763
4764	/* if tx DMA active, last zero count buffer is in use */
4765	if (reg_value & BIT0)
4766		total_count += active_buf_count;
4767
4768	/* add tx FIFO count = reg_value[15..8] */
4769	total_count += (reg_value >> 8) & 0xff;
4770
4771	/* if transmitter active add one byte for shift register */
4772	if (info->tx_active)
4773		total_count++;
4774
4775	return total_count;
4776}
4777
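/*
 * Worked example (sketch, made-up numbers): 4 descriptors, tbuf_current = 0,
 * descriptor counts {0, 384, 0, 0}, tbufs[0].buf_count = 256, TDCSR bit 0
 * set (DMA active), FIFO count field = 16, transmitter active:
 *   384 (pending descriptor) + 256 (descriptor being read by DMA)
 *   + 16 (tx FIFO) + 1 (shift register) = 657 bytes
 */
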
4778/*
4779 * load data into transmit DMA buffer ring and start transmitter if needed
4780 * return true if data accepted, otherwise false (buffers full)
4781 */
4782static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
4783{
4784	unsigned short count;
4785	unsigned int i;
4786	struct slgt_desc *d;
4787
4788	/* check required buffer space */
4789	if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
4790		return false;
4791
4792	DBGDATA(info, buf, size, "tx");
4793
4794	/*
4795	 * copy data to one or more DMA buffers in circular ring
4796	 * tbuf_start   = first buffer for this data
4797	 * tbuf_current = next free buffer
4798	 *
4799	 * Copy all data before making data visible to DMA controller by
4800	 * setting descriptor count of the first buffer.
4801	 * This prevents an active DMA controller from reading the first DMA
4802	 * buffers of a frame and stopping before the final buffers are filled.
4803	 */
4804
4805	info->tbuf_start = i = info->tbuf_current;
4806
4807	while (size) {
4808		d = &info->tbufs[i];
4809
4810		count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
4811		memcpy(d->buf, buf, count);
4812
4813		size -= count;
4814		buf  += count;
4815
4816		/*
4817		 * set EOF bit for last buffer of HDLC frame or
4818		 * for every buffer in raw mode
4819		 */
4820		if ((!size && info->params.mode == MGSL_MODE_HDLC) ||
4821		    info->params.mode == MGSL_MODE_RAW)
4822			set_desc_eof(*d, 1);
4823		else
4824			set_desc_eof(*d, 0);
4825
4826		/* set descriptor count for all but first buffer */
4827		if (i != info->tbuf_start)
4828			set_desc_count(*d, count);
4829		d->buf_count = count;
4830
4831		if (++i == info->tbuf_count)
4832			i = 0;
4833	}
4834
4835	info->tbuf_current = i;
4836
4837	/* set first buffer count to make new data visible to DMA controller */
4838	d = &info->tbufs[info->tbuf_start];
4839	set_desc_count(*d, d->buf_count);
4840
4841	/* start transmitter if needed and update transmit timeout */
4842	if (!info->tx_active)
4843		tx_start(info);
4844	update_tx_timer(info);
4845
4846	return true;
4847}
4848
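/*
 * Usage sketch (illustrative): callers in this driver invoke tx_load()
 * with info->lock held, as loopback_test() below does, e.g.
 *
 *	spin_lock_irqsave(&info->lock, flags);
 *	if (!tx_load(info, buf, count))
 *		count = 0;	 hypothetical handling: ring full, nothing queued
 *	spin_unlock_irqrestore(&info->lock, flags);
 *
 * Data larger than DMABUFSIZE is split across consecutive descriptors; the
 * first descriptor's count is written last so the DMA controller never sees
 * a partially filled frame.
 */
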
4849static int register_test(struct slgt_info *info)
4850{
4851	static unsigned short patterns[] =
4852		{0x0000, 0xffff, 0xaaaa, 0x5555, 0x6969, 0x9696};
4853	static unsigned int count = ARRAY_SIZE(patterns);
4854	unsigned int i;
4855	int rc = 0;
4856
4857	for (i=0 ; i < count ; i++) {
4858		wr_reg16(info, TIR, patterns[i]);
4859		wr_reg16(info, BDR, patterns[(i+1)%count]);
4860		if ((rd_reg16(info, TIR) != patterns[i]) ||
4861		    (rd_reg16(info, BDR) != patterns[(i+1)%count])) {
4862			rc = -ENODEV;
4863			break;
4864		}
4865	}
4866	info->gpio_present = (rd_reg32(info, JCR) & BIT5) ? 1 : 0;
4867	info->init_error = rc ? DiagStatus_AddressFailure : 0;
4868	return rc;
4869}
4870
4871static int irq_test(struct slgt_info *info)
4872{
4873	unsigned long timeout;
4874	unsigned long flags;
4875	struct tty_struct *oldtty = info->port.tty;
4876	u32 speed = info->params.data_rate;
4877
4878	info->params.data_rate = 921600;
4879	info->port.tty = NULL;
4880
4881	spin_lock_irqsave(&info->lock, flags);
4882	async_mode(info);
4883	slgt_irq_on(info, IRQ_TXIDLE);
4884
4885	/* enable transmitter */
4886	wr_reg16(info, TCR,
4887		(unsigned short)(rd_reg16(info, TCR) | BIT1));
4888
4889	/* write one byte and wait for tx idle */
4890	wr_reg16(info, TDR, 0);
4891
4892	/* assume failure */
4893	info->init_error = DiagStatus_IrqFailure;
4894	info->irq_occurred = false;
4895
4896	spin_unlock_irqrestore(&info->lock, flags);
4897
4898	timeout=100;
4899	while(timeout-- && !info->irq_occurred)
4900		msleep_interruptible(10);
4901
4902	spin_lock_irqsave(&info->lock,flags);
4903	reset_port(info);
4904	spin_unlock_irqrestore(&info->lock,flags);
4905
4906	info->params.data_rate = speed;
4907	info->port.tty = oldtty;
4908
4909	info->init_error = info->irq_occurred ? 0 : DiagStatus_IrqFailure;
4910	return info->irq_occurred ? 0 : -ENODEV;
4911}
4912
4913static int loopback_test_rx(struct slgt_info *info)
4914{
4915	unsigned char *src, *dest;
4916	int count;
4917
4918	if (desc_complete(info->rbufs[0])) {
4919		count = desc_count(info->rbufs[0]);
4920		src   = info->rbufs[0].buf;
4921		dest  = info->tmp_rbuf;
4922
4923		for( ; count ; count-=2, src+=2) {
4924			/* src=data byte (src+1)=status byte */
4925			if (!(*(src+1) & (BIT9 + BIT8))) {
4926				*dest = *src;
4927				dest++;
4928				info->tmp_rbuf_count++;
4929			}
4930		}
4931		DBGDATA(info, info->tmp_rbuf, info->tmp_rbuf_count, "rx");
4932		return 1;
4933	}
4934	return 0;
4935}
4936
4937static int loopback_test(struct slgt_info *info)
4938{
4939#define TESTFRAMESIZE 20
4940
4941	unsigned long timeout;
4942	u16 count = TESTFRAMESIZE;
4943	unsigned char buf[TESTFRAMESIZE];
4944	int rc = -ENODEV;
4945	unsigned long flags;
4946
4947	struct tty_struct *oldtty = info->port.tty;
4948	MGSL_PARAMS params;
4949
4950	memcpy(&params, &info->params, sizeof(params));
4951
4952	info->params.mode = MGSL_MODE_ASYNC;
4953	info->params.data_rate = 921600;
4954	info->params.loopback = 1;
4955	info->port.tty = NULL;
4956
4957	/* build and send transmit frame */
4958	for (count = 0; count < TESTFRAMESIZE; ++count)
4959		buf[count] = (unsigned char)count;
4960
4961	info->tmp_rbuf_count = 0;
4962	memset(info->tmp_rbuf, 0, TESTFRAMESIZE);
4963
4964	/* program hardware for async mode and enable receiver */
4965	spin_lock_irqsave(&info->lock,flags);
4966	async_mode(info);
4967	rx_start(info);
4968	tx_load(info, buf, count);
4969	spin_unlock_irqrestore(&info->lock, flags);
4970
4971	/* wait for receive complete */
4972	for (timeout = 100; timeout; --timeout) {
4973		msleep_interruptible(10);
4974		if (loopback_test_rx(info)) {
4975			rc = 0;
4976			break;
4977		}
4978	}
4979
4980	/* verify received frame length and contents */
4981	if (!rc && (info->tmp_rbuf_count != count ||
4982		  memcmp(buf, info->tmp_rbuf, count))) {
4983		rc = -ENODEV;
4984	}
4985
4986	spin_lock_irqsave(&info->lock,flags);
4987	reset_adapter(info);
4988	spin_unlock_irqrestore(&info->lock,flags);
4989
4990	memcpy(&info->params, &params, sizeof(info->params));
4991	info->port.tty = oldtty;
4992
4993	info->init_error = rc ? DiagStatus_DmaFailure : 0;
4994	return rc;
4995}
4996
4997static int adapter_test(struct slgt_info *info)
4998{
4999	DBGINFO(("testing %s\n", info->device_name));
5000	if (register_test(info) < 0) {
5001		printk(KERN_ERR "register test failure %s addr=%08X\n",
5002			info->device_name, info->phys_reg_addr);
5003	} else if (irq_test(info) < 0) {
5004		printk(KERN_ERR "IRQ test failure %s IRQ=%d\n",
5005			info->device_name, info->irq_level);
5006	} else if (loopback_test(info) < 0) {
5007		printk(KERN_ERR "loopback test failure %s\n", info->device_name);
5008	}
5009	return info->init_error;
5010}
5011
5012/*
5013 * transmit timeout handler
5014 */
5015static void tx_timeout(unsigned long context)
5016{
5017	struct slgt_info *info = (struct slgt_info*)context;
5018	unsigned long flags;
5019
5020	DBGINFO(("%s tx_timeout\n", info->device_name));
5021	if(info->tx_active && info->params.mode == MGSL_MODE_HDLC) {
5022		info->icount.txtimeout++;
5023	}
5024	spin_lock_irqsave(&info->lock,flags);
5025	tx_stop(info);
5026	spin_unlock_irqrestore(&info->lock,flags);
5027
5028#if SYNCLINK_GENERIC_HDLC
5029	if (info->netcount)
5030		hdlcdev_tx_done(info);
5031	else
5032#endif
5033		bh_transmit(info);
5034}
5035
5036/*
5037 * receive buffer polling timer
5038 */
5039static void rx_timeout(unsigned long context)
5040{
5041	struct slgt_info *info = (struct slgt_info*)context;
5042	unsigned long flags;
5043
5044	DBGINFO(("%s rx_timeout\n", info->device_name));
5045	spin_lock_irqsave(&info->lock, flags);
5046	info->pending_bh |= BH_RECEIVE;
5047	spin_unlock_irqrestore(&info->lock, flags);
5048	bh_handler(&info->task);
5049}
5050