1/* 2 * linux/drivers/char/synclink.c 3 * 4 * $Id: synclink.c,v 1.1.1.1 2007/08/03 18:52:28 Exp $ 5 * 6 * Device driver for Microgate SyncLink ISA and PCI 7 * high speed multiprotocol serial adapters. 8 * 9 * written by Paul Fulghum for Microgate Corporation 10 * paulkf@microgate.com 11 * 12 * Microgate and SyncLink are trademarks of Microgate Corporation 13 * 14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds 15 * 16 * Original release 01/11/99 17 * 18 * This code is released under the GNU General Public License (GPL) 19 * 20 * This driver is primarily intended for use in synchronous 21 * HDLC mode. Asynchronous mode is also provided. 22 * 23 * When operating in synchronous mode, each call to mgsl_write() 24 * contains exactly one complete HDLC frame. Calling mgsl_put_char 25 * will start assembling an HDLC frame that will not be sent until 26 * mgsl_flush_chars or mgsl_write is called. 27 * 28 * Synchronous receive data is reported as complete frames. To accomplish 29 * this, the TTY flip buffer is bypassed (too small to hold largest 30 * frame and may fragment frames) and the line discipline 31 * receive entry point is called directly. 32 * 33 * This driver has been tested with a slightly modified ppp.c driver 34 * for synchronous PPP. 35 * 36 * 2000/02/16 37 * Added interface for syncppp.c driver (an alternate synchronous PPP 38 * implementation that also supports Cisco HDLC). Each device instance 39 * registers as a tty device AND a network device (if dosyncppp option 40 * is set for the device). The functionality is determined by which 41 * device interface is opened. 42 * 43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 46 * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 53 * OF THE POSSIBILITY OF SUCH DAMAGE. 54 */ 55 56#if defined(__i386__) 57# define BREAKPOINT() asm(" int $3"); 58#else 59# define BREAKPOINT() { } 60#endif 61 62#define MAX_ISA_DEVICES 10 63#define MAX_PCI_DEVICES 10 64#define MAX_TOTAL_DEVICES 20 65 66#include <linux/module.h> 67#include <linux/errno.h> 68#include <linux/signal.h> 69#include <linux/sched.h> 70#include <linux/timer.h> 71#include <linux/interrupt.h> 72#include <linux/pci.h> 73#include <linux/tty.h> 74#include <linux/tty_flip.h> 75#include <linux/serial.h> 76#include <linux/major.h> 77#include <linux/string.h> 78#include <linux/fcntl.h> 79#include <linux/ptrace.h> 80#include <linux/ioport.h> 81#include <linux/mm.h> 82#include <linux/slab.h> 83#include <linux/delay.h> 84 85#include <linux/netdevice.h> 86 87#include <linux/vmalloc.h> 88#include <linux/init.h> 89 90#include <linux/delay.h> 91#include <linux/ioctl.h> 92 93#include <asm/system.h> 94#include <asm/io.h> 95#include <asm/irq.h> 96#include <asm/dma.h> 97#include <linux/bitops.h> 98#include <asm/types.h> 99#include <linux/termios.h> 100#include <linux/workqueue.h> 101#include <linux/hdlc.h> 102#include <linux/dma-mapping.h> 103 104#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && \ 105 defined(CONFIG_SYNCLINK_MODULE)) 106#define SYNCLINK_GENERIC_HDLC 1 107#else 108#define SYNCLINK_GENERIC_HDLC 0 109#endif 110 111#define GET_USER(error,value,addr) error = get_user(value,addr) 112#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) 
? -EFAULT : 0 113#define PUT_USER(error,value,addr) error = put_user(value,addr) 114#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0 115 116#include <asm/uaccess.h> 117 118#include "linux/synclink.h" 119 120#define RCLRVALUE 0xffff 121 122static MGSL_PARAMS default_params = { 123 MGSL_MODE_HDLC, /* unsigned long mode */ 124 0, /* unsigned char loopback; */ 125 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ 126 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ 127 0, /* unsigned long clock_speed; */ 128 0xff, /* unsigned char addr_filter; */ 129 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ 130 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ 131 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ 132 9600, /* unsigned long data_rate; */ 133 8, /* unsigned char data_bits; */ 134 1, /* unsigned char stop_bits; */ 135 ASYNC_PARITY_NONE /* unsigned char parity; */ 136}; 137 138#define SHARED_MEM_ADDRESS_SIZE 0x40000 139#define BUFFERLISTSIZE 4096 140#define DMABUFFERSIZE 4096 141#define MAXRXFRAMES 7 142 143typedef struct _DMABUFFERENTRY 144{ 145 u32 phys_addr; /* 32-bit flat physical address of data buffer */ 146 volatile u16 count; /* buffer size/data count */ 147 volatile u16 status; /* Control/status field */ 148 volatile u16 rcc; /* character count field */ 149 u16 reserved; /* padding required by 16C32 */ 150 u32 link; /* 32-bit flat link to next buffer entry */ 151 char *virt_addr; /* virtual address of data buffer */ 152 u32 phys_entry; /* physical address of this buffer entry */ 153 dma_addr_t dma_addr; 154} DMABUFFERENTRY, *DMAPBUFFERENTRY; 155 156/* The queue of BH actions to be performed */ 157 158#define BH_RECEIVE 1 159#define BH_TRANSMIT 2 160#define BH_STATUS 4 161 162#define IO_PIN_SHUTDOWN_LIMIT 100 163 164#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) 165 166struct _input_signal_events { 167 int ri_up; 168 int ri_down; 169 int dsr_up; 170 
int dsr_down; 171 int dcd_up; 172 int dcd_down; 173 int cts_up; 174 int cts_down; 175}; 176 177/* transmit holding buffer definitions*/ 178#define MAX_TX_HOLDING_BUFFERS 5 179struct tx_holding_buffer { 180 int buffer_size; 181 unsigned char * buffer; 182}; 183 184 185/* 186 * Device instance data structure 187 */ 188 189struct mgsl_struct { 190 int magic; 191 int flags; 192 int count; /* count of opens */ 193 int line; 194 int hw_version; 195 unsigned short close_delay; 196 unsigned short closing_wait; /* time to wait before closing */ 197 198 struct mgsl_icount icount; 199 200 struct tty_struct *tty; 201 int timeout; 202 int x_char; /* xon/xoff character */ 203 int blocked_open; /* # of blocked opens */ 204 u16 read_status_mask; 205 u16 ignore_status_mask; 206 unsigned char *xmit_buf; 207 int xmit_head; 208 int xmit_tail; 209 int xmit_cnt; 210 211 wait_queue_head_t open_wait; 212 wait_queue_head_t close_wait; 213 214 wait_queue_head_t status_event_wait_q; 215 wait_queue_head_t event_wait_q; 216 struct timer_list tx_timer; /* HDLC transmit timeout timer */ 217 struct mgsl_struct *next_device; /* device list link */ 218 219 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */ 220 struct work_struct task; /* task structure for scheduling bh */ 221 222 u32 EventMask; /* event trigger mask */ 223 u32 RecordedEvents; /* pending events */ 224 225 u32 max_frame_size; /* as set by device config */ 226 227 u32 pending_bh; 228 229 int bh_running; /* Protection from multiple */ 230 int isr_overflow; 231 int bh_requested; 232 233 int dcd_chkcount; /* check counts to prevent */ 234 int cts_chkcount; /* too many IRQs if a signal */ 235 int dsr_chkcount; /* is floating */ 236 int ri_chkcount; 237 238 char *buffer_list; /* virtual address of Rx & Tx buffer lists */ 239 u32 buffer_list_phys; 240 dma_addr_t buffer_list_dma_addr; 241 242 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */ 243 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer 
entries */ 244 unsigned int current_rx_buffer; 245 246 int num_tx_dma_buffers; /* number of tx dma frames required */ 247 int tx_dma_buffers_used; 248 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */ 249 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */ 250 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */ 251 int current_tx_buffer; /* next tx dma buffer to be loaded */ 252 253 unsigned char *intermediate_rxbuffer; 254 255 int num_tx_holding_buffers; /* number of tx holding buffer allocated */ 256 int get_tx_holding_index; /* next tx holding buffer for adapter to load */ 257 int put_tx_holding_index; /* next tx holding buffer to store user request */ 258 int tx_holding_count; /* number of tx holding buffers waiting */ 259 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS]; 260 261 int rx_enabled; 262 int rx_overflow; 263 int rx_rcc_underrun; 264 265 int tx_enabled; 266 int tx_active; 267 u32 idle_mode; 268 269 u16 cmr_value; 270 u16 tcsr_value; 271 272 char device_name[25]; /* device instance name */ 273 274 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */ 275 unsigned char bus; /* expansion bus number (zero based) */ 276 unsigned char function; /* PCI device number */ 277 278 unsigned int io_base; /* base I/O address of adapter */ 279 unsigned int io_addr_size; /* size of the I/O address range */ 280 int io_addr_requested; /* nonzero if I/O address requested */ 281 282 unsigned int irq_level; /* interrupt level */ 283 unsigned long irq_flags; 284 int irq_requested; /* nonzero if IRQ requested */ 285 286 unsigned int dma_level; /* DMA channel */ 287 int dma_requested; /* nonzero if dma channel requested */ 288 289 u16 mbre_bit; 290 u16 loopback_bits; 291 u16 usc_idle_mode; 292 293 MGSL_PARAMS params; /* communications parameters */ 294 295 unsigned char serial_signals; /* current serial signal states */ 296 297 int irq_occurred; /* for diagnostics use */ 298 unsigned int 
init_error; /* Initialization startup error (DIAGS) */ 299 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */ 300 301 u32 last_mem_alloc; 302 unsigned char* memory_base; /* shared memory address (PCI only) */ 303 u32 phys_memory_base; 304 int shared_mem_requested; 305 306 unsigned char* lcr_base; /* local config registers (PCI only) */ 307 u32 phys_lcr_base; 308 u32 lcr_offset; 309 int lcr_mem_requested; 310 311 u32 misc_ctrl_value; 312 char flag_buf[MAX_ASYNC_BUFFER_SIZE]; 313 char char_buf[MAX_ASYNC_BUFFER_SIZE]; 314 BOOLEAN drop_rts_on_tx_done; 315 316 BOOLEAN loopmode_insert_requested; 317 BOOLEAN loopmode_send_done_requested; 318 319 struct _input_signal_events input_signal_events; 320 321 /* generic HDLC device parts */ 322 int netcount; 323 int dosyncppp; 324 spinlock_t netlock; 325 326#if SYNCLINK_GENERIC_HDLC 327 struct net_device *netdev; 328#endif 329}; 330 331#define MGSL_MAGIC 0x5401 332 333/* 334 * The size of the serial xmit buffer is 1 page, or 4096 bytes 335 */ 336#ifndef SERIAL_XMIT_SIZE 337#define SERIAL_XMIT_SIZE 4096 338#endif 339 340/* 341 * These macros define the offsets used in calculating the 342 * I/O address of the specified USC registers. 343 */ 344 345 346#define DCPIN 2 /* Bit 1 of I/O address */ 347#define SDPIN 4 /* Bit 2 of I/O address */ 348 349#define DCAR 0 /* DMA command/address register */ 350#define CCAR SDPIN /* channel command/address register */ 351#define DATAREG DCPIN + SDPIN /* serial data register */ 352#define MSBONLY 0x41 353#define LSBONLY 0x40 354 355/* 356 * These macros define the register address (ordinal number) 357 * used for writing address/value pairs to the USC. 
358 */ 359 360#define CMR 0x02 /* Channel mode Register */ 361#define CCSR 0x04 /* Channel Command/status Register */ 362#define CCR 0x06 /* Channel Control Register */ 363#define PSR 0x08 /* Port status Register */ 364#define PCR 0x0a /* Port Control Register */ 365#define TMDR 0x0c /* Test mode Data Register */ 366#define TMCR 0x0e /* Test mode Control Register */ 367#define CMCR 0x10 /* Clock mode Control Register */ 368#define HCR 0x12 /* Hardware Configuration Register */ 369#define IVR 0x14 /* Interrupt Vector Register */ 370#define IOCR 0x16 /* Input/Output Control Register */ 371#define ICR 0x18 /* Interrupt Control Register */ 372#define DCCR 0x1a /* Daisy Chain Control Register */ 373#define MISR 0x1c /* Misc Interrupt status Register */ 374#define SICR 0x1e /* status Interrupt Control Register */ 375#define RDR 0x20 /* Receive Data Register */ 376#define RMR 0x22 /* Receive mode Register */ 377#define RCSR 0x24 /* Receive Command/status Register */ 378#define RICR 0x26 /* Receive Interrupt Control Register */ 379#define RSR 0x28 /* Receive Sync Register */ 380#define RCLR 0x2a /* Receive count Limit Register */ 381#define RCCR 0x2c /* Receive Character count Register */ 382#define TC0R 0x2e /* Time Constant 0 Register */ 383#define TDR 0x30 /* Transmit Data Register */ 384#define TMR 0x32 /* Transmit mode Register */ 385#define TCSR 0x34 /* Transmit Command/status Register */ 386#define TICR 0x36 /* Transmit Interrupt Control Register */ 387#define TSR 0x38 /* Transmit Sync Register */ 388#define TCLR 0x3a /* Transmit count Limit Register */ 389#define TCCR 0x3c /* Transmit Character count Register */ 390#define TC1R 0x3e /* Time Constant 1 Register */ 391 392 393/* 394 * MACRO DEFINITIONS FOR DMA REGISTERS 395 */ 396 397#define DCR 0x06 /* DMA Control Register (shared) */ 398#define DACR 0x08 /* DMA Array count Register (shared) */ 399#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */ 400#define DIVR 0x14 /* DMA Interrupt Vector Register 
(shared) */ 401#define DICR 0x18 /* DMA Interrupt Control Register (shared) */ 402#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */ 403#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */ 404 405#define TDMR 0x02 /* Transmit DMA mode Register */ 406#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */ 407#define TBCR 0x2a /* Transmit Byte count Register */ 408#define TARL 0x2c /* Transmit Address Register (low) */ 409#define TARU 0x2e /* Transmit Address Register (high) */ 410#define NTBCR 0x3a /* Next Transmit Byte count Register */ 411#define NTARL 0x3c /* Next Transmit Address Register (low) */ 412#define NTARU 0x3e /* Next Transmit Address Register (high) */ 413 414#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */ 415#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */ 416#define RBCR 0xaa /* Receive Byte count Register */ 417#define RARL 0xac /* Receive Address Register (low) */ 418#define RARU 0xae /* Receive Address Register (high) */ 419#define NRBCR 0xba /* Next Receive Byte count Register */ 420#define NRARL 0xbc /* Next Receive Address Register (low) */ 421#define NRARU 0xbe /* Next Receive Address Register (high) */ 422 423 424/* 425 * MACRO DEFINITIONS FOR MODEM STATUS BITS 426 */ 427 428#define MODEMSTATUS_DTR 0x80 429#define MODEMSTATUS_DSR 0x40 430#define MODEMSTATUS_RTS 0x20 431#define MODEMSTATUS_CTS 0x10 432#define MODEMSTATUS_RI 0x04 433#define MODEMSTATUS_DCD 0x01 434 435 436/* 437 * Channel Command/Address Register (CCAR) Command Codes 438 */ 439 440#define RTCmd_Null 0x0000 441#define RTCmd_ResetHighestIus 0x1000 442#define RTCmd_TriggerChannelLoadDma 0x2000 443#define RTCmd_TriggerRxDma 0x2800 444#define RTCmd_TriggerTxDma 0x3000 445#define RTCmd_TriggerRxAndTxDma 0x3800 446#define RTCmd_PurgeRxFifo 0x4800 447#define RTCmd_PurgeTxFifo 0x5000 448#define RTCmd_PurgeRxAndTxFifo 0x5800 449#define RTCmd_LoadRcc 0x6800 450#define RTCmd_LoadTcc 0x7000 451#define RTCmd_LoadRccAndTcc 0x7800 452#define 
RTCmd_LoadTC0 0x8800 453#define RTCmd_LoadTC1 0x9000 454#define RTCmd_LoadTC0AndTC1 0x9800 455#define RTCmd_SerialDataLSBFirst 0xa000 456#define RTCmd_SerialDataMSBFirst 0xa800 457#define RTCmd_SelectBigEndian 0xb000 458#define RTCmd_SelectLittleEndian 0xb800 459 460 461/* 462 * DMA Command/Address Register (DCAR) Command Codes 463 */ 464 465#define DmaCmd_Null 0x0000 466#define DmaCmd_ResetTxChannel 0x1000 467#define DmaCmd_ResetRxChannel 0x1200 468#define DmaCmd_StartTxChannel 0x2000 469#define DmaCmd_StartRxChannel 0x2200 470#define DmaCmd_ContinueTxChannel 0x3000 471#define DmaCmd_ContinueRxChannel 0x3200 472#define DmaCmd_PauseTxChannel 0x4000 473#define DmaCmd_PauseRxChannel 0x4200 474#define DmaCmd_AbortTxChannel 0x5000 475#define DmaCmd_AbortRxChannel 0x5200 476#define DmaCmd_InitTxChannel 0x7000 477#define DmaCmd_InitRxChannel 0x7200 478#define DmaCmd_ResetHighestDmaIus 0x8000 479#define DmaCmd_ResetAllChannels 0x9000 480#define DmaCmd_StartAllChannels 0xa000 481#define DmaCmd_ContinueAllChannels 0xb000 482#define DmaCmd_PauseAllChannels 0xc000 483#define DmaCmd_AbortAllChannels 0xd000 484#define DmaCmd_InitAllChannels 0xf000 485 486#define TCmd_Null 0x0000 487#define TCmd_ClearTxCRC 0x2000 488#define TCmd_SelectTicrTtsaData 0x4000 489#define TCmd_SelectTicrTxFifostatus 0x5000 490#define TCmd_SelectTicrIntLevel 0x6000 491#define TCmd_SelectTicrdma_level 0x7000 492#define TCmd_SendFrame 0x8000 493#define TCmd_SendAbort 0x9000 494#define TCmd_EnableDleInsertion 0xc000 495#define TCmd_DisableDleInsertion 0xd000 496#define TCmd_ClearEofEom 0xe000 497#define TCmd_SetEofEom 0xf000 498 499#define RCmd_Null 0x0000 500#define RCmd_ClearRxCRC 0x2000 501#define RCmd_EnterHuntmode 0x3000 502#define RCmd_SelectRicrRtsaData 0x4000 503#define RCmd_SelectRicrRxFifostatus 0x5000 504#define RCmd_SelectRicrIntLevel 0x6000 505#define RCmd_SelectRicrdma_level 0x7000 506 507/* 508 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR) 509 */ 510 511#define 
RECEIVE_STATUS BIT5 512#define RECEIVE_DATA BIT4 513#define TRANSMIT_STATUS BIT3 514#define TRANSMIT_DATA BIT2 515#define IO_PIN BIT1 516#define MISC BIT0 517 518 519/* 520 * Receive status Bits in Receive Command/status Register RCSR 521 */ 522 523#define RXSTATUS_SHORT_FRAME BIT8 524#define RXSTATUS_CODE_VIOLATION BIT8 525#define RXSTATUS_EXITED_HUNT BIT7 526#define RXSTATUS_IDLE_RECEIVED BIT6 527#define RXSTATUS_BREAK_RECEIVED BIT5 528#define RXSTATUS_ABORT_RECEIVED BIT5 529#define RXSTATUS_RXBOUND BIT4 530#define RXSTATUS_CRC_ERROR BIT3 531#define RXSTATUS_FRAMING_ERROR BIT3 532#define RXSTATUS_ABORT BIT2 533#define RXSTATUS_PARITY_ERROR BIT2 534#define RXSTATUS_OVERRUN BIT1 535#define RXSTATUS_DATA_AVAILABLE BIT0 536#define RXSTATUS_ALL 0x01f6 537#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) ) 538 539/* 540 * Values for setting transmit idle mode in 541 * Transmit Control/status Register (TCSR) 542 */ 543#define IDLEMODE_FLAGS 0x0000 544#define IDLEMODE_ALT_ONE_ZERO 0x0100 545#define IDLEMODE_ZERO 0x0200 546#define IDLEMODE_ONE 0x0300 547#define IDLEMODE_ALT_MARK_SPACE 0x0500 548#define IDLEMODE_SPACE 0x0600 549#define IDLEMODE_MARK 0x0700 550#define IDLEMODE_MASK 0x0700 551 552/* 553 * IUSC revision identifiers 554 */ 555#define IUSC_SL1660 0x4d44 556#define IUSC_PRE_SL1660 0x4553 557 558/* 559 * Transmit status Bits in Transmit Command/status Register (TCSR) 560 */ 561 562#define TCSR_PRESERVE 0x0F00 563 564#define TCSR_UNDERWAIT BIT11 565#define TXSTATUS_PREAMBLE_SENT BIT7 566#define TXSTATUS_IDLE_SENT BIT6 567#define TXSTATUS_ABORT_SENT BIT5 568#define TXSTATUS_EOF_SENT BIT4 569#define TXSTATUS_EOM_SENT BIT4 570#define TXSTATUS_CRC_SENT BIT3 571#define TXSTATUS_ALL_SENT BIT2 572#define TXSTATUS_UNDERRUN BIT1 573#define TXSTATUS_FIFO_EMPTY BIT0 574#define TXSTATUS_ALL 0x00fa 575#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) ) 576 577 578#define 
MISCSTATUS_RXC_LATCHED BIT15 579#define MISCSTATUS_RXC BIT14 580#define MISCSTATUS_TXC_LATCHED BIT13 581#define MISCSTATUS_TXC BIT12 582#define MISCSTATUS_RI_LATCHED BIT11 583#define MISCSTATUS_RI BIT10 584#define MISCSTATUS_DSR_LATCHED BIT9 585#define MISCSTATUS_DSR BIT8 586#define MISCSTATUS_DCD_LATCHED BIT7 587#define MISCSTATUS_DCD BIT6 588#define MISCSTATUS_CTS_LATCHED BIT5 589#define MISCSTATUS_CTS BIT4 590#define MISCSTATUS_RCC_UNDERRUN BIT3 591#define MISCSTATUS_DPLL_NO_SYNC BIT2 592#define MISCSTATUS_BRG1_ZERO BIT1 593#define MISCSTATUS_BRG0_ZERO BIT0 594 595#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0)) 596#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f)) 597 598#define SICR_RXC_ACTIVE BIT15 599#define SICR_RXC_INACTIVE BIT14 600#define SICR_RXC (BIT15+BIT14) 601#define SICR_TXC_ACTIVE BIT13 602#define SICR_TXC_INACTIVE BIT12 603#define SICR_TXC (BIT13+BIT12) 604#define SICR_RI_ACTIVE BIT11 605#define SICR_RI_INACTIVE BIT10 606#define SICR_RI (BIT11+BIT10) 607#define SICR_DSR_ACTIVE BIT9 608#define SICR_DSR_INACTIVE BIT8 609#define SICR_DSR (BIT9+BIT8) 610#define SICR_DCD_ACTIVE BIT7 611#define SICR_DCD_INACTIVE BIT6 612#define SICR_DCD (BIT7+BIT6) 613#define SICR_CTS_ACTIVE BIT5 614#define SICR_CTS_INACTIVE BIT4 615#define SICR_CTS (BIT5+BIT4) 616#define SICR_RCC_UNDERFLOW BIT3 617#define SICR_DPLL_NO_SYNC BIT2 618#define SICR_BRG1_ZERO BIT1 619#define SICR_BRG0_ZERO BIT0 620 621void usc_DisableMasterIrqBit( struct mgsl_struct *info ); 622void usc_EnableMasterIrqBit( struct mgsl_struct *info ); 623void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask ); 624void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask ); 625void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask ); 626 627#define usc_EnableInterrupts( a, b ) \ 628 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) ) 629 630#define usc_DisableInterrupts( a, b ) \ 631 usc_OutReg( 
(a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) ) 632 633#define usc_EnableMasterIrqBit(a) \ 634 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) ) 635 636#define usc_DisableMasterIrqBit(a) \ 637 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) ) 638 639#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) ) 640 641/* 642 * Transmit status Bits in Transmit Control status Register (TCSR) 643 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) 644 */ 645 646#define TXSTATUS_PREAMBLE_SENT BIT7 647#define TXSTATUS_IDLE_SENT BIT6 648#define TXSTATUS_ABORT_SENT BIT5 649#define TXSTATUS_EOF BIT4 650#define TXSTATUS_CRC_SENT BIT3 651#define TXSTATUS_ALL_SENT BIT2 652#define TXSTATUS_UNDERRUN BIT1 653#define TXSTATUS_FIFO_EMPTY BIT0 654 655#define DICR_MASTER BIT15 656#define DICR_TRANSMIT BIT0 657#define DICR_RECEIVE BIT1 658 659#define usc_EnableDmaInterrupts(a,b) \ 660 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) ) 661 662#define usc_DisableDmaInterrupts(a,b) \ 663 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) ) 664 665#define usc_EnableStatusIrqs(a,b) \ 666 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) ) 667 668#define usc_DisablestatusIrqs(a,b) \ 669 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) ) 670 671/* Transmit status Bits in Transmit Control status Register (TCSR) */ 672/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */ 673 674 675#define DISABLE_UNCONDITIONAL 0 676#define DISABLE_END_OF_FRAME 1 677#define ENABLE_UNCONDITIONAL 2 678#define ENABLE_AUTO_CTS 3 679#define ENABLE_AUTO_DCD 3 680#define usc_EnableTransmitter(a,b) \ 681 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) ) 682#define usc_EnableReceiver(a,b) \ 683 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) ) 684 685static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port ); 686static void usc_OutDmaReg( struct 
mgsl_struct *info, u16 Port, u16 Value ); 687static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ); 688 689static u16 usc_InReg( struct mgsl_struct *info, u16 Port ); 690static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value ); 691static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ); 692void usc_RCmd( struct mgsl_struct *info, u16 Cmd ); 693void usc_TCmd( struct mgsl_struct *info, u16 Cmd ); 694 695#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b))) 696#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b)) 697 698#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1)) 699 700static void usc_process_rxoverrun_sync( struct mgsl_struct *info ); 701static void usc_start_receiver( struct mgsl_struct *info ); 702static void usc_stop_receiver( struct mgsl_struct *info ); 703 704static void usc_start_transmitter( struct mgsl_struct *info ); 705static void usc_stop_transmitter( struct mgsl_struct *info ); 706static void usc_set_txidle( struct mgsl_struct *info ); 707static void usc_load_txfifo( struct mgsl_struct *info ); 708 709static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate ); 710static void usc_enable_loopback( struct mgsl_struct *info, int enable ); 711 712static void usc_get_serial_signals( struct mgsl_struct *info ); 713static void usc_set_serial_signals( struct mgsl_struct *info ); 714 715static void usc_reset( struct mgsl_struct *info ); 716 717static void usc_set_sync_mode( struct mgsl_struct *info ); 718static void usc_set_sdlc_mode( struct mgsl_struct *info ); 719static void usc_set_async_mode( struct mgsl_struct *info ); 720static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate ); 721 722static void usc_loopback_frame( struct mgsl_struct *info ); 723 724static void mgsl_tx_timeout(unsigned long context); 725 726 727static void usc_loopmode_cancel_transmit( struct mgsl_struct * info ); 728static void usc_loopmode_insert_request( struct 
mgsl_struct * info ); 729static int usc_loopmode_active( struct mgsl_struct * info); 730static void usc_loopmode_send_done( struct mgsl_struct * info ); 731 732static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg); 733 734#if SYNCLINK_GENERIC_HDLC 735#define dev_to_port(D) (dev_to_hdlc(D)->priv) 736static void hdlcdev_tx_done(struct mgsl_struct *info); 737static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size); 738static int hdlcdev_init(struct mgsl_struct *info); 739static void hdlcdev_exit(struct mgsl_struct *info); 740#endif 741 742/* 743 * Defines a BUS descriptor value for the PCI adapter 744 * local bus address ranges. 745 */ 746 747#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \ 748(0x00400020 + \ 749((WrHold) << 30) + \ 750((WrDly) << 28) + \ 751((RdDly) << 26) + \ 752((Nwdd) << 20) + \ 753((Nwad) << 15) + \ 754((Nxda) << 13) + \ 755((Nrdd) << 11) + \ 756((Nrad) << 6) ) 757 758static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit); 759 760/* 761 * Adapter diagnostic routines 762 */ 763static BOOLEAN mgsl_register_test( struct mgsl_struct *info ); 764static BOOLEAN mgsl_irq_test( struct mgsl_struct *info ); 765static BOOLEAN mgsl_dma_test( struct mgsl_struct *info ); 766static BOOLEAN mgsl_memory_test( struct mgsl_struct *info ); 767static int mgsl_adapter_test( struct mgsl_struct *info ); 768 769/* 770 * device and resource management routines 771 */ 772static int mgsl_claim_resources(struct mgsl_struct *info); 773static void mgsl_release_resources(struct mgsl_struct *info); 774static void mgsl_add_device(struct mgsl_struct *info); 775static struct mgsl_struct* mgsl_allocate_device(void); 776 777/* 778 * DMA buffer manupulation functions. 
779 */ 780static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex ); 781static int mgsl_get_rx_frame( struct mgsl_struct *info ); 782static int mgsl_get_raw_rx_frame( struct mgsl_struct *info ); 783static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info ); 784static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info ); 785static int num_free_tx_dma_buffers(struct mgsl_struct *info); 786static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize); 787static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count); 788 789/* 790 * DMA and Shared Memory buffer allocation and formatting 791 */ 792static int mgsl_allocate_dma_buffers(struct mgsl_struct *info); 793static void mgsl_free_dma_buffers(struct mgsl_struct *info); 794static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); 795static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount); 796static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info); 797static void mgsl_free_buffer_list_memory(struct mgsl_struct *info); 798static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info); 799static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info); 800static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info); 801static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info); 802static int load_next_tx_holding_buffer(struct mgsl_struct *info); 803static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize); 804 805/* 806 * Bottom half interrupt handlers 807 */ 808static void mgsl_bh_handler(struct work_struct *work); 809static void mgsl_bh_receive(struct mgsl_struct *info); 810static void mgsl_bh_transmit(struct mgsl_struct *info); 811static void 
mgsl_bh_status(struct mgsl_struct *info); 812 813/* 814 * Interrupt handler routines and dispatch table. 815 */ 816static void mgsl_isr_null( struct mgsl_struct *info ); 817static void mgsl_isr_transmit_data( struct mgsl_struct *info ); 818static void mgsl_isr_receive_data( struct mgsl_struct *info ); 819static void mgsl_isr_receive_status( struct mgsl_struct *info ); 820static void mgsl_isr_transmit_status( struct mgsl_struct *info ); 821static void mgsl_isr_io_pin( struct mgsl_struct *info ); 822static void mgsl_isr_misc( struct mgsl_struct *info ); 823static void mgsl_isr_receive_dma( struct mgsl_struct *info ); 824static void mgsl_isr_transmit_dma( struct mgsl_struct *info ); 825 826typedef void (*isr_dispatch_func)(struct mgsl_struct *); 827 828static isr_dispatch_func UscIsrTable[7] = 829{ 830 mgsl_isr_null, 831 mgsl_isr_misc, 832 mgsl_isr_io_pin, 833 mgsl_isr_transmit_data, 834 mgsl_isr_transmit_status, 835 mgsl_isr_receive_data, 836 mgsl_isr_receive_status 837}; 838 839/* 840 * ioctl call handlers 841 */ 842static int tiocmget(struct tty_struct *tty, struct file *file); 843static int tiocmset(struct tty_struct *tty, struct file *file, 844 unsigned int set, unsigned int clear); 845static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount 846 __user *user_icount); 847static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params); 848static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params); 849static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode); 850static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode); 851static int mgsl_txenable(struct mgsl_struct * info, int enable); 852static int mgsl_txabort(struct mgsl_struct * info); 853static int mgsl_rxenable(struct mgsl_struct * info, int enable); 854static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask); 855static int mgsl_loopmode_send_done( struct mgsl_struct * info ); 856 857/* set non-zero 
on successful registration with PCI subsystem */ 858static int pci_registered; 859 860/* 861 * Global linked list of SyncLink devices 862 */ 863static struct mgsl_struct *mgsl_device_list; 864static int mgsl_device_count; 865 866/* 867 * Set this param to non-zero to load eax with the 868 * .text section address and breakpoint on module load. 869 * This is useful for use with gdb and add-symbol-file command. 870 */ 871static int break_on_load; 872 873/* 874 * Driver major number, defaults to zero to get auto 875 * assigned major number. May be forced as module parameter. 876 */ 877static int ttymajor; 878 879/* 880 * Array of user specified options for ISA adapters. 881 */ 882static int io[MAX_ISA_DEVICES]; 883static int irq[MAX_ISA_DEVICES]; 884static int dma[MAX_ISA_DEVICES]; 885static int debug_level; 886static int maxframe[MAX_TOTAL_DEVICES]; 887static int dosyncppp[MAX_TOTAL_DEVICES]; 888static int txdmabufs[MAX_TOTAL_DEVICES]; 889static int txholdbufs[MAX_TOTAL_DEVICES]; 890 891module_param(break_on_load, bool, 0); 892module_param(ttymajor, int, 0); 893module_param_array(io, int, NULL, 0); 894module_param_array(irq, int, NULL, 0); 895module_param_array(dma, int, NULL, 0); 896module_param(debug_level, int, 0); 897module_param_array(maxframe, int, NULL, 0); 898module_param_array(dosyncppp, int, NULL, 0); 899module_param_array(txdmabufs, int, NULL, 0); 900module_param_array(txholdbufs, int, NULL, 0); 901 902static char *driver_name = "SyncLink serial driver"; 903static char *driver_version = "$Revision: 1.1.1.1 $"; 904 905static int synclink_init_one (struct pci_dev *dev, 906 const struct pci_device_id *ent); 907static void synclink_remove_one (struct pci_dev *dev); 908 909static struct pci_device_id synclink_pci_tbl[] = { 910 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, }, 911 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, }, 912 { 0, }, /* terminate list */ 913}; 914MODULE_DEVICE_TABLE(pci, synclink_pci_tbl); 915 
MODULE_LICENSE("GPL");

static struct pci_driver synclink_pci_driver = {
	.name		= "synclink",
	.id_table	= synclink_pci_tbl,
	.probe		= synclink_init_one,
	.remove		= __devexit_p(synclink_remove_one),
};

static struct tty_driver *serial_driver;

/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256


static void mgsl_change_params(struct mgsl_struct *info);
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);

/*
 * 1st function defined in .text section. Calling this function in
 * init_module() followed by a breakpoint allows a remote debugger
 * (gdb) to get the .text address for the add-symbol-file command.
 * This allows remote debugging of dynamically loadable modules.
 */
static void* mgsl_get_text_ptr(void)
{
	/* returns its own address, which lies in the module's .text */
	return mgsl_get_text_ptr;
}

/*
 * mgsl_paranoia_check()
 *
 * Sanity check a driver_data pointer obtained from a tty before use.
 * With MGSL_PARANOIA_CHECK defined, also verifies the structure's
 * magic number and logs a warning identifying the device (name) and
 * the calling routine on failure.
 *
 * Returns 0 if the pointer looks valid, 1 if the caller must bail out.
 */
static inline int mgsl_paranoia_check(struct mgsl_struct *info,
					char *name, const char *routine)
{
#ifdef MGSL_PARANOIA_CHECK
	static const char *badmagic =
		"Warning: bad magic number for mgsl struct (%s) in %s\n";
	static const char *badinfo =
		"Warning: null mgsl_struct for (%s) in %s\n";

	if (!info) {
		printk(badinfo, name, routine);
		return 1;
	}
	if (info->magic != MGSL_MAGIC) {
		printk(badmagic, name, routine);
		return 1;
	}
#else
	if (!info)
		return 1;
#endif
	return 0;
}

/**
 * line discipline callback wrappers
 *
 * The wrappers maintain line discipline references
 * while calling into the line discipline.
 *
 * ldisc_receive_buf  - pass receive data to line discipline
 */

static void ldisc_receive_buf(struct tty_struct *tty,
			      const __u8 *data, char *flags, int count)
{
	struct tty_ldisc *ld;
	if (!tty)
		return;
	/* take a reference so the ldisc cannot be changed/unloaded
	 * while we are calling into it
	 */
	ld = tty_ldisc_ref(tty);
	if (ld) {
		if (ld->receive_buf)
			ld->receive_buf(tty, data, flags, count);
		tty_ldisc_deref(ld);
	}
}

/* mgsl_stop()		throttle (stop) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_stop(struct tty_struct *tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_stop(%s)\n",info->device_name);

	/* tx_enabled is shared with the ISR; protect with irq_spinlock */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->tx_enabled)
		usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

}	/* end of mgsl_stop() */

/* mgsl_start()		release (start) transmitter
 *
 * Arguments:		tty	pointer to tty info structure
 * Return Value:	None
 */
static void mgsl_start(struct tty_struct *tty)
{
	struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
	unsigned long flags;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
		return;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("mgsl_start(%s)\n",info->device_name);

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_enabled)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

}	/* end of mgsl_start() */

/*
 * Bottom half work queue access functions
 */

/* mgsl_bh_action()	Return next bottom half action to perform.
 * Return Value:	BH action code or 0 if nothing to do.
 */
static int mgsl_bh_action(struct mgsl_struct *info)
{
	unsigned long flags;
	int rc = 0;

	/* pending_bh and the bh_running/bh_requested flags are set from
	 * the ISR, so they must be read/cleared under irq_spinlock
	 */
	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* consume one pending work item, highest priority first:
	 * receive, then transmit, then status
	 */
	if (info->pending_bh & BH_RECEIVE) {
		info->pending_bh &= ~BH_RECEIVE;
		rc = BH_RECEIVE;
	} else if (info->pending_bh & BH_TRANSMIT) {
		info->pending_bh &= ~BH_TRANSMIT;
		rc = BH_TRANSMIT;
	} else if (info->pending_bh & BH_STATUS) {
		info->pending_bh &= ~BH_STATUS;
		rc = BH_STATUS;
	}

	if (!rc) {
		/* Mark BH routine as complete so the ISR will schedule
		 * the work item again on the next event */
		info->bh_running = 0;
		info->bh_requested = 0;
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;
}

/*
 * Perform bottom half processing of work items queued by ISR.
 */
static void mgsl_bh_handler(struct work_struct *work)
{
	struct mgsl_struct *info =
		container_of(work, struct mgsl_struct, task);
	int action;

	if (!info)
		return;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
			__FILE__,__LINE__,info->device_name);

	info->bh_running = 1;

	/* drain all pending work items; mgsl_bh_action() clears
	 * bh_running/bh_requested when nothing is left
	 */
	while((action = mgsl_bh_action(info)) != 0) {

		/* Process work item */
		if ( debug_level >= DEBUG_LEVEL_BH )
			printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
				__FILE__,__LINE__,action);

		switch (action) {

		case BH_RECEIVE:
			mgsl_bh_receive(info);
			break;
		case BH_TRANSMIT:
			mgsl_bh_transmit(info);
			break;
		case BH_STATUS:
			mgsl_bh_status(info);
			break;
		default:
			/* unknown work item ID */
			printk("Unknown work item ID=%08X!\n", action);
			break;
		}
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
			__FILE__,__LINE__,info->device_name);
}

/*
 * Pass completed receive frames up; the frame extraction routine is
 * selected by the current mode (HDLC vs raw synchronous).
 */
static void mgsl_bh_receive(struct mgsl_struct *info)
{
	int (*get_rx_frame)(struct mgsl_struct *info) =
		(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_receive(%s)\n",
			__FILE__,__LINE__,info->device_name);

	do
	{
		if (info->rx_rcc_underrun) {
			/* receiver was shut down by mgsl_isr_misc() on an
			 * RCC underrun; restart it and exit without
			 * processing further frames
			 */
			unsigned long flags;
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
			return;
		}
	} while(get_rx_frame(info));
}

static void mgsl_bh_transmit(struct mgsl_struct *info)
{
	struct tty_struct *tty = info->tty;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	if (tty)
		tty_wakeup(tty);

	/* if transmitter idle and loopmode_send_done_requested
	 * then start echoing RxD to TxD
	 */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if ( !info->tx_active && info->loopmode_send_done_requested )
		usc_loopmode_send_done( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

/*
 * Status bottom half: reset the spurious-interrupt counters used by
 * mgsl_isr_io_pin() to disable a stuck status line interrupt.
 */
static void mgsl_bh_status(struct mgsl_struct *info)
{
	if ( debug_level >= DEBUG_LEVEL_BH )
		printk( "%s(%d):mgsl_bh_status() entry on %s\n",
			__FILE__,__LINE__,info->device_name);

	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
}

/* mgsl_isr_receive_status()
 *
 *	Service a receive status interrupt. The type of status
 *	interrupt is indicated by the state of the RCSR.
 *	This is only used for HDLC mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, RCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
			__FILE__,__LINE__,status);

	if ( (status & RXSTATUS_ABORT_RECEIVED) &&
		info->loopmode_insert_requested &&
		usc_loopmode_active(info) )
	{
		++info->icount.rxabort;
		info->loopmode_insert_requested = FALSE;

		/* clear CMR:13 to start echoing RxD to TxD */
		info->cmr_value &= ~BIT13;
		usc_OutReg(info, CMR, info->cmr_value);

		/* disable received abort irq (no longer required) */
		usc_OutReg(info, RICR,
			(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
	}

	/* wake ioctl waiters blocked on hunt-exit / idle events */
	if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
		if (status & RXSTATUS_EXITED_HUNT)
			info->icount.exithunt++;
		if (status & RXSTATUS_IDLE_RECEIVED)
			info->icount.rxidle++;
		wake_up_interruptible(&info->event_wait_q);
	}

	if (status & RXSTATUS_OVERRUN){
		info->icount.rxover++;
		usc_process_rxoverrun_sync( info );
	}

	/* acknowledge the interrupt and clear the latched status bits */
	usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
	usc_UnlatchRxstatusBits( info, status );

}	/* end of mgsl_isr_receive_status() */

/* mgsl_isr_transmit_status()
 *
 * 	Service a transmit status interrupt
 *	HDLC mode :end of transmit frame
 *	Async mode:all data is sent
 * 	transmit status is indicated by bits in the TCSR.
 *
 * Arguments:		info	       pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_status( struct mgsl_struct *info )
{
	u16 status = usc_InReg( info, TCSR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
			__FILE__,__LINE__,status);

	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
	usc_UnlatchTxstatusBits( info, status );

	if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
	{
		/* finished sending HDLC abort. This may leave	*/
		/* the TxFifo with data from the aborted frame	*/
		/* so purge the TxFifo. Also shutdown the DMA	*/
		/* channel in case there is data remaining in	*/
		/* the DMA buffer				*/
		usc_DmaCmd( info, DmaCmd_ResetTxChannel );
		usc_RTCmd( info, RTCmd_PurgeTxFifo );
	}

	/* classify the completion; the final else counts any other
	 * (unexpected) termination as an underrun
	 */
	if ( status & TXSTATUS_EOF_SENT )
		info->icount.txok++;
	else if ( status & TXSTATUS_UNDERRUN )
		info->icount.txunder++;
	else if ( status & TXSTATUS_ABORT_SENT )
		info->icount.txabort++;
	else
		info->icount.txunder++;

	/* frame is done: reset software transmit state and stop the
	 * transmit timeout timer
	 */
	info->tx_active = 0;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
	del_timer(&info->tx_timer);

	if ( info->drop_rts_on_tx_done ) {
		usc_get_serial_signals( info );
		if ( info->serial_signals & SerialSignal_RTS ) {
			info->serial_signals &= ~SerialSignal_RTS;
			usc_set_serial_signals( info );
		}
		info->drop_rts_on_tx_done = 0;
	}

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
	{
		/* NOTE(review): info->tty is dereferenced without a NULL
		 * check here; other paths in this driver guard it --
		 * confirm tty cannot be NULL while tx irqs are enabled.
		 */
		if (info->tty->stopped || info->tty->hw_stopped) {
			usc_stop_transmitter(info);
			return;
		}
		info->pending_bh |= BH_TRANSMIT;
	}

}	/* end of mgsl_isr_transmit_status() */

/* mgsl_isr_io_pin()
 *
 * 	Service an Input/Output pin interrupt.
The type of 1299 * interrupt is indicated by bits in the MISR 1300 * 1301 * Arguments: info pointer to device instance data 1302 * Return Value: None 1303 */ 1304static void mgsl_isr_io_pin( struct mgsl_struct *info ) 1305{ 1306 struct mgsl_icount *icount; 1307 u16 status = usc_InReg( info, MISR ); 1308 1309 if ( debug_level >= DEBUG_LEVEL_ISR ) 1310 printk("%s(%d):mgsl_isr_io_pin status=%04X\n", 1311 __FILE__,__LINE__,status); 1312 1313 usc_ClearIrqPendingBits( info, IO_PIN ); 1314 usc_UnlatchIostatusBits( info, status ); 1315 1316 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED | 1317 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) { 1318 icount = &info->icount; 1319 /* update input line counters */ 1320 if (status & MISCSTATUS_RI_LATCHED) { 1321 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) 1322 usc_DisablestatusIrqs(info,SICR_RI); 1323 icount->rng++; 1324 if ( status & MISCSTATUS_RI ) 1325 info->input_signal_events.ri_up++; 1326 else 1327 info->input_signal_events.ri_down++; 1328 } 1329 if (status & MISCSTATUS_DSR_LATCHED) { 1330 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) 1331 usc_DisablestatusIrqs(info,SICR_DSR); 1332 icount->dsr++; 1333 if ( status & MISCSTATUS_DSR ) 1334 info->input_signal_events.dsr_up++; 1335 else 1336 info->input_signal_events.dsr_down++; 1337 } 1338 if (status & MISCSTATUS_DCD_LATCHED) { 1339 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) 1340 usc_DisablestatusIrqs(info,SICR_DCD); 1341 icount->dcd++; 1342 if (status & MISCSTATUS_DCD) { 1343 info->input_signal_events.dcd_up++; 1344 } else 1345 info->input_signal_events.dcd_down++; 1346#if SYNCLINK_GENERIC_HDLC 1347 if (info->netcount) { 1348 if (status & MISCSTATUS_DCD) 1349 netif_carrier_on(info->netdev); 1350 else 1351 netif_carrier_off(info->netdev); 1352 } 1353#endif 1354 } 1355 if (status & MISCSTATUS_CTS_LATCHED) 1356 { 1357 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) 1358 usc_DisablestatusIrqs(info,SICR_CTS); 1359 icount->cts++; 1360 
if ( status & MISCSTATUS_CTS ) 1361 info->input_signal_events.cts_up++; 1362 else 1363 info->input_signal_events.cts_down++; 1364 } 1365 wake_up_interruptible(&info->status_event_wait_q); 1366 wake_up_interruptible(&info->event_wait_q); 1367 1368 if ( (info->flags & ASYNC_CHECK_CD) && 1369 (status & MISCSTATUS_DCD_LATCHED) ) { 1370 if ( debug_level >= DEBUG_LEVEL_ISR ) 1371 printk("%s CD now %s...", info->device_name, 1372 (status & MISCSTATUS_DCD) ? "on" : "off"); 1373 if (status & MISCSTATUS_DCD) 1374 wake_up_interruptible(&info->open_wait); 1375 else { 1376 if ( debug_level >= DEBUG_LEVEL_ISR ) 1377 printk("doing serial hangup..."); 1378 if (info->tty) 1379 tty_hangup(info->tty); 1380 } 1381 } 1382 1383 if ( (info->flags & ASYNC_CTS_FLOW) && 1384 (status & MISCSTATUS_CTS_LATCHED) ) { 1385 if (info->tty->hw_stopped) { 1386 if (status & MISCSTATUS_CTS) { 1387 if ( debug_level >= DEBUG_LEVEL_ISR ) 1388 printk("CTS tx start..."); 1389 if (info->tty) 1390 info->tty->hw_stopped = 0; 1391 usc_start_transmitter(info); 1392 info->pending_bh |= BH_TRANSMIT; 1393 return; 1394 } 1395 } else { 1396 if (!(status & MISCSTATUS_CTS)) { 1397 if ( debug_level >= DEBUG_LEVEL_ISR ) 1398 printk("CTS tx stop..."); 1399 if (info->tty) 1400 info->tty->hw_stopped = 1; 1401 usc_stop_transmitter(info); 1402 } 1403 } 1404 } 1405 } 1406 1407 info->pending_bh |= BH_STATUS; 1408 1409 /* for diagnostics set IRQ flag */ 1410 if ( status & MISCSTATUS_TXC_LATCHED ){ 1411 usc_OutReg( info, SICR, 1412 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) ); 1413 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED ); 1414 info->irq_occurred = 1; 1415 } 1416 1417} /* end of mgsl_isr_io_pin() */ 1418 1419/* mgsl_isr_transmit_data() 1420 * 1421 * Service a transmit data interrupt (async mode only). 
1422 * 1423 * Arguments: info pointer to device instance data 1424 * Return Value: None 1425 */ 1426static void mgsl_isr_transmit_data( struct mgsl_struct *info ) 1427{ 1428 if ( debug_level >= DEBUG_LEVEL_ISR ) 1429 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n", 1430 __FILE__,__LINE__,info->xmit_cnt); 1431 1432 usc_ClearIrqPendingBits( info, TRANSMIT_DATA ); 1433 1434 if (info->tty->stopped || info->tty->hw_stopped) { 1435 usc_stop_transmitter(info); 1436 return; 1437 } 1438 1439 if ( info->xmit_cnt ) 1440 usc_load_txfifo( info ); 1441 else 1442 info->tx_active = 0; 1443 1444 if (info->xmit_cnt < WAKEUP_CHARS) 1445 info->pending_bh |= BH_TRANSMIT; 1446 1447} /* end of mgsl_isr_transmit_data() */ 1448 1449/* mgsl_isr_receive_data() 1450 * 1451 * Service a receive data interrupt. This occurs 1452 * when operating in asynchronous interrupt transfer mode. 1453 * The receive data FIFO is flushed to the receive data buffers. 1454 * 1455 * Arguments: info pointer to device instance data 1456 * Return Value: None 1457 */ 1458static void mgsl_isr_receive_data( struct mgsl_struct *info ) 1459{ 1460 int Fifocount; 1461 u16 status; 1462 int work = 0; 1463 unsigned char DataByte; 1464 struct tty_struct *tty = info->tty; 1465 struct mgsl_icount *icount = &info->icount; 1466 1467 if ( debug_level >= DEBUG_LEVEL_ISR ) 1468 printk("%s(%d):mgsl_isr_receive_data\n", 1469 __FILE__,__LINE__); 1470 1471 usc_ClearIrqPendingBits( info, RECEIVE_DATA ); 1472 1473 /* select FIFO status for RICR readback */ 1474 usc_RCmd( info, RCmd_SelectRicrRxFifostatus ); 1475 1476 /* clear the Wordstatus bit so that status readback */ 1477 /* only reflects the status of this byte */ 1478 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 )); 1479 1480 /* flush the receive FIFO */ 1481 1482 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) { 1483 int flag; 1484 1485 /* read one byte from RxFIFO */ 1486 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY), 1487 
info->io_base + CCAR ); 1488 DataByte = inb( info->io_base + CCAR ); 1489 1490 /* get the status of the received byte */ 1491 status = usc_InReg(info, RCSR); 1492 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR + 1493 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) 1494 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL); 1495 1496 icount->rx++; 1497 1498 flag = 0; 1499 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR + 1500 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) { 1501 printk("rxerr=%04X\n",status); 1502 /* update error statistics */ 1503 if ( status & RXSTATUS_BREAK_RECEIVED ) { 1504 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR); 1505 icount->brk++; 1506 } else if (status & RXSTATUS_PARITY_ERROR) 1507 icount->parity++; 1508 else if (status & RXSTATUS_FRAMING_ERROR) 1509 icount->frame++; 1510 else if (status & RXSTATUS_OVERRUN) { 1511 /* must issue purge fifo cmd before */ 1512 /* 16C32 accepts more receive chars */ 1513 usc_RTCmd(info,RTCmd_PurgeRxFifo); 1514 icount->overrun++; 1515 } 1516 1517 /* discard char if tty control flags say so */ 1518 if (status & info->ignore_status_mask) 1519 continue; 1520 1521 status &= info->read_status_mask; 1522 1523 if (status & RXSTATUS_BREAK_RECEIVED) { 1524 flag = TTY_BREAK; 1525 if (info->flags & ASYNC_SAK) 1526 do_SAK(tty); 1527 } else if (status & RXSTATUS_PARITY_ERROR) 1528 flag = TTY_PARITY; 1529 else if (status & RXSTATUS_FRAMING_ERROR) 1530 flag = TTY_FRAME; 1531 } /* end of if (error) */ 1532 tty_insert_flip_char(tty, DataByte, flag); 1533 if (status & RXSTATUS_OVERRUN) { 1534 /* Overrun is special, since it's 1535 * reported immediately, and doesn't 1536 * affect the current character 1537 */ 1538 work += tty_insert_flip_char(tty, 0, TTY_OVERRUN); 1539 } 1540 } 1541 1542 if ( debug_level >= DEBUG_LEVEL_ISR ) { 1543 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n", 1544 __FILE__,__LINE__,icount->rx,icount->brk, 1545 icount->parity,icount->frame,icount->overrun); 1546 } 
1547 1548 if(work) 1549 tty_flip_buffer_push(tty); 1550} 1551 1552/* mgsl_isr_misc() 1553 * 1554 * Service a miscellaneos interrupt source. 1555 * 1556 * Arguments: info pointer to device extension (instance data) 1557 * Return Value: None 1558 */ 1559static void mgsl_isr_misc( struct mgsl_struct *info ) 1560{ 1561 u16 status = usc_InReg( info, MISR ); 1562 1563 if ( debug_level >= DEBUG_LEVEL_ISR ) 1564 printk("%s(%d):mgsl_isr_misc status=%04X\n", 1565 __FILE__,__LINE__,status); 1566 1567 if ((status & MISCSTATUS_RCC_UNDERRUN) && 1568 (info->params.mode == MGSL_MODE_HDLC)) { 1569 1570 /* turn off receiver and rx DMA */ 1571 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); 1572 usc_DmaCmd(info, DmaCmd_ResetRxChannel); 1573 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL); 1574 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS); 1575 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS); 1576 1577 /* schedule BH handler to restart receiver */ 1578 info->pending_bh |= BH_RECEIVE; 1579 info->rx_rcc_underrun = 1; 1580 } 1581 1582 usc_ClearIrqPendingBits( info, MISC ); 1583 usc_UnlatchMiscstatusBits( info, status ); 1584 1585} /* end of mgsl_isr_misc() */ 1586 1587/* mgsl_isr_null() 1588 * 1589 * Services undefined interrupt vectors from the 1590 * USC. (hence this function SHOULD never be called) 1591 * 1592 * Arguments: info pointer to device extension (instance data) 1593 * Return Value: None 1594 */ 1595static void mgsl_isr_null( struct mgsl_struct *info ) 1596{ 1597 1598} /* end of mgsl_isr_null() */ 1599 1600/* mgsl_isr_receive_dma() 1601 * 1602 * Service a receive DMA channel interrupt. 1603 * For this driver there are two sources of receive DMA interrupts 1604 * as identified in the Receive DMA mode Register (RDMR): 1605 * 1606 * BIT3 EOA/EOL End of List, all receive buffers in receive 1607 * buffer list have been filled (no more free buffers 1608 * available). The DMA controller has shut down. 1609 * 1610 * BIT2 EOB End of Buffer. 
 This interrupt occurs when a receive
 * 				DMA buffer is terminated in response to completion
 * 				of a good frame or a frame with errors. The status
 * 				of the frame is stored in the buffer entry in the
 * 				list of receive buffer entries.
 *
 * Arguments:		info		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_receive_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Rx DMA IRQ */
	usc_OutDmaReg( info, CDIR, BIT9+BIT1 );

	/* Read the receive DMA status to identify interrupt type. */
	/* This also clears the status bits. */
	status = usc_InDmaReg( info, RDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	/* both EOB and EOA/EOL need the BH to process received buffers */
	info->pending_bh |= BH_RECEIVE;

	/* BIT3 = EOA/EOL: all receive buffers full, DMA has shut down */
	if ( status & BIT3 ) {
		info->rx_overflow = 1;
		info->icount.buf_overrun++;
	}

}	/* end of mgsl_isr_receive_dma() */

/* mgsl_isr_transmit_dma()
 *
 *	This function services a transmit DMA channel interrupt.
 *
 *	For this driver there is one source of transmit DMA interrupts
 *	as identified in the Transmit DMA Mode Register (TDMR):
 *
 *	BIT2  EOB	End of Buffer. This interrupt occurs when a
 *			transmit DMA buffer has been emptied.
 *
 *	The driver maintains enough transmit DMA buffers to hold at least
 *	one max frame size transmit frame. When operating in a buffered
 *	transmit mode, there may be enough transmit DMA buffers to hold at
 *	least two or more max frame size frames. On an EOB condition,
 *	determine if there are any queued transmit buffers and copy into
 *	transmit DMA buffers if we have room.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
{
	u16 status;

	/* clear interrupt pending and IUS bit for Tx DMA IRQ */
	usc_OutDmaReg(info, CDIR, BIT8+BIT0 );

	/* Read the transmit DMA status to identify interrupt type. */
	/* This also clears the status bits. */

	status = usc_InDmaReg( info, TDMR );

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
			__FILE__,__LINE__,info->device_name,status);

	/* BIT2 = EOB: one transmit DMA buffer has been emptied */
	if ( status & BIT2 ) {
		--info->tx_dma_buffers_used;

		/* if there are transmit frames queued,
		 *  try to load the next one
		 */
		if ( load_next_tx_holding_buffer(info) ) {
			/* if call returns non-zero value, we have
			 * at least one free tx holding buffer
			 */
			info->pending_bh |= BH_TRANSMIT;
		}
	}

}	/* end of mgsl_isr_transmit_dma() */

/* mgsl_interrupt()
 *
 * 	Interrupt service routine entry point.
 *
 * Arguments:
 *
 * 	irq		interrupt number that caused interrupt
 * 	dev_id		device ID supplied during interrupt registration
 *
 * Return Value: None
 */
static irqreturn_t mgsl_interrupt(int irq, void *dev_id)
{
	struct mgsl_struct * info;
	u16 UscVector;
	u16 DmaVector;

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_interrupt(%d)entry.\n",
			__FILE__,__LINE__,irq);

	info = (struct mgsl_struct *)dev_id;
	if (!info)
		return IRQ_NONE;

	/* irq_spinlock serializes against other CPUs touching the
	 * hardware and the pending_bh/bh_* bookkeeping
	 */
	spin_lock(&info->irq_spinlock);

	/* loop until all pending serial and DMA interrupt sources
	 * for this device have been serviced
	 */
	for(;;) {
		/* Read the interrupt vectors from hardware. */
		UscVector = usc_InReg(info, IVR) >> 9;
		DmaVector = usc_InDmaReg(info, DIVR);

		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
				__FILE__,__LINE__,info->device_name,UscVector,DmaVector);

		if ( !UscVector && !DmaVector )
			break;

		/* Dispatch interrupt vector: serial interrupts go
		 * through UscIsrTable, DMA interrupts are decoded from
		 * DIVR bits 10:9
		 */
		if ( UscVector )
			(*UscIsrTable[UscVector])(info);
		else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
			mgsl_isr_transmit_dma(info);
		else
			mgsl_isr_receive_dma(info);

		if ( info->isr_overflow ) {
			/* runaway interrupt source: mask everything off */
			printk(KERN_ERR"%s(%d):%s isr overflow irq=%d\n",
				__FILE__,__LINE__,info->device_name, irq);
			usc_DisableMasterIrqBit(info);
			usc_DisableDmaInterrupts(info,DICR_MASTER);
			break;
		}
	}

	/* Request bottom half processing if there's something
	 * for it to do and the bh is not already running
	 */

	if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
		if ( debug_level >= DEBUG_LEVEL_ISR )
			printk("%s(%d):%s queueing bh task.\n",
				__FILE__,__LINE__,info->device_name);
		schedule_work(&info->task);
		info->bh_requested = 1;
	}

	spin_unlock(&info->irq_spinlock);

	if ( debug_level >= DEBUG_LEVEL_ISR )
		printk("%s(%d):mgsl_interrupt(%d)exit.\n",
			__FILE__,__LINE__,irq);
	return IRQ_HANDLED;
}	/* end of mgsl_interrupt() */

/* startup()
 *
 * 	Initialize and start device.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error code
 */
static int startup(struct mgsl_struct * info)
{
	int retval = 0;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);

	/* nothing to do if the hardware is already initialized */
	if (info->flags & ASYNC_INITIALIZED)
		return 0;

	if (!info->xmit_buf) {
		/* allocate a page of memory for a transmit buffer */
		info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
		if (!info->xmit_buf) {
			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
				__FILE__,__LINE__,info->device_name);
			return -ENOMEM;
		}
	}

	info->pending_bh = 0;

	memset(&info->icount, 0, sizeof(info->icount));

	setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);

	/* Allocate and claim adapter resources */
	retval = mgsl_claim_resources(info);

	/* perform existence check and diagnostics */
	if ( !retval )
		retval = mgsl_adapter_test(info);

	if ( retval ) {
		/* adapter failed; flag the tty and release everything */
  		if (capable(CAP_SYS_ADMIN) && info->tty)
			set_bit(TTY_IO_ERROR, &info->tty->flags);
		mgsl_release_resources(info);
  		return retval;
  	}

	/* program hardware for current parameters */
	mgsl_change_params(info);

	if (info->tty)
		clear_bit(TTY_IO_ERROR, &info->tty->flags);

	info->flags |= ASYNC_INITIALIZED;

	return 0;

}	/* end of startup() */

/* shutdown()
 *
 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void shutdown(struct mgsl_struct * info)
{
	unsigned long flags;

	if (!(info->flags & ASYNC_INITIALIZED))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_shutdown(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* clear status wait queue because status changes */
	/* can't happen after shutting down the hardware */
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	del_timer_sync(&info->tx_timer);

	if (info->xmit_buf) {
		free_page((unsigned long) info->xmit_buf);
		info->xmit_buf = NULL;
	}

	/* quiesce the hardware with interrupts on this device masked */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_DisableMasterIrqBit(info);
	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
		TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
	usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);

	/* Disable DMAEN (Port 7, Bit 14) */
	/* This disconnects the DMA request signal from the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));

	/* Disable INTEN (Port 6, Bit12) */
	/* This disconnects the IRQ request signal to the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));

	/* drop DTR/RTS if there is no tty or HUPCL (hang up on close)
	 * is set in the termios
	 */
	if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
 		info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
		usc_set_serial_signals(info);
	}

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	mgsl_release_resources(info);

	if (info->tty)
		set_bit(TTY_IO_ERROR, &info->tty->flags);

	info->flags &= ~ASYNC_INITIALIZED;

}	/* end of shutdown() */

/*
 * Reprogram the adapter (mode, signals, status interrupts, receiver)
 * from info->params. Caller must have set params beforehand.
 */
static void mgsl_program_hw(struct mgsl_struct *info)
{
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);

	usc_stop_receiver(info);
	usc_stop_transmitter(info);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	/* netcount != 0 means the generic HDLC network interface is
	 * open, which always requires synchronous operation
	 */
	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ||
	    info->netcount)
		usc_set_sync_mode(info);
	else
		usc_set_async_mode(info);

	usc_set_serial_signals(info);

	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;

	usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
	usc_EnableInterrupts(info, IO_PIN);
	usc_get_serial_signals(info);

	if (info->netcount || info->tty->termios->c_cflag & CREAD)
		usc_start_receiver(info);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);
}

/* Reconfigure adapter based on new parameters
 */
static void mgsl_change_params(struct mgsl_struct *info)
{
	unsigned cflag;
	int bits_per_char;

	if (!info->tty || !info->tty->termios)
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_change_params(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	cflag = info->tty->termios->c_cflag;

	/* if B0 rate (hangup) specified then negate DTR and RTS */
	/* otherwise assert DTR and RTS */
 	if (cflag & CBAUD)
		info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
	else
		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);

	/* byte size and parity */

	switch (cflag & CSIZE) {
	      case CS5: info->params.data_bits = 5; break;
	      case CS6: info->params.data_bits = 6; break;
	      case CS7: info->params.data_bits = 7; break;
	      case CS8: info->params.data_bits = 8; break;
	      /* Never happens, but GCC is too dumb to figure it out */
	      default:  info->params.data_bits = 7; break;
	}

	if (cflag & CSTOPB)
		info->params.stop_bits = 2;
	else
		info->params.stop_bits = 1;

	info->params.parity = ASYNC_PARITY_NONE;
	if (cflag & PARENB) {
		if (cflag & PARODD)
			info->params.parity = ASYNC_PARITY_ODD;
		else
			info->params.parity = ASYNC_PARITY_EVEN;
#ifdef CMSPAR
		if (cflag & CMSPAR)
			info->params.parity = ASYNC_PARITY_SPACE;
#endif
	}

	/* calculate number of jiffies to transmit a full
	 * FIFO (32 bytes) at specified data rate
	 */
	bits_per_char = info->params.data_bits +
			info->params.stop_bits + 1;

	/* if port data rate is set to 460800 or less then
	 * allow tty settings to override, otherwise keep the
	 * current data rate.
	 */
	if (info->params.data_rate <= 460800)
		info->params.data_rate = tty_get_baud_rate(info->tty);

	if ( info->params.data_rate ) {
		info->timeout = (32*HZ*bits_per_char) /
				info->params.data_rate;
	}
	/* NOTE(review): if data_rate is 0, the old timeout keeps
	 * accumulating HZ/50 on every call -- confirm intended.
	 */
	info->timeout += HZ/50;		/* Add .02 seconds of slop */

	if (cflag & CRTSCTS)
		info->flags |= ASYNC_CTS_FLOW;
	else
		info->flags &= ~ASYNC_CTS_FLOW;

	if (cflag & CLOCAL)
		info->flags &= ~ASYNC_CHECK_CD;
	else
		info->flags |= ASYNC_CHECK_CD;

	/* process tty input control flags */

	info->read_status_mask = RXSTATUS_OVERRUN;
	if (I_INPCK(info->tty))
		info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
 	if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
 		info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;

	/* NOTE(review): ignore_status_mask is only ever OR-ed here and
	 * never cleared, so bits accumulate across termios changes --
	 * confirm that is the intended behavior.
	 */
	if (I_IGNPAR(info->tty))
		info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
	if (I_IGNBRK(info->tty)) {
		info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
		/* If ignoring parity and break indicators, ignore
		 * overruns too.  (For real raw support).
		 */
		if (I_IGNPAR(info->tty))
			info->ignore_status_mask |= RXSTATUS_OVERRUN;
	}

	mgsl_program_hw(info);

}	/* end of mgsl_change_params() */

/* mgsl_put_char()
 *
 * 	Add a character to the transmit buffer.
2034 * 2035 * Arguments: tty pointer to tty information structure 2036 * ch character to add to transmit buffer 2037 * 2038 * Return Value: None 2039 */ 2040static void mgsl_put_char(struct tty_struct *tty, unsigned char ch) 2041{ 2042 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2043 unsigned long flags; 2044 2045 if ( debug_level >= DEBUG_LEVEL_INFO ) { 2046 printk( "%s(%d):mgsl_put_char(%d) on %s\n", 2047 __FILE__,__LINE__,ch,info->device_name); 2048 } 2049 2050 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) 2051 return; 2052 2053 if (!tty || !info->xmit_buf) 2054 return; 2055 2056 spin_lock_irqsave(&info->irq_spinlock,flags); 2057 2058 if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) { 2059 2060 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) { 2061 info->xmit_buf[info->xmit_head++] = ch; 2062 info->xmit_head &= SERIAL_XMIT_SIZE-1; 2063 info->xmit_cnt++; 2064 } 2065 } 2066 2067 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2068 2069} /* end of mgsl_put_char() */ 2070 2071/* mgsl_flush_chars() 2072 * 2073 * Enable transmitter so remaining characters in the 2074 * transmit buffer are sent. 
2075 * 2076 * Arguments: tty pointer to tty information structure 2077 * Return Value: None 2078 */ 2079static void mgsl_flush_chars(struct tty_struct *tty) 2080{ 2081 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2082 unsigned long flags; 2083 2084 if ( debug_level >= DEBUG_LEVEL_INFO ) 2085 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n", 2086 __FILE__,__LINE__,info->device_name,info->xmit_cnt); 2087 2088 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars")) 2089 return; 2090 2091 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || 2092 !info->xmit_buf) 2093 return; 2094 2095 if ( debug_level >= DEBUG_LEVEL_INFO ) 2096 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n", 2097 __FILE__,__LINE__,info->device_name ); 2098 2099 spin_lock_irqsave(&info->irq_spinlock,flags); 2100 2101 if (!info->tx_active) { 2102 if ( (info->params.mode == MGSL_MODE_HDLC || 2103 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) { 2104 /* operating in synchronous (frame oriented) mode */ 2105 /* copy data from circular xmit_buf to */ 2106 /* transmit DMA buffer. 
*/ 2107 mgsl_load_tx_dma_buffer(info, 2108 info->xmit_buf,info->xmit_cnt); 2109 } 2110 usc_start_transmitter(info); 2111 } 2112 2113 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2114 2115} /* end of mgsl_flush_chars() */ 2116 2117/* mgsl_write() 2118 * 2119 * Send a block of data 2120 * 2121 * Arguments: 2122 * 2123 * tty pointer to tty information structure 2124 * buf pointer to buffer containing send data 2125 * count size of send data in bytes 2126 * 2127 * Return Value: number of characters written 2128 */ 2129static int mgsl_write(struct tty_struct * tty, 2130 const unsigned char *buf, int count) 2131{ 2132 int c, ret = 0; 2133 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2134 unsigned long flags; 2135 2136 if ( debug_level >= DEBUG_LEVEL_INFO ) 2137 printk( "%s(%d):mgsl_write(%s) count=%d\n", 2138 __FILE__,__LINE__,info->device_name,count); 2139 2140 if (mgsl_paranoia_check(info, tty->name, "mgsl_write")) 2141 goto cleanup; 2142 2143 if (!tty || !info->xmit_buf) 2144 goto cleanup; 2145 2146 if ( info->params.mode == MGSL_MODE_HDLC || 2147 info->params.mode == MGSL_MODE_RAW ) { 2148 /* operating in synchronous (frame oriented) mode */ 2149 /* operating in synchronous (frame oriented) mode */ 2150 if (info->tx_active) { 2151 2152 if ( info->params.mode == MGSL_MODE_HDLC ) { 2153 ret = 0; 2154 goto cleanup; 2155 } 2156 /* transmitter is actively sending data - 2157 * if we have multiple transmit dma and 2158 * holding buffers, attempt to queue this 2159 * frame for transmission at a later time. 
2160 */ 2161 if (info->tx_holding_count >= info->num_tx_holding_buffers ) { 2162 /* no tx holding buffers available */ 2163 ret = 0; 2164 goto cleanup; 2165 } 2166 2167 /* queue transmit frame request */ 2168 ret = count; 2169 save_tx_buffer_request(info,buf,count); 2170 2171 /* if we have sufficient tx dma buffers, 2172 * load the next buffered tx request 2173 */ 2174 spin_lock_irqsave(&info->irq_spinlock,flags); 2175 load_next_tx_holding_buffer(info); 2176 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2177 goto cleanup; 2178 } 2179 2180 /* if operating in HDLC LoopMode and the adapter */ 2181 /* has yet to be inserted into the loop, we can't */ 2182 /* transmit */ 2183 2184 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) && 2185 !usc_loopmode_active(info) ) 2186 { 2187 ret = 0; 2188 goto cleanup; 2189 } 2190 2191 if ( info->xmit_cnt ) { 2192 /* Send accumulated from send_char() calls */ 2193 /* as frame and wait before accepting more data. */ 2194 ret = 0; 2195 2196 /* copy data from circular xmit_buf to */ 2197 /* transmit DMA buffer. 
*/ 2198 mgsl_load_tx_dma_buffer(info, 2199 info->xmit_buf,info->xmit_cnt); 2200 if ( debug_level >= DEBUG_LEVEL_INFO ) 2201 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n", 2202 __FILE__,__LINE__,info->device_name); 2203 } else { 2204 if ( debug_level >= DEBUG_LEVEL_INFO ) 2205 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n", 2206 __FILE__,__LINE__,info->device_name); 2207 ret = count; 2208 info->xmit_cnt = count; 2209 mgsl_load_tx_dma_buffer(info,buf,count); 2210 } 2211 } else { 2212 while (1) { 2213 spin_lock_irqsave(&info->irq_spinlock,flags); 2214 c = min_t(int, count, 2215 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1, 2216 SERIAL_XMIT_SIZE - info->xmit_head)); 2217 if (c <= 0) { 2218 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2219 break; 2220 } 2221 memcpy(info->xmit_buf + info->xmit_head, buf, c); 2222 info->xmit_head = ((info->xmit_head + c) & 2223 (SERIAL_XMIT_SIZE-1)); 2224 info->xmit_cnt += c; 2225 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2226 buf += c; 2227 count -= c; 2228 ret += c; 2229 } 2230 } 2231 2232 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) { 2233 spin_lock_irqsave(&info->irq_spinlock,flags); 2234 if (!info->tx_active) 2235 usc_start_transmitter(info); 2236 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2237 } 2238cleanup: 2239 if ( debug_level >= DEBUG_LEVEL_INFO ) 2240 printk( "%s(%d):mgsl_write(%s) returning=%d\n", 2241 __FILE__,__LINE__,info->device_name,ret); 2242 2243 return ret; 2244 2245} /* end of mgsl_write() */ 2246 2247/* mgsl_write_room() 2248 * 2249 * Return the count of free bytes in transmit buffer 2250 * 2251 * Arguments: tty pointer to tty info structure 2252 * Return Value: None 2253 */ 2254static int mgsl_write_room(struct tty_struct *tty) 2255{ 2256 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2257 int ret; 2258 2259 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room")) 2260 return 0; 2261 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1; 2262 if (ret 
< 0) 2263 ret = 0; 2264 2265 if (debug_level >= DEBUG_LEVEL_INFO) 2266 printk("%s(%d):mgsl_write_room(%s)=%d\n", 2267 __FILE__,__LINE__, info->device_name,ret ); 2268 2269 if ( info->params.mode == MGSL_MODE_HDLC || 2270 info->params.mode == MGSL_MODE_RAW ) { 2271 /* operating in synchronous (frame oriented) mode */ 2272 if ( info->tx_active ) 2273 return 0; 2274 else 2275 return HDLC_MAX_FRAME_SIZE; 2276 } 2277 2278 return ret; 2279 2280} /* end of mgsl_write_room() */ 2281 2282/* mgsl_chars_in_buffer() 2283 * 2284 * Return the count of bytes in transmit buffer 2285 * 2286 * Arguments: tty pointer to tty info structure 2287 * Return Value: None 2288 */ 2289static int mgsl_chars_in_buffer(struct tty_struct *tty) 2290{ 2291 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2292 2293 if (debug_level >= DEBUG_LEVEL_INFO) 2294 printk("%s(%d):mgsl_chars_in_buffer(%s)\n", 2295 __FILE__,__LINE__, info->device_name ); 2296 2297 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer")) 2298 return 0; 2299 2300 if (debug_level >= DEBUG_LEVEL_INFO) 2301 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n", 2302 __FILE__,__LINE__, info->device_name,info->xmit_cnt ); 2303 2304 if ( info->params.mode == MGSL_MODE_HDLC || 2305 info->params.mode == MGSL_MODE_RAW ) { 2306 /* operating in synchronous (frame oriented) mode */ 2307 if ( info->tx_active ) 2308 return info->max_frame_size; 2309 else 2310 return 0; 2311 } 2312 2313 return info->xmit_cnt; 2314} /* end of mgsl_chars_in_buffer() */ 2315 2316/* mgsl_flush_buffer() 2317 * 2318 * Discard all data in the send buffer 2319 * 2320 * Arguments: tty pointer to tty info structure 2321 * Return Value: None 2322 */ 2323static void mgsl_flush_buffer(struct tty_struct *tty) 2324{ 2325 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2326 unsigned long flags; 2327 2328 if (debug_level >= DEBUG_LEVEL_INFO) 2329 printk("%s(%d):mgsl_flush_buffer(%s) entry\n", 2330 __FILE__,__LINE__, info->device_name ); 
2331 2332 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer")) 2333 return; 2334 2335 spin_lock_irqsave(&info->irq_spinlock,flags); 2336 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 2337 del_timer(&info->tx_timer); 2338 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2339 2340 tty_wakeup(tty); 2341} 2342 2343/* mgsl_send_xchar() 2344 * 2345 * Send a high-priority XON/XOFF character 2346 * 2347 * Arguments: tty pointer to tty info structure 2348 * ch character to send 2349 * Return Value: None 2350 */ 2351static void mgsl_send_xchar(struct tty_struct *tty, char ch) 2352{ 2353 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2354 unsigned long flags; 2355 2356 if (debug_level >= DEBUG_LEVEL_INFO) 2357 printk("%s(%d):mgsl_send_xchar(%s,%d)\n", 2358 __FILE__,__LINE__, info->device_name, ch ); 2359 2360 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar")) 2361 return; 2362 2363 info->x_char = ch; 2364 if (ch) { 2365 /* Make sure transmit interrupts are on */ 2366 spin_lock_irqsave(&info->irq_spinlock,flags); 2367 if (!info->tx_enabled) 2368 usc_start_transmitter(info); 2369 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2370 } 2371} /* end of mgsl_send_xchar() */ 2372 2373/* mgsl_throttle() 2374 * 2375 * Signal remote device to throttle send data (our receive data) 2376 * 2377 * Arguments: tty pointer to tty info structure 2378 * Return Value: None 2379 */ 2380static void mgsl_throttle(struct tty_struct * tty) 2381{ 2382 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2383 unsigned long flags; 2384 2385 if (debug_level >= DEBUG_LEVEL_INFO) 2386 printk("%s(%d):mgsl_throttle(%s) entry\n", 2387 __FILE__,__LINE__, info->device_name ); 2388 2389 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle")) 2390 return; 2391 2392 if (I_IXOFF(tty)) 2393 mgsl_send_xchar(tty, STOP_CHAR(tty)); 2394 2395 if (tty->termios->c_cflag & CRTSCTS) { 2396 spin_lock_irqsave(&info->irq_spinlock,flags); 2397 
info->serial_signals &= ~SerialSignal_RTS; 2398 usc_set_serial_signals(info); 2399 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2400 } 2401} /* end of mgsl_throttle() */ 2402 2403/* mgsl_unthrottle() 2404 * 2405 * Signal remote device to stop throttling send data (our receive data) 2406 * 2407 * Arguments: tty pointer to tty info structure 2408 * Return Value: None 2409 */ 2410static void mgsl_unthrottle(struct tty_struct * tty) 2411{ 2412 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2413 unsigned long flags; 2414 2415 if (debug_level >= DEBUG_LEVEL_INFO) 2416 printk("%s(%d):mgsl_unthrottle(%s) entry\n", 2417 __FILE__,__LINE__, info->device_name ); 2418 2419 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle")) 2420 return; 2421 2422 if (I_IXOFF(tty)) { 2423 if (info->x_char) 2424 info->x_char = 0; 2425 else 2426 mgsl_send_xchar(tty, START_CHAR(tty)); 2427 } 2428 2429 if (tty->termios->c_cflag & CRTSCTS) { 2430 spin_lock_irqsave(&info->irq_spinlock,flags); 2431 info->serial_signals |= SerialSignal_RTS; 2432 usc_set_serial_signals(info); 2433 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2434 } 2435 2436} /* end of mgsl_unthrottle() */ 2437 2438/* mgsl_get_stats() 2439 * 2440 * get the current serial parameters information 2441 * 2442 * Arguments: info pointer to device instance data 2443 * user_icount pointer to buffer to hold returned stats 2444 * 2445 * Return Value: 0 if success, otherwise error code 2446 */ 2447static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount) 2448{ 2449 int err; 2450 2451 if (debug_level >= DEBUG_LEVEL_INFO) 2452 printk("%s(%d):mgsl_get_params(%s)\n", 2453 __FILE__,__LINE__, info->device_name); 2454 2455 if (!user_icount) { 2456 memset(&info->icount, 0, sizeof(info->icount)); 2457 } else { 2458 COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount)); 2459 if (err) 2460 return -EFAULT; 2461 } 2462 2463 return 0; 2464 2465} /* end of 
mgsl_get_stats() */ 2466 2467/* mgsl_get_params() 2468 * 2469 * get the current serial parameters information 2470 * 2471 * Arguments: info pointer to device instance data 2472 * user_params pointer to buffer to hold returned params 2473 * 2474 * Return Value: 0 if success, otherwise error code 2475 */ 2476static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params) 2477{ 2478 int err; 2479 if (debug_level >= DEBUG_LEVEL_INFO) 2480 printk("%s(%d):mgsl_get_params(%s)\n", 2481 __FILE__,__LINE__, info->device_name); 2482 2483 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS)); 2484 if (err) { 2485 if ( debug_level >= DEBUG_LEVEL_INFO ) 2486 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n", 2487 __FILE__,__LINE__,info->device_name); 2488 return -EFAULT; 2489 } 2490 2491 return 0; 2492 2493} /* end of mgsl_get_params() */ 2494 2495/* mgsl_set_params() 2496 * 2497 * set the serial parameters 2498 * 2499 * Arguments: 2500 * 2501 * info pointer to device instance data 2502 * new_params user buffer containing new serial params 2503 * 2504 * Return Value: 0 if success, otherwise error code 2505 */ 2506static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params) 2507{ 2508 unsigned long flags; 2509 MGSL_PARAMS tmp_params; 2510 int err; 2511 2512 if (debug_level >= DEBUG_LEVEL_INFO) 2513 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__, 2514 info->device_name ); 2515 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); 2516 if (err) { 2517 if ( debug_level >= DEBUG_LEVEL_INFO ) 2518 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n", 2519 __FILE__,__LINE__,info->device_name); 2520 return -EFAULT; 2521 } 2522 2523 spin_lock_irqsave(&info->irq_spinlock,flags); 2524 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); 2525 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2526 2527 mgsl_change_params(info); 2528 2529 return 0; 2530 2531} /* end of mgsl_set_params() 
*/ 2532 2533/* mgsl_get_txidle() 2534 * 2535 * get the current transmit idle mode 2536 * 2537 * Arguments: info pointer to device instance data 2538 * idle_mode pointer to buffer to hold returned idle mode 2539 * 2540 * Return Value: 0 if success, otherwise error code 2541 */ 2542static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode) 2543{ 2544 int err; 2545 2546 if (debug_level >= DEBUG_LEVEL_INFO) 2547 printk("%s(%d):mgsl_get_txidle(%s)=%d\n", 2548 __FILE__,__LINE__, info->device_name, info->idle_mode); 2549 2550 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int)); 2551 if (err) { 2552 if ( debug_level >= DEBUG_LEVEL_INFO ) 2553 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n", 2554 __FILE__,__LINE__,info->device_name); 2555 return -EFAULT; 2556 } 2557 2558 return 0; 2559 2560} /* end of mgsl_get_txidle() */ 2561 2562/* mgsl_set_txidle() service ioctl to set transmit idle mode 2563 * 2564 * Arguments: info pointer to device instance data 2565 * idle_mode new idle mode 2566 * 2567 * Return Value: 0 if success, otherwise error code 2568 */ 2569static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode) 2570{ 2571 unsigned long flags; 2572 2573 if (debug_level >= DEBUG_LEVEL_INFO) 2574 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__, 2575 info->device_name, idle_mode ); 2576 2577 spin_lock_irqsave(&info->irq_spinlock,flags); 2578 info->idle_mode = idle_mode; 2579 usc_set_txidle( info ); 2580 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2581 return 0; 2582 2583} /* end of mgsl_set_txidle() */ 2584 2585/* mgsl_txenable() 2586 * 2587 * enable or disable the transmitter 2588 * 2589 * Arguments: 2590 * 2591 * info pointer to device instance data 2592 * enable 1 = enable, 0 = disable 2593 * 2594 * Return Value: 0 if success, otherwise error code 2595 */ 2596static int mgsl_txenable(struct mgsl_struct * info, int enable) 2597{ 2598 unsigned long flags; 2599 2600 if (debug_level >= DEBUG_LEVEL_INFO) 2601 
printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__, 2602 info->device_name, enable); 2603 2604 spin_lock_irqsave(&info->irq_spinlock,flags); 2605 if ( enable ) { 2606 if ( !info->tx_enabled ) { 2607 2608 usc_start_transmitter(info); 2609 /*-------------------------------------------------- 2610 * if HDLC/SDLC Loop mode, attempt to insert the 2611 * station in the 'loop' by setting CMR:13. Upon 2612 * receipt of the next GoAhead (RxAbort) sequence, 2613 * the OnLoop indicator (CCSR:7) should go active 2614 * to indicate that we are on the loop 2615 *--------------------------------------------------*/ 2616 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 2617 usc_loopmode_insert_request( info ); 2618 } 2619 } else { 2620 if ( info->tx_enabled ) 2621 usc_stop_transmitter(info); 2622 } 2623 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2624 return 0; 2625 2626} /* end of mgsl_txenable() */ 2627 2628/* mgsl_txabort() abort send HDLC frame 2629 * 2630 * Arguments: info pointer to device instance data 2631 * Return Value: 0 if success, otherwise error code 2632 */ 2633static int mgsl_txabort(struct mgsl_struct * info) 2634{ 2635 unsigned long flags; 2636 2637 if (debug_level >= DEBUG_LEVEL_INFO) 2638 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__, 2639 info->device_name); 2640 2641 spin_lock_irqsave(&info->irq_spinlock,flags); 2642 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC ) 2643 { 2644 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 2645 usc_loopmode_cancel_transmit( info ); 2646 else 2647 usc_TCmd(info,TCmd_SendAbort); 2648 } 2649 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2650 return 0; 2651 2652} /* end of mgsl_txabort() */ 2653 2654/* mgsl_rxenable() enable or disable the receiver 2655 * 2656 * Arguments: info pointer to device instance data 2657 * enable 1 = enable, 0 = disable 2658 * Return Value: 0 if success, otherwise error code 2659 */ 2660static int mgsl_rxenable(struct mgsl_struct * info, int enable) 2661{ 
2662 unsigned long flags; 2663 2664 if (debug_level >= DEBUG_LEVEL_INFO) 2665 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__, 2666 info->device_name, enable); 2667 2668 spin_lock_irqsave(&info->irq_spinlock,flags); 2669 if ( enable ) { 2670 if ( !info->rx_enabled ) 2671 usc_start_receiver(info); 2672 } else { 2673 if ( info->rx_enabled ) 2674 usc_stop_receiver(info); 2675 } 2676 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2677 return 0; 2678 2679} /* end of mgsl_rxenable() */ 2680 2681/* mgsl_wait_event() wait for specified event to occur 2682 * 2683 * Arguments: info pointer to device instance data 2684 * mask pointer to bitmask of events to wait for 2685 * Return Value: 0 if successful and bit mask updated with 2686 * of events triggerred, 2687 * otherwise error code 2688 */ 2689static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr) 2690{ 2691 unsigned long flags; 2692 int s; 2693 int rc=0; 2694 struct mgsl_icount cprev, cnow; 2695 int events; 2696 int mask; 2697 struct _input_signal_events oldsigs, newsigs; 2698 DECLARE_WAITQUEUE(wait, current); 2699 2700 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int)); 2701 if (rc) { 2702 return -EFAULT; 2703 } 2704 2705 if (debug_level >= DEBUG_LEVEL_INFO) 2706 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__, 2707 info->device_name, mask); 2708 2709 spin_lock_irqsave(&info->irq_spinlock,flags); 2710 2711 /* return immediately if state matches requested events */ 2712 usc_get_serial_signals(info); 2713 s = info->serial_signals; 2714 events = mask & 2715 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + 2716 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) + 2717 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) + 2718 ((s & SerialSignal_RI) ? 
MgslEvent_RiActive :MgslEvent_RiInactive) ); 2719 if (events) { 2720 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2721 goto exit; 2722 } 2723 2724 /* save current irq counts */ 2725 cprev = info->icount; 2726 oldsigs = info->input_signal_events; 2727 2728 /* enable hunt and idle irqs if needed */ 2729 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { 2730 u16 oldreg = usc_InReg(info,RICR); 2731 u16 newreg = oldreg + 2732 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) + 2733 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0); 2734 if (oldreg != newreg) 2735 usc_OutReg(info, RICR, newreg); 2736 } 2737 2738 set_current_state(TASK_INTERRUPTIBLE); 2739 add_wait_queue(&info->event_wait_q, &wait); 2740 2741 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2742 2743 2744 for(;;) { 2745 schedule(); 2746 if (signal_pending(current)) { 2747 rc = -ERESTARTSYS; 2748 break; 2749 } 2750 2751 /* get current irq counts */ 2752 spin_lock_irqsave(&info->irq_spinlock,flags); 2753 cnow = info->icount; 2754 newsigs = info->input_signal_events; 2755 set_current_state(TASK_INTERRUPTIBLE); 2756 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2757 2758 /* if no change, wait aborted for some reason */ 2759 if (newsigs.dsr_up == oldsigs.dsr_up && 2760 newsigs.dsr_down == oldsigs.dsr_down && 2761 newsigs.dcd_up == oldsigs.dcd_up && 2762 newsigs.dcd_down == oldsigs.dcd_down && 2763 newsigs.cts_up == oldsigs.cts_up && 2764 newsigs.cts_down == oldsigs.cts_down && 2765 newsigs.ri_up == oldsigs.ri_up && 2766 newsigs.ri_down == oldsigs.ri_down && 2767 cnow.exithunt == cprev.exithunt && 2768 cnow.rxidle == cprev.rxidle) { 2769 rc = -EIO; 2770 break; 2771 } 2772 2773 events = mask & 2774 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) + 2775 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) + 2776 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) + 2777 (newsigs.dcd_down != oldsigs.dcd_down ? 
MgslEvent_DcdInactive:0) + 2778 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) + 2779 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) + 2780 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) + 2781 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) + 2782 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) + 2783 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) ); 2784 if (events) 2785 break; 2786 2787 cprev = cnow; 2788 oldsigs = newsigs; 2789 } 2790 2791 remove_wait_queue(&info->event_wait_q, &wait); 2792 set_current_state(TASK_RUNNING); 2793 2794 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) { 2795 spin_lock_irqsave(&info->irq_spinlock,flags); 2796 if (!waitqueue_active(&info->event_wait_q)) { 2797 /* disable enable exit hunt mode/idle rcvd IRQs */ 2798 usc_OutReg(info, RICR, usc_InReg(info,RICR) & 2799 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)); 2800 } 2801 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2802 } 2803exit: 2804 if ( rc == 0 ) 2805 PUT_USER(rc, events, mask_ptr); 2806 2807 return rc; 2808 2809} /* end of mgsl_wait_event() */ 2810 2811static int modem_input_wait(struct mgsl_struct *info,int arg) 2812{ 2813 unsigned long flags; 2814 int rc; 2815 struct mgsl_icount cprev, cnow; 2816 DECLARE_WAITQUEUE(wait, current); 2817 2818 /* save current irq counts */ 2819 spin_lock_irqsave(&info->irq_spinlock,flags); 2820 cprev = info->icount; 2821 add_wait_queue(&info->status_event_wait_q, &wait); 2822 set_current_state(TASK_INTERRUPTIBLE); 2823 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2824 2825 for(;;) { 2826 schedule(); 2827 if (signal_pending(current)) { 2828 rc = -ERESTARTSYS; 2829 break; 2830 } 2831 2832 /* get new irq counts */ 2833 spin_lock_irqsave(&info->irq_spinlock,flags); 2834 cnow = info->icount; 2835 set_current_state(TASK_INTERRUPTIBLE); 2836 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2837 2838 /* if no change, wait aborted for some reason 
*/ 2839 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && 2840 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { 2841 rc = -EIO; 2842 break; 2843 } 2844 2845 /* check for change in caller specified modem input */ 2846 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || 2847 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || 2848 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || 2849 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { 2850 rc = 0; 2851 break; 2852 } 2853 2854 cprev = cnow; 2855 } 2856 remove_wait_queue(&info->status_event_wait_q, &wait); 2857 set_current_state(TASK_RUNNING); 2858 return rc; 2859} 2860 2861/* return the state of the serial control and status signals 2862 */ 2863static int tiocmget(struct tty_struct *tty, struct file *file) 2864{ 2865 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2866 unsigned int result; 2867 unsigned long flags; 2868 2869 spin_lock_irqsave(&info->irq_spinlock,flags); 2870 usc_get_serial_signals(info); 2871 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2872 2873 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) + 2874 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) + 2875 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) + 2876 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) + 2877 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) + 2878 ((info->serial_signals & SerialSignal_CTS) ? 
TIOCM_CTS:0); 2879 2880 if (debug_level >= DEBUG_LEVEL_INFO) 2881 printk("%s(%d):%s tiocmget() value=%08X\n", 2882 __FILE__,__LINE__, info->device_name, result ); 2883 return result; 2884} 2885 2886/* set modem control signals (DTR/RTS) 2887 */ 2888static int tiocmset(struct tty_struct *tty, struct file *file, 2889 unsigned int set, unsigned int clear) 2890{ 2891 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; 2892 unsigned long flags; 2893 2894 if (debug_level >= DEBUG_LEVEL_INFO) 2895 printk("%s(%d):%s tiocmset(%x,%x)\n", 2896 __FILE__,__LINE__,info->device_name, set, clear); 2897 2898 if (set & TIOCM_RTS) 2899 info->serial_signals |= SerialSignal_RTS; 2900 if (set & TIOCM_DTR) 2901 info->serial_signals |= SerialSignal_DTR; 2902 if (clear & TIOCM_RTS) 2903 info->serial_signals &= ~SerialSignal_RTS; 2904 if (clear & TIOCM_DTR) 2905 info->serial_signals &= ~SerialSignal_DTR; 2906 2907 spin_lock_irqsave(&info->irq_spinlock,flags); 2908 usc_set_serial_signals(info); 2909 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2910 2911 return 0; 2912} 2913 2914/* mgsl_break() Set or clear transmit break condition 2915 * 2916 * Arguments: tty pointer to tty instance data 2917 * break_state -1=set break condition, 0=clear 2918 * Return Value: None 2919 */ 2920static void mgsl_break(struct tty_struct *tty, int break_state) 2921{ 2922 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data; 2923 unsigned long flags; 2924 2925 if (debug_level >= DEBUG_LEVEL_INFO) 2926 printk("%s(%d):mgsl_break(%s,%d)\n", 2927 __FILE__,__LINE__, info->device_name, break_state); 2928 2929 if (mgsl_paranoia_check(info, tty->name, "mgsl_break")) 2930 return; 2931 2932 spin_lock_irqsave(&info->irq_spinlock,flags); 2933 if (break_state == -1) 2934 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7)); 2935 else 2936 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7)); 2937 spin_unlock_irqrestore(&info->irq_spinlock,flags); 2938 2939} /* end of mgsl_break() */ 
2940 2941/* mgsl_ioctl() Service an IOCTL request 2942 * 2943 * Arguments: 2944 * 2945 * tty pointer to tty instance data 2946 * file pointer to associated file object for device 2947 * cmd IOCTL command code 2948 * arg command argument/context 2949 * 2950 * Return Value: 0 if success, otherwise error code 2951 */ 2952static int mgsl_ioctl(struct tty_struct *tty, struct file * file, 2953 unsigned int cmd, unsigned long arg) 2954{ 2955 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data; 2956 2957 if (debug_level >= DEBUG_LEVEL_INFO) 2958 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__, 2959 info->device_name, cmd ); 2960 2961 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl")) 2962 return -ENODEV; 2963 2964 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && 2965 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { 2966 if (tty->flags & (1 << TTY_IO_ERROR)) 2967 return -EIO; 2968 } 2969 2970 return mgsl_ioctl_common(info, cmd, arg); 2971} 2972 2973static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg) 2974{ 2975 int error; 2976 struct mgsl_icount cnow; /* kernel counter temps */ 2977 void __user *argp = (void __user *)arg; 2978 struct serial_icounter_struct __user *p_cuser; /* user space */ 2979 unsigned long flags; 2980 2981 switch (cmd) { 2982 case MGSL_IOCGPARAMS: 2983 return mgsl_get_params(info, argp); 2984 case MGSL_IOCSPARAMS: 2985 return mgsl_set_params(info, argp); 2986 case MGSL_IOCGTXIDLE: 2987 return mgsl_get_txidle(info, argp); 2988 case MGSL_IOCSTXIDLE: 2989 return mgsl_set_txidle(info,(int)arg); 2990 case MGSL_IOCTXENABLE: 2991 return mgsl_txenable(info,(int)arg); 2992 case MGSL_IOCRXENABLE: 2993 return mgsl_rxenable(info,(int)arg); 2994 case MGSL_IOCTXABORT: 2995 return mgsl_txabort(info); 2996 case MGSL_IOCGSTATS: 2997 return mgsl_get_stats(info, argp); 2998 case MGSL_IOCWAITEVENT: 2999 return mgsl_wait_event(info, argp); 3000 case MGSL_IOCLOOPTXDONE: 3001 return 
mgsl_loopmode_send_done(info); 3002 /* Wait for modem input (DCD,RI,DSR,CTS) change 3003 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS) 3004 */ 3005 case TIOCMIWAIT: 3006 return modem_input_wait(info,(int)arg); 3007 3008 /* 3009 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) 3010 * Return: write counters to the user passed counter struct 3011 * NB: both 1->0 and 0->1 transitions are counted except for 3012 * RI where only 0->1 is counted. 3013 */ 3014 case TIOCGICOUNT: 3015 spin_lock_irqsave(&info->irq_spinlock,flags); 3016 cnow = info->icount; 3017 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3018 p_cuser = argp; 3019 PUT_USER(error,cnow.cts, &p_cuser->cts); 3020 if (error) return error; 3021 PUT_USER(error,cnow.dsr, &p_cuser->dsr); 3022 if (error) return error; 3023 PUT_USER(error,cnow.rng, &p_cuser->rng); 3024 if (error) return error; 3025 PUT_USER(error,cnow.dcd, &p_cuser->dcd); 3026 if (error) return error; 3027 PUT_USER(error,cnow.rx, &p_cuser->rx); 3028 if (error) return error; 3029 PUT_USER(error,cnow.tx, &p_cuser->tx); 3030 if (error) return error; 3031 PUT_USER(error,cnow.frame, &p_cuser->frame); 3032 if (error) return error; 3033 PUT_USER(error,cnow.overrun, &p_cuser->overrun); 3034 if (error) return error; 3035 PUT_USER(error,cnow.parity, &p_cuser->parity); 3036 if (error) return error; 3037 PUT_USER(error,cnow.brk, &p_cuser->brk); 3038 if (error) return error; 3039 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun); 3040 if (error) return error; 3041 return 0; 3042 default: 3043 return -ENOIOCTLCMD; 3044 } 3045 return 0; 3046} 3047 3048/* mgsl_set_termios() 3049 * 3050 * Set new termios settings 3051 * 3052 * Arguments: 3053 * 3054 * tty pointer to tty structure 3055 * termios pointer to buffer to hold returned old termios 3056 * 3057 * Return Value: None 3058 */ 3059static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios) 3060{ 3061 struct mgsl_struct *info = (struct mgsl_struct 
*)tty->driver_data; 3062 unsigned long flags; 3063 3064 if (debug_level >= DEBUG_LEVEL_INFO) 3065 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__, 3066 tty->driver->name ); 3067 3068 /* just return if nothing has changed */ 3069 if ((tty->termios->c_cflag == old_termios->c_cflag) 3070 && (RELEVANT_IFLAG(tty->termios->c_iflag) 3071 == RELEVANT_IFLAG(old_termios->c_iflag))) 3072 return; 3073 3074 mgsl_change_params(info); 3075 3076 /* Handle transition to B0 status */ 3077 if (old_termios->c_cflag & CBAUD && 3078 !(tty->termios->c_cflag & CBAUD)) { 3079 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); 3080 spin_lock_irqsave(&info->irq_spinlock,flags); 3081 usc_set_serial_signals(info); 3082 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3083 } 3084 3085 /* Handle transition away from B0 status */ 3086 if (!(old_termios->c_cflag & CBAUD) && 3087 tty->termios->c_cflag & CBAUD) { 3088 info->serial_signals |= SerialSignal_DTR; 3089 if (!(tty->termios->c_cflag & CRTSCTS) || 3090 !test_bit(TTY_THROTTLED, &tty->flags)) { 3091 info->serial_signals |= SerialSignal_RTS; 3092 } 3093 spin_lock_irqsave(&info->irq_spinlock,flags); 3094 usc_set_serial_signals(info); 3095 spin_unlock_irqrestore(&info->irq_spinlock,flags); 3096 } 3097 3098 /* Handle turning off CRTSCTS */ 3099 if (old_termios->c_cflag & CRTSCTS && 3100 !(tty->termios->c_cflag & CRTSCTS)) { 3101 tty->hw_stopped = 0; 3102 mgsl_start(tty); 3103 } 3104 3105} /* end of mgsl_set_termios() */ 3106 3107/* mgsl_close() 3108 * 3109 * Called when port is closed. Wait for remaining data to be 3110 * sent. Disable port and free resources. 
/* mgsl_close()
 *
 *	Called when port is closed. Wait for remaining data to be
 *	sent. Disable port and free resources.
 *
 * Arguments:
 *
 *	tty	pointer to open tty structure
 *	filp	pointer to open file object
 *
 * Return Value:	None
 */
static void mgsl_close(struct tty_struct *tty, struct file * filp)
{
	struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;

	if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
			 __FILE__,__LINE__, info->device_name, info->count);

	/* nothing to do if the port is not open */
	if (!info->count)
		return;

	/* a hangup already shut the port down; just log and leave */
	if (tty_hung_up_p(filp))
		goto cleanup;

	if ((tty->count == 1) && (info->count != 1)) {
		/*
		 * tty->count is 1 and the tty structure will be freed.
		 * info->count should be one in this case.
		 * if it's not, correct it so that the port is shutdown.
		 */
		printk("mgsl_close: bad refcount; tty->count is 1, "
		       "info->count is %d\n", info->count);
		info->count = 1;
	}

	info->count--;

	/* if at least one open remaining, leave hardware active */
	if (info->count)
		goto cleanup;

	info->flags |= ASYNC_CLOSING;

	/* set tty->closing to notify line discipline to
	 * only process XON/XOFF characters. Only the N_TTY
	 * discipline appears to use this (ppp does not).
	 */
	tty->closing = 1;

	/* wait for transmit data to clear all layers */

	if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n",
				 __FILE__,__LINE__, info->device_name );
		tty_wait_until_sent(tty, info->closing_wait);
	}

	/* also drain the hardware/DMA transmitter if it was started */
 	if (info->flags & ASYNC_INITIALIZED)
 		mgsl_wait_until_sent(tty, info->timeout);

	/* discard any transmit data still buffered in the driver */
	if (tty->driver->flush_buffer)
		tty->driver->flush_buffer(tty);

	tty_ldisc_flush(tty);

	shutdown(info);

	tty->closing = 0;
	info->tty = NULL;

	/* wake any opens blocked in block_til_ready(), honoring the
	 * configured delay between a close and the next open */
	if (info->blocked_open) {
		if (info->close_delay) {
			msleep_interruptible(jiffies_to_msecs(info->close_delay));
		}
		wake_up_interruptible(&info->open_wait);
	}

	info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);

	wake_up_interruptible(&info->close_wait);

cleanup:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
			tty->driver->name, info->count);

}	/* end of mgsl_close() */

/* mgsl_wait_until_sent()
 *
 *	Wait until the transmitter is empty.
 *
 * Arguments:
 *
 *	tty		pointer to tty info structure
 *	timeout		time to wait for send completion
 *
 * Return Value:	None
 */
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
{
	struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
	unsigned long orig_jiffies, char_time;

	if (!info )
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
		return;

	/* hardware never started; nothing can be in flight */
	if (!(info->flags & ASYNC_INITIALIZED))
		goto exit;

	orig_jiffies = jiffies;

	/* Set check interval to 1/5 of estimated time to
	 * send a character, and make it at least 1. The check
	 * interval should also be less than the timeout.
	 * Note: use tight timings here to satisfy the NIST-PCTS.
	 */

	if ( info->params.data_rate ) {
	       	char_time = info->timeout/(32 * 5);
		if (!char_time)
			char_time++;
	} else
		char_time = 1;

	if (timeout)
		char_time = min_t(unsigned long, char_time, timeout);

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* synchronous modes: poll until the frame DMA finishes,
		 * a signal arrives, or the timeout expires */
		while (info->tx_active) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	} else {
		/* async mode: poll the USC "all sent" hardware status */
		while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
			info->tx_enabled) {
			msleep_interruptible(jiffies_to_msecs(char_time));
			if (signal_pending(current))
				break;
			if (timeout && time_after(jiffies, orig_jiffies + timeout))
				break;
		}
	}

exit:
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
			 __FILE__,__LINE__, info->device_name );

}	/* end of mgsl_wait_until_sent() */
/* mgsl_hangup()
 *
 *	Called by tty_hangup() when a hangup is signaled.
 *	This is the same as to closing all open files for the port.
 *
 * Arguments:		tty	pointer to associated tty object
 * Return Value:	None
 */
static void mgsl_hangup(struct tty_struct *tty)
{
	struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgsl_hangup(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
		return;

	/* drop everything immediately: buffered tx data, hardware,
	 * and the open reference count */
	mgsl_flush_buffer(tty);
	shutdown(info);

	info->count = 0;
	info->flags &= ~ASYNC_NORMAL_ACTIVE;
	info->tty = NULL;

	/* let any blocked opens re-evaluate the port state */
	wake_up_interruptible(&info->open_wait);

}	/* end of mgsl_hangup() */

/* block_til_ready()
 *
 *	Block the current process until the specified port
 *	is ready to be opened.
 *
 * Arguments:
 *
 *	tty		pointer to tty info structure
 *	filp		pointer to open file object
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise error code
 */
static int block_til_ready(struct tty_struct *tty, struct file * filp,
			   struct mgsl_struct *info)
{
	DECLARE_WAITQUEUE(wait, current);
	int		retval;
	int		do_clocal = 0, extra_count = 0;
	unsigned long	flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready on %s\n",
			 __FILE__,__LINE__, tty->driver->name );

	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
		/* nonblock mode is set or port is not enabled */
		info->flags |= ASYNC_NORMAL_ACTIVE;
		return 0;
	}

	/* CLOCAL means we may succeed without waiting for carrier */
	if (tty->termios->c_cflag & CLOCAL)
		do_clocal = 1;

	/* Wait for carrier detect and the line to become
	 * free (i.e., not in use by the callout).  While we are in
	 * this loop, info->count is dropped by one, so that
	 * mgsl_close() knows when to free things.  We restore it upon
	 * exit, either normal or abnormal.
	 */

	retval = 0;
	add_wait_queue(&info->open_wait, &wait);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready before block on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, info->count );

	spin_lock_irqsave(&info->irq_spinlock, flags);
	if (!tty_hung_up_p(filp)) {
		extra_count = 1;
		info->count--;
	}
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	info->blocked_open++;

	while (1) {
		/* assert DTR and RTS while waiting for carrier */
		if (tty->termios->c_cflag & CBAUD) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
		 	usc_set_serial_signals(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}

		/* set state BEFORE testing conditions so a wakeup
		 * between the test and schedule() is not lost */
		set_current_state(TASK_INTERRUPTIBLE);

		if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
			retval = (info->flags & ASYNC_HUP_NOTIFY) ?
					-EAGAIN : -ERESTARTSYS;
			break;
		}

		spin_lock_irqsave(&info->irq_spinlock,flags);
	 	usc_get_serial_signals(info);
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		/* open succeeds once the port is not closing and either
		 * CLOCAL is set or carrier (DCD) is present */
 		if (!(info->flags & ASYNC_CLOSING) &&
 		    (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) {
 			break;
		}

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}

		if (debug_level >= DEBUG_LEVEL_INFO)
			printk("%s(%d):block_til_ready blocking on %s count=%d\n",
				 __FILE__,__LINE__, tty->driver->name, info->count );

		schedule();
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&info->open_wait, &wait);

	/* restore the reference dropped above */
	if (extra_count)
		info->count++;
	info->blocked_open--;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
			 __FILE__,__LINE__, tty->driver->name, info->count );

	if (!retval)
		info->flags |= ASYNC_NORMAL_ACTIVE;

	return retval;

}	/* end of block_til_ready() */
3420 * 3421 * Arguments: tty pointer to tty info structure 3422 * filp associated file pointer 3423 * 3424 * Return Value: 0 if success, otherwise error code 3425 */ 3426static int mgsl_open(struct tty_struct *tty, struct file * filp) 3427{ 3428 struct mgsl_struct *info; 3429 int retval, line; 3430 unsigned long flags; 3431 3432 /* verify range of specified line number */ 3433 line = tty->index; 3434 if ((line < 0) || (line >= mgsl_device_count)) { 3435 printk("%s(%d):mgsl_open with invalid line #%d.\n", 3436 __FILE__,__LINE__,line); 3437 return -ENODEV; 3438 } 3439 3440 /* find the info structure for the specified line */ 3441 info = mgsl_device_list; 3442 while(info && info->line != line) 3443 info = info->next_device; 3444 if (mgsl_paranoia_check(info, tty->name, "mgsl_open")) 3445 return -ENODEV; 3446 3447 tty->driver_data = info; 3448 info->tty = tty; 3449 3450 if (debug_level >= DEBUG_LEVEL_INFO) 3451 printk("%s(%d):mgsl_open(%s), old ref count = %d\n", 3452 __FILE__,__LINE__,tty->driver->name, info->count); 3453 3454 /* If port is closing, signal caller to try again */ 3455 if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){ 3456 if (info->flags & ASYNC_CLOSING) 3457 interruptible_sleep_on(&info->close_wait); 3458 retval = ((info->flags & ASYNC_HUP_NOTIFY) ? 3459 -EAGAIN : -ERESTARTSYS); 3460 goto cleanup; 3461 } 3462 3463 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 
1 : 0; 3464 3465 spin_lock_irqsave(&info->netlock, flags); 3466 if (info->netcount) { 3467 retval = -EBUSY; 3468 spin_unlock_irqrestore(&info->netlock, flags); 3469 goto cleanup; 3470 } 3471 info->count++; 3472 spin_unlock_irqrestore(&info->netlock, flags); 3473 3474 if (info->count == 1) { 3475 /* 1st open on this device, init hardware */ 3476 retval = startup(info); 3477 if (retval < 0) 3478 goto cleanup; 3479 } 3480 3481 retval = block_til_ready(tty, filp, info); 3482 if (retval) { 3483 if (debug_level >= DEBUG_LEVEL_INFO) 3484 printk("%s(%d):block_til_ready(%s) returned %d\n", 3485 __FILE__,__LINE__, info->device_name, retval); 3486 goto cleanup; 3487 } 3488 3489 if (debug_level >= DEBUG_LEVEL_INFO) 3490 printk("%s(%d):mgsl_open(%s) success\n", 3491 __FILE__,__LINE__, info->device_name); 3492 retval = 0; 3493 3494cleanup: 3495 if (retval) { 3496 if (tty->count == 1) 3497 info->tty = NULL; /* tty layer will release tty struct */ 3498 if(info->count) 3499 info->count--; 3500 } 3501 3502 return retval; 3503 3504} /* end of mgsl_open() */ 3505 3506/* 3507 * /proc fs routines.... 
/*
 * /proc fs routines....
 */

/* line_info()
 *
 *	Format a single device's status into buf: bus resources,
 *	current serial signal states, mode-appropriate statistics
 *	counters, and a dump of the USC hardware registers.
 *
 * Arguments:
 *	buf	destination buffer (caller supplies a /proc page)
 *	info	pointer to device instance data
 *
 * Return Value:	number of characters written to buf
 */
static inline int line_info(char *buf, struct mgsl_struct *info)
{
	char	stat_buf[30];
	int	ret;
	unsigned long flags;

	/* device identification depends on the bus type */
	if (info->bus_type == MGSL_BUS_TYPE_PCI) {
		ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
			info->device_name, info->io_base, info->irq_level,
			info->phys_memory_base, info->phys_lcr_base);
	} else {
		ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d",
			info->device_name, info->io_base,
			info->irq_level, info->dma_level);
	}

	/* output current serial signal states */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* build "|SIG|SIG..." list; stat_buf+1 later skips the
	 * leading '|' (stat_buf[1]=0 keeps it valid when empty) */
	stat_buf[0] = 0;
	stat_buf[1] = 0;
	if (info->serial_signals & SerialSignal_RTS)
		strcat(stat_buf, "|RTS");
	if (info->serial_signals & SerialSignal_CTS)
		strcat(stat_buf, "|CTS");
	if (info->serial_signals & SerialSignal_DTR)
		strcat(stat_buf, "|DTR");
	if (info->serial_signals & SerialSignal_DSR)
		strcat(stat_buf, "|DSR");
	if (info->serial_signals & SerialSignal_DCD)
		strcat(stat_buf, "|CD");
	if (info->serial_signals & SerialSignal_RI)
		strcat(stat_buf, "|RI");

	if (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW ) {
		/* synchronous modes report frame-oriented counters */
		ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d",
			      info->icount.txok, info->icount.rxok);
		if (info->icount.txunder)
			ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
		if (info->icount.txabort)
			ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
		if (info->icount.rxshort)
			ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
		if (info->icount.rxlong)
			ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
		if (info->icount.rxover)
			ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
		if (info->icount.rxcrc)
			ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
	} else {
		/* async mode reports character-oriented counters */
		ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d",
			      info->icount.tx, info->icount.rx);
		if (info->icount.frame)
			ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
		if (info->icount.parity)
			ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
		if (info->icount.brk)
			ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
		if (info->icount.overrun)
			ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
	}

	/* Append serial signal status to end */
	ret += sprintf(buf+ret, " %s\n", stat_buf+1);

	ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
	 info->tx_active,info->bh_requested,info->bh_running,
	 info->pending_bh);

	/* snapshot the USC registers under the IRQ lock so the dump
	 * is internally consistent */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	{
	u16 Tcsr = usc_InReg( info, TCSR );
	u16 Tdmr = usc_InDmaReg( info, TDMR );
	u16 Ticr = usc_InReg( info, TICR );
	u16 Rscr = usc_InReg( info, RCSR );
	u16 Rdmr = usc_InDmaReg( info, RDMR );
	u16 Ricr = usc_InReg( info, RICR );
	u16 Icr = usc_InReg( info, ICR );
	u16 Dccr = usc_InReg( info, DCCR );
	u16 Tmr = usc_InReg( info, TMR );
	u16 Tccr = usc_InReg( info, TCCR );
	u16 Ccar = inw( info->io_base + CCAR );
	ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
                        "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
	 		Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return ret;

}	/* end of line_info() */

/* mgsl_read_proc()
 *
 * Called to print information about devices
 *
 * Arguments:
 *	page	page of memory to hold returned info
 *	start	set to where the requested data begins in page
 *	off	offset into the virtual /proc file already consumed
 *	count	maximum number of bytes to return
 *	eof	set to 1 once all devices have been reported
 *	data	unused
 *
 * Return Value:	number of bytes available at *start
 */
static int mgsl_read_proc(char *page, char **start, off_t off, int count,
		 int *eof, void *data)
{
	int len = 0, l;
	off_t	begin = 0;
	struct mgsl_struct *info;

	len += sprintf(page, "synclink driver:%s\n", driver_version);

	info = mgsl_device_list;
	while( info ) {
		l = line_info(page + len, info);
		len += l;
		/* standard read_proc windowing: stop once the window
		 * [off, off+count) is full; fold fully-consumed output
		 * into 'begin' so the page is reused */
		if (len+begin > off+count)
			goto done;
		if (len+begin < off) {
			begin += len;
			len = 0;
		}
		info = info->next_device;
	}

	*eof = 1;
done:
	if (off >= len+begin)
		return 0;
	*start = page + (off-begin);
	return ((count < begin+len-off) ? count : begin+len-off);

}	/* end of mgsl_read_proc() */

/* mgsl_allocate_dma_buffers()
 *
 *	Allocate and format DMA buffers (ISA adapter)
 *	or format shared memory buffers (PCI adapter).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error
 */
static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
{
	unsigned short BuffersPerFrame;

	info->last_mem_alloc = 0;

	/* Calculate the number of DMA buffers necessary to hold the */
	/* largest allowable frame size. Note: If the max frame size is */
	/* not an even multiple of the DMA buffer size then we need to */
	/* round the buffer count per frame up one. */

	BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
	if ( info->max_frame_size % DMABUFFERSIZE )
		BuffersPerFrame++;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/*
		 * The PCI adapter has 256KBytes of shared memory to use.
		 * This is 64 PAGE_SIZE buffers.
		 *
		 * The first page is used for padding at this time so the
		 * buffer list does not begin at offset 0 of the PCI
		 * adapter's shared memory.
		 *
		 * The 2nd page is used for the buffer list. A 4K buffer
		 * list can hold 128 DMA_BUFFER structures at 32 bytes
		 * each.
		 *
		 * This leaves 62 4K pages.
		 *
		 * The next N pages are used for transmit frame(s). We
		 * reserve enough 4K page blocks to hold the required
		 * number of transmit dma buffers (num_tx_dma_buffers),
		 * each of MaxFrameSize size.
		 *
		 * Of the remaining pages (62-N), determine how many can
		 * be used to receive full MaxFrameSize inbound frames
		 */
		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
		info->rx_buffer_count = 62 - info->tx_buffer_count;
	} else {
		/* Calculate the number of PAGE_SIZE buffers needed for */
		/* receive and transmit DMA buffers. */


		/* Calculate the number of DMA buffers necessary to */
		/* hold 7 max size receive frames and one max size transmit frame. */
		/* The receive buffer count is bumped by one so we avoid an */
		/* End of List condition if all receive buffers are used when */
		/* using linked list DMA buffers. */

		info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
		info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;

		/*
		 * limit total TxBuffers & RxBuffers to 62 4K total
		 * (ala PCI Allocation)
		 */

		if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
			info->rx_buffer_count = 62 - info->tx_buffer_count;

	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
			__FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);

	/* allocate buffer lists, frame buffers, and intermediate
	 * holding buffers; any single failure aborts the whole setup */
	if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
		  mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
		  mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
		  mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
		  mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
		printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
		return -ENOMEM;
	}

	mgsl_reset_rx_dma_buffers( info );
	mgsl_reset_tx_dma_buffers( info );

	return 0;

}	/* end of mgsl_allocate_dma_buffers() */

/*
 * mgsl_alloc_buffer_list_memory()
 *
 * Allocate a common DMA buffer for use as the
 * receive and transmit buffer lists.
 *
 * A buffer list is a set of buffer entries where each entry contains
 * a pointer to an actual buffer and a pointer to the next buffer entry
 * (plus some other info about the buffer).
 *
 * The buffer entries for a list are built to form a circular list so
 * that when the entire list has been traversed you start back at the
 * beginning.
 *
 * This function allocates memory for just the buffer entries.
 * The links (pointer to next entry) are filled in with the physical
 * address of the next entry so the adapter can navigate the list
 * using bus master DMA. The pointers to the actual buffers are filled
 * out later when the actual buffers are allocated.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise error
 */
static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
{
	unsigned int i;

	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		/* PCI adapter uses shared memory. */
		info->buffer_list = info->memory_base + info->last_mem_alloc;
		info->buffer_list_phys = info->last_mem_alloc;
		info->last_mem_alloc += BUFFERLISTSIZE;
	} else {
		/* ISA adapter uses system memory. */
		/* The buffer lists are allocated as a common buffer that both */
		/* the processor and adapter can access. This allows the driver to */
		/* inspect portions of the buffer while other portions are being */
		/* updated by the adapter using Bus Master DMA. */

		info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
		if (info->buffer_list == NULL)
			return -ENOMEM;
		info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
	}

	/* We got the memory for the buffer entry lists. */
	/* Initialize the memory block to all zeros. */
	memset( info->buffer_list, 0, BUFFERLISTSIZE );

	/* Save virtual address pointers to the receive and */
	/* transmit buffer lists. (Receive 1st). These pointers will */
	/* be used by the processor to access the lists. */
	info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
	info->tx_buffer_list += info->rx_buffer_count;

	/*
	 * Build the links for the buffer entry lists such that
	 * two circular lists are built. (Transmit and Receive).
	 *
	 * Note: the links are physical addresses
	 * which are read by the adapter to determine the next
	 * buffer entry to use.
	 */

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->rx_buffer_list[i].phys_entry =
			info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in cirular list of entries */

		/* default to wrapping back to the first entry; overridden
		 * below for all but the last entry */
		info->rx_buffer_list[i].link = info->buffer_list_phys;

		if ( i < info->rx_buffer_count - 1 )
			info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* calculate and store physical address of this buffer entry */
		info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
			((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));

		/* calculate and store physical address of */
		/* next entry in cirular list of entries */

		/* tx list starts after the rx entries; same wrap logic */
		info->tx_buffer_list[i].link = info->buffer_list_phys +
			info->rx_buffer_count * sizeof(DMABUFFERENTRY);

		if ( i < info->tx_buffer_count - 1 )
			info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
	}

	return 0;

}	/* end of mgsl_alloc_buffer_list_memory() */
/* Free DMA buffers allocated for use as the
 * receive and transmit buffer lists.
 * Warning:
 *
 * 	The data transfer buffers associated with the buffer list
 * 	MUST be freed before freeing the buffer list itself because
 * 	the buffer list contains the information necessary to free
 * 	the individual buffers!
 */
static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
{
	/* PCI buffer lists live in adapter shared memory and are not
	 * freed here; only ISA system-memory allocations are released */
	if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
		dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);

	info->buffer_list = NULL;
	info->rx_buffer_list = NULL;
	info->tx_buffer_list = NULL;

}	/* end of mgsl_free_buffer_list_memory() */

/*
 * mgsl_alloc_frame_memory()
 *
 * 	Allocate the frame DMA buffers used by the specified buffer list.
 * 	Each DMA buffer will be one memory page in size. This is necessary
 * 	because memory can fragment enough that it may be impossible
 * 	contiguous pages.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 * 	BufferList	pointer to list of buffer entries
 * 	Buffercount	count of buffer entries in buffer list
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
{
	int i;
	u32 phys_addr;

	/* Allocate page sized buffers for the receive buffer list */

	for ( i = 0; i < Buffercount; i++ ) {
		if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
			/* PCI adapter uses shared memory buffers. */
			BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
			phys_addr = info->last_mem_alloc;
			info->last_mem_alloc += DMABUFFERSIZE;
		} else {
			/* ISA adapter uses system memory. */
			BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
			if (BufferList[i].virt_addr == NULL)
				return -ENOMEM;
			phys_addr = (u32)(BufferList[i].dma_addr);
		}
		BufferList[i].phys_addr = phys_addr;
	}

	return 0;

}	/* end of mgsl_alloc_frame_memory() */

/*
 * mgsl_free_frame_memory()
 *
 * 	Free the buffers associated with
 * 	each buffer entry of a buffer list.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 * 	BufferList	pointer to list of buffer entries
 * 	Buffercount	count of buffer entries in buffer list
 *
 * Return Value:	None
 */
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
{
	int i;

	if ( BufferList ) {
		for ( i = 0 ; i < Buffercount ; i++ ) {
			if ( BufferList[i].virt_addr ) {
				/* PCI buffers point into shared memory and
				 * are not individually freed */
				if ( info->bus_type != MGSL_BUS_TYPE_PCI )
					dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
				BufferList[i].virt_addr = NULL;
			}
		}
	}

}	/* end of mgsl_free_frame_memory() */

/* mgsl_free_dma_buffers()
 *
 *	Free DMA buffers
 *
 *	Note: frame buffers are freed BEFORE the buffer list itself,
 *	as required (the list holds the info needed to free them).
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_free_dma_buffers( struct mgsl_struct *info )
{
	mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
	mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
	mgsl_free_buffer_list_memory( info );

}	/* end of mgsl_free_dma_buffers() */


/*
 * mgsl_alloc_intermediate_rxbuffer_memory()
 *
 * 	Allocate a buffer large enough to hold max_frame_size. This buffer
 *	is used to pass an assembled frame to the line discipline.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
	info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
	if ( info->intermediate_rxbuffer == NULL )
		return -ENOMEM;

	return 0;

}	/* end of mgsl_alloc_intermediate_rxbuffer_memory() */

/*
 * mgsl_free_intermediate_rxbuffer_memory()
 *
 *	Release the intermediate receive buffer (kfree(NULL) is a no-op,
 *	so this is safe to call even if allocation never happened).
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	None
 */
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
	kfree(info->intermediate_rxbuffer);
	info->intermediate_rxbuffer = NULL;

}	/* end of mgsl_free_intermediate_rxbuffer_memory() */

/*
 * mgsl_alloc_intermediate_txbuffer_memory()
 *
 * 	Allocate intermdiate transmit buffer(s) large enough to hold max_frame_size.
 * 	This buffer is used to load transmit frames into the adapter's dma transfer
 * 	buffers when there is sufficient space.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	0 if success, otherwise -ENOMEM
 */
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
	int i;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk("%s %s(%d)  allocating %d tx holding buffers\n",
				info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);

	memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));

	for ( i=0; i<info->num_tx_holding_buffers; ++i) {
		info->tx_holding_buffers[i].buffer =
			kmalloc(info->max_frame_size, GFP_KERNEL);
		if (info->tx_holding_buffers[i].buffer == NULL) {
			/* unwind: free the buffers already allocated */
			for (--i; i >= 0; i--) {
				kfree(info->tx_holding_buffers[i].buffer);
				info->tx_holding_buffers[i].buffer = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;

}	/* end of mgsl_alloc_intermediate_txbuffer_memory() */

/*
 * mgsl_free_intermediate_txbuffer_memory()
 *
 *	Release all tx holding buffers and reset the holding-buffer
 *	ring indices and count.
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	None
 */
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
	int i;

	for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
		kfree(info->tx_holding_buffers[i].buffer);
		info->tx_holding_buffers[i].buffer = NULL;
	}

	info->get_tx_holding_index = 0;
	info->put_tx_holding_index = 0;
	info->tx_holding_count = 0;

}	/* end of mgsl_free_intermediate_txbuffer_memory() */


/*
 * load_next_tx_holding_buffer()
 *
 * attempts to load the next buffered tx request into the
 * tx dma buffers
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *
 * Return Value:	1 if next buffered tx request loaded
 * 			into adapter's tx dma buffer,
 * 			0 otherwise
 */
static int load_next_tx_holding_buffer(struct mgsl_struct *info)
{
	int ret = 0;

	if ( info->tx_holding_count ) {
		/* determine if we have enough tx dma buffers
		 * to accommodate the next tx frame
		 */
		struct tx_holding_buffer *ptx =
			&info->tx_holding_buffers[info->get_tx_holding_index];
		int num_free = num_free_tx_dma_buffers(info);
		int num_needed = ptx->buffer_size / DMABUFFERSIZE;
		if ( ptx->buffer_size % DMABUFFERSIZE )
			++num_needed;

		if (num_needed <= num_free) {
			info->xmit_cnt = ptx->buffer_size;
			mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);

			/* consume this holding buffer; ring index wraps */
			--info->tx_holding_count;
			if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
				info->get_tx_holding_index=0;

			/* restart transmit timer */
			mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));

			ret = 1;
		}
	}

	return ret;
}

/*
 * save_tx_buffer_request()
 *
 * attempt to store transmit frame request for later transmission
 *
 * Arguments:
 *
 *	info		pointer to device instance data
 *	Buffer		pointer to buffer containing frame to load
 *	BufferSize	size in bytes of frame in Buffer
 *
 * Return Value: 	1 if able to store, 0 otherwise
 */
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
{
	struct tx_holding_buffer *ptx;

	if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
		return 0;	        /* all buffers in use */
	}

	/* copy the frame into the next free holding slot; ring wraps */
	ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
	ptx->buffer_size = BufferSize;
	memcpy( ptx->buffer, Buffer, BufferSize);

	++info->tx_holding_count;
	if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
		info->put_tx_holding_index=0;

	return 1;
}

static int mgsl_claim_resources(struct mgsl_struct *info)
{
	if
(request_region(info->io_base,info->io_addr_size,"synclink") == NULL) { 4135 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n", 4136 __FILE__,__LINE__,info->device_name, info->io_base); 4137 return -ENODEV; 4138 } 4139 info->io_addr_requested = 1; 4140 4141 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags, 4142 info->device_name, info ) < 0 ) { 4143 printk( "%s(%d):Cant request interrupt on device %s IRQ=%d\n", 4144 __FILE__,__LINE__,info->device_name, info->irq_level ); 4145 goto errout; 4146 } 4147 info->irq_requested = 1; 4148 4149 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 4150 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) { 4151 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n", 4152 __FILE__,__LINE__,info->device_name, info->phys_memory_base); 4153 goto errout; 4154 } 4155 info->shared_mem_requested = 1; 4156 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) { 4157 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n", 4158 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset); 4159 goto errout; 4160 } 4161 info->lcr_mem_requested = 1; 4162 4163 info->memory_base = ioremap(info->phys_memory_base,0x40000); 4164 if (!info->memory_base) { 4165 printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n", 4166 __FILE__,__LINE__,info->device_name, info->phys_memory_base ); 4167 goto errout; 4168 } 4169 4170 if ( !mgsl_memory_test(info) ) { 4171 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n", 4172 __FILE__,__LINE__,info->device_name, info->phys_memory_base ); 4173 goto errout; 4174 } 4175 4176 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset; 4177 if (!info->lcr_base) { 4178 printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n", 4179 __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); 4180 goto errout; 4181 } 4182 4183 } else { 4184 /* claim DMA channel */ 4185 4186 if 
(request_dma(info->dma_level,info->device_name) < 0){ 4187 printk( "%s(%d):Cant request DMA channel on device %s DMA=%d\n", 4188 __FILE__,__LINE__,info->device_name, info->dma_level ); 4189 mgsl_release_resources( info ); 4190 return -ENODEV; 4191 } 4192 info->dma_requested = 1; 4193 4194 /* ISA adapter uses bus master DMA */ 4195 set_dma_mode(info->dma_level,DMA_MODE_CASCADE); 4196 enable_dma(info->dma_level); 4197 } 4198 4199 if ( mgsl_allocate_dma_buffers(info) < 0 ) { 4200 printk( "%s(%d):Cant allocate DMA buffers on device %s DMA=%d\n", 4201 __FILE__,__LINE__,info->device_name, info->dma_level ); 4202 goto errout; 4203 } 4204 4205 return 0; 4206errout: 4207 mgsl_release_resources(info); 4208 return -ENODEV; 4209 4210} /* end of mgsl_claim_resources() */ 4211 4212static void mgsl_release_resources(struct mgsl_struct *info) 4213{ 4214 if ( debug_level >= DEBUG_LEVEL_INFO ) 4215 printk( "%s(%d):mgsl_release_resources(%s) entry\n", 4216 __FILE__,__LINE__,info->device_name ); 4217 4218 if ( info->irq_requested ) { 4219 free_irq(info->irq_level, info); 4220 info->irq_requested = 0; 4221 } 4222 if ( info->dma_requested ) { 4223 disable_dma(info->dma_level); 4224 free_dma(info->dma_level); 4225 info->dma_requested = 0; 4226 } 4227 mgsl_free_dma_buffers(info); 4228 mgsl_free_intermediate_rxbuffer_memory(info); 4229 mgsl_free_intermediate_txbuffer_memory(info); 4230 4231 if ( info->io_addr_requested ) { 4232 release_region(info->io_base,info->io_addr_size); 4233 info->io_addr_requested = 0; 4234 } 4235 if ( info->shared_mem_requested ) { 4236 release_mem_region(info->phys_memory_base,0x40000); 4237 info->shared_mem_requested = 0; 4238 } 4239 if ( info->lcr_mem_requested ) { 4240 release_mem_region(info->phys_lcr_base + info->lcr_offset,128); 4241 info->lcr_mem_requested = 0; 4242 } 4243 if (info->memory_base){ 4244 iounmap(info->memory_base); 4245 info->memory_base = NULL; 4246 } 4247 if (info->lcr_base){ 4248 iounmap(info->lcr_base - info->lcr_offset); 4249 
info->lcr_base = NULL; 4250 } 4251 4252 if ( debug_level >= DEBUG_LEVEL_INFO ) 4253 printk( "%s(%d):mgsl_release_resources(%s) exit\n", 4254 __FILE__,__LINE__,info->device_name ); 4255 4256} /* end of mgsl_release_resources() */ 4257 4258/* mgsl_add_device() 4259 * 4260 * Add the specified device instance data structure to the 4261 * global linked list of devices and increment the device count. 4262 * 4263 * Arguments: info pointer to device instance data 4264 * Return Value: None 4265 */ 4266static void mgsl_add_device( struct mgsl_struct *info ) 4267{ 4268 info->next_device = NULL; 4269 info->line = mgsl_device_count; 4270 sprintf(info->device_name,"ttySL%d",info->line); 4271 4272 if (info->line < MAX_TOTAL_DEVICES) { 4273 if (maxframe[info->line]) 4274 info->max_frame_size = maxframe[info->line]; 4275 info->dosyncppp = dosyncppp[info->line]; 4276 4277 if (txdmabufs[info->line]) { 4278 info->num_tx_dma_buffers = txdmabufs[info->line]; 4279 if (info->num_tx_dma_buffers < 1) 4280 info->num_tx_dma_buffers = 1; 4281 } 4282 4283 if (txholdbufs[info->line]) { 4284 info->num_tx_holding_buffers = txholdbufs[info->line]; 4285 if (info->num_tx_holding_buffers < 1) 4286 info->num_tx_holding_buffers = 1; 4287 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS) 4288 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS; 4289 } 4290 } 4291 4292 mgsl_device_count++; 4293 4294 if ( !mgsl_device_list ) 4295 mgsl_device_list = info; 4296 else { 4297 struct mgsl_struct *current_dev = mgsl_device_list; 4298 while( current_dev->next_device ) 4299 current_dev = current_dev->next_device; 4300 current_dev->next_device = info; 4301 } 4302 4303 if ( info->max_frame_size < 4096 ) 4304 info->max_frame_size = 4096; 4305 else if ( info->max_frame_size > 65535 ) 4306 info->max_frame_size = 65535; 4307 4308 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 4309 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n", 4310 info->hw_version + 1, info->device_name, 
info->io_base, info->irq_level, 4311 info->phys_memory_base, info->phys_lcr_base, 4312 info->max_frame_size ); 4313 } else { 4314 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n", 4315 info->device_name, info->io_base, info->irq_level, info->dma_level, 4316 info->max_frame_size ); 4317 } 4318 4319#if SYNCLINK_GENERIC_HDLC 4320 hdlcdev_init(info); 4321#endif 4322 4323} /* end of mgsl_add_device() */ 4324 4325/* mgsl_allocate_device() 4326 * 4327 * Allocate and initialize a device instance structure 4328 * 4329 * Arguments: none 4330 * Return Value: pointer to mgsl_struct if success, otherwise NULL 4331 */ 4332static struct mgsl_struct* mgsl_allocate_device(void) 4333{ 4334 struct mgsl_struct *info; 4335 4336 info = kmalloc(sizeof(struct mgsl_struct), 4337 GFP_KERNEL); 4338 4339 if (!info) { 4340 printk("Error can't allocate device instance data\n"); 4341 } else { 4342 memset(info, 0, sizeof(struct mgsl_struct)); 4343 info->magic = MGSL_MAGIC; 4344 INIT_WORK(&info->task, mgsl_bh_handler); 4345 info->max_frame_size = 4096; 4346 info->close_delay = 5*HZ/10; 4347 info->closing_wait = 30*HZ; 4348 init_waitqueue_head(&info->open_wait); 4349 init_waitqueue_head(&info->close_wait); 4350 init_waitqueue_head(&info->status_event_wait_q); 4351 init_waitqueue_head(&info->event_wait_q); 4352 spin_lock_init(&info->irq_spinlock); 4353 spin_lock_init(&info->netlock); 4354 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); 4355 info->idle_mode = HDLC_TXIDLE_FLAGS; 4356 info->num_tx_dma_buffers = 1; 4357 info->num_tx_holding_buffers = 0; 4358 } 4359 4360 return info; 4361 4362} /* end of mgsl_allocate_device()*/ 4363 4364static const struct tty_operations mgsl_ops = { 4365 .open = mgsl_open, 4366 .close = mgsl_close, 4367 .write = mgsl_write, 4368 .put_char = mgsl_put_char, 4369 .flush_chars = mgsl_flush_chars, 4370 .write_room = mgsl_write_room, 4371 .chars_in_buffer = mgsl_chars_in_buffer, 4372 .flush_buffer = mgsl_flush_buffer, 4373 .ioctl = mgsl_ioctl, 
4374 .throttle = mgsl_throttle, 4375 .unthrottle = mgsl_unthrottle, 4376 .send_xchar = mgsl_send_xchar, 4377 .break_ctl = mgsl_break, 4378 .wait_until_sent = mgsl_wait_until_sent, 4379 .read_proc = mgsl_read_proc, 4380 .set_termios = mgsl_set_termios, 4381 .stop = mgsl_stop, 4382 .start = mgsl_start, 4383 .hangup = mgsl_hangup, 4384 .tiocmget = tiocmget, 4385 .tiocmset = tiocmset, 4386}; 4387 4388/* 4389 * perform tty device initialization 4390 */ 4391static int mgsl_init_tty(void) 4392{ 4393 int rc; 4394 4395 serial_driver = alloc_tty_driver(128); 4396 if (!serial_driver) 4397 return -ENOMEM; 4398 4399 serial_driver->owner = THIS_MODULE; 4400 serial_driver->driver_name = "synclink"; 4401 serial_driver->name = "ttySL"; 4402 serial_driver->major = ttymajor; 4403 serial_driver->minor_start = 64; 4404 serial_driver->type = TTY_DRIVER_TYPE_SERIAL; 4405 serial_driver->subtype = SERIAL_TYPE_NORMAL; 4406 serial_driver->init_termios = tty_std_termios; 4407 serial_driver->init_termios.c_cflag = 4408 B9600 | CS8 | CREAD | HUPCL | CLOCAL; 4409 serial_driver->init_termios.c_ispeed = 9600; 4410 serial_driver->init_termios.c_ospeed = 9600; 4411 serial_driver->flags = TTY_DRIVER_REAL_RAW; 4412 tty_set_operations(serial_driver, &mgsl_ops); 4413 if ((rc = tty_register_driver(serial_driver)) < 0) { 4414 printk("%s(%d):Couldn't register serial driver\n", 4415 __FILE__,__LINE__); 4416 put_tty_driver(serial_driver); 4417 serial_driver = NULL; 4418 return rc; 4419 } 4420 4421 printk("%s %s, tty major#%d\n", 4422 driver_name, driver_version, 4423 serial_driver->major); 4424 return 0; 4425} 4426 4427/* enumerate user specified ISA adapters 4428 */ 4429static void mgsl_enum_isa_devices(void) 4430{ 4431 struct mgsl_struct *info; 4432 int i; 4433 4434 /* Check for user specified ISA devices */ 4435 4436 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){ 4437 if ( debug_level >= DEBUG_LEVEL_INFO ) 4438 printk("ISA device specified io=%04X,irq=%d,dma=%d\n", 4439 io[i], irq[i], dma[i] ); 
4440 4441 info = mgsl_allocate_device(); 4442 if ( !info ) { 4443 /* error allocating device instance data */ 4444 if ( debug_level >= DEBUG_LEVEL_ERROR ) 4445 printk( "can't allocate device instance data.\n"); 4446 continue; 4447 } 4448 4449 /* Copy user configuration info to device instance data */ 4450 info->io_base = (unsigned int)io[i]; 4451 info->irq_level = (unsigned int)irq[i]; 4452 info->irq_level = irq_canonicalize(info->irq_level); 4453 info->dma_level = (unsigned int)dma[i]; 4454 info->bus_type = MGSL_BUS_TYPE_ISA; 4455 info->io_addr_size = 16; 4456 info->irq_flags = 0; 4457 4458 mgsl_add_device( info ); 4459 } 4460} 4461 4462static void synclink_cleanup(void) 4463{ 4464 int rc; 4465 struct mgsl_struct *info; 4466 struct mgsl_struct *tmp; 4467 4468 printk("Unloading %s: %s\n", driver_name, driver_version); 4469 4470 if (serial_driver) { 4471 if ((rc = tty_unregister_driver(serial_driver))) 4472 printk("%s(%d) failed to unregister tty driver err=%d\n", 4473 __FILE__,__LINE__,rc); 4474 put_tty_driver(serial_driver); 4475 } 4476 4477 info = mgsl_device_list; 4478 while(info) { 4479#if SYNCLINK_GENERIC_HDLC 4480 hdlcdev_exit(info); 4481#endif 4482 mgsl_release_resources(info); 4483 tmp = info; 4484 info = info->next_device; 4485 kfree(tmp); 4486 } 4487 4488 if (pci_registered) 4489 pci_unregister_driver(&synclink_pci_driver); 4490} 4491 4492static int __init synclink_init(void) 4493{ 4494 int rc; 4495 4496 if (break_on_load) { 4497 mgsl_get_text_ptr(); 4498 BREAKPOINT(); 4499 } 4500 4501 printk("%s %s\n", driver_name, driver_version); 4502 4503 mgsl_enum_isa_devices(); 4504 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0) 4505 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc); 4506 else 4507 pci_registered = 1; 4508 4509 if ((rc = mgsl_init_tty()) < 0) 4510 goto error; 4511 4512 return 0; 4513 4514error: 4515 synclink_cleanup(); 4516 return rc; 4517} 4518 4519static void __exit synclink_exit(void) 4520{ 4521 synclink_cleanup(); 
4522} 4523 4524module_init(synclink_init); 4525module_exit(synclink_exit); 4526 4527/* 4528 * usc_RTCmd() 4529 * 4530 * Issue a USC Receive/Transmit command to the 4531 * Channel Command/Address Register (CCAR). 4532 * 4533 * Notes: 4534 * 4535 * The command is encoded in the most significant 5 bits <15..11> 4536 * of the CCAR value. Bits <10..7> of the CCAR must be preserved 4537 * and Bits <6..0> must be written as zeros. 4538 * 4539 * Arguments: 4540 * 4541 * info pointer to device information structure 4542 * Cmd command mask (use symbolic macros) 4543 * 4544 * Return Value: 4545 * 4546 * None 4547 */ 4548static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd ) 4549{ 4550 /* output command to CCAR in bits <15..11> */ 4551 /* preserve bits <10..7>, bits <6..0> must be zero */ 4552 4553 outw( Cmd + info->loopback_bits, info->io_base + CCAR ); 4554 4555 /* Read to flush write to CCAR */ 4556 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4557 inw( info->io_base + CCAR ); 4558 4559} /* end of usc_RTCmd() */ 4560 4561/* 4562 * usc_DmaCmd() 4563 * 4564 * Issue a DMA command to the DMA Command/Address Register (DCAR). 
4565 * 4566 * Arguments: 4567 * 4568 * info pointer to device information structure 4569 * Cmd DMA command mask (usc_DmaCmd_XX Macros) 4570 * 4571 * Return Value: 4572 * 4573 * None 4574 */ 4575static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd ) 4576{ 4577 /* write command mask to DCAR */ 4578 outw( Cmd + info->mbre_bit, info->io_base ); 4579 4580 /* Read to flush write to DCAR */ 4581 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4582 inw( info->io_base ); 4583 4584} /* end of usc_DmaCmd() */ 4585 4586/* 4587 * usc_OutDmaReg() 4588 * 4589 * Write a 16-bit value to a USC DMA register 4590 * 4591 * Arguments: 4592 * 4593 * info pointer to device info structure 4594 * RegAddr register address (number) for write 4595 * RegValue 16-bit value to write to register 4596 * 4597 * Return Value: 4598 * 4599 * None 4600 * 4601 */ 4602static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) 4603{ 4604 /* Note: The DCAR is located at the adapter base address */ 4605 /* Note: must preserve state of BIT8 in DCAR */ 4606 4607 outw( RegAddr + info->mbre_bit, info->io_base ); 4608 outw( RegValue, info->io_base ); 4609 4610 /* Read to flush write to DCAR */ 4611 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4612 inw( info->io_base ); 4613 4614} /* end of usc_OutDmaReg() */ 4615 4616/* 4617 * usc_InDmaReg() 4618 * 4619 * Read a 16-bit value from a DMA register 4620 * 4621 * Arguments: 4622 * 4623 * info pointer to device info structure 4624 * RegAddr register address (number) to read from 4625 * 4626 * Return Value: 4627 * 4628 * The 16-bit value read from register 4629 * 4630 */ 4631static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr ) 4632{ 4633 /* Note: The DCAR is located at the adapter base address */ 4634 /* Note: must preserve state of BIT8 in DCAR */ 4635 4636 outw( RegAddr + info->mbre_bit, info->io_base ); 4637 return inw( info->io_base ); 4638 4639} /* end of usc_InDmaReg() */ 4640 4641/* 4642 * 4643 * usc_OutReg() 4644 * 4645 * Write a 
16-bit value to a USC serial channel register 4646 * 4647 * Arguments: 4648 * 4649 * info pointer to device info structure 4650 * RegAddr register address (number) to write to 4651 * RegValue 16-bit value to write to register 4652 * 4653 * Return Value: 4654 * 4655 * None 4656 * 4657 */ 4658static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue ) 4659{ 4660 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4661 outw( RegValue, info->io_base + CCAR ); 4662 4663 /* Read to flush write to CCAR */ 4664 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4665 inw( info->io_base + CCAR ); 4666 4667} /* end of usc_OutReg() */ 4668 4669/* 4670 * usc_InReg() 4671 * 4672 * Reads a 16-bit value from a USC serial channel register 4673 * 4674 * Arguments: 4675 * 4676 * info pointer to device extension 4677 * RegAddr register address (number) to read from 4678 * 4679 * Return Value: 4680 * 4681 * 16-bit value read from register 4682 */ 4683static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr ) 4684{ 4685 outw( RegAddr + info->loopback_bits, info->io_base + CCAR ); 4686 return inw( info->io_base + CCAR ); 4687 4688} /* end of usc_InReg() */ 4689 4690/* usc_set_sdlc_mode() 4691 * 4692 * Set up the adapter for SDLC DMA communications. 4693 * 4694 * Arguments: info pointer to device instance data 4695 * Return Value: NONE 4696 */ 4697static void usc_set_sdlc_mode( struct mgsl_struct *info ) 4698{ 4699 u16 RegValue; 4700 int PreSL1660; 4701 4702 /* 4703 * determine if the IUSC on the adapter is pre-SL1660. If 4704 * not, take advantage of the UnderWait feature of more 4705 * modern chips. If an underrun occurs and this bit is set, 4706 * the transmitter will idle the programmed idle pattern 4707 * until the driver has time to service the underrun. Otherwise, 4708 * the dma controller may get the cycles previously requested 4709 * and begin transmitting queued tx data. 
4710 */ 4711 usc_OutReg(info,TMCR,0x1f); 4712 RegValue=usc_InReg(info,TMDR); 4713 if ( RegValue == IUSC_PRE_SL1660 ) 4714 PreSL1660 = 1; 4715 else 4716 PreSL1660 = 0; 4717 4718 4719 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE ) 4720 { 4721 /* 4722 ** Channel Mode Register (CMR) 4723 ** 4724 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun 4725 ** <13> 0 0 = Transmit Disabled (initially) 4726 ** <12> 0 1 = Consecutive Idles share common 0 4727 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop 4728 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling 4729 ** <3..0> 0110 Receiver Mode = HDLC/SDLC 4730 ** 4731 ** 1000 1110 0000 0110 = 0x8e06 4732 */ 4733 RegValue = 0x8e06; 4734 4735 /*-------------------------------------------------- 4736 * ignore user options for UnderRun Actions and 4737 * preambles 4738 *--------------------------------------------------*/ 4739 } 4740 else 4741 { 4742 /* Channel mode Register (CMR) 4743 * 4744 * <15..14> 00 Tx Sub modes, Underrun Action 4745 * <13> 0 1 = Send Preamble before opening flag 4746 * <12> 0 1 = Consecutive Idles share common 0 4747 * <11..8> 0110 Transmitter mode = HDLC/SDLC 4748 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling 4749 * <3..0> 0110 Receiver mode = HDLC/SDLC 4750 * 4751 * 0000 0110 0000 0110 = 0x0606 4752 */ 4753 if (info->params.mode == MGSL_MODE_RAW) { 4754 RegValue = 0x0001; /* Set Receive mode = external sync */ 4755 4756 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */ 4757 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12)); 4758 4759 RegValue |= 0x0400; 4760 } 4761 else { 4762 4763 RegValue = 0x0606; 4764 4765 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 ) 4766 RegValue |= BIT14; 4767 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG ) 4768 RegValue |= BIT15; 4769 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC ) 4770 RegValue |= BIT15 + BIT14; 4771 } 4772 4773 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE ) 4774 RegValue |= 
BIT13; 4775 } 4776 4777 if ( info->params.mode == MGSL_MODE_HDLC && 4778 (info->params.flags & HDLC_FLAG_SHARE_ZERO) ) 4779 RegValue |= BIT12; 4780 4781 if ( info->params.addr_filter != 0xff ) 4782 { 4783 /* set up receive address filtering */ 4784 usc_OutReg( info, RSR, info->params.addr_filter ); 4785 RegValue |= BIT4; 4786 } 4787 4788 usc_OutReg( info, CMR, RegValue ); 4789 info->cmr_value = RegValue; 4790 4791 /* Receiver mode Register (RMR) 4792 * 4793 * <15..13> 000 encoding 4794 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4795 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC) 4796 * <9> 0 1 = Include Receive chars in CRC 4797 * <8> 1 1 = Use Abort/PE bit as abort indicator 4798 * <7..6> 00 Even parity 4799 * <5> 0 parity disabled 4800 * <4..2> 000 Receive Char Length = 8 bits 4801 * <1..0> 00 Disable Receiver 4802 * 4803 * 0000 0101 0000 0000 = 0x0500 4804 */ 4805 4806 RegValue = 0x0500; 4807 4808 switch ( info->params.encoding ) { 4809 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4810 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4811 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; 4812 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break; 4813 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; 4814 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; 4815 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; 4816 } 4817 4818 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4819 RegValue |= BIT9; 4820 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4821 RegValue |= ( BIT12 | BIT10 | BIT9 ); 4822 4823 usc_OutReg( info, RMR, RegValue ); 4824 4825 /* Set the Receive count Limit Register (RCLR) to 0xffff. */ 4826 /* When an opening flag of an SDLC frame is recognized the */ 4827 /* Receive Character count (RCC) is loaded with the value in */ 4828 /* RCLR. The RCC is decremented for each received byte. 
The */ 4829 /* value of RCC is stored after the closing flag of the frame */ 4830 /* allowing the frame size to be computed. */ 4831 4832 usc_OutReg( info, RCLR, RCLRVALUE ); 4833 4834 usc_RCmd( info, RCmd_SelectRicrdma_level ); 4835 4836 /* Receive Interrupt Control Register (RICR) 4837 * 4838 * <15..8> ? RxFIFO DMA Request Level 4839 * <7> 0 Exited Hunt IA (Interrupt Arm) 4840 * <6> 0 Idle Received IA 4841 * <5> 0 Break/Abort IA 4842 * <4> 0 Rx Bound IA 4843 * <3> 1 Queued status reflects oldest 2 bytes in FIFO 4844 * <2> 0 Abort/PE IA 4845 * <1> 1 Rx Overrun IA 4846 * <0> 0 Select TC0 value for readback 4847 * 4848 * 0000 0000 0000 1000 = 0x000a 4849 */ 4850 4851 /* Carry over the Exit Hunt and Idle Received bits */ 4852 /* in case they have been armed by usc_ArmEvents. */ 4853 4854 RegValue = usc_InReg( info, RICR ) & 0xc0; 4855 4856 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4857 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) ); 4858 else 4859 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) ); 4860 4861 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */ 4862 4863 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 4864 usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); 4865 4866 /* Transmit mode Register (TMR) 4867 * 4868 * <15..13> 000 encoding 4869 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1) 4870 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC) 4871 * <9> 0 1 = Tx CRC Enabled 4872 * <8> 0 1 = Append CRC to end of transmit frame 4873 * <7..6> 00 Transmit parity Even 4874 * <5> 0 Transmit parity Disabled 4875 * <4..2> 000 Tx Char Length = 8 bits 4876 * <1..0> 00 Disable Transmitter 4877 * 4878 * 0000 0100 0000 0000 = 0x0400 4879 */ 4880 4881 RegValue = 0x0400; 4882 4883 switch ( info->params.encoding ) { 4884 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break; 4885 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break; 4886 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break; 4887 case HDLC_ENCODING_BIPHASE_MARK: RegValue 
|= BIT15; break; 4888 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break; 4889 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break; 4890 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break; 4891 } 4892 4893 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT ) 4894 RegValue |= BIT9 + BIT8; 4895 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT ) 4896 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8); 4897 4898 usc_OutReg( info, TMR, RegValue ); 4899 4900 usc_set_txidle( info ); 4901 4902 4903 usc_TCmd( info, TCmd_SelectTicrdma_level ); 4904 4905 /* Transmit Interrupt Control Register (TICR) 4906 * 4907 * <15..8> ? Transmit FIFO DMA Level 4908 * <7> 0 Present IA (Interrupt Arm) 4909 * <6> 0 Idle Sent IA 4910 * <5> 1 Abort Sent IA 4911 * <4> 1 EOF/EOM Sent IA 4912 * <3> 0 CRC Sent IA 4913 * <2> 1 1 = Wait for SW Trigger to Start Frame 4914 * <1> 1 Tx Underrun IA 4915 * <0> 0 TC0 constant on read back 4916 * 4917 * 0000 0000 0011 0110 = 0x0036 4918 */ 4919 4920 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 4921 usc_OutReg( info, TICR, 0x0736 ); 4922 else 4923 usc_OutReg( info, TICR, 0x1436 ); 4924 4925 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL ); 4926 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS ); 4927 4928 /* 4929 ** Transmit Command/Status Register (TCSR) 4930 ** 4931 ** <15..12> 0000 TCmd 4932 ** <11> 0/1 UnderWait 4933 ** <10..08> 000 TxIdle 4934 ** <7> x PreSent 4935 ** <6> x IdleSent 4936 ** <5> x AbortSent 4937 ** <4> x EOF/EOM Sent 4938 ** <3> x CRC Sent 4939 ** <2> x All Sent 4940 ** <1> x TxUnder 4941 ** <0> x TxEmpty 4942 ** 4943 ** 0000 0000 0000 0000 = 0x0000 4944 */ 4945 info->tcsr_value = 0; 4946 4947 if ( !PreSL1660 ) 4948 info->tcsr_value |= TCSR_UNDERWAIT; 4949 4950 usc_OutReg( info, TCSR, info->tcsr_value ); 4951 4952 4953 RegValue = 0x0f40; 4954 4955 if ( info->params.flags & HDLC_FLAG_RXC_DPLL ) 4956 RegValue |= 0x0003; /* RxCLK from DPLL */ 4957 else if ( 
info->params.flags & HDLC_FLAG_RXC_BRG ) 4958 RegValue |= 0x0004; /* RxCLK from BRG0 */ 4959 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN) 4960 RegValue |= 0x0006; /* RxCLK from TXC Input */ 4961 else 4962 RegValue |= 0x0007; /* RxCLK from Port1 */ 4963 4964 if ( info->params.flags & HDLC_FLAG_TXC_DPLL ) 4965 RegValue |= 0x0018; /* TxCLK from DPLL */ 4966 else if ( info->params.flags & HDLC_FLAG_TXC_BRG ) 4967 RegValue |= 0x0020; /* TxCLK from BRG0 */ 4968 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN) 4969 RegValue |= 0x0038; /* RxCLK from TXC Input */ 4970 else 4971 RegValue |= 0x0030; /* TxCLK from Port0 */ 4972 4973 usc_OutReg( info, CMCR, RegValue ); 4974 4975 4976 /* Hardware Configuration Register (HCR) 4977 * 4978 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4 4979 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div 4980 * <12> 0 CVOK:0=report code violation in biphase 4981 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4 4982 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level 4983 * <7..6> 00 reserved 4984 * <5> 0 BRG1 mode:0=continuous,1=single cycle 4985 * <4> X BRG1 Enable 4986 * <3..2> 00 reserved 4987 * <1> 0 BRG0 mode:0=continuous,1=single cycle 4988 * <0> 0 BRG0 Enable 4989 */ 4990 4991 RegValue = 0x0000; 4992 4993 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) { 4994 u32 XtalSpeed; 4995 u32 DpllDivisor; 4996 u16 Tc; 4997 4998 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */ 4999 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. 
*/ 5000 5001 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 5002 XtalSpeed = 11059200; 5003 else 5004 XtalSpeed = 14745600; 5005 5006 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) { 5007 DpllDivisor = 16; 5008 RegValue |= BIT10; 5009 } 5010 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) { 5011 DpllDivisor = 8; 5012 RegValue |= BIT11; 5013 } 5014 else 5015 DpllDivisor = 32; 5016 5017 /* Tc = (Xtal/Speed) - 1 */ 5018 /* If twice the remainder of (Xtal/Speed) is greater than Speed */ 5019 /* then rounding up gives a more precise time constant. Instead */ 5020 /* of rounding up and then subtracting 1 we just don't subtract */ 5021 /* the one in this case. */ 5022 5023 /*-------------------------------------------------- 5024 * ejz: for DPLL mode, application should use the 5025 * same clock speed as the partner system, even 5026 * though clocking is derived from the input RxData. 5027 * In case the user uses a 0 for the clock speed, 5028 * default to 0xffffffff and don't try to divide by 5029 * zero 5030 *--------------------------------------------------*/ 5031 if ( info->params.clock_speed ) 5032 { 5033 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed); 5034 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2) 5035 / info->params.clock_speed) ) 5036 Tc--; 5037 } 5038 else 5039 Tc = -1; 5040 5041 5042 /* Write 16-bit Time Constant for BRG1 */ 5043 usc_OutReg( info, TC1R, Tc ); 5044 5045 RegValue |= BIT4; /* enable BRG1 */ 5046 5047 switch ( info->params.encoding ) { 5048 case HDLC_ENCODING_NRZ: 5049 case HDLC_ENCODING_NRZB: 5050 case HDLC_ENCODING_NRZI_MARK: 5051 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break; 5052 case HDLC_ENCODING_BIPHASE_MARK: 5053 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break; 5054 case HDLC_ENCODING_BIPHASE_LEVEL: 5055 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break; 5056 } 5057 } 5058 5059 usc_OutReg( info, HCR, RegValue ); 5060 5061 5062 /* Channel Control/status Register 
(CCSR) 5063 * 5064 * <15> X RCC FIFO Overflow status (RO) 5065 * <14> X RCC FIFO Not Empty status (RO) 5066 * <13> 0 1 = Clear RCC FIFO (WO) 5067 * <12> X DPLL Sync (RW) 5068 * <11> X DPLL 2 Missed Clocks status (RO) 5069 * <10> X DPLL 1 Missed Clock status (RO) 5070 * <9..8> 00 DPLL Resync on rising and falling edges (RW) 5071 * <7> X SDLC Loop On status (RO) 5072 * <6> X SDLC Loop Send status (RO) 5073 * <5> 1 Bypass counters for TxClk and RxClk (RW) 5074 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW) 5075 * <1..0> 00 reserved 5076 * 5077 * 0000 0000 0010 0000 = 0x0020 5078 */ 5079 5080 usc_OutReg( info, CCSR, 0x1020 ); 5081 5082 5083 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) { 5084 usc_OutReg( info, SICR, 5085 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) ); 5086 } 5087 5088 5089 /* enable Master Interrupt Enable bit (MIE) */ 5090 usc_EnableMasterIrqBit( info ); 5091 5092 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA + 5093 TRANSMIT_STATUS + TRANSMIT_DATA + MISC); 5094 5095 /* arm RCC underflow interrupt */ 5096 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3)); 5097 usc_EnableInterrupts(info, MISC); 5098 5099 info->mbre_bit = 0; 5100 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ 5101 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ 5102 info->mbre_bit = BIT8; 5103 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */ 5104 5105 if (info->bus_type == MGSL_BUS_TYPE_ISA) { 5106 /* Enable DMAEN (Port 7, Bit 14) */ 5107 /* This connects the DMA request signal to the ISA bus */ 5108 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14)); 5109 } 5110 5111 /* DMA Control Register (DCR) 5112 * 5113 * <15..14> 10 Priority mode = Alternating Tx/Rx 5114 * 01 Rx has priority 5115 * 00 Tx has priority 5116 * 5117 * <13> 1 Enable Priority Preempt per DCR<15..14> 5118 * (WARNING DCR<11..10> must be 00 when this is 1) 5119 * 0 Choose activate channel per DCR<11..10> 5120 * 
5121 * <12> 0 Little Endian for Array/List 5122 * <11..10> 00 Both Channels can use each bus grant 5123 * <9..6> 0000 reserved 5124 * <5> 0 7 CLK - Minimum Bus Re-request Interval 5125 * <4> 0 1 = drive D/C and S/D pins 5126 * <3> 1 1 = Add one wait state to all DMA cycles. 5127 * <2> 0 1 = Strobe /UAS on every transfer. 5128 * <1..0> 11 Addr incrementing only affects LS24 bits 5129 * 5130 * 0110 0000 0000 1011 = 0x600b 5131 */ 5132 5133 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 5134 /* PCI adapter does not need DMA wait state */ 5135 usc_OutDmaReg( info, DCR, 0xa00b ); 5136 } 5137 else 5138 usc_OutDmaReg( info, DCR, 0x800b ); 5139 5140 5141 /* Receive DMA mode Register (RDMR) 5142 * 5143 * <15..14> 11 DMA mode = Linked List Buffer mode 5144 * <13> 1 RSBinA/L = store Rx status Block in Arrary/List entry 5145 * <12> 1 Clear count of List Entry after fetching 5146 * <11..10> 00 Address mode = Increment 5147 * <9> 1 Terminate Buffer on RxBound 5148 * <8> 0 Bus Width = 16bits 5149 * <7..0> ? status Bits (write as 0s) 5150 * 5151 * 1111 0010 0000 0000 = 0xf200 5152 */ 5153 5154 usc_OutDmaReg( info, RDMR, 0xf200 ); 5155 5156 5157 /* Transmit DMA mode Register (TDMR) 5158 * 5159 * <15..14> 11 DMA mode = Linked List Buffer mode 5160 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry 5161 * <12> 1 Clear count of List Entry after fetching 5162 * <11..10> 00 Address mode = Increment 5163 * <9> 1 Terminate Buffer on end of frame 5164 * <8> 0 Bus Width = 16bits 5165 * <7..0> ? 
status Bits (Read Only so write as 0) 5166 * 5167 * 1111 0010 0000 0000 = 0xf200 5168 */ 5169 5170 usc_OutDmaReg( info, TDMR, 0xf200 ); 5171 5172 5173 /* DMA Interrupt Control Register (DICR) 5174 * 5175 * <15> 1 DMA Interrupt Enable 5176 * <14> 0 1 = Disable IEO from USC 5177 * <13> 0 1 = Don't provide vector during IntAck 5178 * <12> 1 1 = Include status in Vector 5179 * <10..2> 0 reserved, Must be 0s 5180 * <1> 0 1 = Rx DMA Interrupt Enabled 5181 * <0> 0 1 = Tx DMA Interrupt Enabled 5182 * 5183 * 1001 0000 0000 0000 = 0x9000 5184 */ 5185 5186 usc_OutDmaReg( info, DICR, 0x9000 ); 5187 5188 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */ 5189 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */ 5190 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */ 5191 5192 /* Channel Control Register (CCR) 5193 * 5194 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs) 5195 * <13> 0 Trigger Tx on SW Command Disabled 5196 * <12> 0 Flag Preamble Disabled 5197 * <11..10> 00 Preamble Length 5198 * <9..8> 00 Preamble Pattern 5199 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs) 5200 * <5> 0 Trigger Rx on SW Command Disabled 5201 * <4..0> 0 reserved 5202 * 5203 * 1000 0000 1000 0000 = 0x8080 5204 */ 5205 5206 RegValue = 0x8080; 5207 5208 switch ( info->params.preamble_length ) { 5209 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break; 5210 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break; 5211 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break; 5212 } 5213 5214 switch ( info->params.preamble ) { 5215 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break; 5216 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break; 5217 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break; 5218 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break; 5219 } 5220 5221 usc_OutReg( info, CCR, RegValue ); 5222 5223 5224 /* 5225 * Burst/Dwell Control Register 5226 * 5227 * <15..8> 0x20 
Maximum number of transfers per bus grant 5228 * <7..0> 0x00 Maximum number of clock cycles per bus grant 5229 */ 5230 5231 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) { 5232 /* don't limit bus occupancy on PCI adapter */ 5233 usc_OutDmaReg( info, BDCR, 0x0000 ); 5234 } 5235 else 5236 usc_OutDmaReg( info, BDCR, 0x2000 ); 5237 5238 usc_stop_transmitter(info); 5239 usc_stop_receiver(info); 5240 5241} /* end of usc_set_sdlc_mode() */ 5242 5243/* usc_enable_loopback() 5244 * 5245 * Set the 16C32 for internal loopback mode. 5246 * The TxCLK and RxCLK signals are generated from the BRG0 and 5247 * the TxD is looped back to the RxD internally. 5248 * 5249 * Arguments: info pointer to device instance data 5250 * enable 1 = enable loopback, 0 = disable 5251 * Return Value: None 5252 */ 5253static void usc_enable_loopback(struct mgsl_struct *info, int enable) 5254{ 5255 if (enable) { 5256 /* blank external TXD output */ 5257 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6)); 5258 5259 /* Clock mode Control Register (CMCR) 5260 * 5261 * <15..14> 00 counter 1 Disabled 5262 * <13..12> 00 counter 0 Disabled 5263 * <11..10> 11 BRG1 Input is TxC Pin 5264 * <9..8> 11 BRG0 Input is TxC Pin 5265 * <7..6> 01 DPLL Input is BRG1 Output 5266 * <5..3> 100 TxCLK comes from BRG0 5267 * <2..0> 100 RxCLK comes from BRG0 5268 * 5269 * 0000 1111 0110 0100 = 0x0f64 5270 */ 5271 5272 usc_OutReg( info, CMCR, 0x0f64 ); 5273 5274 /* Write 16-bit Time Constant for BRG0 */ 5275 /* use clock speed if available, otherwise use 8 for diagnostics */ 5276 if (info->params.clock_speed) { 5277 if (info->bus_type == MGSL_BUS_TYPE_PCI) 5278 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1)); 5279 else 5280 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1)); 5281 } else 5282 usc_OutReg(info, TC0R, (u16)8); 5283 5284 /* Hardware Configuration Register (HCR) Clear Bit 1, BRG0 5285 mode = Continuous Set Bit 0 to enable BRG0. 
*/ 5286 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); 5287 5288 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ 5289 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004)); 5290 5291 /* set Internal Data loopback mode */ 5292 info->loopback_bits = 0x300; 5293 outw( 0x0300, info->io_base + CCAR ); 5294 } else { 5295 /* enable external TXD output */ 5296 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6)); 5297 5298 /* clear Internal Data loopback mode */ 5299 info->loopback_bits = 0; 5300 outw( 0,info->io_base + CCAR ); 5301 } 5302 5303} /* end of usc_enable_loopback() */ 5304 5305/* usc_enable_aux_clock() 5306 * 5307 * Enabled the AUX clock output at the specified frequency. 5308 * 5309 * Arguments: 5310 * 5311 * info pointer to device extension 5312 * data_rate data rate of clock in bits per second 5313 * A data rate of 0 disables the AUX clock. 5314 * 5315 * Return Value: None 5316 */ 5317static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate ) 5318{ 5319 u32 XtalSpeed; 5320 u16 Tc; 5321 5322 if ( data_rate ) { 5323 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) 5324 XtalSpeed = 11059200; 5325 else 5326 XtalSpeed = 14745600; 5327 5328 5329 /* Tc = (Xtal/Speed) - 1 */ 5330 /* If twice the remainder of (Xtal/Speed) is greater than Speed */ 5331 /* then rounding up gives a more precise time constant. Instead */ 5332 /* of rounding up and then subtracting 1 we just don't subtract */ 5333 /* the one in this case. */ 5334 5335 5336 Tc = (u16)(XtalSpeed/data_rate); 5337 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) ) 5338 Tc--; 5339 5340 /* Write 16-bit Time Constant for BRG0 */ 5341 usc_OutReg( info, TC0R, Tc ); 5342 5343 /* 5344 * Hardware Configuration Register (HCR) 5345 * Clear Bit 1, BRG0 mode = Continuous 5346 * Set Bit 0 to enable BRG0. 
5347 */ 5348 5349 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) ); 5350 5351 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */ 5352 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) ); 5353 } else { 5354 /* data rate == 0 so turn off BRG0 */ 5355 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) ); 5356 } 5357 5358} /* end of usc_enable_aux_clock() */ 5359 5360/* 5361 * 5362 * usc_process_rxoverrun_sync() 5363 * 5364 * This function processes a receive overrun by resetting the 5365 * receive DMA buffers and issuing a Purge Rx FIFO command 5366 * to allow the receiver to continue receiving. 5367 * 5368 * Arguments: 5369 * 5370 * info pointer to device extension 5371 * 5372 * Return Value: None 5373 */ 5374static void usc_process_rxoverrun_sync( struct mgsl_struct *info ) 5375{ 5376 int start_index; 5377 int end_index; 5378 int frame_start_index; 5379 int start_of_frame_found = FALSE; 5380 int end_of_frame_found = FALSE; 5381 int reprogram_dma = FALSE; 5382 5383 DMABUFFERENTRY *buffer_list = info->rx_buffer_list; 5384 u32 phys_addr; 5385 5386 usc_DmaCmd( info, DmaCmd_PauseRxChannel ); 5387 usc_RCmd( info, RCmd_EnterHuntmode ); 5388 usc_RTCmd( info, RTCmd_PurgeRxFifo ); 5389 5390 /* CurrentRxBuffer points to the 1st buffer of the next */ 5391 /* possibly available receive frame. */ 5392 5393 frame_start_index = start_index = end_index = info->current_rx_buffer; 5394 5395 /* Search for an unfinished string of buffers. This means */ 5396 /* that a receive frame started (at least one buffer with */ 5397 /* count set to zero) but there is no terminiting buffer */ 5398 /* (status set to non-zero). */ 5399 5400 while( !buffer_list[end_index].count ) 5401 { 5402 /* Count field has been reset to zero by 16C32. */ 5403 /* This buffer is currently in use. 
*/ 5404 5405 if ( !start_of_frame_found ) 5406 { 5407 start_of_frame_found = TRUE; 5408 frame_start_index = end_index; 5409 end_of_frame_found = FALSE; 5410 } 5411 5412 if ( buffer_list[end_index].status ) 5413 { 5414 /* Status field has been set by 16C32. */ 5415 /* This is the last buffer of a received frame. */ 5416 5417 /* We want to leave the buffers for this frame intact. */ 5418 /* Move on to next possible frame. */ 5419 5420 start_of_frame_found = FALSE; 5421 end_of_frame_found = TRUE; 5422 } 5423 5424 /* advance to next buffer entry in linked list */ 5425 end_index++; 5426 if ( end_index == info->rx_buffer_count ) 5427 end_index = 0; 5428 5429 if ( start_index == end_index ) 5430 { 5431 /* The entire list has been searched with all Counts == 0 and */ 5432 /* all Status == 0. The receive buffers are */ 5433 /* completely screwed, reset all receive buffers! */ 5434 mgsl_reset_rx_dma_buffers( info ); 5435 frame_start_index = 0; 5436 start_of_frame_found = FALSE; 5437 reprogram_dma = TRUE; 5438 break; 5439 } 5440 } 5441 5442 if ( start_of_frame_found && !end_of_frame_found ) 5443 { 5444 /* There is an unfinished string of receive DMA buffers */ 5445 /* as a result of the receiver overrun. */ 5446 5447 /* Reset the buffers for the unfinished frame */ 5448 /* and reprogram the receive DMA controller to start */ 5449 /* at the 1st buffer of unfinished frame. */ 5450 5451 start_index = frame_start_index; 5452 5453 do 5454 { 5455 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE; 5456 5457 /* Adjust index for wrap around. 
*/ 5458 if ( start_index == info->rx_buffer_count ) 5459 start_index = 0; 5460 5461 } while( start_index != end_index ); 5462 5463 reprogram_dma = TRUE; 5464 } 5465 5466 if ( reprogram_dma ) 5467 { 5468 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL); 5469 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS); 5470 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS); 5471 5472 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL); 5473 5474 /* This empties the receive FIFO and loads the RCC with RCLR */ 5475 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) ); 5476 5477 /* program 16C32 with physical address of 1st DMA buffer entry */ 5478 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry; 5479 usc_OutDmaReg( info, NRARL, (u16)phys_addr ); 5480 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) ); 5481 5482 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 5483 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS ); 5484 usc_EnableInterrupts( info, RECEIVE_STATUS ); 5485 5486 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */ 5487 /* 2. 
/* usc_stop_receiver()
 *
 * Disable the USC receiver.
 *
 * Resets the receive DMA channel, disables and acknowledges all
 * receive interrupts, disables the receiver, purges the receive
 * FIFO, and clears the driver's receive state flags.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_stop_receiver( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_receiver(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	/* Disable receive DMA channel. */
	/* This also disables receive DMA channel interrupts */
	usc_DmaCmd( info, DmaCmd_ResetRxChannel );

	/* unlatch, acknowledge, and mask all receive interrupt sources */
	usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
	usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );

	usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);

	/* This empties the receive FIFO and loads the RCC with RCLR */
	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* receiver is now idle; clear pending error state */
	info->rx_enabled = 0;
	info->rx_overflow = 0;
	info->rx_rcc_underrun = 0;

}	/* end of usc_stop_receiver() */
/* usc_start_receiver()
 *
 * Enable the USC receiver.
 *
 * For HDLC/raw modes the receive DMA controller is programmed with
 * the first buffer of the receive ring and DMA end-of-buffer
 * interrupts are armed. For other (async) modes, per-character
 * receive data interrupts are used instead of DMA.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_start_receiver( struct mgsl_struct *info )
{
	u32 phys_addr;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_receiver(%s)\n",
			__FILE__,__LINE__, info->device_name );

	/* start from a clean slate: fresh DMA ring, receiver stopped */
	mgsl_reset_rx_dma_buffers( info );
	usc_stop_receiver( info );

	/* This empties the receive FIFO and loads the RCC with RCLR */
	usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	if ( info->params.mode == MGSL_MODE_HDLC ||
		info->params.mode == MGSL_MODE_RAW ) {
		/* DMA mode Transfers */
		/* Program the DMA controller. */
		/* Enable the DMA controller end of buffer interrupt. */

		/* program 16C32 with physical address of 1st DMA buffer entry */
		phys_addr = info->rx_buffer_list[0].phys_entry;
		usc_OutDmaReg( info, NRARL, (u16)phys_addr );
		usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );

		usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
		usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
		usc_EnableInterrupts( info, RECEIVE_STATUS );

		/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
		/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */

		usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
		usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
		usc_DmaCmd( info, DmaCmd_InitRxChannel );
		if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
			usc_EnableReceiver(info,ENABLE_AUTO_DCD);
		else
			usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	} else {
		/* async mode: interrupt per received character, no DMA */
		usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
		usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
		usc_EnableInterrupts(info, RECEIVE_DATA);

		usc_RTCmd( info, RTCmd_PurgeRxFifo );
		usc_RCmd( info, RCmd_EnterHuntmode );

		usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
	}

	usc_OutReg( info, CCSR, 0x1020 );

	info->rx_enabled = 1;

}	/* end of usc_start_receiver() */
/* usc_start_transmitter()
 *
 * Enable the USC transmitter and send a transmit frame if
 * one is loaded in the DMA buffers.
 *
 * In async mode, the transmit FIFO is loaded directly and
 * per-character transmit interrupts are used. In sync modes,
 * the transmit DMA controller is programmed with the frame
 * already assembled in the DMA buffer ring and a send-frame
 * command is issued; a 5 second watchdog timer guards the send.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_start_transmitter( struct mgsl_struct *info )
{
	u32 phys_addr;
	unsigned int FrameSize;

	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_start_transmitter(%s)\n",
			__FILE__,__LINE__, info->device_name );

	if ( info->xmit_cnt ) {

		/* If auto RTS enabled and RTS is inactive, then assert */
		/* RTS and set a flag indicating that the driver should */
		/* negate RTS when the transmission completes. */

		info->drop_rts_on_tx_done = 0;

		if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
			usc_get_serial_signals( info );
			if ( !(info->serial_signals & SerialSignal_RTS) ) {
				info->serial_signals |= SerialSignal_RTS;
				usc_set_serial_signals( info );
				/* remember to drop RTS when tx completes */
				info->drop_rts_on_tx_done = 1;
			}
		}


		if ( info->params.mode == MGSL_MODE_ASYNC ) {
			/* async: only (re)start if not already transmitting */
			if ( !info->tx_active ) {
				usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
				usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
				usc_EnableInterrupts(info, TRANSMIT_DATA);
				usc_load_txfifo(info);
			}
		} else {
			/* Disable transmit DMA controller while programming. */
			usc_DmaCmd( info, DmaCmd_ResetTxChannel );

			/* Transmit DMA buffer is loaded, so program USC */
			/* to send the frame contained in the buffers.	*/

			FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;

			/* if operating in Raw sync mode, reset the rcc component
			 * of the tx dma buffer entry, otherwise, the serial controller
			 * will send a closing sync char after this count.
			 */
			if ( info->params.mode == MGSL_MODE_RAW )
				info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;

			/* Program the Transmit Character Length Register (TCLR) */
			/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
			usc_OutReg( info, TCLR, (u16)FrameSize );

			usc_RTCmd( info, RTCmd_PurgeTxFifo );

			/* Program the address of the 1st DMA Buffer Entry in linked list */
			phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
			usc_OutDmaReg( info, NTARL, (u16)phys_addr );
			usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );

			usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
			usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
			usc_EnableInterrupts( info, TRANSMIT_STATUS );

			if ( info->params.mode == MGSL_MODE_RAW &&
					info->num_tx_dma_buffers > 1 ) {
			   /* When running external sync mode, attempt to 'stream' transmit */
			   /* by filling tx dma buffers as they become available. To do this */
			   /* we need to enable Tx DMA EOB Status interrupts : */
			   /* */
			   /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
			   /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */

			   usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
			   usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
			}

			/* Initialize Transmit DMA Channel */
			usc_DmaCmd( info, DmaCmd_InitTxChannel );

			usc_TCmd( info, TCmd_SendFrame );

			/* 5 second transmit watchdog */
			mod_timer(&info->tx_timer, jiffies +
					msecs_to_jiffies(5000));
		}
		info->tx_active = 1;
	}

	if ( !info->tx_enabled ) {
		info->tx_enabled = 1;
		if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
			usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
		else
			usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
	}

}	/* end of usc_start_transmitter() */
/* usc_stop_transmitter()
 *
 * Stops the transmitter and transmit DMA.
 *
 * Cancels the transmit watchdog timer, disables and acknowledges
 * all transmit interrupts, disables the transmitter, resets the
 * transmit DMA channel, and purges the transmit FIFO.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_stop_transmitter( struct mgsl_struct *info )
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):usc_stop_transmitter(%s)\n",
			__FILE__,__LINE__, info->device_name );

	/* cancel the transmit watchdog */
	del_timer(&info->tx_timer);

	/* unlatch, acknowledge, and mask all transmit interrupt sources */
	usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
	usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
	usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );

	usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	info->tx_enabled = 0;
	info->tx_active = 0;

}	/* end of usc_stop_transmitter() */
5741 * 5742 * Arguments: info pointer to device extension (instance data) 5743 * Return Value: None 5744 */ 5745static void usc_load_txfifo( struct mgsl_struct *info ) 5746{ 5747 int Fifocount; 5748 u8 TwoBytes[2]; 5749 5750 if ( !info->xmit_cnt && !info->x_char ) 5751 return; 5752 5753 /* Select transmit FIFO status readback in TICR */ 5754 usc_TCmd( info, TCmd_SelectTicrTxFifostatus ); 5755 5756 /* load the Transmit FIFO until FIFOs full or all data sent */ 5757 5758 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) { 5759 /* there is more space in the transmit FIFO and */ 5760 /* there is more data in transmit buffer */ 5761 5762 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) { 5763 /* write a 16-bit word from transmit buffer to 16C32 */ 5764 5765 TwoBytes[0] = info->xmit_buf[info->xmit_tail++]; 5766 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1); 5767 TwoBytes[1] = info->xmit_buf[info->xmit_tail++]; 5768 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1); 5769 5770 outw( *((u16 *)TwoBytes), info->io_base + DATAREG); 5771 5772 info->xmit_cnt -= 2; 5773 info->icount.tx += 2; 5774 } else { 5775 /* only 1 byte left to transmit or 1 FIFO slot left */ 5776 5777 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY), 5778 info->io_base + CCAR ); 5779 5780 if (info->x_char) { 5781 /* transmit pending high priority char */ 5782 outw( info->x_char,info->io_base + CCAR ); 5783 info->x_char = 0; 5784 } else { 5785 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR ); 5786 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1); 5787 info->xmit_cnt--; 5788 } 5789 info->icount.tx++; 5790 } 5791 } 5792 5793} /* end of usc_load_txfifo() */ 5794 5795/* usc_reset() 5796 * 5797 * Reset the adapter to a known state and prepare it for further use. 
/* usc_reset()
 *
 * Reset the adapter to a known state and prepare it for further use.
 *
 * PCI adapters are reset through the Misc Control Register in local
 * config space; ISA adapters through an I/O port write. The bus
 * configuration, port directions, and I/O pin functions are then
 * programmed to the driver's baseline values.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_reset( struct mgsl_struct *info )
{
	if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
		int i;
		u32 readval;

		/* Set BIT30 of Misc Control Register */
		/* (Local Control Register 0x50) to force reset of USC. */

		volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
		u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);

		info->misc_ctrl_value |= BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		/*
		 * Force at least 170ns delay before clearing
		 * reset bit. Each read from LCR takes at least
		 * 30ns so 10 times for 300ns to be safe.
		 */
		for(i=0;i<10;i++)
			readval = *MiscCtrl;

		info->misc_ctrl_value &= ~BIT30;
		*MiscCtrl = info->misc_ctrl_value;

		/* program local-bus read/write strobe timing */
		*LCR0BRDR = BUS_DESCRIPTOR(
			1,	// Write Strobe Hold (0-3)
			2,	// Write Strobe Delay (0-3)
			2,	// Read Strobe Delay  (0-3)
			0,	// NWDD (Write data-data) (0-3)
			4,	// NWAD (Write Addr-data) (0-31)
			0,	// NXDA (Read/Write Data-Addr) (0-3)
			0,	// NRDD (Read Data-Data) (0-3)
			5	// NRAD (Read Addr-Data) (0-31)
			);
	} else {
		/* do HW reset */
		outb( 0,info->io_base + 8 );
	}

	/* reset cached shadow state for the freshly reset chip */
	info->mbre_bit = 0;
	info->loopback_bits = 0;
	info->usc_idle_mode = 0;

	/*
	 * Program the Bus Configuration Register (BCR)
	 *
	 * <15>		0	Don't use separate address
	 * <14..6>	0	reserved
	 * <5..4>	00	IAckmode = Default, don't care
	 * <3>		1	Bus Request Totem Pole output
	 * <2>		1	Use 16 Bit data bus
	 * <1>		0	IRQ Totem Pole output
	 * <0>		0	Don't Shift Right Addr
	 *
	 * 0000 0000 0000 1100 = 0x000c
	 *
	 * By writing to io_base + SDPIN the Wait/Ack pin is
	 * programmed to work as a Wait pin.
	 */

	outw( 0x000c,info->io_base + SDPIN );


	outw( 0,info->io_base );
	outw( 0,info->io_base + CCAR );

	/* select little endian byte ordering */
	usc_RTCmd( info, RTCmd_SelectLittleEndian );


	/* Port Control Register (PCR)
	 *
	 * <15..14>	11	Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
	 * <13..12>	11	Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
	 * <11..10>	00	Port 5 is Input (No Connect, Don't Care)
	 * <9..8>	00	Port 4 is Input (No Connect, Don't Care)
	 * <7..6>	11	Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
	 * <5..4>	11	Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
	 * <3..2>	01	Port 1 is Input (Dedicated RxC)
	 * <1..0>	01	Port 0 is Input (Dedicated TxC)
	 *
	 *	1111 0000 1111 0101 = 0xf0f5
	 */

	usc_OutReg( info, PCR, 0xf0f5 );


	/*
	 * Input/Output Control Register
	 *
	 * <15..14>	00	CTS is active low input
	 * <13..12>	00	DCD is active low input
	 * <11..10>	00	TxREQ pin is input (DSR)
	 * <9..8>	00	RxREQ pin is input (RI)
	 * <7..6>	00	TxD is output (Transmit Data)
	 * <5..3>	000	TxC Pin in Input (14.7456MHz Clock)
	 * <2..0>	100	RxC is Output (drive with BRG0)
	 *
	 * 0000 0000 0000 0100 = 0x0004
	 */

	usc_OutReg( info, IOCR, 0x0004 );

}	/* end of usc_reset() */
5862 */ 5863 5864 outw( 0x000c,info->io_base + SDPIN ); 5865 5866 5867 outw( 0,info->io_base ); 5868 outw( 0,info->io_base + CCAR ); 5869 5870 /* select little endian byte ordering */ 5871 usc_RTCmd( info, RTCmd_SelectLittleEndian ); 5872 5873 5874 /* Port Control Register (PCR) 5875 * 5876 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled) 5877 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled) 5878 * <11..10> 00 Port 5 is Input (No Connect, Don't Care) 5879 * <9..8> 00 Port 4 is Input (No Connect, Don't Care) 5880 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled ) 5881 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled ) 5882 * <3..2> 01 Port 1 is Input (Dedicated RxC) 5883 * <1..0> 01 Port 0 is Input (Dedicated TxC) 5884 * 5885 * 1111 0000 1111 0101 = 0xf0f5 5886 */ 5887 5888 usc_OutReg( info, PCR, 0xf0f5 ); 5889 5890 5891 /* 5892 * Input/Output Control Register 5893 * 5894 * <15..14> 00 CTS is active low input 5895 * <13..12> 00 DCD is active low input 5896 * <11..10> 00 TxREQ pin is input (DSR) 5897 * <9..8> 00 RxREQ pin is input (RI) 5898 * <7..6> 00 TxD is output (Transmit Data) 5899 * <5..3> 000 TxC Pin in Input (14.7456MHz Clock) 5900 * <2..0> 100 RxC is Output (drive with BRG0) 5901 * 5902 * 0000 0000 0000 0100 = 0x0004 5903 */ 5904 5905 usc_OutReg( info, IOCR, 0x0004 ); 5906 5907} /* end of usc_reset() */ 5908 5909/* usc_set_async_mode() 5910 * 5911 * Program adapter for asynchronous communications. 
5912 * 5913 * Arguments: info pointer to device instance data 5914 * Return Value: None 5915 */ 5916static void usc_set_async_mode( struct mgsl_struct *info ) 5917{ 5918 u16 RegValue; 5919 5920 /* disable interrupts while programming USC */ 5921 usc_DisableMasterIrqBit( info ); 5922 5923 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */ 5924 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */ 5925 5926 usc_loopback_frame( info ); 5927 5928 /* Channel mode Register (CMR) 5929 * 5930 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit 5931 * <13..12> 00 00 = 16X Clock 5932 * <11..8> 0000 Transmitter mode = Asynchronous 5933 * <7..6> 00 reserved? 5934 * <5..4> 00 Rx Sub modes, 00 = 16X Clock 5935 * <3..0> 0000 Receiver mode = Asynchronous 5936 * 5937 * 0000 0000 0000 0000 = 0x0 5938 */ 5939 5940 RegValue = 0; 5941 if ( info->params.stop_bits != 1 ) 5942 RegValue |= BIT14; 5943 usc_OutReg( info, CMR, RegValue ); 5944 5945 5946 /* Receiver mode Register (RMR) 5947 * 5948 * <15..13> 000 encoding = None 5949 * <12..08> 00000 reserved (Sync Only) 5950 * <7..6> 00 Even parity 5951 * <5> 0 parity disabled 5952 * <4..2> 000 Receive Char Length = 8 bits 5953 * <1..0> 00 Disable Receiver 5954 * 5955 * 0000 0000 0000 0000 = 0x0 5956 */ 5957 5958 RegValue = 0; 5959 5960 if ( info->params.data_bits != 8 ) 5961 RegValue |= BIT4+BIT3+BIT2; 5962 5963 if ( info->params.parity != ASYNC_PARITY_NONE ) { 5964 RegValue |= BIT5; 5965 if ( info->params.parity != ASYNC_PARITY_ODD ) 5966 RegValue |= BIT6; 5967 } 5968 5969 usc_OutReg( info, RMR, RegValue ); 5970 5971 5972 /* Set IRQ trigger level */ 5973 5974 usc_RCmd( info, RCmd_SelectRicrIntLevel ); 5975 5976 5977 /* Receive Interrupt Control Register (RICR) 5978 * 5979 * <15..8> ? RxFIFO IRQ Request Level 5980 * 5981 * Note: For async mode the receive FIFO level must be set 5982 * to 0 to avoid the situation where the FIFO contains fewer bytes 5983 * than the trigger level and no more data is expected. 
5984 * 5985 * <7> 0 Exited Hunt IA (Interrupt Arm) 5986 * <6> 0 Idle Received IA 5987 * <5> 0 Break/Abort IA 5988 * <4> 0 Rx Bound IA 5989 * <3> 0 Queued status reflects oldest byte in FIFO 5990 * <2> 0 Abort/PE IA 5991 * <1> 0 Rx Overrun IA 5992 * <0> 0 Select TC0 value for readback 5993 * 5994 * 0000 0000 0100 0000 = 0x0000 + (FIFOLEVEL in MSB) 5995 */ 5996 5997 usc_OutReg( info, RICR, 0x0000 ); 5998 5999 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL ); 6000 usc_ClearIrqPendingBits( info, RECEIVE_STATUS ); 6001 6002 6003 /* Transmit mode Register (TMR) 6004 * 6005 * <15..13> 000 encoding = None 6006 * <12..08> 00000 reserved (Sync Only) 6007 * <7..6> 00 Transmit parity Even 6008 * <5> 0 Transmit parity Disabled 6009 * <4..2> 000 Tx Char Length = 8 bits 6010 * <1..0> 00 Disable Transmitter 6011 * 6012 * 0000 0000 0000 0000 = 0x0 6013 */ 6014 6015 RegValue = 0; 6016 6017 if ( info->params.data_bits != 8 ) 6018 RegValue |= BIT4+BIT3+BIT2; 6019 6020 if ( info->params.parity != ASYNC_PARITY_NONE ) { 6021 RegValue |= BIT5; 6022 if ( info->params.parity != ASYNC_PARITY_ODD ) 6023 RegValue |= BIT6; 6024 } 6025 6026 usc_OutReg( info, TMR, RegValue ); 6027 6028 usc_set_txidle( info ); 6029 6030 6031 /* Set IRQ trigger level */ 6032 6033 usc_TCmd( info, TCmd_SelectTicrIntLevel ); 6034 6035 6036 /* Transmit Interrupt Control Register (TICR) 6037 * 6038 * <15..8> ? 
/* usc_loopback_frame()
 *
 * Loop back a small (2 byte) dummy SDLC frame.
 * Interrupts and DMA are NOT used. The purpose of this is to
 * clear any 'stale' status info left over from running in async mode.
 *
 * The 16C32 shows the strange behaviour of marking the 1st
 * received SDLC frame with a CRC error even when there is no
 * CRC error. To get around this a small dummy frame of 2 bytes
 * is looped back when switching from async to sync mode.
 *
 * Arguments:	info	pointer to device instance data
 * Return Value:	None
 */
static void usc_loopback_frame( struct mgsl_struct *info )
{
	int i;
	unsigned long oldmode = info->params.mode;

	/* temporarily force HDLC mode for the dummy frame */
	info->params.mode = MGSL_MODE_HDLC;

	usc_DisableMasterIrqBit( info );

	usc_set_sdlc_mode( info );
	usc_enable_loopback( info, 1 );

	/* Write 16-bit Time Constant for BRG0 */
	usc_OutReg( info, TC0R, 0 );

	/* Channel Control Register (CCR)
	 *
	 * <15..14>	00	Don't use 32-bit Tx Control Blocks (TCBs)
	 * <13>		0	Trigger Tx on SW Command Disabled
	 * <12>		0	Flag Preamble Disabled
	 * <11..10>	00	Preamble Length = 8-Bits
	 * <9..8>	01	Preamble Pattern = flags
	 * <7..6>	10	Don't use 32-bit Rx status Blocks (RSBs)
	 * <5>		0	Trigger Rx on SW Command Disabled
	 * <4..0>	0	reserved
	 *
	 * 0000 0001 0000 0000 = 0x0100
	 */

	usc_OutReg( info, CCR, 0x0100 );

	/* SETUP RECEIVER */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );
	usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);

	/* SETUP TRANSMITTER */
	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
	usc_OutReg( info, TCLR, 2 );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* unlatch Tx status bits, and start transmit channel. */
	usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
	outw(0,info->io_base + DATAREG);

	/* ENABLE TRANSMITTER */
	usc_TCmd( info, TCmd_SendFrame );
	usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);

	/* WAIT FOR RECEIVE COMPLETE (bounded poll of receive status) */
	for (i=0 ; i<1000 ; i++)
		if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
			break;

	/* clear Internal Data loopback mode */
	usc_enable_loopback(info, 0);

	usc_EnableMasterIrqBit(info);

	/* restore the caller's configured mode */
	info->params.mode = oldmode;

}	/* end of usc_loopback_frame() */
*/ 6156 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL); 6157 outw(0,info->io_base + DATAREG); 6158 6159 /* ENABLE TRANSMITTER */ 6160 usc_TCmd( info, TCmd_SendFrame ); 6161 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL); 6162 6163 /* WAIT FOR RECEIVE COMPLETE */ 6164 for (i=0 ; i<1000 ; i++) 6165 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1)) 6166 break; 6167 6168 /* clear Internal Data loopback mode */ 6169 usc_enable_loopback(info, 0); 6170 6171 usc_EnableMasterIrqBit(info); 6172 6173 info->params.mode = oldmode; 6174 6175} /* end of usc_loopback_frame() */ 6176 6177/* usc_set_sync_mode() Programs the USC for SDLC communications. 6178 * 6179 * Arguments: info pointer to adapter info structure 6180 * Return Value: None 6181 */ 6182static void usc_set_sync_mode( struct mgsl_struct *info ) 6183{ 6184 usc_loopback_frame( info ); 6185 usc_set_sdlc_mode( info ); 6186 6187 if (info->bus_type == MGSL_BUS_TYPE_ISA) { 6188 /* Enable INTEN (Port 6, Bit12) */ 6189 /* This connects the IRQ request signal to the ISA bus */ 6190 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12)); 6191 } 6192 6193 usc_enable_aux_clock(info, info->params.clock_speed); 6194 6195 if (info->params.loopback) 6196 usc_enable_loopback(info,1); 6197 6198} /* end of mgsl_set_sync_mode() */ 6199 6200/* usc_set_txidle() Set the HDLC idle mode for the transmitter. 
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_set_txidle( struct mgsl_struct *info )
{
	/* default to flag idle if idle_mode has an unrecognized value */
	u16 usc_idle_mode = IDLEMODE_FLAGS;

	/* Map API idle mode to USC register bits */

	switch( info->idle_mode ){
	case HDLC_TXIDLE_FLAGS:			usc_idle_mode = IDLEMODE_FLAGS; break;
	case HDLC_TXIDLE_ALT_ZEROS_ONES:	usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
	case HDLC_TXIDLE_ZEROS:			usc_idle_mode = IDLEMODE_ZERO; break;
	case HDLC_TXIDLE_ONES:			usc_idle_mode = IDLEMODE_ONE; break;
	case HDLC_TXIDLE_ALT_MARK_SPACE:	usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
	case HDLC_TXIDLE_SPACE:			usc_idle_mode = IDLEMODE_SPACE; break;
	case HDLC_TXIDLE_MARK:			usc_idle_mode = IDLEMODE_MARK; break;
	}

	/* cache the mapped value and merge it into the shadowed TCSR */
	info->usc_idle_mode = usc_idle_mode;
	//usc_OutReg(info, TCSR, usc_idle_mode);
	info->tcsr_value &= ~IDLEMODE_MASK;	/* clear idle mode bits */
	info->tcsr_value += usc_idle_mode;
	usc_OutReg(info, TCSR, info->tcsr_value);

	/*
	 * if SyncLink WAN adapter is running in external sync mode, the
	 * transmitter has been set to Monosync in order to try to mimic
	 * a true raw outbound bit stream. Monosync still sends an open/close
	 * sync char at the start/end of a frame. Try to match those sync
	 * patterns to the idle mode set here
	 */
	if ( info->params.mode == MGSL_MODE_RAW ) {
		unsigned char syncpat = 0;
		switch( info->idle_mode ) {
		case HDLC_TXIDLE_FLAGS:
			syncpat = 0x7e;
			break;
		case HDLC_TXIDLE_ALT_ZEROS_ONES:
			syncpat = 0x55;
			break;
		case HDLC_TXIDLE_ZEROS:
		case HDLC_TXIDLE_SPACE:
			syncpat = 0x00;
			break;
		case HDLC_TXIDLE_ONES:
		case HDLC_TXIDLE_MARK:
			syncpat = 0xff;
			break;
		case HDLC_TXIDLE_ALT_MARK_SPACE:
			syncpat = 0xaa;
			break;
		}

		usc_SetTransmitSyncChars(info,syncpat,syncpat);
	}

}	/* end of usc_set_txidle() */

/* usc_get_serial_signals()
 *
 * Query the adapter for the state of the V24 status (input) signals.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None (result is left in info->serial_signals)
 */
static void usc_get_serial_signals( struct mgsl_struct *info )
{
	u16 status;

	/* clear all serial signals except DTR and RTS
	 * (those two are outputs owned by the driver, not read back here) */
	info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;

	/* Read the Misc Interrupt status Register (MISR) to get */
	/* the V24 status signals. */

	status = usc_InReg( info, MISR );

	/* set serial signal bits to reflect MISR */

	if ( status & MISCSTATUS_CTS )
		info->serial_signals |= SerialSignal_CTS;

	if ( status & MISCSTATUS_DCD )
		info->serial_signals |= SerialSignal_DCD;

	if ( status & MISCSTATUS_RI )
		info->serial_signals |= SerialSignal_RI;

	if ( status & MISCSTATUS_DSR )
		info->serial_signals |= SerialSignal_DSR;

}	/* end of usc_get_serial_signals() */

/* usc_set_serial_signals()
 *
 * Set the state of DTR and RTS based on contents of
 * serial_signals member of device extension.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void usc_set_serial_signals( struct mgsl_struct *info )
{
	u16 Control;
	unsigned char V24Out = info->serial_signals;

	/* get the current value of the Port Control Register (PCR) */

	Control = usc_InReg( info, PCR );

	/* a requested (set) signal flag CLEARS the corresponding PCR bit;
	 * an unrequested flag sets it */
	if ( V24Out & SerialSignal_RTS )
		Control &= ~(BIT6);
	else
		Control |= BIT6;

	if ( V24Out & SerialSignal_DTR )
		Control &= ~(BIT4);
	else
		Control |= BIT4;

	usc_OutReg( info, PCR, Control );

}	/* end of usc_set_serial_signals() */

/* usc_enable_async_clock()
 *
 * Enable the async clock at the specified frequency.
 *
 * Arguments:		info		pointer to device instance data
 *			data_rate	data rate of clock in bps
 *					0 disables the AUX clock.
 * Return Value:	None
 */
static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
{
	if ( data_rate )	{
		/*
		 * Clock mode Control Register (CMCR)
		 *
		 * <15..14>	00	counter 1 Disabled
		 * <13..12>	00	counter 0 Disabled
		 * <11..10>	11	BRG1 Input is TxC Pin
		 * <9..8>	11	BRG0 Input is TxC Pin
		 * <7..6>	01	DPLL Input is BRG1 Output
		 * <5..3>	100	TxCLK comes from BRG0
		 * <2..0>	100	RxCLK comes from BRG0
		 *
		 *	0000 1111 0110 0100 = 0x0f64
		 */

		usc_OutReg( info, CMCR, 0x0f64 );


		/*
		 * Write 16-bit Time Constant for BRG0
		 * Time Constant = (ClkSpeed / data_rate) - 1
		 * ClkSpeed = 921600 (ISA), 691200 (PCI)
		 */

		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
		else
			usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );


		/*
		 * Hardware Configuration Register (HCR)
		 * Clear Bit 1, BRG0 mode = Continuous
		 * Set Bit 0 to enable BRG0.
		 */

		usc_OutReg( info, HCR,
			    (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );


		/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */

		usc_OutReg( info, IOCR,
			    (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
	} else {
		/* data rate == 0 so turn off BRG0 */
		usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
	}

}	/* end of usc_enable_async_clock() */

/*
 * Buffer Structures:
 *
 * Normal memory access uses virtual addresses that can make discontiguous
 * physical memory pages appear to be contiguous in the virtual address
 * space (the processors memory mapping handles the conversions).
 *
 * DMA transfers require physically contiguous memory. This is because
 * the DMA system controller and DMA bus masters deal with memory using
 * only physical addresses.
 *
 * This causes a problem under Windows NT when large DMA buffers are
 * needed. Fragmentation of the nonpaged pool prevents allocations of
 * physically contiguous buffers larger than the PAGE_SIZE.
 *
 * However the 16C32 supports Bus Master Scatter/Gather DMA which
 * allows DMA transfers to physically discontiguous buffers. Information
 * about each data transfer buffer is contained in a memory structure
 * called a 'buffer entry'. A list of buffer entries is maintained
 * to track and control the use of the data transfer buffers.
 *
 * To support this strategy we will allocate sufficient PAGE_SIZE
 * contiguous memory buffers to allow for the total required buffer
 * space.
 *
 * The 16C32 accesses the list of buffer entries using Bus Master
 * DMA. Control information is read from the buffer entries by the
 * 16C32 to control data transfers. status information is written to
 * the buffer entries by the 16C32 to indicate the status of completed
 * transfers.
 *
 * The CPU writes control information to the buffer entries to control
 * the 16C32 and reads status information from the buffer entries to
 * determine information about received and transmitted frames.
 *
 * Because the CPU and 16C32 (adapter) both need simultaneous access
 * to the buffer entries, the buffer entry memory is allocated with
 * HalAllocateCommonBuffer(). This restricts the size of the buffer
 * entry list to PAGE_SIZE.
 *
 * The actual data buffers on the other hand will only be accessed
 * by the CPU or the adapter but not by both simultaneously. This allows
 * Scatter/Gather packet based DMA procedures for using physically
 * discontiguous pages.
 */

/*
 * mgsl_reset_tx_dma_buffers()
 *
 * Set the count for all transmit buffers to 0 to indicate the
 * buffer is available for use and set the current buffer to the
 * first buffer. This effectively makes all buffers free and
 * discards any data in buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->tx_buffer_count; i++ ) {
		/* NOTE(review): count is cleared via a single 32-bit store
		 * through a cast, presumably to update adjacent entry fields
		 * atomically w.r.t. the 16C32 -- confirm against the
		 * DMABUFFERENTRY layout */
		*((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
	}

	/* rewind all tx buffer cursors and usage counters */
	info->current_tx_buffer = 0;
	info->start_tx_dma_buffer = 0;
	info->tx_dma_buffers_used = 0;

	/* also reset the tx holding buffer queue */
	info->get_tx_holding_index = 0;
	info->put_tx_holding_index = 0;
	info->tx_holding_count = 0;

}	/* end of mgsl_reset_tx_dma_buffers() */

/*
 * num_free_tx_dma_buffers()
 *
 * returns the number of free tx dma buffers available
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	number of free tx dma buffers
 */
static int num_free_tx_dma_buffers(struct mgsl_struct *info)
{
	return info->tx_buffer_count - info->tx_dma_buffers_used;
}

/*
 * mgsl_reset_rx_dma_buffers()
 *
 * Set the count for all receive buffers to DMABUFFERSIZE
 * and set the current buffer to the first buffer. This effectively
 * makes all buffers free and discards any data in buffers.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	None
 */
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
{
	unsigned int i;

	for ( i = 0; i < info->rx_buffer_count; i++ ) {
		/* single 32-bit store; equivalent per-field form kept below
		 * for reference */
		*((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
//		info->rx_buffer_list[i].count = DMABUFFERSIZE;
//		info->rx_buffer_list[i].status = 0;
	}

	info->current_rx_buffer = 0;

}	/* end of mgsl_reset_rx_dma_buffers() */

/*
 * mgsl_free_rx_frame_buffers()
 *
 * Free the receive buffers used by a received SDLC
 * frame such that the buffers can be reused.
 *
 * Arguments:
 *
 *	info			pointer to device instance data
 *	StartIndex		index of 1st receive buffer of frame
 *	EndIndex		index of last receive buffer of frame
 *
 * Return Value: None
 */
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
{
	int Done = 0;
	DMABUFFERENTRY *pBufEntry;
	unsigned int Index;

	/* Starting with 1st buffer entry of the frame clear the status */
	/* field and set the count field to DMA Buffer Size. */

	Index = StartIndex;

	while( !Done ) {
		pBufEntry = &(info->rx_buffer_list[Index]);

		if ( Index == EndIndex ) {
			/* This is the last buffer of the frame! */
			Done = 1;
		}

		/* reset current buffer for reuse; single 32-bit store,
		 * equivalent per-field form kept below for reference */
//		pBufEntry->status = 0;
//		pBufEntry->count = DMABUFFERSIZE;
		*((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;

		/* advance to next buffer entry in linked list (wraps) */
		Index++;
		if ( Index == info->rx_buffer_count )
			Index = 0;
	}

	/* set current buffer to next buffer after last buffer of frame */
	info->current_rx_buffer = Index;

}	/* end of mgsl_free_rx_frame_buffers() */

/* mgsl_get_rx_frame()
 *
 * This function attempts to return a received SDLC frame from the
 * receive DMA buffers. Only frames received without errors are returned
 * (unless HDLC_CRC_RETURN_EX is set, in which case CRC-errored frames
 * are also passed up with a trailing status byte).
 *
 * Arguments:		info	pointer to device extension
 * Return Value:	1 if frame returned, otherwise 0
 */
static int mgsl_get_rx_frame(struct mgsl_struct *info)
{
	unsigned int StartIndex, EndIndex;	/* index of 1st and last buffers of Rx frame */
	unsigned short status;
	DMABUFFERENTRY *pBufEntry;
	unsigned int framesize = 0;
	int ReturnCode = 0;
	unsigned long flags;
	struct tty_struct *tty = info->tty;
	int return_frame = 0;

	/*
	 * current_rx_buffer points to the 1st buffer of the next available
	 * receive frame. To find the last buffer of the frame look for
	 * a non-zero status field in the buffer entries. (The status
	 * field is set by the 16C32 after completing a receive frame.
	 */

	StartIndex = EndIndex = info->current_rx_buffer;

	while( !info->rx_buffer_list[EndIndex].status ) {
		/*
		 * If the count field of the buffer entry is non-zero then
		 * this buffer has not been used. (The 16C32 clears the count
		 * field when it starts using the buffer.) If an unused buffer
		 * is encountered then there are no frames available.
		 */

		if ( info->rx_buffer_list[EndIndex].count )
			goto Cleanup;

		/* advance to next buffer entry in linked list */
		EndIndex++;
		if ( EndIndex == info->rx_buffer_count )
			EndIndex = 0;

		/* if entire list searched then no frame available */
		if ( EndIndex == StartIndex ) {
			/* If this occurs then something bad happened,
			 * all buffers have been 'used' but none mark
			 * the end of a frame. Reset buffers and receiver.
			 */

			if ( info->rx_enabled ){
				spin_lock_irqsave(&info->irq_spinlock,flags);
				usc_start_receiver(info);
				spin_unlock_irqrestore(&info->irq_spinlock,flags);
			}
			goto Cleanup;
		}
	}


	/* check status of receive frame */

	status = info->rx_buffer_list[EndIndex].status;

	if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
			RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
		/* count one error per frame; precedence: short, abort,
		 * overrun, then CRC */
		if ( status & RXSTATUS_SHORT_FRAME )
			info->icount.rxshort++;
		else if ( status & RXSTATUS_ABORT )
			info->icount.rxabort++;
		else if ( status & RXSTATUS_OVERRUN )
			info->icount.rxover++;
		else {
			info->icount.rxcrc++;
			/* CRC-errored frames are still delivered when the
			 * app asked for extended CRC return */
			if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
				return_frame = 1;
		}
		framesize = 0;
#if SYNCLINK_GENERIC_HDLC
		{
			struct net_device_stats *stats = hdlc_stats(info->netdev);
			stats->rx_errors++;
			stats->rx_frame_errors++;
		}
#endif
	} else
		return_frame = 1;

	if ( return_frame ) {
		/* receive frame has no errors, get frame size.
		 * The frame size is the starting value of the RCC (which was
		 * set to 0xffff) minus the ending value of the RCC (decremented
		 * once for each receive character) minus 2 for the 16-bit CRC.
		 */

		framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;

		/* adjust frame size for CRC if any */
		if ( info->params.crc_type == HDLC_CRC_16_CCITT )
			framesize -= 2;
		else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
			framesize -= 4;
	}

	if ( debug_level >= DEBUG_LEVEL_BH )
		printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
			__FILE__,__LINE__,info->device_name,status,framesize);

	if ( debug_level >= DEBUG_LEVEL_DATA )
		mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
			min_t(int, framesize, DMABUFFERSIZE),0);

	if (framesize) {
		/* drop (and count) frames too large for the intermediate
		 * buffer; +1 accounts for the appended status byte in
		 * RETURN_EX mode */
		if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
				((framesize+1) > info->max_frame_size) ) ||
			(framesize > info->max_frame_size) )
			info->icount.rxlong++;
		else {
			/* copy dma buffer(s) to contiguous intermediate buffer */
			int copy_count = framesize;
			int index = StartIndex;
			unsigned char *ptmp = info->intermediate_rxbuffer;

			if ( !(status & RXSTATUS_CRC_ERROR))
				info->icount.rxok++;

			while(copy_count) {
				int partial_count;
				if ( copy_count > DMABUFFERSIZE )
					partial_count = DMABUFFERSIZE;
				else
					partial_count = copy_count;

				pBufEntry = &(info->rx_buffer_list[index]);
				memcpy( ptmp, pBufEntry->virt_addr, partial_count );
				ptmp += partial_count;
				copy_count -= partial_count;

				if ( ++index == info->rx_buffer_count )
					index = 0;
			}

			if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
				/* append one status byte after the data so the
				 * app can tell good frames from CRC-errored ones */
				++framesize;
				*ptmp = (status & RXSTATUS_CRC_ERROR ?
						RX_CRC_ERROR :
						RX_OK);

				if ( debug_level >= DEBUG_LEVEL_DATA )
					printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
						__FILE__,__LINE__,info->device_name,
						*ptmp);
			}

#if SYNCLINK_GENERIC_HDLC
			if (info->netcount)
				hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
			else
#endif
				ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
		}
	}
	/* Free the buffers used by this frame. */
	mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );

	ReturnCode = 1;

Cleanup:

	if ( info->rx_enabled && info->rx_overflow ) {
		/* The receiver needs to restarted because of
		 * a receive overflow (buffer or FIFO). If the
		 * receive buffers are now empty, then restart receiver.
		 */

		if ( !info->rx_buffer_list[EndIndex].status &&
			info->rx_buffer_list[EndIndex].count ) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}

	return ReturnCode;

}	/* end of mgsl_get_rx_frame() */

/* mgsl_get_raw_rx_frame()
 *
 * This function attempts to return a received frame from the
 * receive DMA buffers when running in external loop mode. In this mode,
 * we will return at most one DMABUFFERSIZE frame to the application.
 * The USC receiver is triggering off of DCD going active to start a new
 * frame, and DCD going inactive to terminate the frame (similar to
 * processing a closing flag character).
 *
 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
 * status field and the RCC field will indicate the length of the
 * entire received frame.
We take this RCC field and get the modulus
 * of RCC and DMABUFFERSIZE to determine if number of bytes in the
 * last Rx DMA buffer and return that last portion of the frame.
 *
 * Arguments:		info	pointer to device extension
 * Return Value:	1 if frame returned, otherwise 0
 */
static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
{
	unsigned int CurrentIndex, NextIndex;
	unsigned short status;
	DMABUFFERENTRY *pBufEntry;
	unsigned int framesize = 0;
	int ReturnCode = 0;
	unsigned long flags;
	struct tty_struct *tty = info->tty;

	/*
	 * current_rx_buffer points to the 1st buffer of the next available
	 * receive frame. The status field is set by the 16C32 after
	 * completing a receive frame. If the status field of this buffer
	 * is zero, either the USC is still filling this buffer or this
	 * is one of a series of buffers making up a received frame.
	 *
	 * If the count field of this buffer is zero, the USC is either
	 * using this buffer or has used this buffer. Look at the count
	 * field of the next buffer. If that next buffer's count is
	 * non-zero, the USC is still actively using the current buffer.
	 * Otherwise, if the next buffer's count field is zero, the
	 * current buffer is complete and the USC is using the next
	 * buffer.
	 */
	CurrentIndex = NextIndex = info->current_rx_buffer;
	++NextIndex;
	if ( NextIndex == info->rx_buffer_count )
		NextIndex = 0;

	if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
		(info->rx_buffer_list[CurrentIndex].count == 0 &&
			info->rx_buffer_list[NextIndex].count == 0)) {
		/*
		 * Either the status field of this dma buffer is non-zero
		 * (indicating the last buffer of a receive frame) or the next
		 * buffer is marked as in use -- implying this buffer is complete
		 * and an intermediate buffer for this received frame.
		 */

		status = info->rx_buffer_list[CurrentIndex].status;

		if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
				RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
			/* error frame: count it and deliver nothing */
			if ( status & RXSTATUS_SHORT_FRAME )
				info->icount.rxshort++;
			else if ( status & RXSTATUS_ABORT )
				info->icount.rxabort++;
			else if ( status & RXSTATUS_OVERRUN )
				info->icount.rxover++;
			else
				info->icount.rxcrc++;
			framesize = 0;
		} else {
			/*
			 * A receive frame is available, get frame size and status.
			 *
			 * The frame size is the starting value of the RCC (which was
			 * set to 0xffff) minus the ending value of the RCC (decremented
			 * once for each receive character) minus 2 or 4 for the 16-bit
			 * or 32-bit CRC.
			 *
			 * If the status field is zero, this is an intermediate buffer.
			 * It's size is 4K.
			 *
			 * If the DMA Buffer Entry's Status field is non-zero, the
			 * receive operation completed normally (ie: DCD dropped). The
			 * RCC field is valid and holds the received frame size.
			 * It is possible that the RCC field will be zero on a DMA buffer
			 * entry with a non-zero status. This can occur if the total
			 * frame size (number of bytes between the time DCD goes active
			 * to the time DCD goes inactive) exceeds 65535 bytes. In this
			 * case the 16C32 has underrun on the RCC count and appears to
			 * stop updating this counter to let us know the actual received
			 * frame size. If this happens (non-zero status and zero RCC),
			 * simply return the entire RxDMA Buffer
			 */
			if ( status ) {
				/*
				 * In the event that the final RxDMA Buffer is
				 * terminated with a non-zero status and the RCC
				 * field is zero, we interpret this as the RCC
				 * having underflowed (received frame > 65535 bytes).
				 *
				 * Signal the event to the user by passing back
				 * a status of RxStatus_CrcError returning the full
				 * buffer and let the app figure out what data is
				 * actually valid
				 */
				if ( info->rx_buffer_list[CurrentIndex].rcc )
					framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
				else
					framesize = DMABUFFERSIZE;
			}
			else
				framesize = DMABUFFERSIZE;
		}

		if ( framesize > DMABUFFERSIZE ) {
			/*
			 * if running in raw sync mode, ISR handler for
			 * End Of Buffer events terminates all buffers at 4K.
			 * If this frame size is said to be >4K, get the
			 * actual number of bytes of the frame in this buffer.
			 */
			framesize = framesize % DMABUFFERSIZE;
		}


		if ( debug_level >= DEBUG_LEVEL_BH )
			printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
				__FILE__,__LINE__,info->device_name,status,framesize);

		if ( debug_level >= DEBUG_LEVEL_DATA )
			mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
				min_t(int, framesize, DMABUFFERSIZE),0);

		if (framesize) {
			/* copy dma buffer(s) to contiguous intermediate buffer */
			/* NOTE: we never copy more than DMABUFFERSIZE bytes */

			pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
			memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
			info->icount.rxok++;

			ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
		}

		/* Free the buffers used by this frame. */
		mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );

		ReturnCode = 1;
	}


	if ( info->rx_enabled && info->rx_overflow ) {
		/* The receiver needs to restarted because of
		 * a receive overflow (buffer or FIFO). If the
		 * receive buffers are now empty, then restart receiver.
		 */

		if ( !info->rx_buffer_list[CurrentIndex].status &&
			info->rx_buffer_list[CurrentIndex].count ) {
			spin_lock_irqsave(&info->irq_spinlock,flags);
			usc_start_receiver(info);
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}

	return ReturnCode;

}	/* end of mgsl_get_raw_rx_frame() */

/* mgsl_load_tx_dma_buffer()
 *
 * Load the transmit DMA buffer with the specified data.
 *
 * Arguments:
 *
 *	info		pointer to device extension
 *	Buffer		pointer to buffer containing frame to load
 *	BufferSize	size in bytes of frame in Buffer
 *
 * Return Value:	None
 */
static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
	const char *Buffer, unsigned int BufferSize)
{
	unsigned short Copycount;
	unsigned int i = 0;
	DMABUFFERENTRY *pBufEntry;

	if ( debug_level >= DEBUG_LEVEL_DATA )
		mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);

	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
		/* set CMR:13 to start transmit when
		 * next GoAhead (abort) is received
		 */
		info->cmr_value |= BIT13;
	}

	/* begin loading the frame in the next available tx dma
	 * buffer, remember it's starting location for setting
	 * up tx dma operation
	 */
	i = info->current_tx_buffer;
	info->start_tx_dma_buffer = i;

	/* Setup the status and RCC (Frame Size) fields of the 1st */
	/* buffer entry in the transmit DMA buffer list. */

	info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
	info->tx_buffer_list[i].rcc = BufferSize;
	info->tx_buffer_list[i].count = BufferSize;

	/* Copy frame data from 1st source buffer to the DMA buffers. */
	/* The frame data may span multiple DMA buffers. */

	while( BufferSize ){
		/* Get a pointer to next DMA buffer entry.
*/
		pBufEntry = &info->tx_buffer_list[i++];

		if ( i == info->tx_buffer_count )
			i=0;

		/* Calculate the number of bytes that can be copied from */
		/* the source buffer to this DMA buffer. */
		if ( BufferSize > DMABUFFERSIZE )
			Copycount = DMABUFFERSIZE;
		else
			Copycount = BufferSize;

		/* Actually copy data from source buffer to DMA buffer. */
		/* Also set the data count for this individual DMA buffer. */
		if ( info->bus_type == MGSL_BUS_TYPE_PCI )
			mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
		else
			memcpy(pBufEntry->virt_addr, Buffer, Copycount);

		pBufEntry->count = Copycount;

		/* Advance source pointer and reduce remaining data count. */
		Buffer += Copycount;
		BufferSize -= Copycount;

		++info->tx_dma_buffers_used;
	}

	/* remember next available tx dma buffer */
	info->current_tx_buffer = i;

}	/* end of mgsl_load_tx_dma_buffer() */

/*
 * mgsl_register_test()
 *
 * Performs a register test of the 16C32: verifies reset defaults of
 * several registers, then writes/reads back a set of bit patterns.
 * Runs with the IRQ spinlock held; resets the USC before returning.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	TRUE if test passed, otherwise FALSE
 */
static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
{
	static unsigned short BitPatterns[] =
		{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
	static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
	unsigned int i;
	BOOLEAN rc = TRUE;
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);

	/* Verify the reset state of some registers. */

	if ( (usc_InReg( info, SICR ) != 0) ||
		  (usc_InReg( info, IVR  ) != 0) ||
		  (usc_InDmaReg( info, DIVR ) != 0) ){
		rc = FALSE;
	}

	if ( rc == TRUE ){
		/* Write bit patterns to various registers but do it out of */
		/* sync (rotated offsets), then read back and verify values. */

		for ( i = 0 ; i < Patterncount ; i++ ) {
			usc_OutReg( info, TC0R, BitPatterns[i] );
			usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
			usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
			usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
			usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
			usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );

			if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
				  (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
				  (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
				  (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
				  (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
				  (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
				rc = FALSE;
				break;
			}
		}
	}

	usc_reset(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return rc;

}	/* end of mgsl_register_test() */

/* mgsl_irq_test()	Perform interrupt test of the 16C32.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	TRUE if test passed, otherwise FALSE
 */
static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
{
	unsigned long EndTime;
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);

	/*
	 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
	 * The ISR sets irq_occurred to 1.
	 */

	info->irq_occurred = FALSE;

	/* Enable INTEN gate (Port 6, Bit12). */
	/* This connects the IRQ request signal to the ISA bus */
	/* on the ISA adapter. This has no effect for the PCI adapter */
	usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );

	usc_EnableMasterIrqBit(info);
	usc_EnableInterrupts(info, IO_PIN);
	usc_ClearIrqPendingBits(info, IO_PIN);

	usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
	usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* poll up to ~1 second (100 x 10ms) for the ISR to set irq_occurred */
	EndTime=100;
	while( EndTime-- && !info->irq_occurred ) {
		msleep_interruptible(10);
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	if ( !info->irq_occurred )
		return FALSE;
	else
		return TRUE;

}	/* end of mgsl_irq_test() */

/* mgsl_dma_test()
 *
 * Perform a DMA test of the 16C32. A small frame is
 * transmitted via DMA from a transmit buffer to a receive buffer
 * using single buffer DMA mode.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	TRUE if test passed, otherwise FALSE
 */
static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
{
	unsigned short FifoLevel;
	unsigned long phys_addr;
	unsigned int FrameSize;
	unsigned int i;
	char *TmpPtr;
	BOOLEAN rc = TRUE;
	unsigned short status=0;
	unsigned long EndTime;
	unsigned long flags;
	MGSL_PARAMS tmp_params;

	/* save current port options so they can be restored on exit */
	memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
	/* load default port options for the duration of the test */
	memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));

/* size in bytes of the loopback test frame */
#define TESTFRAMESIZE 40

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup 16C32 for SDLC DMA transfer mode with internal loopback */

	usc_reset(info);
	usc_set_sdlc_mode(info);
	usc_enable_loopback(info,1);

	/* Reprogram the RDMR so that the 16C32 does NOT clear the count
	 * field of the buffer entry after fetching buffer address. This
	 * way we can detect a DMA failure for a DMA read (which should be
	 * non-destructive to system memory) before we try and write to
	 * memory (where a failure could corrupt system memory).
	 */

	/* Receive DMA mode Register (RDMR)
	 *
	 * <15..14>	11	DMA mode = Linked List Buffer mode
	 * <13>		1	RSBinA/L = store Rx status Block in List entry
	 * <12>		0	1 = Clear count of List Entry after fetching
	 * <11..10>	00	Address mode = Increment
	 * <9>		1	Terminate Buffer on RxBound
	 * <8>		0	Bus Width = 16bits
	 * <7..0>	?	status Bits (write as 0s)
	 *
	 * 1110 0010 0000 0000 = 0xe200
	 */

	usc_OutDmaReg( info, RDMR, 0xe200 );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */

	FrameSize = TESTFRAMESIZE;

	/* setup 1st transmit buffer entry: */
	/* with frame size and transmit control word */

	info->tx_buffer_list[0].count = FrameSize;
	info->tx_buffer_list[0].rcc = FrameSize;
	info->tx_buffer_list[0].status = 0x4000;

	/* build a transmit frame in 1st transmit DMA buffer */
	/* (incrementing byte pattern 0,1,2,... for later memcmp check) */

	TmpPtr = info->tx_buffer_list[0].virt_addr;
	for (i = 0; i < FrameSize; i++ )
		*TmpPtr++ = i;

	/* setup 1st receive buffer entry: */
	/* clear status, set max receive buffer size */

	info->rx_buffer_list[0].status = 0;
	info->rx_buffer_list[0].count = FrameSize + 4;

	/* zero out the 1st receive buffer */

	memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );

	/* Set count field of next buffer entries to prevent */
	/* 16C32 from using buffers after the 1st one. */

	info->tx_buffer_list[1].count = 0;
	info->rx_buffer_list[1].count = 0;


	/***************************/
	/* Program 16C32 receiver. */
	/***************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* setup DMA transfers */
	usc_RTCmd( info, RTCmd_PurgeRxFifo );

	/* program 16C32 receiver with physical address of 1st DMA buffer entry */
	phys_addr = info->rx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );

	/* Clear the Rx DMA status bits (read RDMR) and start channel */
	usc_InDmaReg( info, RDMR );
	usc_DmaCmd( info, DmaCmd_InitRxChannel );

	/* Enable Receiver (RMR <1..0> = 10) */
	usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/*************************************************************/
	/* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
	/*************************************************************/

	/* Poll for up to 100ms; lock is dropped between polls so the
	 * hardware/DMA can make progress.
	 */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = FALSE;
			break;
		}

		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InDmaReg( info, RDMR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( !(status & BIT4) && (status & BIT5) ) {
			/* INITG (BIT 4) is inactive (no entry read in progress) AND */
			/* BUSY  (BIT 5) is active (channel still active). */
			/* This means the buffer entry read has completed. */
			break;
		}
	}


	/******************************/
	/* Program 16C32 transmitter. */
	/******************************/

	spin_lock_irqsave(&info->irq_spinlock,flags);

	/* Program the Transmit Character Length Register (TCLR) */
	/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */

	usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
	usc_RTCmd( info, RTCmd_PurgeTxFifo );

	/* Program the address of the 1st DMA Buffer Entry in linked list */

	phys_addr = info->tx_buffer_list[0].phys_entry;
	usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
	usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );

	/* unlatch Tx status bits, and start transmit channel. */

	usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
	usc_DmaCmd( info, DmaCmd_InitTxChannel );

	/* wait for DMA controller to fill transmit FIFO */

	usc_TCmd( info, TCmd_SelectTicrTxFifostatus );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);


	/**********************************/
	/* WAIT FOR TRANSMIT FIFO TO FILL */
	/**********************************/

	/* Wait 100ms */
	EndTime = jiffies + msecs_to_jiffies(100);

	for(;;) {
		if (time_after(jiffies, EndTime)) {
			rc = FALSE;
			break;
		}

		/* TICR upper byte holds the Tx FIFO fill level */
		spin_lock_irqsave(&info->irq_spinlock,flags);
		FifoLevel = usc_InReg(info, TICR) >> 8;
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		if ( FifoLevel < 16 )
			break;
		else
			if ( FrameSize < 32 ) {
				/* This frame is smaller than the entire transmit FIFO */
				/* so wait for the entire frame to be loaded. */
				if ( FifoLevel <= (32 - FrameSize) )
					break;
			}
	}


	if ( rc == TRUE )
	{
		/* Enable 16C32 transmitter. */

		spin_lock_irqsave(&info->irq_spinlock,flags);

		/* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
		usc_TCmd( info, TCmd_SendFrame );
		usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );

		spin_unlock_irqrestore(&info->irq_spinlock,flags);


		/******************************/
		/* WAIT FOR TRANSMIT COMPLETE */
		/******************************/

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* While timer not expired wait for transmit complete */

		spin_lock_irqsave(&info->irq_spinlock,flags);
		status = usc_InReg( info, TCSR );
		spin_unlock_irqrestore(&info->irq_spinlock,flags);

		while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
			if (time_after(jiffies, EndTime)) {
				rc = FALSE;
				break;
			}

			spin_lock_irqsave(&info->irq_spinlock,flags);
			status = usc_InReg( info, TCSR );
			spin_unlock_irqrestore(&info->irq_spinlock,flags);
		}
	}


	if ( rc == TRUE ){
		/* CHECK FOR TRANSMIT ERRORS */
		if ( status & (BIT5 + BIT1) )
			rc = FALSE;
	}

	if ( rc == TRUE ) {
		/* WAIT FOR RECEIVE COMPLETE */

		/* Wait 100ms */
		EndTime = jiffies + msecs_to_jiffies(100);

		/* Wait for 16C32 to write receive status to buffer entry.
		 * (DMA-written memory is polled directly here.)
		 */
		status=info->rx_buffer_list[0].status;
		while ( status == 0 ) {
			if (time_after(jiffies, EndTime)) {
				rc = FALSE;
				break;
			}
			status=info->rx_buffer_list[0].status;
		}
	}


	if ( rc == TRUE ) {
		/* CHECK FOR RECEIVE ERRORS */
		status = info->rx_buffer_list[0].status;

		if ( status & (BIT8 + BIT3 + BIT1) ) {
			/* receive error has occurred */
			rc = FALSE;
		} else {
			/* compare received frame with transmitted pattern */
			if ( memcmp( info->tx_buffer_list[0].virt_addr ,
				info->rx_buffer_list[0].virt_addr, FrameSize ) ){
				rc = FALSE;
			}
		}
	}

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_reset( info );
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* restore current port options */
	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));

	return rc;

} /* end of mgsl_dma_test() */

/* mgsl_adapter_test()
 *
 * Perform the register, IRQ, and DMA tests for the 16C32.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	0 if success, otherwise -ENODEV
 */
static int mgsl_adapter_test( struct mgsl_struct *info )
{
	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):Testing device %s\n",
			__FILE__,__LINE__,info->device_name );

	if ( !mgsl_register_test( info ) ) {
		info->init_error = DiagStatus_AddressFailure;
		printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
		return -ENODEV;
	}

	if ( !mgsl_irq_test( info ) ) {
		info->init_error = DiagStatus_IrqFailure;
		printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
		return -ENODEV;
	}

	if ( !mgsl_dma_test( info ) ) {
		info->init_error = DiagStatus_DmaFailure;
		printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
		return -ENODEV;
	}

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):device %s passed diagnostics\n",
			__FILE__,__LINE__,info->device_name );

	return 0;

} /* end of mgsl_adapter_test() */

/* mgsl_memory_test()
 *
 * Test the shared memory on a PCI adapter.
 *
 * Arguments:		info	pointer to device instance data
 * Return Value:	TRUE if test passed, otherwise FALSE
 */
static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
{
	static unsigned long BitPatterns[] =
		{ 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
	unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
	unsigned long i;
	unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
	unsigned long * TestAddr;

	/* shared memory exists only on PCI adapters; trivially pass otherwise */
	if ( info->bus_type != MGSL_BUS_TYPE_PCI )
		return TRUE;

	TestAddr = (unsigned long *)info->memory_base;

	/* Test data lines with test pattern at one location. */

	for ( i = 0 ; i < Patterncount ; i++ ) {
		*TestAddr = BitPatterns[i];
		if ( *TestAddr != BitPatterns[i] )
			return FALSE;
	}

	/* Test address lines with incrementing pattern over */
	/* entire address range. */

	for ( i = 0 ; i < TestLimit ; i++ ) {
		*TestAddr = i * 4;
		TestAddr++;
	}

	TestAddr = (unsigned long *)info->memory_base;

	for ( i = 0 ; i < TestLimit ; i++ ) {
		if ( *TestAddr != i * 4 )
			return FALSE;
		TestAddr++;
	}

	/* leave the shared memory zeroed for normal operation */
	memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );

	return TRUE;

} /* End Of mgsl_memory_test() */


/* mgsl_load_pci_memory()
 *
 * Load a large block of data into the PCI shared memory.
 * Use this instead of memcpy() or memmove() to move data
 * into the PCI shared memory.
 *
 * Notes:
 *
 * This function prevents the PCI9050 interface chip from hogging
 * the adapter local bus, which can starve the 16C32 by preventing
 * 16C32 bus master cycles.
 *
 * The PCI9050 documentation says that the 9050 will always release
 * control of the local bus after completing the current read
 * or write operation.
 *
 * It appears that as long as the PCI9050 write FIFO is full, the
 * PCI9050 treats all of the writes as a single burst transaction
 * and will not release the bus. This causes DMA latency problems
 * at high speeds when copying large data blocks to the shared
 * memory.
 *
 * This function, in effect, breaks a large shared memory write
 * into multiple transactions by interleaving a shared memory read
 * which will flush the write FIFO and 'complete' the write
 * transaction. This allows any pending DMA request to gain control
 * of the local bus in a timely fashion.
 *
 * Arguments:
 *
 *	TargetPtr	pointer to target address in PCI shared memory
 *	SourcePtr	pointer to source buffer for data
 *	count		count in bytes of data to copy
 *
 * Return Value:	None
 */
static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
	unsigned short count )
{
	/* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
#define PCI_LOAD_INTERVAL 64

	unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
	unsigned short Index;
	unsigned long Dummy;

	for ( Index = 0 ; Index < Intervalcount ; Index++ )
	{
		memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
		/* volatile read-back flushes the PCI9050 write FIFO,
		 * ending the burst so a pending DMA request can win the bus
		 */
		Dummy = *((volatile unsigned long *)TargetPtr);
		TargetPtr += PCI_LOAD_INTERVAL;
		SourcePtr += PCI_LOAD_INTERVAL;
	}

	/* copy the remaining bytes (less than one interval) */
	memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );

} /* End Of mgsl_load_pci_memory() */

/* mgsl_trace_block()
 *
 * Dump a data block to the kernel log as hex bytes plus a
 * printable-ASCII column, 16 bytes per line.
 *
 * Arguments:
 *	info	pointer to device instance data (for device name)
 *	data	pointer to data block
 *	count	count in bytes of data to dump
 *	xmit	non-zero if data is transmit data, zero for receive data
 */
static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
{
	int i;
	int linecount;
	if (xmit)
		printk("%s tx data:\n",info->device_name);
	else
		printk("%s rx data:\n",info->device_name);

	while(count) {
		if (count > 16)
			linecount = 16;
		else
			linecount = count;

		/* hex dump of up to 16 bytes */
		for(i=0;i<linecount;i++)
			printk("%02X ",(unsigned char)data[i]);
		/* pad short final line so ASCII column stays aligned */
		for(;i<17;i++)
			printk("   ");
		/* printable ASCII (octal 040..0176), '.' for the rest */
		for(i=0;i<linecount;i++) {
			if (data[i]>=040 && data[i]<=0176)
				printk("%c",data[i]);
			else
				printk(".");
		}
		printk("\n");

		data += linecount;
		count -= linecount;
	}
} /* end of mgsl_trace_block() */

/* mgsl_tx_timeout()
 *
 * called when HDLC frame times out
 * update stats and do tx completion processing
 *
 * Arguments:		context		pointer to device instance data
 * Return Value:	None
 */
static void mgsl_tx_timeout(unsigned long context)
{
	struct mgsl_struct *info = (struct mgsl_struct*)context;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):mgsl_tx_timeout(%s)\n",
			__FILE__,__LINE__,info->device_name);
	if(info->tx_active &&
	   (info->params.mode == MGSL_MODE_HDLC ||
	    info->params.mode == MGSL_MODE_RAW) ) {
		info->icount.txtimeout++;
	}
	spin_lock_irqsave(&info->irq_spinlock,flags);
	/* abandon the pending transmit data */
	info->tx_active = 0;
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
		usc_loopmode_cancel_transmit( info );

	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* complete the transmit at the appropriate layer */
#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
		mgsl_bh_transmit(info);

} /* end of mgsl_tx_timeout() */

/* signal that there are no more frames to send, so that
 * line is 'released' by echoing RxD to TxD when current
 * transmission is complete (or
 * immediately if no tx in progress).
 */
static int mgsl_loopmode_send_done( struct mgsl_struct * info )
{
	unsigned long flags;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
		if (info->tx_active)
			/* defer release until current frame completes */
			info->loopmode_send_done_requested = TRUE;
		else
			usc_loopmode_send_done(info);
	}
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return 0;
}

/* release the line by echoing RxD to TxD
 * upon completion of a transmit frame
 */
static void usc_loopmode_send_done( struct mgsl_struct * info )
{
	info->loopmode_send_done_requested = FALSE;
	/* clear CMR:13 to 0 to start echoing RxData to TxData */
	info->cmr_value &= ~BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}

/* abort a transmit in progress while in HDLC LoopMode
 */
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
{
	/* reset tx dma channel and purge TxFifo */
	usc_RTCmd( info, RTCmd_PurgeTxFifo );
	usc_DmaCmd( info, DmaCmd_ResetTxChannel );
	usc_loopmode_send_done( info );
}

/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
 * we must clear CMR:13 to begin repeating TxData to RxData
 */
static void usc_loopmode_insert_request( struct mgsl_struct * info )
{
	info->loopmode_insert_requested = TRUE;

	/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
	 * begin repeating TxData on RxData (complete insertion)
	 */
	usc_OutReg( info, RICR,
		(usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );

	/* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
	info->cmr_value |= BIT13;
	usc_OutReg(info, CMR, info->cmr_value);
}

/* return 1 if station is inserted into the loop, otherwise 0
 * (CCSR bit 7 reflects loop-inserted state)
 */
static int usc_loopmode_active( struct mgsl_struct * info)
{
	return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
}

#if SYNCLINK_GENERIC_HDLC

/**
 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
 * set encoding and frame check sequence (FCS) options
 *
 * dev       pointer to network device structure
 * encoding  serial encoding setting
 * parity    FCS setting
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
			  unsigned short parity)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned char new_encoding;
	unsigned short new_crctype;

	/* return error if TTY interface open */
	if (info->count)
		return -EBUSY;

	/* map generic HDLC encoding codes to driver encoding codes */
	switch (encoding)
	{
	case ENCODING_NRZ:        new_encoding = HDLC_ENCODING_NRZ; break;
	case ENCODING_NRZI:       new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
	case ENCODING_FM_MARK:    new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
	case ENCODING_FM_SPACE:   new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
	case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
	default: return -EINVAL;
	}

	/* map generic HDLC parity codes to driver CRC types */
	switch (parity)
	{
	case PARITY_NONE:            new_crctype = HDLC_CRC_NONE; break;
	case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
	case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
	default: return -EINVAL;
	}
	info->params.encoding = new_encoding;
	info->params.crc_type = new_crctype;

	/* if network interface up, reprogram hardware */
	if (info->netcount)
		mgsl_program_hw(info);

	return 0;
}

/**
 * called by generic HDLC layer to send frame
 *
 * skb  socket buffer containing HDLC frame
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	struct net_device_stats *stats = hdlc_stats(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);

	/* stop sending until this frame completes */
	netif_stop_queue(dev);

	/* copy data to device buffers */
	info->xmit_cnt = skb->len;
	mgsl_load_tx_dma_buffer(info, skb->data, skb->len);

	/* update network statistics */
	stats->tx_packets++;
	stats->tx_bytes += skb->len;

	/* done with socket buffer, so free it */
	dev_kfree_skb(skb);

	/* save start time for transmit timeout detection */
	dev->trans_start = jiffies;

	/* start hardware transmitter if necessary */
	spin_lock_irqsave(&info->irq_spinlock,flags);
	if (!info->tx_active)
		usc_start_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	return 0;
}

/**
 * called by network layer when interface enabled
 * claim resources and initialize hardware
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_open(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	int rc;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);

	/* generic HDLC layer open processing */
	if ((rc = hdlc_open(dev)))
		return rc;

	/* arbitrate between network and tty opens */
	spin_lock_irqsave(&info->netlock, flags);
	if (info->count != 0 || info->netcount != 0) {
		printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
		spin_unlock_irqrestore(&info->netlock, flags);
		return -EBUSY;
	}
	info->netcount=1;
	spin_unlock_irqrestore(&info->netlock, flags);

	/* claim resources and init adapter */
	if ((rc = startup(info)) != 0) {
		/* undo the netcount claim on failure */
		spin_lock_irqsave(&info->netlock, flags);
		info->netcount=0;
		spin_unlock_irqrestore(&info->netlock, flags);
		return rc;
	}

	/* assert DTR and RTS, apply hardware settings */
	info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
	mgsl_program_hw(info);

	/* enable network layer transmit */
	dev->trans_start = jiffies;
	netif_start_queue(dev);

	/* inform generic HDLC layer of current DCD status */
	spin_lock_irqsave(&info->irq_spinlock, flags);
	usc_get_serial_signals(info);
	spin_unlock_irqrestore(&info->irq_spinlock, flags);
	if (info->serial_signals & SerialSignal_DCD)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

/**
 * called by network layer when interface is disabled
 * shutdown hardware and release resources
 *
 * dev  pointer to network device structure
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_close(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);

	netif_stop_queue(dev);

	/* shutdown adapter and release resources */
	shutdown(info);

	hdlc_close(dev);

	spin_lock_irqsave(&info->netlock, flags);
	info->netcount=0;
	spin_unlock_irqrestore(&info->netlock, flags);

	return 0;
}

/**
 * called by network layer to process IOCTL call to network device
 *
 * dev  pointer to network device structure
 * ifr  pointer to network interface request structure
 * cmd  IOCTL command code
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct mgsl_struct *info = dev_to_port(dev);
	unsigned int flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);

	/* return error if TTY interface open */
	if (info->count)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE: /* return current sync_serial_settings */

		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}

		/* translate driver clock source flags to generic clock type */
		flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);

		switch (flags){
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
		case (HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_INT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG):    new_line.clock_type = CLOCK_TXINT; break;
		case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
		default: new_line.clock_type = CLOCK_DEFAULT;
		}

		new_line.clock_rate = info->params.clock_speed;
		new_line.loopback   = info->params.loopback ? 1:0;

		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */

		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		/* translate generic clock type to driver clock source flags */
		switch (new_line.clock_type)
		{
		case CLOCK_EXT:      flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
		case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
		case CLOCK_INT:      flags = HDLC_FLAG_RXC_BRG    | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_TXINT:    flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG;    break;
		case CLOCK_DEFAULT:  flags = info->params.flags &
					     (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					      HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					      HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					      HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN); break;
		default: return -EINVAL;
		}

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
					HDLC_FLAG_RXC_BRG    | HDLC_FLAG_RXC_TXCPIN |
					HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
					HDLC_FLAG_TXC_BRG    | HDLC_FLAG_TXC_RXCPIN);
		info->params.flags |= flags;

		info->params.loopback = new_line.loopback;

		/* clock_rate only meaningful when BRG supplies a clock */
		if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
			info->params.clock_speed = new_line.clock_rate;
		else
			info->params.clock_speed = 0;

		/* if network interface up, reprogram hardware */
		if (info->netcount)
			mgsl_program_hw(info);
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

/**
 * called by network layer when transmit timeout is detected
 *
 * dev  pointer to network device structure
 */
static void hdlcdev_tx_timeout(struct net_device *dev)
{
	struct mgsl_struct *info = dev_to_port(dev);
	struct net_device_stats *stats = hdlc_stats(dev);
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_tx_timeout(%s)\n",dev->name);

	stats->tx_errors++;
	stats->tx_aborted_errors++;

	spin_lock_irqsave(&info->irq_spinlock,flags);
	usc_stop_transmitter(info);
	spin_unlock_irqrestore(&info->irq_spinlock,flags);

	/* allow the network layer to queue more frames */
	netif_wake_queue(dev);
}

/**
 * called by device driver when transmit completes
 * reenable network layer transmit if stopped
 *
 * info  pointer to device instance information
 */
static void hdlcdev_tx_done(struct mgsl_struct *info)
{
	if (netif_queue_stopped(info->netdev))
		netif_wake_queue(info->netdev);
}

/**
 * called by device driver when frame received
 * pass frame to network layer
 *
 * info  pointer to device instance information
 * buf   pointer to buffer containing frame data
 * size  count of data bytes in buf
 */
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
{
	struct sk_buff *skb = dev_alloc_skb(size);
	struct net_device *dev = info->netdev;
	struct net_device_stats *stats = hdlc_stats(dev);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("hdlcdev_rx(%s)\n",dev->name);

	if (skb == NULL) {
		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
		stats->rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, size),buf,size);

	skb->protocol = hdlc_type_trans(skb, info->netdev);

	stats->rx_packets++;
	stats->rx_bytes += size;

	netif_rx(skb);

	info->netdev->last_rx = jiffies;
}

/**
 * called by device driver when adding device instance
 * do generic HDLC initialization
 *
 * info  pointer to device instance information
 *
 * returns 0 if success, otherwise error code
 */
static int hdlcdev_init(struct mgsl_struct *info)
{
	int rc;
	struct net_device *dev;
	hdlc_device *hdlc;

	/* allocate and initialize network and HDLC layer objects */

	if (!(dev = alloc_hdlcdev(info))) {
		printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
		return -ENOMEM;
	}

	/* for network layer reporting purposes only */
	dev->base_addr = info->io_base;
	dev->irq       = info->irq_level;
	dev->dma       = info->dma_level;

	/* network layer callbacks and settings */
	dev->do_ioctl       = hdlcdev_ioctl;
	dev->open           = hdlcdev_open;
	dev->stop           = hdlcdev_close;
	dev->tx_timeout     = hdlcdev_tx_timeout;
	dev->watchdog_timeo = 10*HZ;
	dev->tx_queue_len   = 50;

	/* generic HDLC layer callbacks and settings */
	hdlc         = dev_to_hdlc(dev);
	hdlc->attach = hdlcdev_attach;
	hdlc->xmit   = hdlcdev_xmit;

	/* register objects with HDLC layer */
	if ((rc = register_hdlc_device(dev))) {
		printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
		free_netdev(dev);
		return rc;
	}

	info->netdev = dev;
	return 0;
}

/**
 * called by device driver when removing device instance
 * do generic HDLC cleanup
 *
 * info  pointer to device instance information
 */
static void hdlcdev_exit(struct mgsl_struct *info)
{
	unregister_hdlc_device(info->netdev);
	free_netdev(info->netdev);
	info->netdev = NULL;
}

#endif /* CONFIG_HDLC */


static int __devinit synclink_init_one (struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	struct mgsl_struct *info;

	if (pci_enable_device(dev)) {
		printk("error enabling pci device %p\n", dev);
		return -EIO;
	}

	if (!(info = mgsl_allocate_device())) {
		printk("can't allocate device instance data.\n");
		return -EIO;
	}

	/* Copy user configuration info to device instance data */

	info->io_base = pci_resource_start(dev, 2);
	info->irq_level = dev->irq;
	info->phys_memory_base = pci_resource_start(dev, 3);

	/* Because ioremap only works on page boundaries we must map
	 * a larger area than is actually implemented for the LCR
	 * memory range. We map a full page starting at the page boundary.
	 */
	info->phys_lcr_base = pci_resource_start(dev, 0);
	info->lcr_offset    = info->phys_lcr_base & (PAGE_SIZE-1);
	info->phys_lcr_base &= ~(PAGE_SIZE-1);

	info->bus_type = MGSL_BUS_TYPE_PCI;
	info->io_addr_size = 8;
	info->irq_flags = IRQF_SHARED;

	if (dev->device == 0x0210) {
		/* Version 1 PCI9030 based universal PCI adapter */
		info->misc_ctrl_value = 0x007c4080;
		info->hw_version = 1;
	} else {
		/* Version 0 PCI9050 based 5V PCI adapter
		 * A PCI9050 bug prevents reading LCR registers if
		 * LCR base address bit 7 is set. Maintain shadow
		 * value so we can write to LCR misc control reg.
		 */
		info->misc_ctrl_value = 0x087e4546;
		info->hw_version = 0;
	}

	mgsl_add_device(info);

	return 0;
}

static void __devexit synclink_remove_one (struct pci_dev *dev)
{
	/* intentionally empty: device teardown is handled at module
	 * cleanup rather than per-device hot removal
	 */
}