1/* 2 * Generic driver for the MPSC (UART mode) on Marvell parts (e.g., GT64240, 3 * GT64260, MV64340, MV64360, GT96100, ... ). 4 * 5 * Author: Mark A. Greer <mgreer@mvista.com> 6 * 7 * Based on an old MPSC driver that was in the linuxppc tree. It appears to 8 * have been created by Chris Zankel (formerly of MontaVista) but there 9 * is no proper Copyright so I'm not sure. Apparently, parts were also 10 * taken from PPCBoot (now U-Boot). Also based on drivers/serial/8250.c 11 * by Russell King. 12 * 13 * 2004 (c) MontaVista, Software, Inc. This file is licensed under 14 * the terms of the GNU General Public License version 2. This program 15 * is licensed "as is" without any warranty of any kind, whether express 16 * or implied. 17 */ 18 19 20#if defined(CONFIG_SERIAL_MPSC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 21#define SUPPORT_SYSRQ 22#endif 23 24#include <linux/module.h> 25#include <linux/moduleparam.h> 26#include <linux/tty.h> 27#include <linux/tty_flip.h> 28#include <linux/ioport.h> 29#include <linux/init.h> 30#include <linux/console.h> 31#include <linux/sysrq.h> 32#include <linux/serial.h> 33#include <linux/serial_core.h> 34#include <linux/delay.h> 35#include <linux/device.h> 36#include <linux/dma-mapping.h> 37#include <linux/mv643xx.h> 38#include <linux/platform_device.h> 39#include <linux/gfp.h> 40 41#include <asm/io.h> 42#include <asm/irq.h> 43 44#define MPSC_NUM_CTLRS 2 45 46/* 47 * Descriptors and buffers must be cache line aligned. 48 * Buffers lengths must be multiple of cache line size. 49 * Number of Tx & Rx descriptors must be powers of 2. 
50 */ 51#define MPSC_RXR_ENTRIES 32 52#define MPSC_RXRE_SIZE dma_get_cache_alignment() 53#define MPSC_RXR_SIZE (MPSC_RXR_ENTRIES * MPSC_RXRE_SIZE) 54#define MPSC_RXBE_SIZE dma_get_cache_alignment() 55#define MPSC_RXB_SIZE (MPSC_RXR_ENTRIES * MPSC_RXBE_SIZE) 56 57#define MPSC_TXR_ENTRIES 32 58#define MPSC_TXRE_SIZE dma_get_cache_alignment() 59#define MPSC_TXR_SIZE (MPSC_TXR_ENTRIES * MPSC_TXRE_SIZE) 60#define MPSC_TXBE_SIZE dma_get_cache_alignment() 61#define MPSC_TXB_SIZE (MPSC_TXR_ENTRIES * MPSC_TXBE_SIZE) 62 63#define MPSC_DMA_ALLOC_SIZE (MPSC_RXR_SIZE + MPSC_RXB_SIZE + MPSC_TXR_SIZE \ 64 + MPSC_TXB_SIZE + dma_get_cache_alignment() /* for alignment */) 65 66/* Rx and Tx Ring entry descriptors -- assume entry size is <= cacheline size */ 67struct mpsc_rx_desc { 68 u16 bufsize; 69 u16 bytecnt; 70 u32 cmdstat; 71 u32 link; 72 u32 buf_ptr; 73} __attribute((packed)); 74 75struct mpsc_tx_desc { 76 u16 bytecnt; 77 u16 shadow; 78 u32 cmdstat; 79 u32 link; 80 u32 buf_ptr; 81} __attribute((packed)); 82 83/* 84 * Some regs that have the erratum that you can't read them are are shared 85 * between the two MPSC controllers. This struct contains those shared regs. 86 */ 87struct mpsc_shared_regs { 88 phys_addr_t mpsc_routing_base_p; 89 phys_addr_t sdma_intr_base_p; 90 91 void __iomem *mpsc_routing_base; 92 void __iomem *sdma_intr_base; 93 94 u32 MPSC_MRR_m; 95 u32 MPSC_RCRR_m; 96 u32 MPSC_TCRR_m; 97 u32 SDMA_INTR_CAUSE_m; 98 u32 SDMA_INTR_MASK_m; 99}; 100 101/* The main driver data structure */ 102struct mpsc_port_info { 103 struct uart_port port; /* Overlay uart_port structure */ 104 105 /* Internal driver state for this ctlr */ 106 u8 ready; 107 u8 rcv_data; 108 tcflag_t c_iflag; /* save termios->c_iflag */ 109 tcflag_t c_cflag; /* save termios->c_cflag */ 110 111 /* Info passed in from platform */ 112 u8 mirror_regs; /* Need to mirror regs? */ 113 u8 cache_mgmt; /* Need manual cache mgmt? */ 114 u8 brg_can_tune; /* BRG has baud tuning? 
*/ 115 u32 brg_clk_src; 116 u16 mpsc_max_idle; 117 int default_baud; 118 int default_bits; 119 int default_parity; 120 int default_flow; 121 122 /* Physical addresses of various blocks of registers (from platform) */ 123 phys_addr_t mpsc_base_p; 124 phys_addr_t sdma_base_p; 125 phys_addr_t brg_base_p; 126 127 /* Virtual addresses of various blocks of registers (from platform) */ 128 void __iomem *mpsc_base; 129 void __iomem *sdma_base; 130 void __iomem *brg_base; 131 132 /* Descriptor ring and buffer allocations */ 133 void *dma_region; 134 dma_addr_t dma_region_p; 135 136 dma_addr_t rxr; /* Rx descriptor ring */ 137 dma_addr_t rxr_p; /* Phys addr of rxr */ 138 u8 *rxb; /* Rx Ring I/O buf */ 139 u8 *rxb_p; /* Phys addr of rxb */ 140 u32 rxr_posn; /* First desc w/ Rx data */ 141 142 dma_addr_t txr; /* Tx descriptor ring */ 143 dma_addr_t txr_p; /* Phys addr of txr */ 144 u8 *txb; /* Tx Ring I/O buf */ 145 u8 *txb_p; /* Phys addr of txb */ 146 int txr_head; /* Where new data goes */ 147 int txr_tail; /* Where sent data comes off */ 148 spinlock_t tx_lock; /* transmit lock */ 149 150 /* Mirrored values of regs we can't read (if 'mirror_regs' set) */ 151 u32 MPSC_MPCR_m; 152 u32 MPSC_CHR_1_m; 153 u32 MPSC_CHR_2_m; 154 u32 MPSC_CHR_10_m; 155 u32 BRG_BCR_m; 156 struct mpsc_shared_regs *shared_regs; 157}; 158 159/* Hooks to platform-specific code */ 160int mpsc_platform_register_driver(void); 161void mpsc_platform_unregister_driver(void); 162 163/* Hooks back in to mpsc common to be called by platform-specific code */ 164struct mpsc_port_info *mpsc_device_probe(int index); 165struct mpsc_port_info *mpsc_device_remove(int index); 166 167/* Main MPSC Configuration Register Offsets */ 168#define MPSC_MMCRL 0x0000 169#define MPSC_MMCRH 0x0004 170#define MPSC_MPCR 0x0008 171#define MPSC_CHR_1 0x000c 172#define MPSC_CHR_2 0x0010 173#define MPSC_CHR_3 0x0014 174#define MPSC_CHR_4 0x0018 175#define MPSC_CHR_5 0x001c 176#define MPSC_CHR_6 0x0020 177#define MPSC_CHR_7 0x0024 
178#define MPSC_CHR_8 0x0028 179#define MPSC_CHR_9 0x002c 180#define MPSC_CHR_10 0x0030 181#define MPSC_CHR_11 0x0034 182 183#define MPSC_MPCR_FRZ (1 << 9) 184#define MPSC_MPCR_CL_5 0 185#define MPSC_MPCR_CL_6 1 186#define MPSC_MPCR_CL_7 2 187#define MPSC_MPCR_CL_8 3 188#define MPSC_MPCR_SBL_1 0 189#define MPSC_MPCR_SBL_2 1 190 191#define MPSC_CHR_2_TEV (1<<1) 192#define MPSC_CHR_2_TA (1<<7) 193#define MPSC_CHR_2_TTCS (1<<9) 194#define MPSC_CHR_2_REV (1<<17) 195#define MPSC_CHR_2_RA (1<<23) 196#define MPSC_CHR_2_CRD (1<<25) 197#define MPSC_CHR_2_EH (1<<31) 198#define MPSC_CHR_2_PAR_ODD 0 199#define MPSC_CHR_2_PAR_SPACE 1 200#define MPSC_CHR_2_PAR_EVEN 2 201#define MPSC_CHR_2_PAR_MARK 3 202 203/* MPSC Signal Routing */ 204#define MPSC_MRR 0x0000 205#define MPSC_RCRR 0x0004 206#define MPSC_TCRR 0x0008 207 208/* Serial DMA Controller Interface Registers */ 209#define SDMA_SDC 0x0000 210#define SDMA_SDCM 0x0008 211#define SDMA_RX_DESC 0x0800 212#define SDMA_RX_BUF_PTR 0x0808 213#define SDMA_SCRDP 0x0810 214#define SDMA_TX_DESC 0x0c00 215#define SDMA_SCTDP 0x0c10 216#define SDMA_SFTDP 0x0c14 217 218#define SDMA_DESC_CMDSTAT_PE (1<<0) 219#define SDMA_DESC_CMDSTAT_CDL (1<<1) 220#define SDMA_DESC_CMDSTAT_FR (1<<3) 221#define SDMA_DESC_CMDSTAT_OR (1<<6) 222#define SDMA_DESC_CMDSTAT_BR (1<<9) 223#define SDMA_DESC_CMDSTAT_MI (1<<10) 224#define SDMA_DESC_CMDSTAT_A (1<<11) 225#define SDMA_DESC_CMDSTAT_AM (1<<12) 226#define SDMA_DESC_CMDSTAT_CT (1<<13) 227#define SDMA_DESC_CMDSTAT_C (1<<14) 228#define SDMA_DESC_CMDSTAT_ES (1<<15) 229#define SDMA_DESC_CMDSTAT_L (1<<16) 230#define SDMA_DESC_CMDSTAT_F (1<<17) 231#define SDMA_DESC_CMDSTAT_P (1<<18) 232#define SDMA_DESC_CMDSTAT_EI (1<<23) 233#define SDMA_DESC_CMDSTAT_O (1<<31) 234 235#define SDMA_DESC_DFLT (SDMA_DESC_CMDSTAT_O \ 236 | SDMA_DESC_CMDSTAT_EI) 237 238#define SDMA_SDC_RFT (1<<0) 239#define SDMA_SDC_SFM (1<<1) 240#define SDMA_SDC_BLMR (1<<6) 241#define SDMA_SDC_BLMT (1<<7) 242#define SDMA_SDC_POVR (1<<8) 243#define 
SDMA_SDC_RIFB (1<<9) 244 245#define SDMA_SDCM_ERD (1<<7) 246#define SDMA_SDCM_AR (1<<15) 247#define SDMA_SDCM_STD (1<<16) 248#define SDMA_SDCM_TXD (1<<23) 249#define SDMA_SDCM_AT (1<<31) 250 251#define SDMA_0_CAUSE_RXBUF (1<<0) 252#define SDMA_0_CAUSE_RXERR (1<<1) 253#define SDMA_0_CAUSE_TXBUF (1<<2) 254#define SDMA_0_CAUSE_TXEND (1<<3) 255#define SDMA_1_CAUSE_RXBUF (1<<8) 256#define SDMA_1_CAUSE_RXERR (1<<9) 257#define SDMA_1_CAUSE_TXBUF (1<<10) 258#define SDMA_1_CAUSE_TXEND (1<<11) 259 260#define SDMA_CAUSE_RX_MASK (SDMA_0_CAUSE_RXBUF | SDMA_0_CAUSE_RXERR \ 261 | SDMA_1_CAUSE_RXBUF | SDMA_1_CAUSE_RXERR) 262#define SDMA_CAUSE_TX_MASK (SDMA_0_CAUSE_TXBUF | SDMA_0_CAUSE_TXEND \ 263 | SDMA_1_CAUSE_TXBUF | SDMA_1_CAUSE_TXEND) 264 265/* SDMA Interrupt registers */ 266#define SDMA_INTR_CAUSE 0x0000 267#define SDMA_INTR_MASK 0x0080 268 269/* Baud Rate Generator Interface Registers */ 270#define BRG_BCR 0x0000 271#define BRG_BTR 0x0004 272 273/* 274 * Define how this driver is known to the outside (we've been assigned a 275 * range on the "Low-density serial ports" major). 276 */ 277#define MPSC_MAJOR 204 278#define MPSC_MINOR_START 44 279#define MPSC_DRIVER_NAME "MPSC" 280#define MPSC_DEV_NAME "ttyMM" 281#define MPSC_VERSION "1.00" 282 283static struct mpsc_port_info mpsc_ports[MPSC_NUM_CTLRS]; 284static struct mpsc_shared_regs mpsc_shared_regs; 285static struct uart_driver mpsc_reg; 286 287static void mpsc_start_rx(struct mpsc_port_info *pi); 288static void mpsc_free_ring_mem(struct mpsc_port_info *pi); 289static void mpsc_release_port(struct uart_port *port); 290/* 291 ****************************************************************************** 292 * 293 * Baud Rate Generator Routines (BRG) 294 * 295 ****************************************************************************** 296 */ 297static void mpsc_brg_init(struct mpsc_port_info *pi, u32 clk_src) 298{ 299 u32 v; 300 301 v = (pi->mirror_regs) ? 
pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR); 302 v = (v & ~(0xf << 18)) | ((clk_src & 0xf) << 18); 303 304 if (pi->brg_can_tune) 305 v &= ~(1 << 25); 306 307 if (pi->mirror_regs) 308 pi->BRG_BCR_m = v; 309 writel(v, pi->brg_base + BRG_BCR); 310 311 writel(readl(pi->brg_base + BRG_BTR) & 0xffff0000, 312 pi->brg_base + BRG_BTR); 313} 314 315static void mpsc_brg_enable(struct mpsc_port_info *pi) 316{ 317 u32 v; 318 319 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR); 320 v |= (1 << 16); 321 322 if (pi->mirror_regs) 323 pi->BRG_BCR_m = v; 324 writel(v, pi->brg_base + BRG_BCR); 325} 326 327static void mpsc_brg_disable(struct mpsc_port_info *pi) 328{ 329 u32 v; 330 331 v = (pi->mirror_regs) ? pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR); 332 v &= ~(1 << 16); 333 334 if (pi->mirror_regs) 335 pi->BRG_BCR_m = v; 336 writel(v, pi->brg_base + BRG_BCR); 337} 338 339/* 340 * To set the baud, we adjust the CDV field in the BRG_BCR reg. 341 * From manual: Baud = clk / ((CDV+1)*2) ==> CDV = (clk / (baud*2)) - 1. 342 * However, the input clock is divided by 16 in the MPSC b/c of how 343 * 'MPSC_MMCRH' was set up so we have to divide the 'clk' used in our 344 * calculation by 16 to account for that. So the real calculation 345 * that accounts for the way the mpsc is set up is: 346 * CDV = (clk / (baud*2*16)) - 1 ==> CDV = (clk / (baud << 5)) - 1. 347 */ 348static void mpsc_set_baudrate(struct mpsc_port_info *pi, u32 baud) 349{ 350 u32 cdv = (pi->port.uartclk / (baud << 5)) - 1; 351 u32 v; 352 353 mpsc_brg_disable(pi); 354 v = (pi->mirror_regs) ? 
pi->BRG_BCR_m : readl(pi->brg_base + BRG_BCR); 355 v = (v & 0xffff0000) | (cdv & 0xffff); 356 357 if (pi->mirror_regs) 358 pi->BRG_BCR_m = v; 359 writel(v, pi->brg_base + BRG_BCR); 360 mpsc_brg_enable(pi); 361} 362 363/* 364 ****************************************************************************** 365 * 366 * Serial DMA Routines (SDMA) 367 * 368 ****************************************************************************** 369 */ 370 371static void mpsc_sdma_burstsize(struct mpsc_port_info *pi, u32 burst_size) 372{ 373 u32 v; 374 375 pr_debug("mpsc_sdma_burstsize[%d]: burst_size: %d\n", 376 pi->port.line, burst_size); 377 378 burst_size >>= 3; /* Divide by 8 b/c reg values are 8-byte chunks */ 379 380 if (burst_size < 2) 381 v = 0x0; /* 1 64-bit word */ 382 else if (burst_size < 4) 383 v = 0x1; /* 2 64-bit words */ 384 else if (burst_size < 8) 385 v = 0x2; /* 4 64-bit words */ 386 else 387 v = 0x3; /* 8 64-bit words */ 388 389 writel((readl(pi->sdma_base + SDMA_SDC) & (0x3 << 12)) | (v << 12), 390 pi->sdma_base + SDMA_SDC); 391} 392 393static void mpsc_sdma_init(struct mpsc_port_info *pi, u32 burst_size) 394{ 395 pr_debug("mpsc_sdma_init[%d]: burst_size: %d\n", pi->port.line, 396 burst_size); 397 398 writel((readl(pi->sdma_base + SDMA_SDC) & 0x3ff) | 0x03f, 399 pi->sdma_base + SDMA_SDC); 400 mpsc_sdma_burstsize(pi, burst_size); 401} 402 403static u32 mpsc_sdma_intr_mask(struct mpsc_port_info *pi, u32 mask) 404{ 405 u32 old, v; 406 407 pr_debug("mpsc_sdma_intr_mask[%d]: mask: 0x%x\n", pi->port.line, mask); 408 409 old = v = (pi->mirror_regs) ? 
pi->shared_regs->SDMA_INTR_MASK_m : 410 readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK); 411 412 mask &= 0xf; 413 if (pi->port.line) 414 mask <<= 8; 415 v &= ~mask; 416 417 if (pi->mirror_regs) 418 pi->shared_regs->SDMA_INTR_MASK_m = v; 419 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK); 420 421 if (pi->port.line) 422 old >>= 8; 423 return old & 0xf; 424} 425 426static void mpsc_sdma_intr_unmask(struct mpsc_port_info *pi, u32 mask) 427{ 428 u32 v; 429 430 pr_debug("mpsc_sdma_intr_unmask[%d]: mask: 0x%x\n", pi->port.line,mask); 431 432 v = (pi->mirror_regs) ? pi->shared_regs->SDMA_INTR_MASK_m 433 : readl(pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK); 434 435 mask &= 0xf; 436 if (pi->port.line) 437 mask <<= 8; 438 v |= mask; 439 440 if (pi->mirror_regs) 441 pi->shared_regs->SDMA_INTR_MASK_m = v; 442 writel(v, pi->shared_regs->sdma_intr_base + SDMA_INTR_MASK); 443} 444 445static void mpsc_sdma_intr_ack(struct mpsc_port_info *pi) 446{ 447 pr_debug("mpsc_sdma_intr_ack[%d]: Acknowledging IRQ\n", pi->port.line); 448 449 if (pi->mirror_regs) 450 pi->shared_regs->SDMA_INTR_CAUSE_m = 0; 451 writeb(0x00, pi->shared_regs->sdma_intr_base + SDMA_INTR_CAUSE 452 + pi->port.line); 453} 454 455static void mpsc_sdma_set_rx_ring(struct mpsc_port_info *pi, 456 struct mpsc_rx_desc *rxre_p) 457{ 458 pr_debug("mpsc_sdma_set_rx_ring[%d]: rxre_p: 0x%x\n", 459 pi->port.line, (u32)rxre_p); 460 461 writel((u32)rxre_p, pi->sdma_base + SDMA_SCRDP); 462} 463 464static void mpsc_sdma_set_tx_ring(struct mpsc_port_info *pi, 465 struct mpsc_tx_desc *txre_p) 466{ 467 writel((u32)txre_p, pi->sdma_base + SDMA_SFTDP); 468 writel((u32)txre_p, pi->sdma_base + SDMA_SCTDP); 469} 470 471static void mpsc_sdma_cmd(struct mpsc_port_info *pi, u32 val) 472{ 473 u32 v; 474 475 v = readl(pi->sdma_base + SDMA_SDCM); 476 if (val) 477 v |= val; 478 else 479 v = 0; 480 wmb(); 481 writel(v, pi->sdma_base + SDMA_SDCM); 482 wmb(); 483} 484 485static uint mpsc_sdma_tx_active(struct mpsc_port_info 
*pi) 486{ 487 return readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_TXD; 488} 489 490static void mpsc_sdma_start_tx(struct mpsc_port_info *pi) 491{ 492 struct mpsc_tx_desc *txre, *txre_p; 493 494 /* If tx isn't running & there's a desc ready to go, start it */ 495 if (!mpsc_sdma_tx_active(pi)) { 496 txre = (struct mpsc_tx_desc *)(pi->txr 497 + (pi->txr_tail * MPSC_TXRE_SIZE)); 498 dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE, 499 DMA_FROM_DEVICE); 500#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 501 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 502 invalidate_dcache_range((ulong)txre, 503 (ulong)txre + MPSC_TXRE_SIZE); 504#endif 505 506 if (be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O) { 507 txre_p = (struct mpsc_tx_desc *) 508 (pi->txr_p + (pi->txr_tail * MPSC_TXRE_SIZE)); 509 510 mpsc_sdma_set_tx_ring(pi, txre_p); 511 mpsc_sdma_cmd(pi, SDMA_SDCM_STD | SDMA_SDCM_TXD); 512 } 513 } 514} 515 516static void mpsc_sdma_stop(struct mpsc_port_info *pi) 517{ 518 pr_debug("mpsc_sdma_stop[%d]: Stopping SDMA\n", pi->port.line); 519 520 /* Abort any SDMA transfers */ 521 mpsc_sdma_cmd(pi, 0); 522 mpsc_sdma_cmd(pi, SDMA_SDCM_AR | SDMA_SDCM_AT); 523 524 /* Clear the SDMA current and first TX and RX pointers */ 525 mpsc_sdma_set_tx_ring(pi, NULL); 526 mpsc_sdma_set_rx_ring(pi, NULL); 527 528 /* Disable interrupts */ 529 mpsc_sdma_intr_mask(pi, 0xf); 530 mpsc_sdma_intr_ack(pi); 531} 532 533/* 534 ****************************************************************************** 535 * 536 * Multi-Protocol Serial Controller Routines (MPSC) 537 * 538 ****************************************************************************** 539 */ 540 541static void mpsc_hw_init(struct mpsc_port_info *pi) 542{ 543 u32 v; 544 545 pr_debug("mpsc_hw_init[%d]: Initializing hardware\n", pi->port.line); 546 547 /* Set up clock routing */ 548 if (pi->mirror_regs) { 549 v = pi->shared_regs->MPSC_MRR_m; 550 v &= ~0x1c7; 551 pi->shared_regs->MPSC_MRR_m = v; 552 writel(v, 
pi->shared_regs->mpsc_routing_base + MPSC_MRR); 553 554 v = pi->shared_regs->MPSC_RCRR_m; 555 v = (v & ~0xf0f) | 0x100; 556 pi->shared_regs->MPSC_RCRR_m = v; 557 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR); 558 559 v = pi->shared_regs->MPSC_TCRR_m; 560 v = (v & ~0xf0f) | 0x100; 561 pi->shared_regs->MPSC_TCRR_m = v; 562 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR); 563 } else { 564 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_MRR); 565 v &= ~0x1c7; 566 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_MRR); 567 568 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_RCRR); 569 v = (v & ~0xf0f) | 0x100; 570 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_RCRR); 571 572 v = readl(pi->shared_regs->mpsc_routing_base + MPSC_TCRR); 573 v = (v & ~0xf0f) | 0x100; 574 writel(v, pi->shared_regs->mpsc_routing_base + MPSC_TCRR); 575 } 576 577 /* Put MPSC in UART mode & enabel Tx/Rx egines */ 578 writel(0x000004c4, pi->mpsc_base + MPSC_MMCRL); 579 580 /* No preamble, 16x divider, low-latency, */ 581 writel(0x04400400, pi->mpsc_base + MPSC_MMCRH); 582 mpsc_set_baudrate(pi, pi->default_baud); 583 584 if (pi->mirror_regs) { 585 pi->MPSC_CHR_1_m = 0; 586 pi->MPSC_CHR_2_m = 0; 587 } 588 writel(0, pi->mpsc_base + MPSC_CHR_1); 589 writel(0, pi->mpsc_base + MPSC_CHR_2); 590 writel(pi->mpsc_max_idle, pi->mpsc_base + MPSC_CHR_3); 591 writel(0, pi->mpsc_base + MPSC_CHR_4); 592 writel(0, pi->mpsc_base + MPSC_CHR_5); 593 writel(0, pi->mpsc_base + MPSC_CHR_6); 594 writel(0, pi->mpsc_base + MPSC_CHR_7); 595 writel(0, pi->mpsc_base + MPSC_CHR_8); 596 writel(0, pi->mpsc_base + MPSC_CHR_9); 597 writel(0, pi->mpsc_base + MPSC_CHR_10); 598} 599 600static void mpsc_enter_hunt(struct mpsc_port_info *pi) 601{ 602 pr_debug("mpsc_enter_hunt[%d]: Hunting...\n", pi->port.line); 603 604 if (pi->mirror_regs) { 605 writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_EH, 606 pi->mpsc_base + MPSC_CHR_2); 607 /* Erratum prevents reading CHR_2 so just delay for a while */ 608 
udelay(100); 609 } else { 610 writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_EH, 611 pi->mpsc_base + MPSC_CHR_2); 612 613 while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_EH) 614 udelay(10); 615 } 616} 617 618static void mpsc_freeze(struct mpsc_port_info *pi) 619{ 620 u32 v; 621 622 pr_debug("mpsc_freeze[%d]: Freezing\n", pi->port.line); 623 624 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m : 625 readl(pi->mpsc_base + MPSC_MPCR); 626 v |= MPSC_MPCR_FRZ; 627 628 if (pi->mirror_regs) 629 pi->MPSC_MPCR_m = v; 630 writel(v, pi->mpsc_base + MPSC_MPCR); 631} 632 633static void mpsc_unfreeze(struct mpsc_port_info *pi) 634{ 635 u32 v; 636 637 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m : 638 readl(pi->mpsc_base + MPSC_MPCR); 639 v &= ~MPSC_MPCR_FRZ; 640 641 if (pi->mirror_regs) 642 pi->MPSC_MPCR_m = v; 643 writel(v, pi->mpsc_base + MPSC_MPCR); 644 645 pr_debug("mpsc_unfreeze[%d]: Unfrozen\n", pi->port.line); 646} 647 648static void mpsc_set_char_length(struct mpsc_port_info *pi, u32 len) 649{ 650 u32 v; 651 652 pr_debug("mpsc_set_char_length[%d]: char len: %d\n", pi->port.line,len); 653 654 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m : 655 readl(pi->mpsc_base + MPSC_MPCR); 656 v = (v & ~(0x3 << 12)) | ((len & 0x3) << 12); 657 658 if (pi->mirror_regs) 659 pi->MPSC_MPCR_m = v; 660 writel(v, pi->mpsc_base + MPSC_MPCR); 661} 662 663static void mpsc_set_stop_bit_length(struct mpsc_port_info *pi, u32 len) 664{ 665 u32 v; 666 667 pr_debug("mpsc_set_stop_bit_length[%d]: stop bits: %d\n", 668 pi->port.line, len); 669 670 v = (pi->mirror_regs) ? pi->MPSC_MPCR_m : 671 readl(pi->mpsc_base + MPSC_MPCR); 672 673 v = (v & ~(1 << 14)) | ((len & 0x1) << 14); 674 675 if (pi->mirror_regs) 676 pi->MPSC_MPCR_m = v; 677 writel(v, pi->mpsc_base + MPSC_MPCR); 678} 679 680static void mpsc_set_parity(struct mpsc_port_info *pi, u32 p) 681{ 682 u32 v; 683 684 pr_debug("mpsc_set_parity[%d]: parity bits: 0x%x\n", pi->port.line, p); 685 686 v = (pi->mirror_regs) ? 
pi->MPSC_CHR_2_m : 687 readl(pi->mpsc_base + MPSC_CHR_2); 688 689 p &= 0x3; 690 v = (v & ~0xc000c) | (p << 18) | (p << 2); 691 692 if (pi->mirror_regs) 693 pi->MPSC_CHR_2_m = v; 694 writel(v, pi->mpsc_base + MPSC_CHR_2); 695} 696 697/* 698 ****************************************************************************** 699 * 700 * Driver Init Routines 701 * 702 ****************************************************************************** 703 */ 704 705static void mpsc_init_hw(struct mpsc_port_info *pi) 706{ 707 pr_debug("mpsc_init_hw[%d]: Initializing\n", pi->port.line); 708 709 mpsc_brg_init(pi, pi->brg_clk_src); 710 mpsc_brg_enable(pi); 711 mpsc_sdma_init(pi, dma_get_cache_alignment()); /* burst a cacheline */ 712 mpsc_sdma_stop(pi); 713 mpsc_hw_init(pi); 714} 715 716static int mpsc_alloc_ring_mem(struct mpsc_port_info *pi) 717{ 718 int rc = 0; 719 720 pr_debug("mpsc_alloc_ring_mem[%d]: Allocating ring mem\n", 721 pi->port.line); 722 723 if (!pi->dma_region) { 724 if (!dma_supported(pi->port.dev, 0xffffffff)) { 725 printk(KERN_ERR "MPSC: Inadequate DMA support\n"); 726 rc = -ENXIO; 727 } else if ((pi->dma_region = dma_alloc_noncoherent(pi->port.dev, 728 MPSC_DMA_ALLOC_SIZE, 729 &pi->dma_region_p, GFP_KERNEL)) 730 == NULL) { 731 printk(KERN_ERR "MPSC: Can't alloc Desc region\n"); 732 rc = -ENOMEM; 733 } 734 } 735 736 return rc; 737} 738 739static void mpsc_free_ring_mem(struct mpsc_port_info *pi) 740{ 741 pr_debug("mpsc_free_ring_mem[%d]: Freeing ring mem\n", pi->port.line); 742 743 if (pi->dma_region) { 744 dma_free_noncoherent(pi->port.dev, MPSC_DMA_ALLOC_SIZE, 745 pi->dma_region, pi->dma_region_p); 746 pi->dma_region = NULL; 747 pi->dma_region_p = (dma_addr_t)NULL; 748 } 749} 750 751static void mpsc_init_rings(struct mpsc_port_info *pi) 752{ 753 struct mpsc_rx_desc *rxre; 754 struct mpsc_tx_desc *txre; 755 dma_addr_t dp, dp_p; 756 u8 *bp, *bp_p; 757 int i; 758 759 pr_debug("mpsc_init_rings[%d]: Initializing rings\n", pi->port.line); 760 761 BUG_ON(pi->dma_region 
== NULL); 762 763 memset(pi->dma_region, 0, MPSC_DMA_ALLOC_SIZE); 764 765 /* 766 * Descriptors & buffers are multiples of cacheline size and must be 767 * cacheline aligned. 768 */ 769 dp = ALIGN((u32)pi->dma_region, dma_get_cache_alignment()); 770 dp_p = ALIGN((u32)pi->dma_region_p, dma_get_cache_alignment()); 771 772 /* 773 * Partition dma region into rx ring descriptor, rx buffers, 774 * tx ring descriptors, and tx buffers. 775 */ 776 pi->rxr = dp; 777 pi->rxr_p = dp_p; 778 dp += MPSC_RXR_SIZE; 779 dp_p += MPSC_RXR_SIZE; 780 781 pi->rxb = (u8 *)dp; 782 pi->rxb_p = (u8 *)dp_p; 783 dp += MPSC_RXB_SIZE; 784 dp_p += MPSC_RXB_SIZE; 785 786 pi->rxr_posn = 0; 787 788 pi->txr = dp; 789 pi->txr_p = dp_p; 790 dp += MPSC_TXR_SIZE; 791 dp_p += MPSC_TXR_SIZE; 792 793 pi->txb = (u8 *)dp; 794 pi->txb_p = (u8 *)dp_p; 795 796 pi->txr_head = 0; 797 pi->txr_tail = 0; 798 799 /* Init rx ring descriptors */ 800 dp = pi->rxr; 801 dp_p = pi->rxr_p; 802 bp = pi->rxb; 803 bp_p = pi->rxb_p; 804 805 for (i = 0; i < MPSC_RXR_ENTRIES; i++) { 806 rxre = (struct mpsc_rx_desc *)dp; 807 808 rxre->bufsize = cpu_to_be16(MPSC_RXBE_SIZE); 809 rxre->bytecnt = cpu_to_be16(0); 810 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O 811 | SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F 812 | SDMA_DESC_CMDSTAT_L); 813 rxre->link = cpu_to_be32(dp_p + MPSC_RXRE_SIZE); 814 rxre->buf_ptr = cpu_to_be32(bp_p); 815 816 dp += MPSC_RXRE_SIZE; 817 dp_p += MPSC_RXRE_SIZE; 818 bp += MPSC_RXBE_SIZE; 819 bp_p += MPSC_RXBE_SIZE; 820 } 821 rxre->link = cpu_to_be32(pi->rxr_p); /* Wrap last back to first */ 822 823 /* Init tx ring descriptors */ 824 dp = pi->txr; 825 dp_p = pi->txr_p; 826 bp = pi->txb; 827 bp_p = pi->txb_p; 828 829 for (i = 0; i < MPSC_TXR_ENTRIES; i++) { 830 txre = (struct mpsc_tx_desc *)dp; 831 832 txre->link = cpu_to_be32(dp_p + MPSC_TXRE_SIZE); 833 txre->buf_ptr = cpu_to_be32(bp_p); 834 835 dp += MPSC_TXRE_SIZE; 836 dp_p += MPSC_TXRE_SIZE; 837 bp += MPSC_TXBE_SIZE; 838 bp_p += MPSC_TXBE_SIZE; 839 } 840 
txre->link = cpu_to_be32(pi->txr_p); /* Wrap last back to first */ 841 842 dma_cache_sync(pi->port.dev, (void *)pi->dma_region, 843 MPSC_DMA_ALLOC_SIZE, DMA_BIDIRECTIONAL); 844#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 845 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 846 flush_dcache_range((ulong)pi->dma_region, 847 (ulong)pi->dma_region 848 + MPSC_DMA_ALLOC_SIZE); 849#endif 850 851 return; 852} 853 854static void mpsc_uninit_rings(struct mpsc_port_info *pi) 855{ 856 pr_debug("mpsc_uninit_rings[%d]: Uninitializing rings\n",pi->port.line); 857 858 BUG_ON(pi->dma_region == NULL); 859 860 pi->rxr = 0; 861 pi->rxr_p = 0; 862 pi->rxb = NULL; 863 pi->rxb_p = NULL; 864 pi->rxr_posn = 0; 865 866 pi->txr = 0; 867 pi->txr_p = 0; 868 pi->txb = NULL; 869 pi->txb_p = NULL; 870 pi->txr_head = 0; 871 pi->txr_tail = 0; 872} 873 874static int mpsc_make_ready(struct mpsc_port_info *pi) 875{ 876 int rc; 877 878 pr_debug("mpsc_make_ready[%d]: Making cltr ready\n", pi->port.line); 879 880 if (!pi->ready) { 881 mpsc_init_hw(pi); 882 if ((rc = mpsc_alloc_ring_mem(pi))) 883 return rc; 884 mpsc_init_rings(pi); 885 pi->ready = 1; 886 } 887 888 return 0; 889} 890 891#ifdef CONFIG_CONSOLE_POLL 892static int serial_polled; 893#endif 894 895/* 896 ****************************************************************************** 897 * 898 * Interrupt Handling Routines 899 * 900 ****************************************************************************** 901 */ 902 903static int mpsc_rx_intr(struct mpsc_port_info *pi) 904{ 905 struct mpsc_rx_desc *rxre; 906 struct tty_struct *tty = pi->port.state->port.tty; 907 u32 cmdstat, bytes_in, i; 908 int rc = 0; 909 u8 *bp; 910 char flag = TTY_NORMAL; 911 912 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line); 913 914 rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE)); 915 916 dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, 917 DMA_FROM_DEVICE); 918#if defined(CONFIG_PPC32) && 
!defined(CONFIG_NOT_COHERENT_CACHE) 919 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 920 invalidate_dcache_range((ulong)rxre, 921 (ulong)rxre + MPSC_RXRE_SIZE); 922#endif 923 924 /* 925 * Loop through Rx descriptors handling ones that have been completed. 926 */ 927 while (!((cmdstat = be32_to_cpu(rxre->cmdstat)) 928 & SDMA_DESC_CMDSTAT_O)) { 929 bytes_in = be16_to_cpu(rxre->bytecnt); 930#ifdef CONFIG_CONSOLE_POLL 931 if (unlikely(serial_polled)) { 932 serial_polled = 0; 933 return 0; 934 } 935#endif 936 /* Following use of tty struct directly is deprecated */ 937 if (unlikely(tty_buffer_request_room(tty, bytes_in) 938 < bytes_in)) { 939 if (tty->low_latency) 940 tty_flip_buffer_push(tty); 941 /* 942 * If this failed then we will throw away the bytes 943 * but must do so to clear interrupts. 944 */ 945 } 946 947 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE); 948 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_RXBE_SIZE, 949 DMA_FROM_DEVICE); 950#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 951 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 952 invalidate_dcache_range((ulong)bp, 953 (ulong)bp + MPSC_RXBE_SIZE); 954#endif 955 956 /* 957 * Other than for parity error, the manual provides little 958 * info on what data will be in a frame flagged by any of 959 * these errors. For parity error, it is the last byte in 960 * the buffer that had the error. As for the rest, I guess 961 * we'll assume there is no data in the buffer. 962 * If there is...it gets lost. 
963 */ 964 if (unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR 965 | SDMA_DESC_CMDSTAT_FR 966 | SDMA_DESC_CMDSTAT_OR))) { 967 968 pi->port.icount.rx++; 969 970 if (cmdstat & SDMA_DESC_CMDSTAT_BR) { /* Break */ 971 pi->port.icount.brk++; 972 973 if (uart_handle_break(&pi->port)) 974 goto next_frame; 975 } else if (cmdstat & SDMA_DESC_CMDSTAT_FR) { 976 pi->port.icount.frame++; 977 } else if (cmdstat & SDMA_DESC_CMDSTAT_OR) { 978 pi->port.icount.overrun++; 979 } 980 981 cmdstat &= pi->port.read_status_mask; 982 983 if (cmdstat & SDMA_DESC_CMDSTAT_BR) 984 flag = TTY_BREAK; 985 else if (cmdstat & SDMA_DESC_CMDSTAT_FR) 986 flag = TTY_FRAME; 987 else if (cmdstat & SDMA_DESC_CMDSTAT_OR) 988 flag = TTY_OVERRUN; 989 else if (cmdstat & SDMA_DESC_CMDSTAT_PE) 990 flag = TTY_PARITY; 991 } 992 993 if (uart_handle_sysrq_char(&pi->port, *bp)) { 994 bp++; 995 bytes_in--; 996#ifdef CONFIG_CONSOLE_POLL 997 if (unlikely(serial_polled)) { 998 serial_polled = 0; 999 return 0; 1000 } 1001#endif 1002 goto next_frame; 1003 } 1004 1005 if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR 1006 | SDMA_DESC_CMDSTAT_FR 1007 | SDMA_DESC_CMDSTAT_OR))) 1008 && !(cmdstat & pi->port.ignore_status_mask)) { 1009 tty_insert_flip_char(tty, *bp, flag); 1010 } else { 1011 for (i=0; i<bytes_in; i++) 1012 tty_insert_flip_char(tty, *bp++, TTY_NORMAL); 1013 1014 pi->port.icount.rx += bytes_in; 1015 } 1016 1017next_frame: 1018 rxre->bytecnt = cpu_to_be16(0); 1019 wmb(); 1020 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O 1021 | SDMA_DESC_CMDSTAT_EI | SDMA_DESC_CMDSTAT_F 1022 | SDMA_DESC_CMDSTAT_L); 1023 wmb(); 1024 dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, 1025 DMA_BIDIRECTIONAL); 1026#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1027 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1028 flush_dcache_range((ulong)rxre, 1029 (ulong)rxre + MPSC_RXRE_SIZE); 1030#endif 1031 1032 /* Advance to next descriptor */ 1033 pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1); 1034 rxre = 
				(struct mpsc_rx_desc *)
				(pi->rxr + (pi->rxr_posn * MPSC_RXRE_SIZE));
		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)rxre,
					(ulong)rxre + MPSC_RXRE_SIZE);
#endif
		rc = 1;
	}

	/* Restart rx engine, if its stopped */
	if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
		mpsc_start_rx(pi);

	tty_flip_buffer_push(tty);
	return rc;
}

/*
 * Hand the Tx descriptor at txr_head to the SDMA engine for a 'count'-byte
 * buffer.  Setting SDMA_DESC_CMDSTAT_O passes ownership to the hardware;
 * 'intr' selects whether completion raises an interrupt (EI bit).  The
 * caller is responsible for advancing txr_head afterwards.
 */
static void mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
{
	struct mpsc_tx_desc *txre;

	txre = (struct mpsc_tx_desc *)(pi->txr
			+ (pi->txr_head * MPSC_TXRE_SIZE));

	txre->bytecnt = cpu_to_be16(count);
	txre->shadow = txre->bytecnt;
	wmb(); /* ensure cmdstat is last field updated */
	txre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | SDMA_DESC_CMDSTAT_F
			| SDMA_DESC_CMDSTAT_L
			| ((intr) ? SDMA_DESC_CMDSTAT_EI : 0));
	wmb();
	/* Push the descriptor to memory before the SDMA engine can read it */
	dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
			DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
		flush_dcache_range((ulong)txre,
				(ulong)txre + MPSC_TXRE_SIZE);
#endif
}

/*
 * Drain pending Tx data (x_char first, then the uart circular buffer) into
 * the Tx DMA ring, one buffer entry (MPSC_TXBE_SIZE bytes) per descriptor,
 * until the ring is full or nothing is left to send.  Both visible callers
 * (mpsc_start_tx, mpsc_tx_intr) hold pi->tx_lock around this.
 */
static void mpsc_copy_tx_data(struct mpsc_port_info *pi)
{
	struct circ_buf *xmit = &pi->port.state->xmit;
	u8 *bp;
	u32 i;

	/* Make sure the desc ring isn't full */
	while (CIRC_CNT(pi->txr_head, pi->txr_tail, MPSC_TXR_ENTRIES)
			< (MPSC_TXR_ENTRIES - 1)) {
		if (pi->port.x_char) {
			/*
			 * Ideally, we should use the TCS field in
			 * CHR_1 to put the x_char out immediately but
			 * errata prevents us from being able to read
			 * CHR_2 to know that its safe to write to
			 * CHR_1.  Instead, just put it in-band with
			 * all the other Tx data.
			 */
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			*bp = pi->port.x_char;
			pi->port.x_char = 0;
			i = 1;
		} else if (!uart_circ_empty(xmit)
				&& !uart_tx_stopped(&pi->port)) {
			/* Cap at one ring buffer entry and at the circ-buf
			 * wrap point so a single memcpy suffices. */
			i = min((u32)MPSC_TXBE_SIZE,
				(u32)uart_circ_chars_pending(xmit));
			i = min(i, (u32)CIRC_CNT_TO_END(xmit->head, xmit->tail,
						UART_XMIT_SIZE));
			bp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE);
			memcpy(bp, &xmit->buf[xmit->tail], i);
			xmit->tail = (xmit->tail + i) & (UART_XMIT_SIZE - 1);

			if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
				uart_write_wakeup(&pi->port);
		} else { /* All tx data copied into ring bufs */
			return;
		}

		/* Make the buffer contents visible to the SDMA engine */
		dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE,
				DMA_BIDIRECTIONAL);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			flush_dcache_range((ulong)bp,
					(ulong)bp + MPSC_TXBE_SIZE);
#endif
		mpsc_setup_tx_desc(pi, i, 1);

		/* Advance to next descriptor */
		pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1);
	}
}

/*
 * Reclaim Tx descriptors the SDMA engine has completed (O bit cleared),
 * add their byte counts to icount.tx, then queue any further pending data
 * and restart the engine.  Only runs when the engine is idle.  Returns 1
 * if at least one descriptor was reclaimed, 0 otherwise.
 */
static int mpsc_tx_intr(struct mpsc_port_info *pi)
{
	struct mpsc_tx_desc *txre;
	int rc = 0;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	if (!mpsc_sdma_tx_active(pi)) {
		txre = (struct mpsc_tx_desc *)(pi->txr
				+ (pi->txr_tail * MPSC_TXRE_SIZE));

		dma_cache_sync(pi->port.dev, (void *)txre, MPSC_TXRE_SIZE,
				DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
			invalidate_dcache_range((ulong)txre,
					(ulong)txre + MPSC_TXRE_SIZE);
#endif

		while (!(be32_to_cpu(txre->cmdstat) & SDMA_DESC_CMDSTAT_O)) {
			rc = 1;
			pi->port.icount.tx += be16_to_cpu(txre->bytecnt);
			pi->txr_tail = (pi->txr_tail+1) & (MPSC_TXR_ENTRIES-1);

			/* If no more data to tx, fall out of loop */
			if (pi->txr_head == pi->txr_tail)
				break;

			txre = (struct mpsc_tx_desc *)(pi->txr
					+ (pi->txr_tail * MPSC_TXRE_SIZE));
			dma_cache_sync(pi->port.dev, (void *)txre,
					MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
			if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
				invalidate_dcache_range((ulong)txre,
						(ulong)txre + MPSC_TXRE_SIZE);
#endif
		}

		mpsc_copy_tx_data(pi);
		mpsc_sdma_start_tx(pi); /* start next desc if ready */
	}

	spin_unlock_irqrestore(&pi->tx_lock, iflags);
	return rc;
}

/*
 * This is the driver's interrupt handler. To avoid a race, we first clear
 * the interrupt, then handle any completed Rx/Tx descriptors. When done
 * handling those descriptors, we restart the Rx/Tx engines if they're stopped.
 */
static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
{
	struct mpsc_port_info *pi = dev_id;
	ulong iflags;
	int rc = IRQ_NONE;

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Received\n",pi->port.line);

	spin_lock_irqsave(&pi->port.lock, iflags);
	mpsc_sdma_intr_ack(pi);
	if (mpsc_rx_intr(pi))
		rc = IRQ_HANDLED;
	if (mpsc_tx_intr(pi))
		rc = IRQ_HANDLED;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	pr_debug("mpsc_sdma_intr[%d]: SDMA Interrupt Handled\n", pi->port.line);
	return rc;
}

/*
 ******************************************************************************
 *
 * serial_core.c Interface routines
 *
 ******************************************************************************
 */
/* uart_ops.tx_empty: report TIOCSER_TEMT once the SDMA Tx engine is idle */
static uint mpsc_tx_empty(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	ulong iflags;
	uint rc;

	spin_lock_irqsave(&pi->port.lock,
			iflags);
	rc = mpsc_sdma_tx_active(pi) ? 0 : TIOCSER_TEMT;
	spin_unlock_irqrestore(&pi->port.lock, iflags);

	return rc;
}

/* uart_ops.set_mctrl: intentionally empty -- see comment below */
static void mpsc_set_mctrl(struct uart_port *port, uint mctrl)
{
	/* Have no way to set modem control lines AFAICT */
}

/*
 * uart_ops.get_mctrl: derive CTS/CD from MPSC_CHR_10 (or its mirror when
 * the register-read erratum applies).  DSR is always reported asserted
 * because there is no way to read it on this hardware.
 */
static uint mpsc_get_mctrl(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	u32 mflags, status;

	status = (pi->mirror_regs) ? pi->MPSC_CHR_10_m
		: readl(pi->mpsc_base + MPSC_CHR_10);

	mflags = 0;
	if (status & 0x1)		/* mapped here to CTS */
		mflags |= TIOCM_CTS;
	if (status & 0x2)		/* mapped here to carrier detect */
		mflags |= TIOCM_CAR;

	return mflags | TIOCM_DSR;	/* No way to tell if DSR asserted */
}

/* uart_ops.stop_tx: freeze the transmitter */
static void mpsc_stop_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_stop_tx[%d]\n", port->line);

	mpsc_freeze(pi);
}

/*
 * uart_ops.start_tx: unfreeze the transmitter, copy any pending data into
 * the Tx DMA ring and kick the SDMA engine.  Serialized by pi->tx_lock.
 */
static void mpsc_start_tx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;
	unsigned long iflags;

	spin_lock_irqsave(&pi->tx_lock, iflags);

	mpsc_unfreeze(pi);
	mpsc_copy_tx_data(pi);
	mpsc_sdma_start_tx(pi);

	spin_unlock_irqrestore(&pi->tx_lock, iflags);

	pr_debug("mpsc_start_tx[%d]\n", port->line);
}

/*
 * Start the receiver: enter hunt mode and enable Rx DMA (ERD), but only
 * if reception is enabled (rcv_data is set from CREAD in set_termios).
 */
static void mpsc_start_rx(struct mpsc_port_info *pi)
{
	pr_debug("mpsc_start_rx[%d]: Starting...\n", pi->port.line);

	if (pi->rcv_data) {
		mpsc_enter_hunt(pi);
		mpsc_sdma_cmd(pi, SDMA_SDCM_ERD);
	}
}

/* uart_ops.stop_rx: abort reception via CHR_2 RA bit, then abort Rx DMA */
static void mpsc_stop_rx(struct uart_port *port)
{
	struct mpsc_port_info *pi = (struct mpsc_port_info *)port;

	pr_debug("mpsc_stop_rx[%d]: Stopping...\n", port->line);

	if (pi->mirror_regs) {
		writel(pi->MPSC_CHR_2_m | MPSC_CHR_2_RA,
			pi->mpsc_base + MPSC_CHR_2);
		/* Erratum prevents reading CHR_2 so just delay for a while */
		udelay(100);
1290 } else { 1291 writel(readl(pi->mpsc_base + MPSC_CHR_2) | MPSC_CHR_2_RA, 1292 pi->mpsc_base + MPSC_CHR_2); 1293 1294 while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_RA) 1295 udelay(10); 1296 } 1297 1298 mpsc_sdma_cmd(pi, SDMA_SDCM_AR); 1299} 1300 1301static void mpsc_enable_ms(struct uart_port *port) 1302{ 1303} 1304 1305static void mpsc_break_ctl(struct uart_port *port, int ctl) 1306{ 1307 struct mpsc_port_info *pi = (struct mpsc_port_info *)port; 1308 ulong flags; 1309 u32 v; 1310 1311 v = ctl ? 0x00ff0000 : 0; 1312 1313 spin_lock_irqsave(&pi->port.lock, flags); 1314 if (pi->mirror_regs) 1315 pi->MPSC_CHR_1_m = v; 1316 writel(v, pi->mpsc_base + MPSC_CHR_1); 1317 spin_unlock_irqrestore(&pi->port.lock, flags); 1318} 1319 1320static int mpsc_startup(struct uart_port *port) 1321{ 1322 struct mpsc_port_info *pi = (struct mpsc_port_info *)port; 1323 u32 flag = 0; 1324 int rc; 1325 1326 pr_debug("mpsc_startup[%d]: Starting up MPSC, irq: %d\n", 1327 port->line, pi->port.irq); 1328 1329 if ((rc = mpsc_make_ready(pi)) == 0) { 1330 /* Setup IRQ handler */ 1331 mpsc_sdma_intr_ack(pi); 1332 1333 /* If irq's are shared, need to set flag */ 1334 if (mpsc_ports[0].port.irq == mpsc_ports[1].port.irq) 1335 flag = IRQF_SHARED; 1336 1337 if (request_irq(pi->port.irq, mpsc_sdma_intr, flag, 1338 "mpsc-sdma", pi)) 1339 printk(KERN_ERR "MPSC: Can't get SDMA IRQ %d\n", 1340 pi->port.irq); 1341 1342 mpsc_sdma_intr_unmask(pi, 0xf); 1343 mpsc_sdma_set_rx_ring(pi, (struct mpsc_rx_desc *)(pi->rxr_p 1344 + (pi->rxr_posn * MPSC_RXRE_SIZE))); 1345 } 1346 1347 return rc; 1348} 1349 1350static void mpsc_shutdown(struct uart_port *port) 1351{ 1352 struct mpsc_port_info *pi = (struct mpsc_port_info *)port; 1353 1354 pr_debug("mpsc_shutdown[%d]: Shutting down MPSC\n", port->line); 1355 1356 mpsc_sdma_stop(pi); 1357 free_irq(pi->port.irq, pi); 1358} 1359 1360static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios, 1361 struct ktermios *old) 1362{ 1363 struct 
mpsc_port_info *pi = (struct mpsc_port_info *)port; 1364 u32 baud; 1365 ulong flags; 1366 u32 chr_bits, stop_bits, par; 1367 1368 pi->c_iflag = termios->c_iflag; 1369 pi->c_cflag = termios->c_cflag; 1370 1371 switch (termios->c_cflag & CSIZE) { 1372 case CS5: 1373 chr_bits = MPSC_MPCR_CL_5; 1374 break; 1375 case CS6: 1376 chr_bits = MPSC_MPCR_CL_6; 1377 break; 1378 case CS7: 1379 chr_bits = MPSC_MPCR_CL_7; 1380 break; 1381 case CS8: 1382 default: 1383 chr_bits = MPSC_MPCR_CL_8; 1384 break; 1385 } 1386 1387 if (termios->c_cflag & CSTOPB) 1388 stop_bits = MPSC_MPCR_SBL_2; 1389 else 1390 stop_bits = MPSC_MPCR_SBL_1; 1391 1392 par = MPSC_CHR_2_PAR_EVEN; 1393 if (termios->c_cflag & PARENB) 1394 if (termios->c_cflag & PARODD) 1395 par = MPSC_CHR_2_PAR_ODD; 1396#ifdef CMSPAR 1397 if (termios->c_cflag & CMSPAR) { 1398 if (termios->c_cflag & PARODD) 1399 par = MPSC_CHR_2_PAR_MARK; 1400 else 1401 par = MPSC_CHR_2_PAR_SPACE; 1402 } 1403#endif 1404 1405 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk); 1406 1407 spin_lock_irqsave(&pi->port.lock, flags); 1408 1409 uart_update_timeout(port, termios->c_cflag, baud); 1410 1411 mpsc_set_char_length(pi, chr_bits); 1412 mpsc_set_stop_bit_length(pi, stop_bits); 1413 mpsc_set_parity(pi, par); 1414 mpsc_set_baudrate(pi, baud); 1415 1416 /* Characters/events to read */ 1417 pi->port.read_status_mask = SDMA_DESC_CMDSTAT_OR; 1418 1419 if (termios->c_iflag & INPCK) 1420 pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE 1421 | SDMA_DESC_CMDSTAT_FR; 1422 1423 if (termios->c_iflag & (BRKINT | PARMRK)) 1424 pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR; 1425 1426 /* Characters/events to ignore */ 1427 pi->port.ignore_status_mask = 0; 1428 1429 if (termios->c_iflag & IGNPAR) 1430 pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_PE 1431 | SDMA_DESC_CMDSTAT_FR; 1432 1433 if (termios->c_iflag & IGNBRK) { 1434 pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_BR; 1435 1436 if (termios->c_iflag & IGNPAR) 1437 
pi->port.ignore_status_mask |= SDMA_DESC_CMDSTAT_OR; 1438 } 1439 1440 if ((termios->c_cflag & CREAD)) { 1441 if (!pi->rcv_data) { 1442 pi->rcv_data = 1; 1443 mpsc_start_rx(pi); 1444 } 1445 } else if (pi->rcv_data) { 1446 mpsc_stop_rx(port); 1447 pi->rcv_data = 0; 1448 } 1449 1450 spin_unlock_irqrestore(&pi->port.lock, flags); 1451} 1452 1453static const char *mpsc_type(struct uart_port *port) 1454{ 1455 pr_debug("mpsc_type[%d]: port type: %s\n", port->line,MPSC_DRIVER_NAME); 1456 return MPSC_DRIVER_NAME; 1457} 1458 1459static int mpsc_request_port(struct uart_port *port) 1460{ 1461 /* Should make chip/platform specific call */ 1462 return 0; 1463} 1464 1465static void mpsc_release_port(struct uart_port *port) 1466{ 1467 struct mpsc_port_info *pi = (struct mpsc_port_info *)port; 1468 1469 if (pi->ready) { 1470 mpsc_uninit_rings(pi); 1471 mpsc_free_ring_mem(pi); 1472 pi->ready = 0; 1473 } 1474} 1475 1476static void mpsc_config_port(struct uart_port *port, int flags) 1477{ 1478} 1479 1480static int mpsc_verify_port(struct uart_port *port, struct serial_struct *ser) 1481{ 1482 struct mpsc_port_info *pi = (struct mpsc_port_info *)port; 1483 int rc = 0; 1484 1485 pr_debug("mpsc_verify_port[%d]: Verifying port data\n", pi->port.line); 1486 1487 if (ser->type != PORT_UNKNOWN && ser->type != PORT_MPSC) 1488 rc = -EINVAL; 1489 else if (pi->port.irq != ser->irq) 1490 rc = -EINVAL; 1491 else if (ser->io_type != SERIAL_IO_MEM) 1492 rc = -EINVAL; 1493 else if (pi->port.uartclk / 16 != ser->baud_base) /* Not sure */ 1494 rc = -EINVAL; 1495 else if ((void *)pi->port.mapbase != ser->iomem_base) 1496 rc = -EINVAL; 1497 else if (pi->port.iobase != ser->port) 1498 rc = -EINVAL; 1499 else if (ser->hub6 != 0) 1500 rc = -EINVAL; 1501 1502 return rc; 1503} 1504#ifdef CONFIG_CONSOLE_POLL 1505/* Serial polling routines for writing and reading from the uart while 1506 * in an interrupt or debug context. 
1507 */ 1508 1509static char poll_buf[2048]; 1510static int poll_ptr; 1511static int poll_cnt; 1512static void mpsc_put_poll_char(struct uart_port *port, 1513 unsigned char c); 1514 1515static int mpsc_get_poll_char(struct uart_port *port) 1516{ 1517 struct mpsc_port_info *pi = (struct mpsc_port_info *)port; 1518 struct mpsc_rx_desc *rxre; 1519 u32 cmdstat, bytes_in, i; 1520 u8 *bp; 1521 1522 if (!serial_polled) 1523 serial_polled = 1; 1524 1525 pr_debug("mpsc_rx_intr[%d]: Handling Rx intr\n", pi->port.line); 1526 1527 if (poll_cnt) { 1528 poll_cnt--; 1529 return poll_buf[poll_ptr++]; 1530 } 1531 poll_ptr = 0; 1532 poll_cnt = 0; 1533 1534 while (poll_cnt == 0) { 1535 rxre = (struct mpsc_rx_desc *)(pi->rxr + 1536 (pi->rxr_posn*MPSC_RXRE_SIZE)); 1537 dma_cache_sync(pi->port.dev, (void *)rxre, 1538 MPSC_RXRE_SIZE, DMA_FROM_DEVICE); 1539#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1540 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1541 invalidate_dcache_range((ulong)rxre, 1542 (ulong)rxre + MPSC_RXRE_SIZE); 1543#endif 1544 /* 1545 * Loop through Rx descriptors handling ones that have 1546 * been completed. 
1547 */ 1548 while (poll_cnt == 0 && 1549 !((cmdstat = be32_to_cpu(rxre->cmdstat)) & 1550 SDMA_DESC_CMDSTAT_O)){ 1551 bytes_in = be16_to_cpu(rxre->bytecnt); 1552 bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE); 1553 dma_cache_sync(pi->port.dev, (void *) bp, 1554 MPSC_RXBE_SIZE, DMA_FROM_DEVICE); 1555#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1556 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1557 invalidate_dcache_range((ulong)bp, 1558 (ulong)bp + MPSC_RXBE_SIZE); 1559#endif 1560 if ((unlikely(cmdstat & (SDMA_DESC_CMDSTAT_BR | 1561 SDMA_DESC_CMDSTAT_FR | SDMA_DESC_CMDSTAT_OR))) && 1562 !(cmdstat & pi->port.ignore_status_mask)) { 1563 poll_buf[poll_cnt] = *bp; 1564 poll_cnt++; 1565 } else { 1566 for (i = 0; i < bytes_in; i++) { 1567 poll_buf[poll_cnt] = *bp++; 1568 poll_cnt++; 1569 } 1570 pi->port.icount.rx += bytes_in; 1571 } 1572 rxre->bytecnt = cpu_to_be16(0); 1573 wmb(); 1574 rxre->cmdstat = cpu_to_be32(SDMA_DESC_CMDSTAT_O | 1575 SDMA_DESC_CMDSTAT_EI | 1576 SDMA_DESC_CMDSTAT_F | 1577 SDMA_DESC_CMDSTAT_L); 1578 wmb(); 1579 dma_cache_sync(pi->port.dev, (void *)rxre, 1580 MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL); 1581#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1582 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1583 flush_dcache_range((ulong)rxre, 1584 (ulong)rxre + MPSC_RXRE_SIZE); 1585#endif 1586 1587 /* Advance to next descriptor */ 1588 pi->rxr_posn = (pi->rxr_posn + 1) & 1589 (MPSC_RXR_ENTRIES - 1); 1590 rxre = (struct mpsc_rx_desc *)(pi->rxr + 1591 (pi->rxr_posn * MPSC_RXRE_SIZE)); 1592 dma_cache_sync(pi->port.dev, (void *)rxre, 1593 MPSC_RXRE_SIZE, DMA_FROM_DEVICE); 1594#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1595 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1596 invalidate_dcache_range((ulong)rxre, 1597 (ulong)rxre + MPSC_RXRE_SIZE); 1598#endif 1599 } 1600 1601 /* Restart rx engine, if its stopped */ 1602 if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0) 1603 mpsc_start_rx(pi); 
1604 } 1605 if (poll_cnt) { 1606 poll_cnt--; 1607 return poll_buf[poll_ptr++]; 1608 } 1609 1610 return 0; 1611} 1612 1613 1614static void mpsc_put_poll_char(struct uart_port *port, 1615 unsigned char c) 1616{ 1617 struct mpsc_port_info *pi = (struct mpsc_port_info *)port; 1618 u32 data; 1619 1620 data = readl(pi->mpsc_base + MPSC_MPCR); 1621 writeb(c, pi->mpsc_base + MPSC_CHR_1); 1622 mb(); 1623 data = readl(pi->mpsc_base + MPSC_CHR_2); 1624 data |= MPSC_CHR_2_TTCS; 1625 writel(data, pi->mpsc_base + MPSC_CHR_2); 1626 mb(); 1627 1628 while (readl(pi->mpsc_base + MPSC_CHR_2) & MPSC_CHR_2_TTCS); 1629} 1630#endif 1631 1632static struct uart_ops mpsc_pops = { 1633 .tx_empty = mpsc_tx_empty, 1634 .set_mctrl = mpsc_set_mctrl, 1635 .get_mctrl = mpsc_get_mctrl, 1636 .stop_tx = mpsc_stop_tx, 1637 .start_tx = mpsc_start_tx, 1638 .stop_rx = mpsc_stop_rx, 1639 .enable_ms = mpsc_enable_ms, 1640 .break_ctl = mpsc_break_ctl, 1641 .startup = mpsc_startup, 1642 .shutdown = mpsc_shutdown, 1643 .set_termios = mpsc_set_termios, 1644 .type = mpsc_type, 1645 .release_port = mpsc_release_port, 1646 .request_port = mpsc_request_port, 1647 .config_port = mpsc_config_port, 1648 .verify_port = mpsc_verify_port, 1649#ifdef CONFIG_CONSOLE_POLL 1650 .poll_get_char = mpsc_get_poll_char, 1651 .poll_put_char = mpsc_put_poll_char, 1652#endif 1653}; 1654 1655/* 1656 ****************************************************************************** 1657 * 1658 * Console Interface Routines 1659 * 1660 ****************************************************************************** 1661 */ 1662 1663#ifdef CONFIG_SERIAL_MPSC_CONSOLE 1664static void mpsc_console_write(struct console *co, const char *s, uint count) 1665{ 1666 struct mpsc_port_info *pi = &mpsc_ports[co->index]; 1667 u8 *bp, *dp, add_cr = 0; 1668 int i; 1669 unsigned long iflags; 1670 1671 spin_lock_irqsave(&pi->tx_lock, iflags); 1672 1673 while (pi->txr_head != pi->txr_tail) { 1674 while (mpsc_sdma_tx_active(pi)) 1675 udelay(100); 1676 
mpsc_sdma_intr_ack(pi); 1677 mpsc_tx_intr(pi); 1678 } 1679 1680 while (mpsc_sdma_tx_active(pi)) 1681 udelay(100); 1682 1683 while (count > 0) { 1684 bp = dp = pi->txb + (pi->txr_head * MPSC_TXBE_SIZE); 1685 1686 for (i = 0; i < MPSC_TXBE_SIZE; i++) { 1687 if (count == 0) 1688 break; 1689 1690 if (add_cr) { 1691 *(dp++) = '\r'; 1692 add_cr = 0; 1693 } else { 1694 *(dp++) = *s; 1695 1696 if (*(s++) == '\n') { /* add '\r' after '\n' */ 1697 add_cr = 1; 1698 count++; 1699 } 1700 } 1701 1702 count--; 1703 } 1704 1705 dma_cache_sync(pi->port.dev, (void *)bp, MPSC_TXBE_SIZE, 1706 DMA_BIDIRECTIONAL); 1707#if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE) 1708 if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */ 1709 flush_dcache_range((ulong)bp, 1710 (ulong)bp + MPSC_TXBE_SIZE); 1711#endif 1712 mpsc_setup_tx_desc(pi, i, 0); 1713 pi->txr_head = (pi->txr_head + 1) & (MPSC_TXR_ENTRIES - 1); 1714 mpsc_sdma_start_tx(pi); 1715 1716 while (mpsc_sdma_tx_active(pi)) 1717 udelay(100); 1718 1719 pi->txr_tail = (pi->txr_tail + 1) & (MPSC_TXR_ENTRIES - 1); 1720 } 1721 1722 spin_unlock_irqrestore(&pi->tx_lock, iflags); 1723} 1724 1725static int __init mpsc_console_setup(struct console *co, char *options) 1726{ 1727 struct mpsc_port_info *pi; 1728 int baud, bits, parity, flow; 1729 1730 pr_debug("mpsc_console_setup[%d]: options: %s\n", co->index, options); 1731 1732 if (co->index >= MPSC_NUM_CTLRS) 1733 co->index = 0; 1734 1735 pi = &mpsc_ports[co->index]; 1736 1737 baud = pi->default_baud; 1738 bits = pi->default_bits; 1739 parity = pi->default_parity; 1740 flow = pi->default_flow; 1741 1742 if (!pi->port.ops) 1743 return -ENODEV; 1744 1745 spin_lock_init(&pi->port.lock); /* Temporary fix--copied from 8250.c */ 1746 1747 if (options) 1748 uart_parse_options(options, &baud, &parity, &bits, &flow); 1749 1750 return uart_set_options(&pi->port, co, baud, parity, bits, flow); 1751} 1752 1753static struct console mpsc_console = { 1754 .name = MPSC_DEV_NAME, 1755 .write = 
		mpsc_console_write,
	.device		= uart_console_device,
	.setup		= mpsc_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,	/* bound to a port by console= or setup() */
	.data		= &mpsc_reg,
};

/*
 * Register the MPSC console late in boot if nothing (e.g. a console=
 * option) has enabled it already.
 */
static int __init mpsc_late_console_init(void)
{
	pr_debug("mpsc_late_console_init: Enter\n");

	if (!(mpsc_console.flags & CON_ENABLED))
		register_console(&mpsc_console);
	return 0;
}

late_initcall(mpsc_late_console_init);

#define MPSC_CONSOLE	&mpsc_console
#else
#define MPSC_CONSOLE	NULL
#endif
/*
 ******************************************************************************
 *
 * Dummy Platform Driver to extract & map shared register regions
 *
 ******************************************************************************
 */
/* Common warning for a missing or already-claimed platform resource */
static void mpsc_resource_err(char *s)
{
	printk(KERN_WARNING "MPSC: Platform device resource error in %s\n", s);
}

/*
 * Claim and ioremap the two register regions shared by both MPSC
 * controllers (routing regs and SDMA interrupt regs).  Returns 0 on
 * success; on failure, rolls back any partial mapping and returns -ENOMEM.
 * NOTE(review): ioremap() failure is not checked here -- *_base could be
 * NULL while 0 is returned; confirm against callers before relying on it.
 */
static int mpsc_shared_map_regs(struct platform_device *pd)
{
	struct resource *r;

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_ROUTING_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE,
				"mpsc_routing_regs")) {
		mpsc_shared_regs.mpsc_routing_base = ioremap(r->start,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_shared_regs.mpsc_routing_base_p = r->start;
	} else {
		mpsc_resource_err("MPSC routing base");
		return -ENOMEM;
	}

	if ((r = platform_get_resource(pd, IORESOURCE_MEM,
					MPSC_SDMA_INTR_BASE_ORDER))
			&& request_mem_region(r->start,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE,
				"sdma_intr_regs")) {
		mpsc_shared_regs.sdma_intr_base = ioremap(r->start,
				MPSC_SDMA_INTR_REG_BLOCK_SIZE);
		mpsc_shared_regs.sdma_intr_base_p = r->start;
	} else {
		/* Roll back the routing-regs mapping claimed above */
		iounmap(mpsc_shared_regs.mpsc_routing_base);
		release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
				MPSC_ROUTING_REG_BLOCK_SIZE);
		mpsc_resource_err("SDMA intr
base"); 1820 return -ENOMEM; 1821 } 1822 1823 return 0; 1824} 1825 1826static void mpsc_shared_unmap_regs(void) 1827{ 1828 if (!mpsc_shared_regs.mpsc_routing_base) { 1829 iounmap(mpsc_shared_regs.mpsc_routing_base); 1830 release_mem_region(mpsc_shared_regs.mpsc_routing_base_p, 1831 MPSC_ROUTING_REG_BLOCK_SIZE); 1832 } 1833 if (!mpsc_shared_regs.sdma_intr_base) { 1834 iounmap(mpsc_shared_regs.sdma_intr_base); 1835 release_mem_region(mpsc_shared_regs.sdma_intr_base_p, 1836 MPSC_SDMA_INTR_REG_BLOCK_SIZE); 1837 } 1838 1839 mpsc_shared_regs.mpsc_routing_base = NULL; 1840 mpsc_shared_regs.sdma_intr_base = NULL; 1841 1842 mpsc_shared_regs.mpsc_routing_base_p = 0; 1843 mpsc_shared_regs.sdma_intr_base_p = 0; 1844} 1845 1846static int mpsc_shared_drv_probe(struct platform_device *dev) 1847{ 1848 struct mpsc_shared_pdata *pdata; 1849 int rc = -ENODEV; 1850 1851 if (dev->id == 0) { 1852 if (!(rc = mpsc_shared_map_regs(dev))) { 1853 pdata = (struct mpsc_shared_pdata *) 1854 dev->dev.platform_data; 1855 1856 mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val; 1857 mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val; 1858 mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val; 1859 mpsc_shared_regs.SDMA_INTR_CAUSE_m = 1860 pdata->intr_cause_val; 1861 mpsc_shared_regs.SDMA_INTR_MASK_m = 1862 pdata->intr_mask_val; 1863 1864 rc = 0; 1865 } 1866 } 1867 1868 return rc; 1869} 1870 1871static int mpsc_shared_drv_remove(struct platform_device *dev) 1872{ 1873 int rc = -ENODEV; 1874 1875 if (dev->id == 0) { 1876 mpsc_shared_unmap_regs(); 1877 mpsc_shared_regs.MPSC_MRR_m = 0; 1878 mpsc_shared_regs.MPSC_RCRR_m = 0; 1879 mpsc_shared_regs.MPSC_TCRR_m = 0; 1880 mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0; 1881 mpsc_shared_regs.SDMA_INTR_MASK_m = 0; 1882 rc = 0; 1883 } 1884 1885 return rc; 1886} 1887 1888static struct platform_driver mpsc_shared_driver = { 1889 .probe = mpsc_shared_drv_probe, 1890 .remove = mpsc_shared_drv_remove, 1891 .driver = { 1892 .name = MPSC_SHARED_NAME, 1893 }, 1894}; 1895 1896/* 1897 
****************************************************************************** 1898 * 1899 * Driver Interface Routines 1900 * 1901 ****************************************************************************** 1902 */ 1903static struct uart_driver mpsc_reg = { 1904 .owner = THIS_MODULE, 1905 .driver_name = MPSC_DRIVER_NAME, 1906 .dev_name = MPSC_DEV_NAME, 1907 .major = MPSC_MAJOR, 1908 .minor = MPSC_MINOR_START, 1909 .nr = MPSC_NUM_CTLRS, 1910 .cons = MPSC_CONSOLE, 1911}; 1912 1913static int mpsc_drv_map_regs(struct mpsc_port_info *pi, 1914 struct platform_device *pd) 1915{ 1916 struct resource *r; 1917 1918 if ((r = platform_get_resource(pd, IORESOURCE_MEM, MPSC_BASE_ORDER)) 1919 && request_mem_region(r->start, MPSC_REG_BLOCK_SIZE, 1920 "mpsc_regs")) { 1921 pi->mpsc_base = ioremap(r->start, MPSC_REG_BLOCK_SIZE); 1922 pi->mpsc_base_p = r->start; 1923 } else { 1924 mpsc_resource_err("MPSC base"); 1925 goto err; 1926 } 1927 1928 if ((r = platform_get_resource(pd, IORESOURCE_MEM, 1929 MPSC_SDMA_BASE_ORDER)) 1930 && request_mem_region(r->start, 1931 MPSC_SDMA_REG_BLOCK_SIZE, "sdma_regs")) { 1932 pi->sdma_base = ioremap(r->start,MPSC_SDMA_REG_BLOCK_SIZE); 1933 pi->sdma_base_p = r->start; 1934 } else { 1935 mpsc_resource_err("SDMA base"); 1936 if (pi->mpsc_base) { 1937 iounmap(pi->mpsc_base); 1938 pi->mpsc_base = NULL; 1939 } 1940 goto err; 1941 } 1942 1943 if ((r = platform_get_resource(pd,IORESOURCE_MEM,MPSC_BRG_BASE_ORDER)) 1944 && request_mem_region(r->start, 1945 MPSC_BRG_REG_BLOCK_SIZE, "brg_regs")) { 1946 pi->brg_base = ioremap(r->start, MPSC_BRG_REG_BLOCK_SIZE); 1947 pi->brg_base_p = r->start; 1948 } else { 1949 mpsc_resource_err("BRG base"); 1950 if (pi->mpsc_base) { 1951 iounmap(pi->mpsc_base); 1952 pi->mpsc_base = NULL; 1953 } 1954 if (pi->sdma_base) { 1955 iounmap(pi->sdma_base); 1956 pi->sdma_base = NULL; 1957 } 1958 goto err; 1959 } 1960 return 0; 1961 1962err: 1963 return -ENOMEM; 1964} 1965 1966static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi) 
1967{ 1968 if (!pi->mpsc_base) { 1969 iounmap(pi->mpsc_base); 1970 release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE); 1971 } 1972 if (!pi->sdma_base) { 1973 iounmap(pi->sdma_base); 1974 release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE); 1975 } 1976 if (!pi->brg_base) { 1977 iounmap(pi->brg_base); 1978 release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE); 1979 } 1980 1981 pi->mpsc_base = NULL; 1982 pi->sdma_base = NULL; 1983 pi->brg_base = NULL; 1984 1985 pi->mpsc_base_p = 0; 1986 pi->sdma_base_p = 0; 1987 pi->brg_base_p = 0; 1988} 1989 1990static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi, 1991 struct platform_device *pd, int num) 1992{ 1993 struct mpsc_pdata *pdata; 1994 1995 pdata = (struct mpsc_pdata *)pd->dev.platform_data; 1996 1997 pi->port.uartclk = pdata->brg_clk_freq; 1998 pi->port.iotype = UPIO_MEM; 1999 pi->port.line = num; 2000 pi->port.type = PORT_MPSC; 2001 pi->port.fifosize = MPSC_TXBE_SIZE; 2002 pi->port.membase = pi->mpsc_base; 2003 pi->port.mapbase = (ulong)pi->mpsc_base; 2004 pi->port.ops = &mpsc_pops; 2005 2006 pi->mirror_regs = pdata->mirror_regs; 2007 pi->cache_mgmt = pdata->cache_mgmt; 2008 pi->brg_can_tune = pdata->brg_can_tune; 2009 pi->brg_clk_src = pdata->brg_clk_src; 2010 pi->mpsc_max_idle = pdata->max_idle; 2011 pi->default_baud = pdata->default_baud; 2012 pi->default_bits = pdata->default_bits; 2013 pi->default_parity = pdata->default_parity; 2014 pi->default_flow = pdata->default_flow; 2015 2016 /* Initial values of mirrored regs */ 2017 pi->MPSC_CHR_1_m = pdata->chr_1_val; 2018 pi->MPSC_CHR_2_m = pdata->chr_2_val; 2019 pi->MPSC_CHR_10_m = pdata->chr_10_val; 2020 pi->MPSC_MPCR_m = pdata->mpcr_val; 2021 pi->BRG_BCR_m = pdata->bcr_val; 2022 2023 pi->shared_regs = &mpsc_shared_regs; 2024 2025 pi->port.irq = platform_get_irq(pd, 0); 2026} 2027 2028static int mpsc_drv_probe(struct platform_device *dev) 2029{ 2030 struct mpsc_port_info *pi; 2031 int rc = -ENODEV; 2032 2033 pr_debug("mpsc_drv_probe: 
Adding MPSC %d\n", dev->id); 2034 2035 if (dev->id < MPSC_NUM_CTLRS) { 2036 pi = &mpsc_ports[dev->id]; 2037 2038 if (!(rc = mpsc_drv_map_regs(pi, dev))) { 2039 mpsc_drv_get_platform_data(pi, dev, dev->id); 2040 pi->port.dev = &dev->dev; 2041 2042 if (!(rc = mpsc_make_ready(pi))) { 2043 spin_lock_init(&pi->tx_lock); 2044 if (!(rc = uart_add_one_port(&mpsc_reg, 2045 &pi->port))) { 2046 rc = 0; 2047 } else { 2048 mpsc_release_port((struct uart_port *) 2049 pi); 2050 mpsc_drv_unmap_regs(pi); 2051 } 2052 } else { 2053 mpsc_drv_unmap_regs(pi); 2054 } 2055 } 2056 } 2057 2058 return rc; 2059} 2060 2061static int mpsc_drv_remove(struct platform_device *dev) 2062{ 2063 pr_debug("mpsc_drv_exit: Removing MPSC %d\n", dev->id); 2064 2065 if (dev->id < MPSC_NUM_CTLRS) { 2066 uart_remove_one_port(&mpsc_reg, &mpsc_ports[dev->id].port); 2067 mpsc_release_port((struct uart_port *) 2068 &mpsc_ports[dev->id].port); 2069 mpsc_drv_unmap_regs(&mpsc_ports[dev->id]); 2070 return 0; 2071 } else { 2072 return -ENODEV; 2073 } 2074} 2075 2076static struct platform_driver mpsc_driver = { 2077 .probe = mpsc_drv_probe, 2078 .remove = mpsc_drv_remove, 2079 .driver = { 2080 .name = MPSC_CTLR_NAME, 2081 .owner = THIS_MODULE, 2082 }, 2083}; 2084 2085static int __init mpsc_drv_init(void) 2086{ 2087 int rc; 2088 2089 printk(KERN_INFO "Serial: MPSC driver\n"); 2090 2091 memset(mpsc_ports, 0, sizeof(mpsc_ports)); 2092 memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs)); 2093 2094 if (!(rc = uart_register_driver(&mpsc_reg))) { 2095 if (!(rc = platform_driver_register(&mpsc_shared_driver))) { 2096 if ((rc = platform_driver_register(&mpsc_driver))) { 2097 platform_driver_unregister(&mpsc_shared_driver); 2098 uart_unregister_driver(&mpsc_reg); 2099 } 2100 } else { 2101 uart_unregister_driver(&mpsc_reg); 2102 } 2103 } 2104 2105 return rc; 2106} 2107 2108static void __exit mpsc_drv_exit(void) 2109{ 2110 platform_driver_unregister(&mpsc_driver); 2111 platform_driver_unregister(&mpsc_shared_driver); 2112 
uart_unregister_driver(&mpsc_reg); 2113 memset(mpsc_ports, 0, sizeof(mpsc_ports)); 2114 memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs)); 2115} 2116 2117module_init(mpsc_drv_init); 2118module_exit(mpsc_drv_exit); 2119 2120MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>"); 2121MODULE_DESCRIPTION("Generic Marvell MPSC serial/UART driver"); 2122MODULE_VERSION(MPSC_VERSION); 2123MODULE_LICENSE("GPL"); 2124MODULE_ALIAS_CHARDEV_MAJOR(MPSC_MAJOR); 2125MODULE_ALIAS("platform:" MPSC_CTLR_NAME); 2126