1/* 2 A FORE Systems 200E-series driver for ATM on Linux. 3 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003. 4 5 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de). 6 7 This driver simultaneously supports PCA-200E and SBA-200E adapters 8 on i386, alpha (untested), powerpc, sparc and sparc64 architectures. 9 10 This program is free software; you can redistribute it and/or modify 11 it under the terms of the GNU General Public License as published by 12 the Free Software Foundation; either version 2 of the License, or 13 (at your option) any later version. 14 15 This program is distributed in the hope that it will be useful, 16 but WITHOUT ANY WARRANTY; without even the implied warranty of 17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 GNU General Public License for more details. 19 20 You should have received a copy of the GNU General Public License 21 along with this program; if not, write to the Free Software 22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 23*/ 24 25 26#include <linux/kernel.h> 27#include <linux/slab.h> 28#include <linux/init.h> 29#include <linux/capability.h> 30#include <linux/interrupt.h> 31#include <linux/bitops.h> 32#include <linux/pci.h> 33#include <linux/module.h> 34#include <linux/atmdev.h> 35#include <linux/sonet.h> 36#include <linux/atm_suni.h> 37#include <linux/dma-mapping.h> 38#include <linux/delay.h> 39#include <asm/io.h> 40#include <asm/string.h> 41#include <asm/page.h> 42#include <asm/irq.h> 43#include <asm/dma.h> 44#include <asm/byteorder.h> 45#include <asm/uaccess.h> 46#include <asm/atomic.h> 47 48#ifdef CONFIG_ATM_FORE200E_SBA 49#include <asm/idprom.h> 50#include <asm/sbus.h> 51#include <asm/openprom.h> 52#include <asm/oplib.h> 53#include <asm/pgtable.h> 54#endif 55 56#if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet \ 57 */ 58#define FORE200E_USE_TASKLET 59#endif 60 61 62#define FORE200E_52BYTE_AAL0_SDU 63 
64#include "fore200e.h" 65#include "suni.h" 66 67#define FORE200E_VERSION "0.3e" 68 69#define FORE200E "fore200e: " 70 71#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 72#define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \ 73 printk(FORE200E format, ##args); } while (0) 74#else 75#define DPRINTK(level, format, args...) do {} while (0) 76#endif 77 78 79#define FORE200E_ALIGN(addr, alignment) \ 80 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr)) 81 82#define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type)) 83 84#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ]) 85 86#define FORE200E_NEXT_ENTRY(index, modulo) (index = ++(index) % (modulo)) 87 88#define ASSERT(expr) if (!(expr)) { \ 89 printk(FORE200E "assertion failed! %s[%d]: %s\n", \ 90 __FUNCTION__, __LINE__, #expr); \ 91 panic(FORE200E "%s", __FUNCTION__); \ 92 } 93 94 95static const struct atmdev_ops fore200e_ops; 96static const struct fore200e_bus fore200e_bus[]; 97 98static LIST_HEAD(fore200e_boards); 99 100 101MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen"); 102MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION); 103MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E"); 104 105 106static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { 107 { BUFFER_S1_NBR, BUFFER_L1_NBR }, 108 { BUFFER_S2_NBR, BUFFER_L2_NBR } 109}; 110 111static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { 112 { BUFFER_S1_SIZE, BUFFER_L1_SIZE }, 113 { BUFFER_S2_SIZE, BUFFER_L2_SIZE } 114}; 115 116 117#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 118static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" }; 119#endif 120 121 122 123 124static enum fore200e_aal 125fore200e_atm2fore_aal(int aal) 126{ 127 
    switch(aal) {
    case ATM_AAL0:  return FORE200E_AAL0;
    case ATM_AAL34: return FORE200E_AAL34;
    case ATM_AAL1:
    case ATM_AAL2:
    case ATM_AAL5:  return FORE200E_AAL5;
    }

    /* NOTE(review): the return type is enum fore200e_aal, so -EINVAL is
       squeezed into the enum's underlying type here; callers presumably
       test for a negative value — confirm against call sites */
    return -EINVAL;
}


/* render an IRQ number as a string.
   NOTE: uses a static buffer, so the result is overwritten by the next
   call and the function is not reentrant — only used for boot/teardown
   messages in this driver */
static char*
fore200e_irq_itoa(int irq)
{
    static char str[8];
    sprintf(str, "%d", irq);
    return str;
}


/* allocate and align a chunk of memory intended to hold the data being exchanged
   between the driver and the adapter (using streaming DVMA) */

static int
fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
{
    unsigned long offset = 0;

    /* kmalloc'ed memory is already at least int-aligned, so no padding needed */
    if (alignment <= sizeof(int))
	alignment = 0;

    /* over-allocate by 'alignment' bytes so an aligned address always fits */
    chunk->alloc_size = size + alignment;
    chunk->align_size = size;
    chunk->direction  = direction;

    chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
    if (chunk->alloc_addr == NULL)
	return -ENOMEM;

    if (alignment > 0)
	offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);

    chunk->align_addr = chunk->alloc_addr + offset;

    /* map only the aligned region for streaming DMA */
    chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);

    return 0;
}


/* free a chunk of memory */

static void
fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    /* NOTE(review): 'dma_size' is presumably an alias for align_size declared
       in fore200e.h — confirm, since fore200e_chunk_alloc only sets align_size */
    fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);

    kfree(chunk->alloc_addr);
}


/* busy-wait for (at least) 'msecs' milliseconds */
static void
fore200e_spin(int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    while (time_before(jiffies, timeout));
}


/* poll a host-memory status word until it reaches 'val', the adapter reports
   STATUS_ERROR, or 'msecs' milliseconds elapse; returns non-zero on success */
static int
fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int           ok;

    /* order our previous writes before the device's status updates are read */
    mb();
    do {
	if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
	    break;

    } while (time_before(jiffies, timeout));

    if (!ok) {
	printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
	       *addr, val);
    }

    return ok;
}


/* poll a device register (through the bus read accessor) until it equals 'val'
   or 'msecs' milliseconds elapse; returns non-zero on success */
static int
fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
{
    unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
    int           ok;

    do {
	if ((ok = (fore200e->bus->read(addr) == val)))
	    break;

    } while (time_before(jiffies, timeout));

    if (!ok) {
	printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
	       fore200e->bus->read(addr), val);
    }

    return ok;
}


/* release every rx buffer chunk still allocated, across all schemes/magnitudes */
static void
fore200e_free_rx_buf(struct fore200e* fore200e)
{
    int scheme, magn, nbr;
    struct buffer* buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {

		for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {

		    struct chunk* data = &buffer[ nbr ].data;

		    /* only free chunks that were actually allocated */
		    if (data->alloc_addr != NULL)
			fore200e_chunk_free(fore200e, data);
		}
	    }
	}
    }
}


/* tear down the buffer supply queues' status and rbd-block DMA chunks */
static void
fore200e_uninit_bs_queue(struct fore200e* fore200e)
{
    int scheme, magn;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    struct chunk* status    = &fore200e->host_bsq[ scheme ][ magn ].status;
	    struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;

	    if (status->alloc_addr)
		fore200e->bus->dma_chunk_free(fore200e, status);

	    if (rbd_block->alloc_addr)
		fore200e->bus->dma_chunk_free(fore200e, rbd_block);
	}
    }
}


/* reset the board; when 'diag' is set, also wait for and report the
   on-board self-test result (continued on the following lines) */
static int
fore200e_reset(struct fore200e* fore200e, int diag)
{
    int ok;

    fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;

    fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);

    fore200e->bus->reset(fore200e);

    if (diag) {
	/* wait up to 1s for the firmware to report a successful self-test */
	ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
	if (ok == 0) {

	    printk(FORE200E "device %s self-test failed\n", fore200e->name);
	    return -ENODEV;
	}

	printk(FORE200E "device %s self-test passed\n", fore200e->name);

	fore200e->state = FORE200E_STATE_RESET;
    }

    return 0;
}


/* undo device initialization, releasing resources in reverse order of
   acquisition; the switch below deliberately falls through from the
   current state down to FORE200E_STATE_BLANK */
static void
fore200e_shutdown(struct fore200e* fore200e)
{
    printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
	   fore200e->name, fore200e->phys_base,
	   fore200e_irq_itoa(fore200e->irq));

    if (fore200e->state > FORE200E_STATE_RESET) {
	/* first, reset the board to prevent further interrupts or data transfers */
	fore200e_reset(fore200e, 0);
    }

    /* then, release all allocated resources */
    switch(fore200e->state) {

    case FORE200E_STATE_COMPLETE:
	kfree(fore200e->stats);
	/* fall through */

    case FORE200E_STATE_IRQ:
	free_irq(fore200e->irq, fore200e->atm_dev);
	/* fall through */

    case FORE200E_STATE_ALLOC_BUF:
	fore200e_free_rx_buf(fore200e);
	/* fall through */

    case FORE200E_STATE_INIT_BSQ:
	fore200e_uninit_bs_queue(fore200e);
	/* fall through */

    case FORE200E_STATE_INIT_RXQ:
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
	/* fall through */

    case FORE200E_STATE_INIT_TXQ:
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
	/* fall through */

    case FORE200E_STATE_INIT_CMDQ:
	fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
	/* fall through */

    case FORE200E_STATE_INITIALIZE:
	/* nothing to do for that state */
	/* fall through */

    case FORE200E_STATE_START_FW:
	/* nothing to do for that state */
	/* fall through */

    case FORE200E_STATE_LOAD_FW:
	/* nothing to do for that state */
	/* fall through */

    case FORE200E_STATE_RESET:
	/* nothing to do for that state */
	/* fall through */

    case FORE200E_STATE_MAP:
	fore200e->bus->unmap(fore200e);
	/* fall through */

    case FORE200E_STATE_CONFIGURE:
	/* nothing to do for that state */
	/* fall through */

    case FORE200E_STATE_REGISTER:
	atm_dev_deregister(fore200e->atm_dev);
	/* fall through */

    case FORE200E_STATE_BLANK:
	/* nothing to do for that state */
	break;
    }
}


#ifdef CONFIG_ATM_FORE200E_PCA

static u32 fore200e_pca_read(volatile u32 __iomem *addr)
{
    /* on big-endian hosts, the board is configured to convert
       the endianess of slave RAM accesses  */
    return le32_to_cpu(readl(addr));
}


static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
{
    /* on big-endian hosts, the board is configured to convert
       the endianess of slave RAM accesses  */
    writel(cpu_to_le32(val), addr);
}


/* map a buffer for streaming PCI DMA; returns the bus address */
static u32
fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
{
    u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);

    DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d,  --> dma_addr = 0x%08x\n",
	    virt_addr, size, direction, dma_addr);

    return dma_addr;
}


/* undo a streaming PCI DMA mapping */
static void
fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
	    dma_addr, size, direction);

    pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
}


/* make a device-written DMA region visible to the CPU */
static void
fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
}

/* hand a DMA region back to the device (continued on the following lines) */
static void
fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
}


/* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
   (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */

static int
fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
			     int size, int nbr, int alignment)
{
    /* returned chunks are page-aligned */
    chunk->alloc_size = size * nbr;
    chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
					     chunk->alloc_size,
					     &chunk->dma_addr);

    if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
	return -ENOMEM;

    /* consistent memory needs no extra alignment fix-up */
    chunk->align_addr = chunk->alloc_addr;

    return 0;
}


/* free a DMA consistent chunk of memory */

static void
fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
			chunk->alloc_size,
			chunk->alloc_addr,
			chunk->dma_addr);
}


/* return non-zero when the board has an interrupt pending for us */
static int
fore200e_pca_irq_check(struct fore200e* fore200e)
{
    /* this is a 1 bit register */
    int irq_posted = readl(fore200e->regs.pca.psr);

#if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
    if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
	DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
    }
#endif

    return irq_posted;
}


/* acknowledge the pending interrupt at the board */
static void
fore200e_pca_irq_ack(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
}


/* hardware-reset the PCA-200E: assert reset, hold it ~10ms, release */
static void
fore200e_pca_reset(struct fore200e* fore200e)
{
    writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
    fore200e_spin(10);
    writel(0, fore200e->regs.pca.hcr);
}


/* map the board's I/O space and locate the PCA-specific registers */
static int __devinit
fore200e_pca_map(struct fore200e* fore200e)
{
    DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);

    fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);

    if (fore200e->virt_base == NULL) {
	printk(FORE200E "can't map device %s\n", fore200e->name);
	return -EFAULT;
    }

    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

    /* gain access to the PCA specific registers  */
    fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
    fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
    fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;

    fore200e->state = FORE200E_STATE_MAP;
    return 0;
}


/* undo fore200e_pca_map() */
static void
fore200e_pca_unmap(struct fore200e* fore200e)
{
    DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);

    if (fore200e->virt_base != NULL)
	iounmap(fore200e->virt_base);
}


/* PCI configuration-space setup: sanity-check the IRQ, enable large bursts
   (and byte-swapping on big-endian hosts), raise the latency timer */
static int __devinit
fore200e_pca_configure(struct fore200e* fore200e)
{
    struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
    u8              master_ctrl, latency;

    DPRINTK(2, "device %s being configured\n", fore200e->name);

    if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
	printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
	return -EIO;
    }

    pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);

    master_ctrl = master_ctrl
#if defined(__BIG_ENDIAN)
	/* request the PCA board to convert the endianess of slave RAM accesses */
	| PCA200E_CTRL_CONVERT_ENDIAN
#endif
	| PCA200E_CTRL_LARGE_PCI_BURSTS;

    pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);

    /* raise latency from 32 (default) to 192, as this seems to prevent NIC
       lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
       this may impact the performances of other PCI devices on the same bus, though */
    latency = 192;
    pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);

    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}


/* read the board PROM (MAC address, serial number, hw revision) by issuing a
   GET_PROM command and letting the firmware DMA the data into 'prom' */
static int __init
fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
    struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
    struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
    struct prom_opcode      opcode;
    int                     ok;
    u32                     prom_dma;

    FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);

    opcode.opcode = OPCODE_GET_PROM;
    opcode.pad    = 0;

    prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);

    fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);

    *entry->status = STATUS_PENDING;

    /* writing the opcode last kicks off the command */
    fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);

    ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);

    *entry->status = STATUS_FREE;

    fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);

    if (ok == 0) {
	printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
	return -EIO;
    }

#if defined(__BIG_ENDIAN)

#define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))

    /* MAC address is stored as little-endian */
    swap_here(&prom->mac_addr[0]);
    swap_here(&prom->mac_addr[4]);
#endif

    return 0;
}


/* contribute the PCI location line to the driver's /proc output */
static int
fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
{
    struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;

    return sprintf(page, "   PCI bus/slot/function:\t%d/%d/%d\n",
		   pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
}

#endif /* CONFIG_ATM_FORE200E_PCA */


#ifdef CONFIG_ATM_FORE200E_SBA

static u32
fore200e_sba_read(volatile u32 __iomem *addr)
{
    return sbus_readl(addr);
}


static void
fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
{
    sbus_writel(val, addr);
}


/* map a buffer for streaming SBUS DVMA; returns the bus address */
static u32
fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
{
    u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);

    DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
	    virt_addr, size, direction, dma_addr);

    return dma_addr;
}


/* undo a streaming SBUS DVMA mapping */
static void
fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
	    dma_addr, size, direction);

    sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
}


/* make a device-written DVMA region visible to the CPU */
static void
fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
}

/* hand a DVMA region back to the device */
static void
fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
{
    DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);

    sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
}


/* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
   (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */

static int
fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
			     int size, int nbr, int alignment)
{
    chunk->alloc_size = chunk->align_size = size * nbr;

    /* returned chunks are page-aligned */
    chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
					      chunk->alloc_size,
					      &chunk->dma_addr);

    if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
	return -ENOMEM;

    chunk->align_addr = chunk->alloc_addr;

    return 0;
}


/* free a DVMA consistent chunk of memory */

static void
fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
{
    sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
			 chunk->alloc_size,
			 chunk->alloc_addr,
			 chunk->dma_addr);
}


/* enable board interrupts, preserving the HCR's sticky bits */
static void
fore200e_sba_irq_enable(struct fore200e* fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
}


/* return non-zero when the board has an interrupt pending for us */
static int
fore200e_sba_irq_check(struct fore200e* fore200e)
{
    return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
}


/* acknowledge the pending interrupt, preserving the HCR's sticky bits */
static void
fore200e_sba_irq_ack(struct fore200e* fore200e)
{
    u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
    fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
}


/* hardware-reset the SBA-200E: assert reset, hold it ~10ms, release */
static void
fore200e_sba_reset(struct fore200e* fore200e)
{
    fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
    fore200e_spin(10);
    fore200e->bus->write(0, fore200e->regs.sba.hcr);
}


/* map the four SBUS register/RAM resources and set up DVMA burst sizes */
static int __init
fore200e_sba_map(struct fore200e* fore200e)
{
    struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
    unsigned int bursts;

    /* gain access to the SBA specific registers  */
    fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
    fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
    fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
    fore200e->virt_base    = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");

    /* NOTE(review): the three register mappings above are not checked for
       failure here, only the RAM mapping is — verify against sbus_ioremap
       failure semantics */
    if (fore200e->virt_base == NULL) {
	printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
	return -EFAULT;
    }

    DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);

    fore200e->bus->write(0x02, fore200e->regs.sba.isr);

    /* get the supported DVMA burst sizes */
    bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);

    if (sbus_can_dma_64bit(sbus_dev))
	sbus_set_sbus64(sbus_dev, bursts);

    fore200e->state = FORE200E_STATE_MAP;
    return 0;
}


/* undo fore200e_sba_map() */
static void
fore200e_sba_unmap(struct fore200e* fore200e)
{
    sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
    sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
    sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
    sbus_iounmap(fore200e->virt_base,    SBA200E_RAM_LENGTH);
}


/* no bus-level configuration is needed on SBUS */
static int __init
fore200e_sba_configure(struct fore200e* fore200e)
{
    fore200e->state = FORE200E_STATE_CONFIGURE;
    return 0;
}


/* walk the SBUS tree looking for the index'th SBA-200E board; on a match,
   allocate and minimally initialize its fore200e descriptor */
static struct fore200e* __init
fore200e_sba_detect(const struct fore200e_bus* bus, int index)
{
    struct fore200e* fore200e;
    struct sbus_bus* sbus_bus;
    struct sbus_dev* sbus_dev = NULL;

    unsigned int     count = 0;

    for_each_sbus (sbus_bus) {
	for_each_sbusdev (sbus_dev, sbus_bus) {
	    if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
		if (count >= index)
		    goto found;
		count++;
	    }
	}
    }
    return NULL;

  found:
    if (sbus_dev->num_registers != 4) {
	printk(FORE200E "this %s device has %d instead of 4 registers\n",
	       bus->model_name, sbus_dev->num_registers);
	return NULL;
    }

    fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
    if (fore200e == NULL)
	return NULL;

    fore200e->bus     = bus;
    fore200e->bus_dev = sbus_dev;
    fore200e->irq     = sbus_dev->irqs[ 0 ];

    fore200e->phys_base = (unsigned long)sbus_dev;

    sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);

    return fore200e;
}


/* read MAC address, serial number and hw revision from the OpenPROM
   properties of the SBUS device */
static int __init
fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
{
    struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
    int              len;

    len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
    if (len < 0)
	return -EBUSY;

    len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
    if (len < 0)
	return -EBUSY;

    prom_getproperty(sbus_dev->prom_node, "serialnumber",
		     (char*)&prom->serial_number, sizeof(prom->serial_number));

    prom_getproperty(sbus_dev->prom_node, "promversion",
		     (char*)&prom->hw_revision, sizeof(prom->hw_revision));

    return 0;
}


/* contribute the SBUS location line to the driver's /proc output */
static int
fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
{
    struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;

    return sprintf(page, "   SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
}
#endif /* CONFIG_ATM_FORE200E_SBA */


/* reap completed tx entries: free per-entry resources, pop or drop the skb
   depending on whether the vcc is still the one the PDU was queued on, and
   update tx statistics.  Caller holds fore200e->q_lock. */
static void
fore200e_tx_irq(struct fore200e* fore200e)
{
    struct host_txq*        txq = &fore200e->host_txq;
    struct host_txq_entry*  entry;
    struct atm_vcc*         vcc;
    struct fore200e_vc_map* vc_map;

    if (fore200e->host_txq.txing == 0)
	return;

    for (;;) {

	entry = &txq->host_entry[ txq->tail ];

	if ((*entry->status & STATUS_COMPLETE) == 0) {
	    break;
	}

	DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
		entry, txq->tail, entry->vc_map, entry->skb);

	/* free copy of misaligned data */
	kfree(entry->data);

	/* remove DMA mapping */
	fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
				 DMA_TO_DEVICE);

	vc_map = entry->vc_map;

	/* vcc closed since the time the entry was submitted for tx? */
	if ((vc_map->vcc == NULL) ||
	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

	    DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
		    fore200e->atm_dev->number);

	    dev_kfree_skb_any(entry->skb);
	}
	else {
	    ASSERT(vc_map->vcc);

	    /* vcc closed then immediately re-opened? */
	    if (vc_map->incarn != entry->incarn) {

		/* when a vcc is closed, some PDUs may be still pending in the tx queue.
		   if the same vcc is immediately re-opened, those pending PDUs must
		   not be popped after the completion of their emission, as they refer
		   to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
		   would be decremented by the size of the (unrelated) skb, possibly
		   leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
		   we thus bind the tx entry to the current incarnation of the vcc
		   when the entry is submitted for tx. When the tx later completes,
		   if the incarnation number of the tx entry does not match the one
		   of the vcc, then this implies that the vcc has been closed then re-opened.
		   we thus just drop the skb here. */

		DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
			fore200e->atm_dev->number);

		dev_kfree_skb_any(entry->skb);
	    }
	    else {
		vcc = vc_map->vcc;
		ASSERT(vcc);

		/* notify tx completion */
		if (vcc->pop) {
		    vcc->pop(vcc, entry->skb);
		}
		else {
		    dev_kfree_skb_any(entry->skb);
		}
		/* race fixed by the above incarnation mechanism, but... */
		if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
		    atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
		}
		/* check error condition */
		if (*entry->status & STATUS_ERROR)
		    atomic_inc(&vcc->stats->tx_err);
		else
		    atomic_inc(&vcc->stats->tx);
	    }
	}

	*entry->status = STATUS_FREE;

	fore200e->host_txq.txing--;

	FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
    }
}


#ifdef FORE200E_BSQ_DEBUG
/* debug-only consistency walk of one buffer supply queue's free list */
int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
{
    struct buffer* buffer;
    int count = 0;

    buffer = bsq->freebuf;
    while (buffer) {

	if (buffer->supplied) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
		   where, scheme, magn, buffer->index);
	}

	if (buffer->magn != magn) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
		   where, scheme, magn, buffer->index, buffer->magn);
	}

	if (buffer->scheme != scheme) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
		   where, scheme, magn, buffer->index, buffer->scheme);
	}

	if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
	    printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
		   where, scheme, magn, buffer->index);
	}

	count++;
	buffer = buffer->next;
    }

    if (count != bsq->freebuf_count) {
	printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
	       where, scheme, magn, count, bsq->freebuf_count);
    }
    return 0;
}
#endif


/* hand free rx buffers back to the adapter, one RBD block at a time,
   for every scheme/magnitude queue that has at least a full block free */
static void
fore200e_supply(struct fore200e* fore200e)
{
    int  scheme, magn, i;

    struct host_bsq*       bsq;
    struct host_bsq_entry* entry;
    struct buffer*         buffer;

    for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
	for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {

	    bsq = &fore200e->host_bsq[ scheme ][ magn ];

#ifdef FORE200E_BSQ_DEBUG
	    bsq_audit(1, bsq, scheme, magn);
#endif
	    while (bsq->freebuf_count >= RBD_BLK_SIZE) {

		DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
			RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);

		entry = &bsq->host_entry[ bsq->head ];

		for (i = 0; i < RBD_BLK_SIZE; i++) {

		    /* take the first buffer in the free buffer list */
		    buffer = bsq->freebuf;
		    if (!buffer) {
			printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
			       scheme, magn, bsq->freebuf_count);
			return;
		    }
		    bsq->freebuf = buffer->next;

#ifdef FORE200E_BSQ_DEBUG
		    if (buffer->supplied)
			printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
			       scheme, magn, buffer->index);
		    buffer->supplied = 1;
#endif
		    entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
		    entry->rbd_block->rbd[ i ].handle       = FORE200E_BUF2HDL(buffer);
		}

		FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);

		/* decrease accordingly the number of free rx buffers */
		bsq->freebuf_count -= RBD_BLK_SIZE;

		/* hand the block to the adapter; writing the rbd block address
		   is what makes it visible to the firmware */
		*entry->status = STATUS_PENDING;
		fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
	    }
	}
    }
}


/* reassemble a received PDU from its rx buffer segments into a fresh skb
   and push it up the ATM stack; returns 0 on success, -ENOMEM on drop */
static int
fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
{
    struct sk_buff*      skb;
    struct buffer*       buffer;
    struct fore200e_vcc* fore200e_vcc;
    int                  i, pdu_len = 0;
#ifdef FORE200E_52BYTE_AAL0_SDU
    u32                  cell_header = 0;
#endif

    ASSERT(vcc);

    fore200e_vcc = FORE200E_VCC(vcc);
    ASSERT(fore200e_vcc);

#ifdef FORE200E_52BYTE_AAL0_SDU
    /* for raw AAL0 with 52-byte SDUs, rebuild the 4-byte ATM cell header
       and prepend it to the payload */
    if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {

	cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
	              (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
	              (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
	              (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
	              rpd->atm_header.clp;
	pdu_len = 4;
    }
#endif

    /* compute total PDU length */
    for (i = 0; i < rpd->nseg; i++)
	pdu_len += rpd->rsd[ i ].length;

    skb = alloc_skb(pdu_len, GFP_ATOMIC);
    if (skb == NULL) {
	DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    __net_timestamp(skb);

#ifdef FORE200E_52BYTE_AAL0_SDU
    if (cell_header) {
	*((u32*)skb_put(skb, 4)) = cell_header;
    }
#endif

    /* reassemble segments */
    for (i = 0; i < rpd->nseg; i++) {

	/* rebuild rx buffer address from rsd handle */
	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

	/* Make device DMA transfer visible to CPU.  */
	fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);

	memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);

	/* Now let the device get at it again.  */
	fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
    }

    DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);

    if (pdu_len < fore200e_vcc->rx_min_pdu)
	fore200e_vcc->rx_min_pdu = pdu_len;
    if (pdu_len > fore200e_vcc->rx_max_pdu)
	fore200e_vcc->rx_max_pdu = pdu_len;
    fore200e_vcc->rx_pdu++;

    /* push PDU */
    if (atm_charge(vcc, skb->truesize) == 0) {

	DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
		vcc->itf, vcc->vpi, vcc->vci);

	dev_kfree_skb_any(skb);

	atomic_inc(&vcc->stats->rx_drop);
	return -ENOMEM;
    }

    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

    vcc->push(vcc, skb);
    atomic_inc(&vcc->stats->rx);

    ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);

    return 0;
}


/* return the rx buffers referenced by a received PDU descriptor to their
   free lists so they can be re-supplied to the adapter */
static void
fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
{
    struct host_bsq* bsq;
    struct buffer*   buffer;
    int              i;

    for (i = 0; i < rpd->nseg; i++) {

	/* rebuild rx buffer address from rsd handle */
	buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);

	bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];

#ifdef FORE200E_BSQ_DEBUG
	bsq_audit(2, bsq, buffer->scheme, buffer->magn);

	if (buffer->supplied == 0)
	    printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
		   buffer->scheme, buffer->magn, buffer->index);
	buffer->supplied = 0;
#endif

	/* re-insert the buffer into the free buffer list */
	buffer->next = bsq->freebuf;
	bsq->freebuf = buffer;

	/* then increment the number of free rx buffers */
	bsq->freebuf_count++;
    }
}


/* drain completed rx entries: dispatch each PDU to its vcc (or account an
   error), recycle its buffers, ack it to the adapter, and re-supply
   buffers.  Caller holds fore200e->q_lock. */
static void
fore200e_rx_irq(struct fore200e* fore200e)
{
    struct host_rxq*        rxq = &fore200e->host_rxq;
    struct host_rxq_entry*  entry;
    struct atm_vcc*         vcc;
    struct fore200e_vc_map* vc_map;

    for (;;) {

	entry = &rxq->host_entry[ rxq->head ];

	/* no more received PDUs */
	if ((*entry->status & STATUS_COMPLETE) == 0)
	    break;

	vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);

	if ((vc_map->vcc == NULL) ||
	    (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {

	    DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
		    fore200e->atm_dev->number,
		    entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
	}
	else {
	    vcc = vc_map->vcc;
	    ASSERT(vcc);

	    if ((*entry->status & STATUS_ERROR) == 0) {

		fore200e_push_rpd(fore200e, vcc, entry->rpd);
	    }
	    else {
		DPRINTK(2, "damaged PDU on %d.%d.%d\n",
			fore200e->atm_dev->number,
			entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
		atomic_inc(&vcc->stats->rx_err);
	    }
	}

	FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);

	fore200e_collect_rpd(fore200e, entry->rpd);

	/* rewrite the rpd address to ack the received PDU */
	fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
	*entry->status = STATUS_FREE;

	fore200e_supply(fore200e);
    }
}


#ifndef FORE200E_USE_TASKLET
/* non-tasklet path: run rx then tx processing, each under the queue lock */
static void
fore200e_irq(struct fore200e* fore200e)
{
    unsigned long flags;

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_rx_irq(fore200e);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_tx_irq(fore200e);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);
}
#endif


/* top-level interrupt handler; 'dev' is the atm_dev passed to request_irq()
   (the IRQ may be shared, hence the irq_check / IRQ_NONE path) */
static irqreturn_t
fore200e_interrupt(int irq, void* dev)
{
    struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);

    if (fore200e->bus->irq_check(fore200e) == 0) {

	DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
	return IRQ_NONE;
    }
    DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);

#ifdef FORE200E_USE_TASKLET
    /* defer the real work to the tx/rx tasklets */
    tasklet_schedule(&fore200e->tx_tasklet);
    tasklet_schedule(&fore200e->rx_tasklet);
#else
    fore200e_irq(fore200e);
#endif

    fore200e->bus->irq_ack(fore200e);
    return IRQ_HANDLED;
}


#ifdef FORE200E_USE_TASKLET
/* deferred tx completion processing, under the queue lock */
static void
fore200e_tx_tasklet(unsigned long data)
{
    struct fore200e* fore200e = (struct fore200e*) data;
    unsigned long flags;

    DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_tx_irq(fore200e);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);
}


/* deferred rx processing, under the queue lock */
static void
fore200e_rx_tasklet(unsigned long data)
{
    struct fore200e* fore200e = (struct fore200e*) data;
    unsigned long flags;

    DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);

    spin_lock_irqsave(&fore200e->q_lock, flags);
    fore200e_rx_irq((struct fore200e*) data);
    spin_unlock_irqrestore(&fore200e->q_lock, flags);
}
#endif


static int
fore200e_select_scheme(struct atm_vcc* vcc)
{
    /* fairly balance the VCs over (identical) buffer schemes */
    int scheme = vcc->vci % 2 ?
BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO; 1330 1331 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n", 1332 vcc->itf, vcc->vpi, vcc->vci, scheme); 1333 1334 return scheme; 1335} 1336 1337 1338static int 1339fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu) 1340{ 1341 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1342 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1343 struct activate_opcode activ_opcode; 1344 struct deactivate_opcode deactiv_opcode; 1345 struct vpvc vpvc; 1346 int ok; 1347 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal); 1348 1349 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1350 1351 if (activate) { 1352 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc); 1353 1354 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN; 1355 activ_opcode.aal = aal; 1356 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme; 1357 activ_opcode.pad = 0; 1358 } 1359 else { 1360 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN; 1361 deactiv_opcode.pad = 0; 1362 } 1363 1364 vpvc.vci = vcc->vci; 1365 vpvc.vpi = vcc->vpi; 1366 1367 *entry->status = STATUS_PENDING; 1368 1369 if (activate) { 1370 1371#ifdef FORE200E_52BYTE_AAL0_SDU 1372 mtu = 48; 1373#endif 1374 /* the MTU is not used by the cp, except in the case of AAL0 */ 1375 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu); 1376 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc); 1377 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode); 1378 } 1379 else { 1380 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc); 1381 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode); 1382 } 1383 1384 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1385 1386 *entry->status = STATUS_FREE; 1387 1388 if (ok == 0) { 1389 printk(FORE200E "unable to %s VC 
%d.%d.%d\n", 1390 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci); 1391 return -EIO; 1392 } 1393 1394 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci, 1395 activate ? "open" : "clos"); 1396 1397 return 0; 1398} 1399 1400 1401#define FORE200E_MAX_BACK2BACK_CELLS 255 1402 1403static void 1404fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate) 1405{ 1406 if (qos->txtp.max_pcr < ATM_OC3_PCR) { 1407 1408 /* compute the data cells to idle cells ratio from the tx PCR */ 1409 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR; 1410 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells; 1411 } 1412 else { 1413 /* disable rate control */ 1414 rate->data_cells = rate->idle_cells = 0; 1415 } 1416} 1417 1418 1419static int 1420fore200e_open(struct atm_vcc *vcc) 1421{ 1422 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1423 struct fore200e_vcc* fore200e_vcc; 1424 struct fore200e_vc_map* vc_map; 1425 unsigned long flags; 1426 int vci = vcc->vci; 1427 short vpi = vcc->vpi; 1428 1429 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS)); 1430 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS)); 1431 1432 spin_lock_irqsave(&fore200e->q_lock, flags); 1433 1434 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci); 1435 if (vc_map->vcc) { 1436 1437 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1438 1439 printk(FORE200E "VC %d.%d.%d already in use\n", 1440 fore200e->atm_dev->number, vpi, vci); 1441 1442 return -EINVAL; 1443 } 1444 1445 vc_map->vcc = vcc; 1446 1447 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1448 1449 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC); 1450 if (fore200e_vcc == NULL) { 1451 vc_map->vcc = NULL; 1452 return -ENOMEM; 1453 } 1454 1455 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 1456 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n", 1457 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1458 
fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1459 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1460 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1461 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1462
1463 /* pseudo-CBR bandwidth requested? */
1464 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1465
1466 mutex_lock(&fore200e->rate_mtx);
1467 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1468 mutex_unlock(&fore200e->rate_mtx);
1469
1470 kfree(fore200e_vcc);
1471 vc_map->vcc = NULL;
1472 return -EAGAIN;
1473 }
1474
1475 /* reserve bandwidth */
1476 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1477 mutex_unlock(&fore200e->rate_mtx);
1478 }
1479
1480 vcc->itf = vcc->dev->number;
1481
1482 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1483 set_bit(ATM_VF_ADDR, &vcc->flags);
1484
1485 vcc->dev_data = fore200e_vcc;
1486
1487 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1488
1489 vc_map->vcc = NULL;
1490
1491 clear_bit(ATM_VF_ADDR, &vcc->flags);
1492 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1493
1494 vcc->dev_data = NULL;
1495
1496 /* give back the reserved bandwidth, but only if some was actually
    reserved by the pseudo-CBR branch above; adding max_pcr back
    unconditionally would inflate available_cell_rate for non-CBR VCs
    that carry a non-zero max_pcr. Take rate_mtx, as every other
    update of available_cell_rate does. */
 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
 mutex_lock(&fore200e->rate_mtx);
 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
 mutex_unlock(&fore200e->rate_mtx);
 }
1497
1498 kfree(fore200e_vcc);
1499 return -EINVAL;
1500 }
1501
1502 /* compute rate control parameters */
1503 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1504
1505 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1506 set_bit(ATM_VF_HASQOS, &vcc->flags);
1507
1508 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1509 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1510 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1511 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1512 }
1513
1514 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1515 fore200e_vcc->tx_max_pdu =
fore200e_vcc->rx_max_pdu = 0; 1516 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0; 1517 1518 /* new incarnation of the vcc */ 1519 vc_map->incarn = ++fore200e->incarn_count; 1520 1521 /* VC unusable before this flag is set */ 1522 set_bit(ATM_VF_READY, &vcc->flags); 1523 1524 return 0; 1525} 1526 1527 1528static void 1529fore200e_close(struct atm_vcc* vcc) 1530{ 1531 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1532 struct fore200e_vcc* fore200e_vcc; 1533 struct fore200e_vc_map* vc_map; 1534 unsigned long flags; 1535 1536 ASSERT(vcc); 1537 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS)); 1538 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS)); 1539 1540 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal)); 1541 1542 clear_bit(ATM_VF_READY, &vcc->flags); 1543 1544 fore200e_activate_vcin(fore200e, 0, vcc, 0); 1545 1546 spin_lock_irqsave(&fore200e->q_lock, flags); 1547 1548 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1549 1550 /* the vc is no longer considered as "in use" by fore200e_open() */ 1551 vc_map->vcc = NULL; 1552 1553 vcc->itf = vcc->vci = vcc->vpi = 0; 1554 1555 fore200e_vcc = FORE200E_VCC(vcc); 1556 vcc->dev_data = NULL; 1557 1558 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1559 1560 /* release reserved bandwidth, if any */ 1561 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1562 1563 mutex_lock(&fore200e->rate_mtx); 1564 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1565 mutex_unlock(&fore200e->rate_mtx); 1566 1567 clear_bit(ATM_VF_HASQOS, &vcc->flags); 1568 } 1569 1570 clear_bit(ATM_VF_ADDR, &vcc->flags); 1571 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1572 1573 ASSERT(fore200e_vcc); 1574 kfree(fore200e_vcc); 1575} 1576 1577 1578static int 1579fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) 1580{ 1581 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1582 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc); 
1583 struct fore200e_vc_map* vc_map; 1584 struct host_txq* txq = &fore200e->host_txq; 1585 struct host_txq_entry* entry; 1586 struct tpd* tpd; 1587 struct tpd_haddr tpd_haddr; 1588 int retry = CONFIG_ATM_FORE200E_TX_RETRY; 1589 int tx_copy = 0; 1590 int tx_len = skb->len; 1591 u32* cell_header = NULL; 1592 unsigned char* skb_data; 1593 int skb_len; 1594 unsigned char* data; 1595 unsigned long flags; 1596 1597 ASSERT(vcc); 1598 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); 1599 ASSERT(fore200e); 1600 ASSERT(fore200e_vcc); 1601 1602 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 1603 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi); 1604 dev_kfree_skb_any(skb); 1605 return -EINVAL; 1606 } 1607 1608#ifdef FORE200E_52BYTE_AAL0_SDU 1609 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) { 1610 cell_header = (u32*) skb->data; 1611 skb_data = skb->data + 4; /* skip 4-byte cell header */ 1612 skb_len = tx_len = skb->len - 4; 1613 1614 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header); 1615 } 1616 else 1617#endif 1618 { 1619 skb_data = skb->data; 1620 skb_len = skb->len; 1621 } 1622 1623 if (((unsigned long)skb_data) & 0x3) { 1624 1625 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name); 1626 tx_copy = 1; 1627 tx_len = skb_len; 1628 } 1629 1630 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) { 1631 1632 /* this simply NUKES the PCA board */ 1633 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name); 1634 tx_copy = 1; 1635 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD; 1636 } 1637 1638 if (tx_copy) { 1639 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA); 1640 if (data == NULL) { 1641 if (vcc->pop) { 1642 vcc->pop(vcc, skb); 1643 } 1644 else { 1645 dev_kfree_skb_any(skb); 1646 } 1647 return -ENOMEM; 1648 } 1649 1650 memcpy(data, skb_data, skb_len); 1651 if (skb_len < tx_len) 1652 memset(data + skb_len, 0x00, tx_len - skb_len); 1653 } 1654 else { 1655 
data = skb_data; 1656 } 1657 1658 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1659 ASSERT(vc_map->vcc == vcc); 1660 1661 retry_here: 1662 1663 spin_lock_irqsave(&fore200e->q_lock, flags); 1664 1665 entry = &txq->host_entry[ txq->head ]; 1666 1667 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) { 1668 1669 /* try to free completed tx queue entries */ 1670 fore200e_tx_irq(fore200e); 1671 1672 if (*entry->status != STATUS_FREE) { 1673 1674 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1675 1676 /* retry once again? */ 1677 if (--retry > 0) { 1678 udelay(50); 1679 goto retry_here; 1680 } 1681 1682 atomic_inc(&vcc->stats->tx_err); 1683 1684 fore200e->tx_sat++; 1685 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", 1686 fore200e->name, fore200e->cp_queues->heartbeat); 1687 if (vcc->pop) { 1688 vcc->pop(vcc, skb); 1689 } 1690 else { 1691 dev_kfree_skb_any(skb); 1692 } 1693 1694 if (tx_copy) 1695 kfree(data); 1696 1697 return -ENOBUFS; 1698 } 1699 } 1700 1701 entry->incarn = vc_map->incarn; 1702 entry->vc_map = vc_map; 1703 entry->skb = skb; 1704 entry->data = tx_copy ? data : NULL; 1705 1706 tpd = entry->tpd; 1707 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE); 1708 tpd->tsd[ 0 ].length = tx_len; 1709 1710 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX); 1711 txq->txing++; 1712 1713 /* The dma_map call above implies a dma_sync so the device can use it, 1714 * thus no explicit dma_sync call is necessary here. 
1715 */ 1716 1717 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n", 1718 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1719 tpd->tsd[0].length, skb_len); 1720 1721 if (skb_len < fore200e_vcc->tx_min_pdu) 1722 fore200e_vcc->tx_min_pdu = skb_len; 1723 if (skb_len > fore200e_vcc->tx_max_pdu) 1724 fore200e_vcc->tx_max_pdu = skb_len; 1725 fore200e_vcc->tx_pdu++; 1726 1727 /* set tx rate control information */ 1728 tpd->rate.data_cells = fore200e_vcc->rate.data_cells; 1729 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells; 1730 1731 if (cell_header) { 1732 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP); 1733 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 1734 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT; 1735 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT; 1736 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT; 1737 } 1738 else { 1739 /* set the ATM header, common to all cells conveying the PDU */ 1740 tpd->atm_header.clp = 0; 1741 tpd->atm_header.plt = 0; 1742 tpd->atm_header.vci = vcc->vci; 1743 tpd->atm_header.vpi = vcc->vpi; 1744 tpd->atm_header.gfc = 0; 1745 } 1746 1747 tpd->spec.length = tx_len; 1748 tpd->spec.nseg = 1; 1749 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal); 1750 tpd->spec.intr = 1; 1751 1752 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */ 1753 tpd_haddr.pad = 0; 1754 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */ 1755 1756 *entry->status = STATUS_PENDING; 1757 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr); 1758 1759 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1760 1761 return 0; 1762} 1763 1764 1765static int 1766fore200e_getstats(struct fore200e* fore200e) 1767{ 1768 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1769 struct host_cmdq_entry* entry = 
&cmdq->host_entry[ cmdq->head ]; 1770 struct stats_opcode opcode; 1771 int ok; 1772 u32 stats_dma_addr; 1773 1774 if (fore200e->stats == NULL) { 1775 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA); 1776 if (fore200e->stats == NULL) 1777 return -ENOMEM; 1778 } 1779 1780 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats, 1781 sizeof(struct stats), DMA_FROM_DEVICE); 1782 1783 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1784 1785 opcode.opcode = OPCODE_GET_STATS; 1786 opcode.pad = 0; 1787 1788 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr); 1789 1790 *entry->status = STATUS_PENDING; 1791 1792 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode); 1793 1794 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1795 1796 *entry->status = STATUS_FREE; 1797 1798 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE); 1799 1800 if (ok == 0) { 1801 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name); 1802 return -EIO; 1803 } 1804 1805 return 0; 1806} 1807 1808 1809static int 1810fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) 1811{ 1812 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ 1813 1814 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", 1815 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); 1816 1817 return -EINVAL; 1818} 1819 1820 1821static int 1822fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) 1823{ 1824 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ 1825 1826 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", 1827 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); 1828 1829 return -EINVAL; 1830} 1831 1832 1833 1834 1835static int 
1836fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask) 1837{ 1838 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1839 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1840 struct oc3_opcode opcode; 1841 int ok; 1842 1843 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask); 1844 1845 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1846 1847 opcode.opcode = OPCODE_SET_OC3; 1848 opcode.reg = reg; 1849 opcode.value = value; 1850 opcode.mask = mask; 1851 1852 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr); 1853 1854 *entry->status = STATUS_PENDING; 1855 1856 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode); 1857 1858 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1859 1860 *entry->status = STATUS_FREE; 1861 1862 if (ok == 0) { 1863 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name); 1864 return -EIO; 1865 } 1866 1867 return 0; 1868} 1869 1870 1871static int 1872fore200e_setloop(struct fore200e* fore200e, int loop_mode) 1873{ 1874 u32 mct_value, mct_mask; 1875 int error; 1876 1877 if (!capable(CAP_NET_ADMIN)) 1878 return -EPERM; 1879 1880 switch (loop_mode) { 1881 1882 case ATM_LM_NONE: 1883 mct_value = 0; 1884 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE; 1885 break; 1886 1887 case ATM_LM_LOC_PHY: 1888 mct_value = mct_mask = SUNI_MCT_DLE; 1889 break; 1890 1891 case ATM_LM_RMT_PHY: 1892 mct_value = mct_mask = SUNI_MCT_LLE; 1893 break; 1894 1895 default: 1896 return -EINVAL; 1897 } 1898 1899 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask); 1900 if (error == 0) 1901 fore200e->loop_mode = loop_mode; 1902 1903 return error; 1904} 1905 1906 1907static int 1908fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg) 1909{ 1910 struct sonet_stats tmp; 1911 1912 if (fore200e_getstats(fore200e) < 0) 1913 return -EIO; 1914 1915 tmp.section_bip = 
cpu_to_be32(fore200e->stats->oc3.section_bip8_errors); 1916 tmp.line_bip = cpu_to_be32(fore200e->stats->oc3.line_bip24_errors); 1917 tmp.path_bip = cpu_to_be32(fore200e->stats->oc3.path_bip8_errors); 1918 tmp.line_febe = cpu_to_be32(fore200e->stats->oc3.line_febe_errors); 1919 tmp.path_febe = cpu_to_be32(fore200e->stats->oc3.path_febe_errors); 1920 tmp.corr_hcs = cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors); 1921 tmp.uncorr_hcs = cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors); 1922 tmp.tx_cells = cpu_to_be32(fore200e->stats->aal0.cells_transmitted) + 1923 cpu_to_be32(fore200e->stats->aal34.cells_transmitted) + 1924 cpu_to_be32(fore200e->stats->aal5.cells_transmitted); 1925 tmp.rx_cells = cpu_to_be32(fore200e->stats->aal0.cells_received) + 1926 cpu_to_be32(fore200e->stats->aal34.cells_received) + 1927 cpu_to_be32(fore200e->stats->aal5.cells_received); 1928 1929 if (arg) 1930 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0; 1931 1932 return 0; 1933} 1934 1935 1936static int 1937fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg) 1938{ 1939 struct fore200e* fore200e = FORE200E_DEV(dev); 1940 1941 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg); 1942 1943 switch (cmd) { 1944 1945 case SONET_GETSTAT: 1946 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg); 1947 1948 case SONET_GETDIAG: 1949 return put_user(0, (int __user *)arg) ? -EFAULT : 0; 1950 1951 case ATM_SETLOOP: 1952 return fore200e_setloop(fore200e, (int)(unsigned long)arg); 1953 1954 case ATM_GETLOOP: 1955 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0; 1956 1957 case ATM_QUERYLOOP: 1958 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? 
-EFAULT : 0; 1959 } 1960 1961 return -ENOSYS; /* not implemented */ 1962} 1963 1964 1965static int 1966fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags) 1967{ 1968 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc); 1969 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1970 1971 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 1972 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi); 1973 return -EINVAL; 1974 } 1975 1976 DPRINTK(2, "change_qos %d.%d.%d, " 1977 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 1978 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n" 1979 "available_cell_rate = %u", 1980 vcc->itf, vcc->vpi, vcc->vci, 1981 fore200e_traffic_class[ qos->txtp.traffic_class ], 1982 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu, 1983 fore200e_traffic_class[ qos->rxtp.traffic_class ], 1984 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu, 1985 flags, fore200e->available_cell_rate); 1986 1987 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) { 1988 1989 mutex_lock(&fore200e->rate_mtx); 1990 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) { 1991 mutex_unlock(&fore200e->rate_mtx); 1992 return -EAGAIN; 1993 } 1994 1995 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1996 fore200e->available_cell_rate -= qos->txtp.max_pcr; 1997 1998 mutex_unlock(&fore200e->rate_mtx); 1999 2000 memcpy(&vcc->qos, qos, sizeof(struct atm_qos)); 2001 2002 /* update rate control parameters */ 2003 fore200e_rate_ctrl(qos, &fore200e_vcc->rate); 2004 2005 set_bit(ATM_VF_HASQOS, &vcc->flags); 2006 2007 return 0; 2008 } 2009 2010 return -EINVAL; 2011} 2012 2013 2014static int __devinit 2015fore200e_irq_request(struct fore200e* fore200e) 2016{ 2017 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) { 2018 2019 printk(FORE200E "unable to reserve IRQ %s for device %s\n", 
2020 fore200e_irq_itoa(fore200e->irq), fore200e->name); 2021 return -EBUSY; 2022 } 2023 2024 printk(FORE200E "IRQ %s reserved for device %s\n", 2025 fore200e_irq_itoa(fore200e->irq), fore200e->name); 2026 2027#ifdef FORE200E_USE_TASKLET 2028 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e); 2029 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e); 2030#endif 2031 2032 fore200e->state = FORE200E_STATE_IRQ; 2033 return 0; 2034} 2035 2036 2037static int __devinit 2038fore200e_get_esi(struct fore200e* fore200e) 2039{ 2040 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA); 2041 int ok, i; 2042 2043 if (!prom) 2044 return -ENOMEM; 2045 2046 ok = fore200e->bus->prom_read(fore200e, prom); 2047 if (ok < 0) { 2048 kfree(prom); 2049 return -EBUSY; 2050 } 2051 2052 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n", 2053 fore200e->name, 2054 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */ 2055 prom->serial_number & 0xFFFF, 2056 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ], 2057 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]); 2058 2059 for (i = 0; i < ESI_LEN; i++) { 2060 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ]; 2061 } 2062 2063 kfree(prom); 2064 2065 return 0; 2066} 2067 2068 2069static int __devinit 2070fore200e_alloc_rx_buf(struct fore200e* fore200e) 2071{ 2072 int scheme, magn, nbr, size, i; 2073 2074 struct host_bsq* bsq; 2075 struct buffer* buffer; 2076 2077 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 2078 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 2079 2080 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 2081 2082 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ]; 2083 size = fore200e_rx_buf_size[ scheme ][ magn ]; 2084 2085 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn); 2086 2087 /* allocate the array of receive 
buffers */ 2088 buffer = bsq->buffer = kzalloc(nbr * sizeof(struct buffer), GFP_KERNEL); 2089 2090 if (buffer == NULL) 2091 return -ENOMEM; 2092 2093 bsq->freebuf = NULL; 2094 2095 for (i = 0; i < nbr; i++) { 2096 2097 buffer[ i ].scheme = scheme; 2098 buffer[ i ].magn = magn; 2099#ifdef FORE200E_BSQ_DEBUG 2100 buffer[ i ].index = i; 2101 buffer[ i ].supplied = 0; 2102#endif 2103 2104 /* allocate the receive buffer body */ 2105 if (fore200e_chunk_alloc(fore200e, 2106 &buffer[ i ].data, size, fore200e->bus->buffer_alignment, 2107 DMA_FROM_DEVICE) < 0) { 2108 2109 while (i > 0) 2110 fore200e_chunk_free(fore200e, &buffer[ --i ].data); 2111 kfree(buffer); 2112 2113 return -ENOMEM; 2114 } 2115 2116 /* insert the buffer into the free buffer list */ 2117 buffer[ i ].next = bsq->freebuf; 2118 bsq->freebuf = &buffer[ i ]; 2119 } 2120 /* all the buffers are free, initially */ 2121 bsq->freebuf_count = nbr; 2122 2123#ifdef FORE200E_BSQ_DEBUG 2124 bsq_audit(3, bsq, scheme, magn); 2125#endif 2126 } 2127 } 2128 2129 fore200e->state = FORE200E_STATE_ALLOC_BUF; 2130 return 0; 2131} 2132 2133 2134static int __devinit 2135fore200e_init_bs_queue(struct fore200e* fore200e) 2136{ 2137 int scheme, magn, i; 2138 2139 struct host_bsq* bsq; 2140 struct cp_bsq_entry __iomem * cp_entry; 2141 2142 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 2143 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 2144 2145 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn); 2146 2147 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 2148 2149 /* allocate and align the array of status words */ 2150 if (fore200e->bus->dma_chunk_alloc(fore200e, 2151 &bsq->status, 2152 sizeof(enum status), 2153 QUEUE_SIZE_BS, 2154 fore200e->bus->status_alignment) < 0) { 2155 return -ENOMEM; 2156 } 2157 2158 /* allocate and align the array of receive buffer descriptors */ 2159 if (fore200e->bus->dma_chunk_alloc(fore200e, 2160 &bsq->rbd_block, 2161 sizeof(struct rbd_block), 2162 QUEUE_SIZE_BS, 2163 
                                           fore200e->bus->descr_alignment) < 0) {

                fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
                return -ENOMEM;
            }

            /* get the base address of the cp resident buffer supply queue entries */
            cp_entry = fore200e->virt_base +
                       fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);

            /* fill the host resident and cp resident buffer supply queue entries */
            for (i = 0; i < QUEUE_SIZE_BS; i++) {

                bsq->host_entry[ i ].status =
                                  FORE200E_INDEX(bsq->status.align_addr, enum status, i);
                bsq->host_entry[ i ].rbd_block =
                                  FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
                bsq->host_entry[ i ].rbd_block_dma =
                                  FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
                bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];

                /* mark the host slot free before handing the cp the DMA address
                   of the status word it will use to signal completion */
                *bsq->host_entry[ i ].status = STATUS_FREE;

                fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
                                     &cp_entry[ i ].status_haddr);
            }
        }
    }

    fore200e->state = FORE200E_STATE_INIT_BSQ;
    return 0;
}


/* allocate the host resident receive queue (status words + receive PDU
   descriptors) and link each host entry to its cp (on-board i960) resident
   counterpart. Returns 0 on success, -ENOMEM if a DMA chunk allocation fails
   (any chunk already obtained is released). */
static int __devinit
fore200e_init_rx_queue(struct fore200e* fore200e)
{
    struct host_rxq*  rxq = &fore200e->host_rxq;
    struct cp_rxq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "receive queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
                                       &rxq->status,
                                       sizeof(enum status),
                                       QUEUE_SIZE_RX,
                                       fore200e->bus->status_alignment) < 0) {
        return -ENOMEM;
    }

    /* allocate and align the array of receive PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
                                       &rxq->rpd,
                                       sizeof(struct rpd),
                                       QUEUE_SIZE_RX,
                                       fore200e->bus->descr_alignment) < 0) {

        fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
        return -ENOMEM;
    }

    /* get the base address of the cp resident rx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);

    /* fill the host resident and cp resident rx entries */
    for (i=0; i < QUEUE_SIZE_RX; i++) {

        rxq->host_entry[ i ].status =
                          FORE200E_INDEX(rxq->status.align_addr, enum status, i);
        rxq->host_entry[ i ].rpd =
                          FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
        rxq->host_entry[ i ].rpd_dma =
                          FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
        rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];

        *rxq->host_entry[ i ].status = STATUS_FREE;

        /* give the cp the DMA (bus) addresses of this entry's status word and rpd */
        fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
                             &cp_entry[ i ].status_haddr);

        fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
                             &cp_entry[ i ].rpd_haddr);
    }

    /* set the head entry of the queue */
    rxq->head = 0;

    fore200e->state = FORE200E_STATE_INIT_RXQ;
    return 0;
}


/* allocate the host resident transmit queue (status words + transmit PDU
   descriptors) and link each host entry to its cp resident counterpart.
   Returns 0 on success, -ENOMEM on allocation failure (with cleanup). */
static int __devinit
fore200e_init_tx_queue(struct fore200e* fore200e)
{
    struct host_txq*  txq = &fore200e->host_txq;
    struct cp_txq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "transmit queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
                                       &txq->status,
                                       sizeof(enum status),
                                       QUEUE_SIZE_TX,
                                       fore200e->bus->status_alignment) < 0) {
        return -ENOMEM;
    }

    /* allocate and align the array of transmit PDU descriptors */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
                                       &txq->tpd,
                                       sizeof(struct tpd),
                                       QUEUE_SIZE_TX,
                                       fore200e->bus->descr_alignment) < 0) {

        fore200e->bus->dma_chunk_free(fore200e, &txq->status);
        return -ENOMEM;
    }

    /* get the base address of the cp resident tx queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);

    /* fill the host resident and cp resident tx entries */
    for (i=0; i < QUEUE_SIZE_TX; i++) {

        txq->host_entry[ i ].status =
                          FORE200E_INDEX(txq->status.align_addr, enum status, i);
        txq->host_entry[ i ].tpd =
                          FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
        txq->host_entry[ i ].tpd_dma =
                          FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
        txq->host_entry[ i ].cp_entry = &cp_entry[ i ];

        *txq->host_entry[ i ].status = STATUS_FREE;

        fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
                             &cp_entry[ i ].status_haddr);

        /* although there is a one-to-one mapping of tx queue entries and tpds,
           we do not write here the DMA (physical) base address of each tpd into
           the related cp resident entry, because the cp relies on this write
           operation to detect that a new pdu has been submitted for tx */
    }

    /* set the head and tail entries of the queue */
    txq->head = 0;
    txq->tail = 0;

    fore200e->state = FORE200E_STATE_INIT_TXQ;
    return 0;
}


/* allocate the host resident command queue status words and link each host
   entry to its cp resident counterpart. Returns 0 on success, -ENOMEM if the
   status-word DMA chunk cannot be allocated. */
static int __devinit
fore200e_init_cmd_queue(struct fore200e* fore200e)
{
    struct host_cmdq* cmdq = &fore200e->host_cmdq;
    struct cp_cmdq_entry __iomem * cp_entry;
    int i;

    DPRINTK(2, "command queue is being initialized\n");

    /* allocate and align the array of status words */
    if (fore200e->bus->dma_chunk_alloc(fore200e,
                                       &cmdq->status,
                                       sizeof(enum status),
                                       QUEUE_SIZE_CMD,
                                       fore200e->bus->status_alignment) < 0) {
        return -ENOMEM;
    }

    /* get the base address of the cp resident cmd queue entries */
    cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);

    /* fill the host resident and cp resident cmd entries */
    for (i=0; i < QUEUE_SIZE_CMD; i++) {

        cmdq->host_entry[ i ].status =
                          FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ]; 2347 2348 *cmdq->host_entry[ i ].status = STATUS_FREE; 2349 2350 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i), 2351 &cp_entry[ i ].status_haddr); 2352 } 2353 2354 /* set the head entry of the queue */ 2355 cmdq->head = 0; 2356 2357 fore200e->state = FORE200E_STATE_INIT_CMDQ; 2358 return 0; 2359} 2360 2361 2362static void __init 2363fore200e_param_bs_queue(struct fore200e* fore200e, 2364 enum buffer_scheme scheme, enum buffer_magn magn, 2365 int queue_length, int pool_size, int supply_blksize) 2366{ 2367 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ]; 2368 2369 fore200e->bus->write(queue_length, &bs_spec->queue_length); 2370 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size); 2371 fore200e->bus->write(pool_size, &bs_spec->pool_size); 2372 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize); 2373} 2374 2375 2376static int __devinit 2377fore200e_initialize(struct fore200e* fore200e) 2378{ 2379 struct cp_queues __iomem * cpq; 2380 int ok, scheme, magn; 2381 2382 DPRINTK(2, "device %s being initialized\n", fore200e->name); 2383 2384 mutex_init(&fore200e->rate_mtx); 2385 spin_lock_init(&fore200e->q_lock); 2386 2387 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET; 2388 2389 /* enable cp to host interrupts */ 2390 fore200e->bus->write(1, &cpq->imask); 2391 2392 if (fore200e->bus->irq_enable) 2393 fore200e->bus->irq_enable(fore200e); 2394 2395 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect); 2396 2397 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len); 2398 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len); 2399 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len); 2400 2401 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension); 2402 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension); 2403 2404 for (scheme = 0; scheme < 
BUFFER_SCHEME_NBR; scheme++) 2405 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) 2406 fore200e_param_bs_queue(fore200e, scheme, magn, 2407 QUEUE_SIZE_BS, 2408 fore200e_rx_buf_nbr[ scheme ][ magn ], 2409 RBD_BLK_SIZE); 2410 2411 /* issue the initialize command */ 2412 fore200e->bus->write(STATUS_PENDING, &cpq->init.status); 2413 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode); 2414 2415 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000); 2416 if (ok == 0) { 2417 printk(FORE200E "device %s initialization failed\n", fore200e->name); 2418 return -ENODEV; 2419 } 2420 2421 printk(FORE200E "device %s initialized\n", fore200e->name); 2422 2423 fore200e->state = FORE200E_STATE_INITIALIZE; 2424 return 0; 2425} 2426 2427 2428static void __devinit 2429fore200e_monitor_putc(struct fore200e* fore200e, char c) 2430{ 2431 struct cp_monitor __iomem * monitor = fore200e->cp_monitor; 2432 2433 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send); 2434} 2435 2436 2437static int __devinit 2438fore200e_monitor_getc(struct fore200e* fore200e) 2439{ 2440 struct cp_monitor __iomem * monitor = fore200e->cp_monitor; 2441 unsigned long timeout = jiffies + msecs_to_jiffies(50); 2442 int c; 2443 2444 while (time_before(jiffies, timeout)) { 2445 2446 c = (int) fore200e->bus->read(&monitor->soft_uart.recv); 2447 2448 if (c & FORE200E_CP_MONITOR_UART_AVAIL) { 2449 2450 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv); 2451 return c & 0xFF; 2452 } 2453 } 2454 2455 return -1; 2456} 2457 2458 2459static void __devinit 2460fore200e_monitor_puts(struct fore200e* fore200e, char* str) 2461{ 2462 while (*str) { 2463 2464 /* the i960 monitor doesn't accept any new character if it has something to say */ 2465 while (fore200e_monitor_getc(fore200e) >= 0); 2466 2467 fore200e_monitor_putc(fore200e, *str++); 2468 } 2469 2470 while (fore200e_monitor_getc(fore200e) >= 0); 2471} 2472 2473 2474static int __devinit 
2475fore200e_start_fw(struct fore200e* fore200e) 2476{ 2477 int ok; 2478 char cmd[ 48 ]; 2479 struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data; 2480 2481 DPRINTK(2, "device %s firmware being started\n", fore200e->name); 2482 2483#if defined(__sparc_v9__) 2484 /* reported to be required by SBA cards on some sparc64 hosts */ 2485 fore200e_spin(100); 2486#endif 2487 2488 sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset)); 2489 2490 fore200e_monitor_puts(fore200e, cmd); 2491 2492 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000); 2493 if (ok == 0) { 2494 printk(FORE200E "device %s firmware didn't start\n", fore200e->name); 2495 return -ENODEV; 2496 } 2497 2498 printk(FORE200E "device %s firmware started\n", fore200e->name); 2499 2500 fore200e->state = FORE200E_STATE_START_FW; 2501 return 0; 2502} 2503 2504 2505static int __devinit 2506fore200e_load_fw(struct fore200e* fore200e) 2507{ 2508 u32* fw_data = (u32*) fore200e->bus->fw_data; 2509 u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32); 2510 2511 struct fw_header* fw_header = (struct fw_header*) fw_data; 2512 2513 u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset); 2514 2515 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n", 2516 fore200e->name, load_addr, fw_size); 2517 2518 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) { 2519 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name); 2520 return -ENODEV; 2521 } 2522 2523 for (; fw_size--; fw_data++, load_addr++) 2524 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr); 2525 2526 fore200e->state = FORE200E_STATE_LOAD_FW; 2527 return 0; 2528} 2529 2530 2531static int __devinit 2532fore200e_register(struct fore200e* fore200e) 2533{ 2534 struct atm_dev* atm_dev; 2535 2536 DPRINTK(2, "device %s being registered\n", fore200e->name); 2537 2538 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1, 
2539 NULL); 2540 if (atm_dev == NULL) { 2541 printk(FORE200E "unable to register device %s\n", fore200e->name); 2542 return -ENODEV; 2543 } 2544 2545 atm_dev->dev_data = fore200e; 2546 fore200e->atm_dev = atm_dev; 2547 2548 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS; 2549 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS; 2550 2551 fore200e->available_cell_rate = ATM_OC3_PCR; 2552 2553 fore200e->state = FORE200E_STATE_REGISTER; 2554 return 0; 2555} 2556 2557 2558static int __devinit 2559fore200e_init(struct fore200e* fore200e) 2560{ 2561 if (fore200e_register(fore200e) < 0) 2562 return -ENODEV; 2563 2564 if (fore200e->bus->configure(fore200e) < 0) 2565 return -ENODEV; 2566 2567 if (fore200e->bus->map(fore200e) < 0) 2568 return -ENODEV; 2569 2570 if (fore200e_reset(fore200e, 1) < 0) 2571 return -ENODEV; 2572 2573 if (fore200e_load_fw(fore200e) < 0) 2574 return -ENODEV; 2575 2576 if (fore200e_start_fw(fore200e) < 0) 2577 return -ENODEV; 2578 2579 if (fore200e_initialize(fore200e) < 0) 2580 return -ENODEV; 2581 2582 if (fore200e_init_cmd_queue(fore200e) < 0) 2583 return -ENOMEM; 2584 2585 if (fore200e_init_tx_queue(fore200e) < 0) 2586 return -ENOMEM; 2587 2588 if (fore200e_init_rx_queue(fore200e) < 0) 2589 return -ENOMEM; 2590 2591 if (fore200e_init_bs_queue(fore200e) < 0) 2592 return -ENOMEM; 2593 2594 if (fore200e_alloc_rx_buf(fore200e) < 0) 2595 return -ENOMEM; 2596 2597 if (fore200e_get_esi(fore200e) < 0) 2598 return -EIO; 2599 2600 if (fore200e_irq_request(fore200e) < 0) 2601 return -EBUSY; 2602 2603 fore200e_supply(fore200e); 2604 2605 /* all done, board initialization is now complete */ 2606 fore200e->state = FORE200E_STATE_COMPLETE; 2607 return 0; 2608} 2609 2610 2611static int __devinit 2612fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent) 2613{ 2614 const struct fore200e_bus* bus = (struct fore200e_bus*) pci_ent->driver_data; 2615 struct fore200e* fore200e; 2616 int err = 0; 2617 static int index = 0; 2618 2619 if 
(pci_enable_device(pci_dev)) { 2620 err = -EINVAL; 2621 goto out; 2622 } 2623 2624 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 2625 if (fore200e == NULL) { 2626 err = -ENOMEM; 2627 goto out_disable; 2628 } 2629 2630 fore200e->bus = bus; 2631 fore200e->bus_dev = pci_dev; 2632 fore200e->irq = pci_dev->irq; 2633 fore200e->phys_base = pci_resource_start(pci_dev, 0); 2634 2635 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1); 2636 2637 pci_set_master(pci_dev); 2638 2639 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n", 2640 fore200e->bus->model_name, 2641 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); 2642 2643 sprintf(fore200e->name, "%s-%d", bus->model_name, index); 2644 2645 err = fore200e_init(fore200e); 2646 if (err < 0) { 2647 fore200e_shutdown(fore200e); 2648 goto out_free; 2649 } 2650 2651 ++index; 2652 pci_set_drvdata(pci_dev, fore200e); 2653 2654out: 2655 return err; 2656 2657out_free: 2658 kfree(fore200e); 2659out_disable: 2660 pci_disable_device(pci_dev); 2661 goto out; 2662} 2663 2664 2665static void __devexit fore200e_pca_remove_one(struct pci_dev *pci_dev) 2666{ 2667 struct fore200e *fore200e; 2668 2669 fore200e = pci_get_drvdata(pci_dev); 2670 2671 fore200e_shutdown(fore200e); 2672 kfree(fore200e); 2673 pci_disable_device(pci_dev); 2674} 2675 2676 2677#ifdef CONFIG_ATM_FORE200E_PCA 2678static struct pci_device_id fore200e_pca_tbl[] = { 2679 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID, 2680 0, 0, (unsigned long) &fore200e_bus[0] }, 2681 { 0, } 2682}; 2683 2684MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl); 2685 2686static struct pci_driver fore200e_pca_driver = { 2687 .name = "fore_200e", 2688 .probe = fore200e_pca_detect, 2689 .remove = __devexit_p(fore200e_pca_remove_one), 2690 .id_table = fore200e_pca_tbl, 2691}; 2692#endif 2693 2694 2695static int __init 2696fore200e_module_init(void) 2697{ 2698 const struct fore200e_bus* bus; 2699 struct fore200e* fore200e; 2700 int index; 2701 2702 
printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n"); 2703 2704 /* for each configured bus interface */ 2705 for (bus = fore200e_bus; bus->model_name; bus++) { 2706 2707 /* detect all boards present on that bus */ 2708 for (index = 0; bus->detect && (fore200e = bus->detect(bus, index)); index++) { 2709 2710 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n", 2711 fore200e->bus->model_name, 2712 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); 2713 2714 sprintf(fore200e->name, "%s-%d", bus->model_name, index); 2715 2716 if (fore200e_init(fore200e) < 0) { 2717 2718 fore200e_shutdown(fore200e); 2719 break; 2720 } 2721 2722 list_add(&fore200e->entry, &fore200e_boards); 2723 } 2724 } 2725 2726#ifdef CONFIG_ATM_FORE200E_PCA 2727 if (!pci_register_driver(&fore200e_pca_driver)) 2728 return 0; 2729#endif 2730 2731 if (!list_empty(&fore200e_boards)) 2732 return 0; 2733 2734 return -ENODEV; 2735} 2736 2737 2738static void __exit 2739fore200e_module_cleanup(void) 2740{ 2741 struct fore200e *fore200e, *next; 2742 2743#ifdef CONFIG_ATM_FORE200E_PCA 2744 pci_unregister_driver(&fore200e_pca_driver); 2745#endif 2746 2747 list_for_each_entry_safe(fore200e, next, &fore200e_boards, entry) { 2748 fore200e_shutdown(fore200e); 2749 kfree(fore200e); 2750 } 2751 DPRINTK(1, "module being removed\n"); 2752} 2753 2754 2755static int 2756fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page) 2757{ 2758 struct fore200e* fore200e = FORE200E_DEV(dev); 2759 struct fore200e_vcc* fore200e_vcc; 2760 struct atm_vcc* vcc; 2761 int i, len, left = *pos; 2762 unsigned long flags; 2763 2764 if (!left--) { 2765 2766 if (fore200e_getstats(fore200e) < 0) 2767 return -EIO; 2768 2769 len = sprintf(page,"\n" 2770 " device:\n" 2771 " internal name:\t\t%s\n", fore200e->name); 2772 2773 /* print bus-specific information */ 2774 if (fore200e->bus->proc_read) 2775 len += fore200e->bus->proc_read(fore200e, page + len); 2776 2777 len += sprintf(page + len, 2778 " 
interrupt line:\t\t%s\n" 2779 " physical base address:\t0x%p\n" 2780 " virtual base address:\t0x%p\n" 2781 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n" 2782 " board serial number:\t\t%d\n\n", 2783 fore200e_irq_itoa(fore200e->irq), 2784 (void*)fore200e->phys_base, 2785 fore200e->virt_base, 2786 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2], 2787 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5], 2788 fore200e->esi[4] * 256 + fore200e->esi[5]); 2789 2790 return len; 2791 } 2792 2793 if (!left--) 2794 return sprintf(page, 2795 " free small bufs, scheme 1:\t%d\n" 2796 " free large bufs, scheme 1:\t%d\n" 2797 " free small bufs, scheme 2:\t%d\n" 2798 " free large bufs, scheme 2:\t%d\n", 2799 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count, 2800 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count, 2801 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count, 2802 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count); 2803 2804 if (!left--) { 2805 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat); 2806 2807 len = sprintf(page,"\n\n" 2808 " cell processor:\n" 2809 " heartbeat state:\t\t"); 2810 2811 if (hb >> 16 != 0xDEAD) 2812 len += sprintf(page + len, "0x%08x\n", hb); 2813 else 2814 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF); 2815 2816 return len; 2817 } 2818 2819 if (!left--) { 2820 static const char* media_name[] = { 2821 "unshielded twisted pair", 2822 "multimode optical fiber ST", 2823 "multimode optical fiber SC", 2824 "single-mode optical fiber ST", 2825 "single-mode optical fiber SC", 2826 "unknown" 2827 }; 2828 2829 static const char* oc3_mode[] = { 2830 "normal operation", 2831 "diagnostic loopback", 2832 "line loopback", 2833 "unknown" 2834 }; 2835 2836 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release); 2837 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release); 
2838 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision); 2839 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type)); 2840 u32 oc3_index; 2841 2842 if ((media_index < 0) || (media_index > 4)) 2843 media_index = 5; 2844 2845 switch (fore200e->loop_mode) { 2846 case ATM_LM_NONE: oc3_index = 0; 2847 break; 2848 case ATM_LM_LOC_PHY: oc3_index = 1; 2849 break; 2850 case ATM_LM_RMT_PHY: oc3_index = 2; 2851 break; 2852 default: oc3_index = 3; 2853 } 2854 2855 return sprintf(page, 2856 " firmware release:\t\t%d.%d.%d\n" 2857 " monitor release:\t\t%d.%d\n" 2858 " media type:\t\t\t%s\n" 2859 " OC-3 revision:\t\t0x%x\n" 2860 " OC-3 mode:\t\t\t%s", 2861 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24, 2862 mon960_release >> 16, mon960_release << 16 >> 16, 2863 media_name[ media_index ], 2864 oc3_revision, 2865 oc3_mode[ oc3_index ]); 2866 } 2867 2868 if (!left--) { 2869 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor; 2870 2871 return sprintf(page, 2872 "\n\n" 2873 " monitor:\n" 2874 " version number:\t\t%d\n" 2875 " boot status word:\t\t0x%08x\n", 2876 fore200e->bus->read(&cp_monitor->mon_version), 2877 fore200e->bus->read(&cp_monitor->bstat)); 2878 } 2879 2880 if (!left--) 2881 return sprintf(page, 2882 "\n" 2883 " device statistics:\n" 2884 " 4b5b:\n" 2885 " crc_header_errors:\t\t%10u\n" 2886 " framing_errors:\t\t%10u\n", 2887 cpu_to_be32(fore200e->stats->phy.crc_header_errors), 2888 cpu_to_be32(fore200e->stats->phy.framing_errors)); 2889 2890 if (!left--) 2891 return sprintf(page, "\n" 2892 " OC-3:\n" 2893 " section_bip8_errors:\t%10u\n" 2894 " path_bip8_errors:\t\t%10u\n" 2895 " line_bip24_errors:\t\t%10u\n" 2896 " line_febe_errors:\t\t%10u\n" 2897 " path_febe_errors:\t\t%10u\n" 2898 " corr_hcs_errors:\t\t%10u\n" 2899 " ucorr_hcs_errors:\t\t%10u\n", 2900 cpu_to_be32(fore200e->stats->oc3.section_bip8_errors), 2901 cpu_to_be32(fore200e->stats->oc3.path_bip8_errors), 2902 
cpu_to_be32(fore200e->stats->oc3.line_bip24_errors), 2903 cpu_to_be32(fore200e->stats->oc3.line_febe_errors), 2904 cpu_to_be32(fore200e->stats->oc3.path_febe_errors), 2905 cpu_to_be32(fore200e->stats->oc3.corr_hcs_errors), 2906 cpu_to_be32(fore200e->stats->oc3.ucorr_hcs_errors)); 2907 2908 if (!left--) 2909 return sprintf(page,"\n" 2910 " ATM:\t\t\t\t cells\n" 2911 " TX:\t\t\t%10u\n" 2912 " RX:\t\t\t%10u\n" 2913 " vpi out of range:\t\t%10u\n" 2914 " vpi no conn:\t\t%10u\n" 2915 " vci out of range:\t\t%10u\n" 2916 " vci no conn:\t\t%10u\n", 2917 cpu_to_be32(fore200e->stats->atm.cells_transmitted), 2918 cpu_to_be32(fore200e->stats->atm.cells_received), 2919 cpu_to_be32(fore200e->stats->atm.vpi_bad_range), 2920 cpu_to_be32(fore200e->stats->atm.vpi_no_conn), 2921 cpu_to_be32(fore200e->stats->atm.vci_bad_range), 2922 cpu_to_be32(fore200e->stats->atm.vci_no_conn)); 2923 2924 if (!left--) 2925 return sprintf(page,"\n" 2926 " AAL0:\t\t\t cells\n" 2927 " TX:\t\t\t%10u\n" 2928 " RX:\t\t\t%10u\n" 2929 " dropped:\t\t\t%10u\n", 2930 cpu_to_be32(fore200e->stats->aal0.cells_transmitted), 2931 cpu_to_be32(fore200e->stats->aal0.cells_received), 2932 cpu_to_be32(fore200e->stats->aal0.cells_dropped)); 2933 2934 if (!left--) 2935 return sprintf(page,"\n" 2936 " AAL3/4:\n" 2937 " SAR sublayer:\t\t cells\n" 2938 " TX:\t\t\t%10u\n" 2939 " RX:\t\t\t%10u\n" 2940 " dropped:\t\t\t%10u\n" 2941 " CRC errors:\t\t%10u\n" 2942 " protocol errors:\t\t%10u\n\n" 2943 " CS sublayer:\t\t PDUs\n" 2944 " TX:\t\t\t%10u\n" 2945 " RX:\t\t\t%10u\n" 2946 " dropped:\t\t\t%10u\n" 2947 " protocol errors:\t\t%10u\n", 2948 cpu_to_be32(fore200e->stats->aal34.cells_transmitted), 2949 cpu_to_be32(fore200e->stats->aal34.cells_received), 2950 cpu_to_be32(fore200e->stats->aal34.cells_dropped), 2951 cpu_to_be32(fore200e->stats->aal34.cells_crc_errors), 2952 cpu_to_be32(fore200e->stats->aal34.cells_protocol_errors), 2953 cpu_to_be32(fore200e->stats->aal34.cspdus_transmitted), 2954 
cpu_to_be32(fore200e->stats->aal34.cspdus_received), 2955 cpu_to_be32(fore200e->stats->aal34.cspdus_dropped), 2956 cpu_to_be32(fore200e->stats->aal34.cspdus_protocol_errors)); 2957 2958 if (!left--) 2959 return sprintf(page,"\n" 2960 " AAL5:\n" 2961 " SAR sublayer:\t\t cells\n" 2962 " TX:\t\t\t%10u\n" 2963 " RX:\t\t\t%10u\n" 2964 " dropped:\t\t\t%10u\n" 2965 " congestions:\t\t%10u\n\n" 2966 " CS sublayer:\t\t PDUs\n" 2967 " TX:\t\t\t%10u\n" 2968 " RX:\t\t\t%10u\n" 2969 " dropped:\t\t\t%10u\n" 2970 " CRC errors:\t\t%10u\n" 2971 " protocol errors:\t\t%10u\n", 2972 cpu_to_be32(fore200e->stats->aal5.cells_transmitted), 2973 cpu_to_be32(fore200e->stats->aal5.cells_received), 2974 cpu_to_be32(fore200e->stats->aal5.cells_dropped), 2975 cpu_to_be32(fore200e->stats->aal5.congestion_experienced), 2976 cpu_to_be32(fore200e->stats->aal5.cspdus_transmitted), 2977 cpu_to_be32(fore200e->stats->aal5.cspdus_received), 2978 cpu_to_be32(fore200e->stats->aal5.cspdus_dropped), 2979 cpu_to_be32(fore200e->stats->aal5.cspdus_crc_errors), 2980 cpu_to_be32(fore200e->stats->aal5.cspdus_protocol_errors)); 2981 2982 if (!left--) 2983 return sprintf(page,"\n" 2984 " AUX:\t\t allocation failures\n" 2985 " small b1:\t\t\t%10u\n" 2986 " large b1:\t\t\t%10u\n" 2987 " small b2:\t\t\t%10u\n" 2988 " large b2:\t\t\t%10u\n" 2989 " RX PDUs:\t\t\t%10u\n" 2990 " TX PDUs:\t\t\t%10lu\n", 2991 cpu_to_be32(fore200e->stats->aux.small_b1_failed), 2992 cpu_to_be32(fore200e->stats->aux.large_b1_failed), 2993 cpu_to_be32(fore200e->stats->aux.small_b2_failed), 2994 cpu_to_be32(fore200e->stats->aux.large_b2_failed), 2995 cpu_to_be32(fore200e->stats->aux.rpd_alloc_failed), 2996 fore200e->tx_sat); 2997 2998 if (!left--) 2999 return sprintf(page,"\n" 3000 " receive carrier:\t\t\t%s\n", 3001 fore200e->stats->aux.receive_carrier ? 
"ON" : "OFF!"); 3002 3003 if (!left--) { 3004 return sprintf(page,"\n" 3005 " VCCs:\n address VPI VCI AAL " 3006 "TX PDUs TX min/max size RX PDUs RX min/max size\n"); 3007 } 3008 3009 for (i = 0; i < NBR_CONNECT; i++) { 3010 3011 vcc = fore200e->vc_map[i].vcc; 3012 3013 if (vcc == NULL) 3014 continue; 3015 3016 spin_lock_irqsave(&fore200e->q_lock, flags); 3017 3018 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) { 3019 3020 fore200e_vcc = FORE200E_VCC(vcc); 3021 ASSERT(fore200e_vcc); 3022 3023 len = sprintf(page, 3024 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n", 3025 (u32)(unsigned long)vcc, 3026 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 3027 fore200e_vcc->tx_pdu, 3028 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu, 3029 fore200e_vcc->tx_max_pdu, 3030 fore200e_vcc->rx_pdu, 3031 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu, 3032 fore200e_vcc->rx_max_pdu); 3033 3034 spin_unlock_irqrestore(&fore200e->q_lock, flags); 3035 return len; 3036 } 3037 3038 spin_unlock_irqrestore(&fore200e->q_lock, flags); 3039 } 3040 3041 return 0; 3042} 3043 3044module_init(fore200e_module_init); 3045module_exit(fore200e_module_cleanup); 3046 3047 3048static const struct atmdev_ops fore200e_ops = 3049{ 3050 .open = fore200e_open, 3051 .close = fore200e_close, 3052 .ioctl = fore200e_ioctl, 3053 .getsockopt = fore200e_getsockopt, 3054 .setsockopt = fore200e_setsockopt, 3055 .send = fore200e_send, 3056 .change_qos = fore200e_change_qos, 3057 .proc_read = fore200e_proc_read, 3058 .owner = THIS_MODULE 3059}; 3060 3061 3062#ifdef CONFIG_ATM_FORE200E_PCA 3063extern const unsigned char _fore200e_pca_fw_data[]; 3064extern const unsigned int _fore200e_pca_fw_size; 3065#endif 3066#ifdef CONFIG_ATM_FORE200E_SBA 3067extern const unsigned char _fore200e_sba_fw_data[]; 3068extern const unsigned int _fore200e_sba_fw_size; 3069#endif 3070 3071static const struct fore200e_bus fore200e_bus[] = { 3072#ifdef CONFIG_ATM_FORE200E_PCA 3073 
{ "PCA-200E", "pca200e", 32, 4, 32, 3074 _fore200e_pca_fw_data, &_fore200e_pca_fw_size, 3075 fore200e_pca_read, 3076 fore200e_pca_write, 3077 fore200e_pca_dma_map, 3078 fore200e_pca_dma_unmap, 3079 fore200e_pca_dma_sync_for_cpu, 3080 fore200e_pca_dma_sync_for_device, 3081 fore200e_pca_dma_chunk_alloc, 3082 fore200e_pca_dma_chunk_free, 3083 NULL, 3084 fore200e_pca_configure, 3085 fore200e_pca_map, 3086 fore200e_pca_reset, 3087 fore200e_pca_prom_read, 3088 fore200e_pca_unmap, 3089 NULL, 3090 fore200e_pca_irq_check, 3091 fore200e_pca_irq_ack, 3092 fore200e_pca_proc_read, 3093 }, 3094#endif 3095#ifdef CONFIG_ATM_FORE200E_SBA 3096 { "SBA-200E", "sba200e", 32, 64, 32, 3097 _fore200e_sba_fw_data, &_fore200e_sba_fw_size, 3098 fore200e_sba_read, 3099 fore200e_sba_write, 3100 fore200e_sba_dma_map, 3101 fore200e_sba_dma_unmap, 3102 fore200e_sba_dma_sync_for_cpu, 3103 fore200e_sba_dma_sync_for_device, 3104 fore200e_sba_dma_chunk_alloc, 3105 fore200e_sba_dma_chunk_free, 3106 fore200e_sba_detect, 3107 fore200e_sba_configure, 3108 fore200e_sba_map, 3109 fore200e_sba_reset, 3110 fore200e_sba_prom_read, 3111 fore200e_sba_unmap, 3112 fore200e_sba_irq_enable, 3113 fore200e_sba_irq_check, 3114 fore200e_sba_irq_ack, 3115 fore200e_sba_proc_read, 3116 }, 3117#endif 3118 {} 3119}; 3120 3121#ifdef MODULE_LICENSE 3122MODULE_LICENSE("GPL"); 3123#endif 3124