if_ti.c revision 227086
1/*- 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33/* 34 * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD. 
 * Manuals, sample driver and firmware source kits are available
 * from http://www.alteon.com/support/openkits.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Alteon Networks Tigon chip contains an embedded R4000 CPU,
 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs
 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The
 * Tigon supports hardware IP, TCP and UDP checksumming, multicast
 * filtering and jumbo (9014 byte) frames. The hardware is largely
 * controlled by firmware, which must be loaded into the NIC during
 * initialization.
 *
 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
 * revision, which supports new features such as extended commands,
 * extended jumbo receive ring descriptors and a mini receive ring.
 *
 * Alteon Networks is to be commended for releasing such a vast amount
 * of development material for the Tigon NIC without requiring an NDA
 * (although they really should have done it a long time ago). With
 * any luck, the other vendors will finally wise up and follow Alteon's
 * stellar example.
 *
 * The firmware for the Tigon 1 and 2 NICs is compiled directly into
 * this driver by #including it as a C header file. This bloats the
 * driver somewhat, but it's the easiest method considering that the
 * driver code and firmware code need to be kept in sync. The source
 * for the firmware is not provided with the FreeBSD distribution since
 * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3.
68 * 69 * The following people deserve special thanks: 70 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board 71 * for testing 72 * - Raymond Lee of Netgear, for providing a pair of Netgear 73 * GA620 Tigon 2 boards for testing 74 * - Ulf Zimmermann, for bringing the GA260 to my attention and 75 * convincing me to write this driver. 76 * - Andrew Gallatin for providing FreeBSD/Alpha support. 77 */ 78 79#include <sys/cdefs.h> 80__FBSDID("$FreeBSD: head/sys/dev/ti/if_ti.c 227086 2011-11-04 17:07:53Z yongari $"); 81 82#include "opt_ti.h" 83 84#include <sys/param.h> 85#include <sys/systm.h> 86#include <sys/sockio.h> 87#include <sys/mbuf.h> 88#include <sys/malloc.h> 89#include <sys/kernel.h> 90#include <sys/module.h> 91#include <sys/socket.h> 92#include <sys/queue.h> 93#include <sys/conf.h> 94#include <sys/sf_buf.h> 95 96#include <net/if.h> 97#include <net/if_arp.h> 98#include <net/ethernet.h> 99#include <net/if_dl.h> 100#include <net/if_media.h> 101#include <net/if_types.h> 102#include <net/if_vlan_var.h> 103 104#include <net/bpf.h> 105 106#include <netinet/in_systm.h> 107#include <netinet/in.h> 108#include <netinet/ip.h> 109 110#include <machine/bus.h> 111#include <machine/resource.h> 112#include <sys/bus.h> 113#include <sys/rman.h> 114 115/* #define TI_PRIVATE_JUMBOS */ 116#ifndef TI_PRIVATE_JUMBOS 117#include <vm/vm.h> 118#include <vm/vm_page.h> 119#endif 120 121#include <dev/pci/pcireg.h> 122#include <dev/pci/pcivar.h> 123 124#include <sys/tiio.h> 125#include <dev/ti/if_tireg.h> 126#include <dev/ti/ti_fw.h> 127#include <dev/ti/ti_fw2.h> 128 129#define TI_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS) 130/* 131 * We can only turn on header splitting if we're using extended receive 132 * BDs. 
133 */ 134#if defined(TI_JUMBO_HDRSPLIT) && defined(TI_PRIVATE_JUMBOS) 135#error "options TI_JUMBO_HDRSPLIT and TI_PRIVATE_JUMBOS are mutually exclusive" 136#endif /* TI_JUMBO_HDRSPLIT && TI_JUMBO_HDRSPLIT */ 137 138typedef enum { 139 TI_SWAP_HTON, 140 TI_SWAP_NTOH 141} ti_swap_type; 142 143 144/* 145 * Various supported device vendors/types and their names. 146 */ 147 148static const struct ti_type const ti_devs[] = { 149 { ALT_VENDORID, ALT_DEVICEID_ACENIC, 150 "Alteon AceNIC 1000baseSX Gigabit Ethernet" }, 151 { ALT_VENDORID, ALT_DEVICEID_ACENIC_COPPER, 152 "Alteon AceNIC 1000baseT Gigabit Ethernet" }, 153 { TC_VENDORID, TC_DEVICEID_3C985, 154 "3Com 3c985-SX Gigabit Ethernet" }, 155 { NG_VENDORID, NG_DEVICEID_GA620, 156 "Netgear GA620 1000baseSX Gigabit Ethernet" }, 157 { NG_VENDORID, NG_DEVICEID_GA620T, 158 "Netgear GA620 1000baseT Gigabit Ethernet" }, 159 { SGI_VENDORID, SGI_DEVICEID_TIGON, 160 "Silicon Graphics Gigabit Ethernet" }, 161 { DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX, 162 "Farallon PN9000SX Gigabit Ethernet" }, 163 { 0, 0, NULL } 164}; 165 166 167static d_open_t ti_open; 168static d_close_t ti_close; 169static d_ioctl_t ti_ioctl2; 170 171static struct cdevsw ti_cdevsw = { 172 .d_version = D_VERSION, 173 .d_flags = 0, 174 .d_open = ti_open, 175 .d_close = ti_close, 176 .d_ioctl = ti_ioctl2, 177 .d_name = "ti", 178}; 179 180static int ti_probe(device_t); 181static int ti_attach(device_t); 182static int ti_detach(device_t); 183static void ti_txeof(struct ti_softc *); 184static void ti_rxeof(struct ti_softc *); 185 186static void ti_stats_update(struct ti_softc *); 187static int ti_encap(struct ti_softc *, struct mbuf **); 188 189static void ti_intr(void *); 190static void ti_start(struct ifnet *); 191static void ti_start_locked(struct ifnet *); 192static int ti_ioctl(struct ifnet *, u_long, caddr_t); 193static void ti_init(void *); 194static void ti_init_locked(void *); 195static void ti_init2(struct ti_softc *); 196static void ti_stop(struct 
ti_softc *); 197static void ti_watchdog(void *); 198static int ti_shutdown(device_t); 199static int ti_ifmedia_upd(struct ifnet *); 200static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *); 201 202static u_int32_t ti_eeprom_putbyte(struct ti_softc *, int); 203static u_int8_t ti_eeprom_getbyte(struct ti_softc *, int, u_int8_t *); 204static int ti_read_eeprom(struct ti_softc *, caddr_t, int, int); 205 206static void ti_add_mcast(struct ti_softc *, struct ether_addr *); 207static void ti_del_mcast(struct ti_softc *, struct ether_addr *); 208static void ti_setmulti(struct ti_softc *); 209 210static void ti_mem_read(struct ti_softc *, u_int32_t, u_int32_t, void *); 211static void ti_mem_write(struct ti_softc *, u_int32_t, u_int32_t, void *); 212static void ti_mem_zero(struct ti_softc *, u_int32_t, u_int32_t); 213static int ti_copy_mem(struct ti_softc *, u_int32_t, u_int32_t, caddr_t, int, int); 214static int ti_copy_scratch(struct ti_softc *, u_int32_t, u_int32_t, caddr_t, 215 int, int, int); 216static int ti_bcopy_swap(const void *, void *, size_t, ti_swap_type); 217static void ti_loadfw(struct ti_softc *); 218static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *); 219static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, caddr_t, int); 220static void ti_handle_events(struct ti_softc *); 221static int ti_alloc_dmamaps(struct ti_softc *); 222static void ti_free_dmamaps(struct ti_softc *); 223static int ti_alloc_jumbo_mem(struct ti_softc *); 224#ifdef TI_PRIVATE_JUMBOS 225static void *ti_jalloc(struct ti_softc *); 226static void ti_jfree(void *, void *); 227#endif /* TI_PRIVATE_JUMBOS */ 228static int ti_newbuf_std(struct ti_softc *, int, struct mbuf *); 229static int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *); 230static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *); 231static int ti_init_rx_ring_std(struct ti_softc *); 232static void ti_free_rx_ring_std(struct ti_softc *); 233static int ti_init_rx_ring_jumbo(struct 
ti_softc *); 234static void ti_free_rx_ring_jumbo(struct ti_softc *); 235static int ti_init_rx_ring_mini(struct ti_softc *); 236static void ti_free_rx_ring_mini(struct ti_softc *); 237static void ti_free_tx_ring(struct ti_softc *); 238static int ti_init_tx_ring(struct ti_softc *); 239 240static int ti_64bitslot_war(struct ti_softc *); 241static int ti_chipinit(struct ti_softc *); 242static int ti_gibinit(struct ti_softc *); 243 244#ifdef TI_JUMBO_HDRSPLIT 245static __inline void ti_hdr_split (struct mbuf *top, int hdr_len, 246 int pkt_len, int idx); 247#endif /* TI_JUMBO_HDRSPLIT */ 248 249static device_method_t ti_methods[] = { 250 /* Device interface */ 251 DEVMETHOD(device_probe, ti_probe), 252 DEVMETHOD(device_attach, ti_attach), 253 DEVMETHOD(device_detach, ti_detach), 254 DEVMETHOD(device_shutdown, ti_shutdown), 255 { 0, 0 } 256}; 257 258static driver_t ti_driver = { 259 "ti", 260 ti_methods, 261 sizeof(struct ti_softc) 262}; 263 264static devclass_t ti_devclass; 265 266DRIVER_MODULE(ti, pci, ti_driver, ti_devclass, 0, 0); 267MODULE_DEPEND(ti, pci, 1, 1, 1); 268MODULE_DEPEND(ti, ether, 1, 1, 1); 269 270/* 271 * Send an instruction or address to the EEPROM, check for ACK. 272 */ 273static u_int32_t ti_eeprom_putbyte(struct ti_softc *sc, int byte) 274{ 275 int i, ack = 0; 276 277 /* 278 * Make sure we're in TX mode. 279 */ 280 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 281 282 /* 283 * Feed in each bit and stobe the clock. 284 */ 285 for (i = 0x80; i; i >>= 1) { 286 if (byte & i) { 287 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 288 } else { 289 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 290 } 291 DELAY(1); 292 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 293 DELAY(1); 294 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 295 } 296 297 /* 298 * Turn off TX mode. 299 */ 300 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 301 302 /* 303 * Check for ack. 
	 */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);

	return (ack);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'
 * We have to send two address bytes since the EEPROM can hold
 * more than 256 bytes of data.
 *
 * Returns 0 on success and stores the byte in *dest; returns 1 if
 * any step of the bit-banged protocol is not acknowledged (a
 * diagnostic is printed in that case).
 */
static u_int8_t ti_eeprom_getbyte(struct ti_softc *sc, int addr, u_int8_t *dest)
{
	int i;
	u_int8_t byte = 0;

	EEPROM_START;

	/*
	 * Send write control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
		device_printf(sc->ti_dev,
		    "failed to send write command, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	/*
	 * Send first byte of address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
		device_printf(sc->ti_dev, "failed to send address, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}
	/*
	 * Send second byte address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
		device_printf(sc->ti_dev, "failed to send address, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	/* Restart the transaction before switching to read mode. */
	EEPROM_STOP;
	EEPROM_START;
	/*
	 * Send read control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
		device_printf(sc->ti_dev,
		    "failed to send read command, status: %x\n",
		    CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return (1);
	}

	/*
	 * Start reading bits from EEPROM, MSB first.
	 */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
	for (i = 0x80; i; i >>= 1) {
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
			byte |= i;
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
	}

	EEPROM_STOP;

	/*
	 * No ACK generated for read, so just return byte.
	 */

	*dest = byte;

	return (0);
}

/*
 * Read a sequence of 'cnt' bytes from the EEPROM starting at offset
 * 'off' into 'dest'.  Returns 0 on success, 1 if any byte read fails
 * (bytes read before the failure are left in 'dest').
 */
static int
ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = ti_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * NIC memory read function.
 * Can be used to copy data from NIC local memory.
 *
 * The NIC's SRAM is accessed through a sliding window of TI_WINLEN
 * bytes; each pass repositions the window and copies the portion of
 * the request that fits.  NOTE(review): 'len' is assumed to be a
 * multiple of 4 (segsize / 4 truncates otherwise) — confirm callers.
 */
static void
ti_mem_read(struct ti_softc *sc, u_int32_t addr, u_int32_t len, void *buf)
{
	int segptr, segsize, cnt;
	char *ptr;

	segptr = addr;
	cnt = len;
	ptr = buf;

	while (cnt) {
		/* Clamp this pass to what remains of the current window. */
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr,
		    segsize / 4);
		ptr += segsize;
		segptr += segsize;
		cnt -= segsize;
	}
}

/*
 * NIC memory write function.
 * Can be used to copy data into NIC local memory.
 */
static void
ti_mem_write(struct ti_softc *sc, u_int32_t addr, u_int32_t len, void *buf)
{
	int segptr, segsize, cnt;
	char *ptr;

	segptr = addr;
	cnt = len;
	ptr = buf;

	while (cnt) {
		/* Clamp this pass to what remains of the current window. */
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr,
		    segsize / 4);
		ptr += segsize;
		segptr += segsize;
		cnt -= segsize;
	}
}

/*
 * NIC memory zero function.
 * Can be used to clear a section of NIC local memory.
 */
static void
ti_mem_zero(struct ti_softc *sc, u_int32_t addr, u_int32_t len)
{
	int segptr, segsize, cnt;

	segptr = addr;
	cnt = len;

	while (cnt) {
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
		    TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, segsize / 4);
		segptr += segsize;
		cnt -= segsize;
	}
}

/*
 * Copy data between NIC SRAM and a kernel or user buffer, through the
 * windowed SRAM aperture, with byte-swapping to/from network order.
 *
 * tigon_addr: NIC SRAM address; buf: host buffer; useraddr: non-zero
 * if 'buf' is a userland pointer (copyin/copyout with the softc lock
 * dropped); readdata: non-zero to read from the NIC, zero to write.
 * Returns 0 on success or EINVAL on an unsupported unaligned write.
 *
 * NOTE(review): copyin()/copyout() return values are ignored here; a
 * fault in the user buffer goes unreported to the caller.
 */
static int
ti_copy_mem(struct ti_softc *sc, u_int32_t tigon_addr, u_int32_t len,
    caddr_t buf, int useraddr, int readdata)
{
	int segptr, segsize, cnt;
	caddr_t ptr;
	u_int32_t origwin;
	u_int8_t tmparray[TI_WINLEN], tmparray2[TI_WINLEN];
	int resid, segresid;
	int first_pass;

	TI_LOCK_ASSERT(sc);

	/*
	 * At the moment, we don't handle non-aligned cases, we just bail.
	 * If this proves to be a problem, it will be fixed.
	 */
	if ((readdata == 0)
	 && (tigon_addr & 0x3)) {
		device_printf(sc->ti_dev, "%s: tigon address %#x isn't "
		    "word-aligned\n", __func__, tigon_addr);
		device_printf(sc->ti_dev, "%s: unaligned writes aren't "
		    "yet supported\n", __func__);
		return (EINVAL);
	}

	/* Round the start address down to a word boundary. */
	segptr = tigon_addr & ~0x3;
	segresid = tigon_addr - segptr;

	/*
	 * This is the non-aligned amount left over that we'll need to
	 * copy.
	 */
	resid = len & 0x3;

	/* Add in the left over amount at the front of the buffer */
	resid += segresid;

	cnt = len & ~0x3;
	/*
	 * If resid + segresid is >= 4, add multiples of 4 to the count and
	 * decrease the residual by that much.
	 */
	cnt += resid & ~0x3;
	resid -= resid & ~0x3;

	ptr = buf;

	first_pass = 1;

	/*
	 * Save the old window base value so it can be restored on exit.
	 */
	origwin = CSR_READ_4(sc, TI_WINBASE);

	while (cnt) {
		bus_size_t ti_offset;

		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));

		ti_offset = TI_WINDOW + (segptr & (TI_WINLEN -1));

		if (readdata) {
			bus_space_read_region_4(sc->ti_btag,
			    sc->ti_bhandle, ti_offset,
			    (u_int32_t *)tmparray,
			    segsize >> 2);
			if (useraddr) {
				/*
				 * Yeah, this is a little on the kludgy
				 * side, but at least this code is only
				 * used for debugging.
				 */
				ti_bcopy_swap(tmparray, tmparray2, segsize,
				    TI_SWAP_NTOH);

				/* Drop the lock across the copyout fault. */
				TI_UNLOCK(sc);
				if (first_pass) {
					/*
					 * Skip the unaligned head bytes on
					 * the very first window only.
					 */
					copyout(&tmparray2[segresid], ptr,
					    segsize - segresid);
					first_pass = 0;
				} else
					copyout(tmparray2, ptr, segsize);
				TI_LOCK(sc);
			} else {
				if (first_pass) {
					ti_bcopy_swap(tmparray, tmparray2,
					    segsize, TI_SWAP_NTOH);
					TI_UNLOCK(sc);
					bcopy(&tmparray2[segresid], ptr,
					    segsize - segresid);
					TI_LOCK(sc);
					first_pass = 0;
				} else
					ti_bcopy_swap(tmparray, ptr, segsize,
					    TI_SWAP_NTOH);
			}
		} else {
			if (useraddr) {
				TI_UNLOCK(sc);
				copyin(ptr, tmparray2, segsize);
				TI_LOCK(sc);
				ti_bcopy_swap(tmparray2, tmparray, segsize,
				    TI_SWAP_HTON);
			} else
				ti_bcopy_swap(ptr, tmparray, segsize,
				    TI_SWAP_HTON);

			bus_space_write_region_4(sc->ti_btag,
			    sc->ti_bhandle, ti_offset,
			    (u_int32_t *)tmparray,
			    segsize >> 2);
		}
		segptr += segsize;
		ptr += segsize;
		cnt -= segsize;
	}

	/*
	 * Handle leftover, non-word-aligned bytes.
	 */
	if (resid != 0) {
		u_int32_t tmpval, tmpval2;
		bus_size_t ti_offset;

		/*
		 * Set the segment pointer.
		 */
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));

		ti_offset = TI_WINDOW + (segptr & (TI_WINLEN - 1));

		/*
		 * First, grab whatever is in our source/destination.
		 * We'll obviously need this for reads, but also for
		 * writes, since we'll be doing read/modify/write.
		 */
		bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
		    ti_offset, &tmpval, 1);

		/*
		 * Next, translate this from little-endian to big-endian
		 * (at least on i386 boxes).
		 */
		tmpval2 = ntohl(tmpval);

		if (readdata) {
			/*
			 * If we're reading, just copy the leftover number
			 * of bytes from the host byte order buffer to
			 * the user's buffer.
			 */
			if (useraddr) {
				TI_UNLOCK(sc);
				copyout(&tmpval2, ptr, resid);
				TI_LOCK(sc);
			} else
				bcopy(&tmpval2, ptr, resid);
		} else {
			/*
			 * If we're writing, first copy the bytes to be
			 * written into the network byte order buffer,
			 * leaving the rest of the buffer with whatever was
			 * originally in there. Then, swap the bytes
			 * around into host order and write them out.
			 *
			 * XXX KDM the read side of this has been verified
			 * to work, but the write side of it has not been
			 * verified. So user beware.
			 */
			if (useraddr) {
				TI_UNLOCK(sc);
				copyin(ptr, &tmpval2, resid);
				TI_LOCK(sc);
			} else
				bcopy(ptr, &tmpval2, resid);

			tmpval = htonl(tmpval2);

			bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
			    ti_offset, &tmpval, 1);
		}
	}

	/* Restore the window base we clobbered above. */
	CSR_WRITE_4(sc, TI_WINBASE, origwin);

	return (0);
}

/*
 * Copy data between a CPU-local scratchpad region and a kernel or
 * user buffer, one word at a time through the per-CPU SRAM address/
 * data registers.  Both 'tigon_addr' and 'len' must be word-aligned;
 * 'cpu' selects which embedded CPU's registers to use.
 * Returns 0 on success or EINVAL on misalignment.
 */
static int
ti_copy_scratch(struct ti_softc *sc, u_int32_t tigon_addr, u_int32_t len,
    caddr_t buf, int useraddr, int readdata, int cpu)
{
	u_int32_t segptr;
	int cnt;
	u_int32_t tmpval, tmpval2;
	caddr_t ptr;

	TI_LOCK_ASSERT(sc);

	/*
	 * At the moment, we don't handle non-aligned cases, we just bail.
	 * If this proves to be a problem, it will be fixed.
	 */
	if (tigon_addr & 0x3) {
		device_printf(sc->ti_dev, "%s: tigon address %#x "
		    "isn't word-aligned\n", __func__, tigon_addr);
		return (EINVAL);
	}

	if (len & 0x3) {
		device_printf(sc->ti_dev, "%s: transfer length %d "
		    "isn't word-aligned\n", __func__, len);
		return (EINVAL);
	}

	segptr = tigon_addr;
	cnt = len;
	ptr = buf;

	while (cnt) {
		CSR_WRITE_4(sc, CPU_REG(TI_SRAM_ADDR, cpu), segptr);

		if (readdata) {
			tmpval2 = CSR_READ_4(sc, CPU_REG(TI_SRAM_DATA, cpu));

			tmpval = ntohl(tmpval2);

			/*
			 * Note: I've used this debugging interface
			 * extensively with Alteon's 12.3.15 firmware,
			 * compiled with GCC 2.7.2.1 and binutils 2.9.1.
			 *
			 * When you compile the firmware without
			 * optimization, which is necessary sometimes in
			 * order to properly step through it, you sometimes
			 * read out a bogus value of 0xc0017c instead of
			 * whatever was supposed to be in that scratchpad
			 * location. That value is on the stack somewhere,
			 * but I've never been able to figure out what was
			 * causing the problem.
			 *
			 * The address seems to pop up in random places,
			 * often not in the same place on two subsequent
			 * reads.
			 *
			 * In any case, the underlying data doesn't seem
			 * to be affected, just the value read out.
			 *
			 * KDM, 3/7/2000
			 */

			if (tmpval2 == 0xc0017c)
				device_printf(sc->ti_dev, "found 0xc0017c at "
				    "%#x (tmpval2)\n", segptr);

			if (tmpval == 0xc0017c)
				device_printf(sc->ti_dev, "found 0xc0017c at "
				    "%#x (tmpval)\n", segptr);

			if (useraddr)
				copyout(&tmpval, ptr, 4);
			else
				bcopy(&tmpval, ptr, 4);
		} else {
			if (useraddr)
				copyin(ptr, &tmpval2, 4);
			else
				bcopy(ptr, &tmpval2, 4);

			tmpval = htonl(tmpval2);

			CSR_WRITE_4(sc, CPU_REG(TI_SRAM_DATA, cpu), tmpval);
		}

		cnt -= 4;
		segptr += 4;
		ptr += 4;
	}

	return (0);
}

/*
 * Copy 'len' bytes from 'src' to 'dst', byte-swapping each 32-bit
 * word per 'swap_type' (host-to-network or network-to-host).  'len'
 * must be a multiple of 4; returns -1 (with a diagnostic) otherwise,
 * 0 on success.
 */
static int
ti_bcopy_swap(const void *src, void *dst, size_t len, ti_swap_type swap_type)
{
	const u_int8_t *tmpsrc;
	u_int8_t *tmpdst;
	size_t tmplen;

	if (len & 0x3) {
		printf("ti_bcopy_swap: length %zd isn't 32-bit aligned\n",
		    len);
		return (-1);
	}

	tmpsrc = src;
	tmpdst = dst;
	tmplen = len;

	while (tmplen) {
		if (swap_type == TI_SWAP_NTOH)
			*(u_int32_t *)tmpdst =
			    ntohl(*(const u_int32_t *)tmpsrc);
		else
			*(u_int32_t *)tmpdst =
			    htonl(*(const u_int32_t *)tmpsrc);

		tmpsrc += 4;
		tmpdst += 4;
		tmplen -= 4;
	}

	return (0);
}

/*
 * Load firmware image into the NIC. Check that the firmware revision
 * is acceptable and see if we want the firmware for the Tigon 1 or
 * Tigon 2.
817 */ 818static void 819ti_loadfw(struct ti_softc *sc) 820{ 821 822 TI_LOCK_ASSERT(sc); 823 824 switch (sc->ti_hwrev) { 825 case TI_HWREV_TIGON: 826 if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR || 827 tigonFwReleaseMinor != TI_FIRMWARE_MINOR || 828 tigonFwReleaseFix != TI_FIRMWARE_FIX) { 829 device_printf(sc->ti_dev, "firmware revision mismatch; " 830 "want %d.%d.%d, got %d.%d.%d\n", 831 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 832 TI_FIRMWARE_FIX, tigonFwReleaseMajor, 833 tigonFwReleaseMinor, tigonFwReleaseFix); 834 return; 835 } 836 ti_mem_write(sc, tigonFwTextAddr, tigonFwTextLen, tigonFwText); 837 ti_mem_write(sc, tigonFwDataAddr, tigonFwDataLen, tigonFwData); 838 ti_mem_write(sc, tigonFwRodataAddr, tigonFwRodataLen, 839 tigonFwRodata); 840 ti_mem_zero(sc, tigonFwBssAddr, tigonFwBssLen); 841 ti_mem_zero(sc, tigonFwSbssAddr, tigonFwSbssLen); 842 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr); 843 break; 844 case TI_HWREV_TIGON_II: 845 if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR || 846 tigon2FwReleaseMinor != TI_FIRMWARE_MINOR || 847 tigon2FwReleaseFix != TI_FIRMWARE_FIX) { 848 device_printf(sc->ti_dev, "firmware revision mismatch; " 849 "want %d.%d.%d, got %d.%d.%d\n", 850 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 851 TI_FIRMWARE_FIX, tigon2FwReleaseMajor, 852 tigon2FwReleaseMinor, tigon2FwReleaseFix); 853 return; 854 } 855 ti_mem_write(sc, tigon2FwTextAddr, tigon2FwTextLen, 856 tigon2FwText); 857 ti_mem_write(sc, tigon2FwDataAddr, tigon2FwDataLen, 858 tigon2FwData); 859 ti_mem_write(sc, tigon2FwRodataAddr, tigon2FwRodataLen, 860 tigon2FwRodata); 861 ti_mem_zero(sc, tigon2FwBssAddr, tigon2FwBssLen); 862 ti_mem_zero(sc, tigon2FwSbssAddr, tigon2FwSbssLen); 863 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr); 864 break; 865 default: 866 device_printf(sc->ti_dev, 867 "can't load firmware: unknown hardware rev\n"); 868 break; 869 } 870} 871 872/* 873 * Send the NIC a command via the command ring. 
874 */ 875static void 876ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd) 877{ 878 int index; 879 880 index = sc->ti_cmd_saved_prodidx; 881 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd)); 882 TI_INC(index, TI_CMD_RING_CNT); 883 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 884 sc->ti_cmd_saved_prodidx = index; 885} 886 887/* 888 * Send the NIC an extended command. The 'len' parameter specifies the 889 * number of command slots to include after the initial command. 890 */ 891static void 892ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg, int len) 893{ 894 int index; 895 int i; 896 897 index = sc->ti_cmd_saved_prodidx; 898 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd)); 899 TI_INC(index, TI_CMD_RING_CNT); 900 for (i = 0; i < len; i++) { 901 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), 902 *(u_int32_t *)(&arg[i * 4])); 903 TI_INC(index, TI_CMD_RING_CNT); 904 } 905 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 906 sc->ti_cmd_saved_prodidx = index; 907} 908 909/* 910 * Handle events that have triggered interrupts. 
 */
static void
ti_handle_events(struct ti_softc *sc)
{
	struct ti_event_desc *e;

	if (sc->ti_rdata->ti_event_ring == NULL)
		return;

	/* Drain the event ring until the consumer catches the producer. */
	while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
		e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
		switch (TI_EVENT_EVENT(e)) {
		case TI_EV_LINKSTAT_CHANGED:
			sc->ti_linkstat = TI_EVENT_CODE(e);
			if (sc->ti_linkstat == TI_EV_CODE_LINK_UP)
				device_printf(sc->ti_dev, "10/100 link up\n");
			else if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP)
				device_printf(sc->ti_dev, "gigabit link up\n");
			else if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN)
				device_printf(sc->ti_dev, "link down\n");
			break;
		case TI_EV_ERROR:
			if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
				device_printf(sc->ti_dev, "invalid command\n");
			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD)
				device_printf(sc->ti_dev, "unknown command\n");
			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG)
				device_printf(sc->ti_dev, "bad config data\n");
			break;
		case TI_EV_FIRMWARE_UP:
			/* Firmware is running; finish second-stage init. */
			ti_init2(sc);
			break;
		case TI_EV_STATS_UPDATED:
			ti_stats_update(sc);
			break;
		case TI_EV_RESET_JUMBO_RING:
		case TI_EV_MCAST_UPDATED:
			/* Who cares. */
			break;
		default:
			device_printf(sc->ti_dev, "unknown event: %d\n",
			    TI_EVENT_EVENT(e));
			break;
		}
		/* Advance the consumer index. */
		TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
		CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
	}
}

/*
 * Create the DMA maps for the TX ring and the standard, jumbo and
 * mini RX rings.  Returns 0 on success or ENOBUFS on the first
 * failure.  NOTE(review): on failure, maps created so far are not
 * destroyed here — presumably the caller invokes ti_free_dmamaps();
 * verify against the attach path.
 */
static int
ti_alloc_dmamaps(struct ti_softc *sc)
{
	int i;

	for (i = 0; i < TI_TX_RING_CNT; i++) {
		sc->ti_cdata.ti_txdesc[i].tx_m = NULL;
		sc->ti_cdata.ti_txdesc[i].tx_dmamap = 0;
		if (bus_dmamap_create(sc->ti_mbuftx_dmat, 0,
		    &sc->ti_cdata.ti_txdesc[i].tx_dmamap))
			return (ENOBUFS);
	}
	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->ti_mbufrx_dmat, 0,
		    &sc->ti_cdata.ti_rx_std_maps[i]))
			return (ENOBUFS);
	}

	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->ti_jumbo_dmat, 0,
		    &sc->ti_cdata.ti_rx_jumbo_maps[i]))
			return (ENOBUFS);
	}
	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
		if (bus_dmamap_create(sc->ti_mbufrx_dmat, 0,
		    &sc->ti_cdata.ti_rx_mini_maps[i]))
			return (ENOBUFS);
	}

	return (0);
}

/*
 * Destroy any DMA maps created by ti_alloc_dmamaps().  Safe to call
 * after a partial allocation failure: each tag and each map is
 * checked before being destroyed, and map pointers are zeroed so a
 * second call is harmless.
 */
static void
ti_free_dmamaps(struct ti_softc *sc)
{
	int i;

	if (sc->ti_mbuftx_dmat)
		for (i = 0; i < TI_TX_RING_CNT; i++)
			if (sc->ti_cdata.ti_txdesc[i].tx_dmamap) {
				bus_dmamap_destroy(sc->ti_mbuftx_dmat,
				    sc->ti_cdata.ti_txdesc[i].tx_dmamap);
				sc->ti_cdata.ti_txdesc[i].tx_dmamap = 0;
			}

	if (sc->ti_mbufrx_dmat)
		for (i = 0; i < TI_STD_RX_RING_CNT; i++)
			if (sc->ti_cdata.ti_rx_std_maps[i]) {
				bus_dmamap_destroy(sc->ti_mbufrx_dmat,
				    sc->ti_cdata.ti_rx_std_maps[i]);
				sc->ti_cdata.ti_rx_std_maps[i] = 0;
			}

	if (sc->ti_jumbo_dmat)
		for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++)
			if (sc->ti_cdata.ti_rx_jumbo_maps[i]) {
				bus_dmamap_destroy(sc->ti_jumbo_dmat,
				    sc->ti_cdata.ti_rx_jumbo_maps[i]);
				sc->ti_cdata.ti_rx_jumbo_maps[i] = 0;
			}
	if (sc->ti_mbufrx_dmat)
		for (i = 0; i < TI_MINI_RX_RING_CNT; i++)
			if (sc->ti_cdata.ti_rx_mini_maps[i]) {
				bus_dmamap_destroy(sc->ti_mbufrx_dmat,
				    sc->ti_cdata.ti_rx_mini_maps[i]);
				sc->ti_cdata.ti_rx_mini_maps[i] = 0;
			}
}

#ifdef TI_PRIVATE_JUMBOS

/*
 * Memory management for the jumbo receive ring is a pain in the
 * butt. We need to allocate at least 9018 bytes of space per frame,
 * _and_ it has to be contiguous (unless you use the extended
 * jumbo descriptor format). Using malloc() all the time won't
 * work: malloc() allocates memory in powers of two, which means we
 * would end up wasting a considerable amount of space by allocating
 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have
 * to do our own memory management.
 *
 * The driver needs to allocate a contiguous chunk of memory at boot
 * time. We then chop this up ourselves into 9K pieces and use them
 * as external mbuf storage.
 *
 * One issue here is how much memory to allocate. The jumbo ring has
 * 256 slots in it, but at 9K per slot that can consume over 2MB of
 * RAM. This is a bit much, especially considering we also need
 * RAM for the standard ring and mini ring (on the Tigon 2). To
 * save space, we only actually allocate enough memory for 64 slots
 * by default, which works out to between 500 and 600K. This can
 * be tuned by changing a #define in if_tireg.h.
 */

/*
 * Allocate the private jumbo buffer pool: one contiguous DMA-able
 * region of TI_JMEM bytes, carved into TI_JSLOTS pieces of TI_JLEN
 * bytes, tracked on free/in-use lists.  Returns 0 or ENOBUFS.
 * NOTE(review): on a mid-loop malloc() failure, list entries
 * allocated so far are not freed here — presumably the detach path
 * cleans up; verify.
 */
static int
ti_alloc_jumbo_mem(struct ti_softc *sc)
{
	caddr_t ptr;
	int i;
	struct ti_jpool_entry *entry;

	/*
	 * Grab a big chunk o' storage. Since we are chopping this pool up
	 * into ~9k chunks, there doesn't appear to be a need to use page
	 * alignment.
	 */
	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TI_JMEM,			/* maxsize */
	    1,				/* nsegments */
	    TI_JMEM,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ti_jumbo_dmat) != 0) {
		device_printf(sc->ti_dev, "Failed to allocate jumbo dmat\n");
		return (ENOBUFS);
	}

	if (bus_dmamem_alloc(sc->ti_jumbo_dmat,
	    (void**)&sc->ti_cdata.ti_jumbo_buf,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
	    &sc->ti_jumbo_dmamap) != 0) {
		device_printf(sc->ti_dev, "Failed to allocate jumbo memory\n");
		return (ENOBUFS);
	}

	SLIST_INIT(&sc->ti_jfree_listhead);
	SLIST_INIT(&sc->ti_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->ti_cdata.ti_jumbo_buf;
	for (i = 0; i < TI_JSLOTS; i++) {
		sc->ti_cdata.ti_jslots[i] = ptr;
		ptr += TI_JLEN;
		entry = malloc(sizeof(struct ti_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			device_printf(sc->ti_dev, "no memory for jumbo "
			    "buffer queue!\n");
			return (ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
	}

	return (0);
}

/*
 * Allocate a jumbo buffer: move the head entry of the free list to
 * the in-use list and return the corresponding slot address, or NULL
 * if the pool is exhausted.
 */
static void *ti_jalloc(struct ti_softc *sc)
{
	struct ti_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->ti_jfree_listhead);

	if (entry == NULL) {
		device_printf(sc->ti_dev, "no free jumbo buffers\n");
		return (NULL);
	}

	SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
	return (sc->ti_cdata.ti_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
1136 */ 1137static void 1138ti_jfree(void *buf, void *args) 1139{ 1140 struct ti_softc *sc; 1141 int i; 1142 struct ti_jpool_entry *entry; 1143 1144 /* Extract the softc struct pointer. */ 1145 sc = (struct ti_softc *)args; 1146 1147 if (sc == NULL) 1148 panic("ti_jfree: didn't get softc pointer!"); 1149 1150 /* calculate the slot this buffer belongs to */ 1151 i = ((vm_offset_t)buf 1152 - (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN; 1153 1154 if ((i < 0) || (i >= TI_JSLOTS)) 1155 panic("ti_jfree: asked to free buffer that we don't manage!"); 1156 1157 entry = SLIST_FIRST(&sc->ti_jinuse_listhead); 1158 if (entry == NULL) 1159 panic("ti_jfree: buffer not in use!"); 1160 entry->slot = i; 1161 SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries); 1162 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries); 1163} 1164 1165#else 1166 1167static int 1168ti_alloc_jumbo_mem(struct ti_softc *sc) 1169{ 1170 1171 /* 1172 * The VM system will take care of providing aligned pages. Alignment 1173 * is set to 1 here so that busdma resources won't be wasted. 1174 */ 1175 if (bus_dma_tag_create(sc->ti_parent_dmat, /* parent */ 1176 1, 0, /* algnmnt, boundary */ 1177 BUS_SPACE_MAXADDR, /* lowaddr */ 1178 BUS_SPACE_MAXADDR, /* highaddr */ 1179 NULL, NULL, /* filter, filterarg */ 1180 PAGE_SIZE * 4 /*XXX*/, /* maxsize */ 1181 4, /* nsegments */ 1182 PAGE_SIZE, /* maxsegsize */ 1183 0, /* flags */ 1184 NULL, NULL, /* lockfunc, lockarg */ 1185 &sc->ti_jumbo_dmat) != 0) { 1186 device_printf(sc->ti_dev, "Failed to allocate jumbo dmat\n"); 1187 return (ENOBUFS); 1188 } 1189 1190 return (0); 1191} 1192 1193#endif /* TI_PRIVATE_JUMBOS */ 1194 1195/* 1196 * Intialize a standard receive ring descriptor. 
/*
 * Initialize slot i of the standard receive ring.
 *
 * If m is NULL a fresh mbuf with a 2K cluster is allocated; otherwise
 * the caller's mbuf is recycled (its data pointer reset to the start
 * of its cluster).  The buffer is DMA-loaded and the ring descriptor
 * is filled in for the NIC.
 *
 * Returns 0 on success, ENOBUFS on allocation or DMA-load failure.
 * NOTE(review): on a load failure the mbuf is not freed here, but it
 * has already been stored in ti_rx_std_chain[i], so it is presumably
 * reclaimed by ti_free_rx_ring_std() — confirm against callers.
 */
static int
ti_newbuf_std(struct ti_softc *sc, int i, struct mbuf *m)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs;
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;
	int nsegs;

	nsegs = 0;
	if (m == NULL) {
		/* Allocate a new header mbuf plus a cluster for the data. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: rewind data to cluster start. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Shift data up so the IP header lands on a natural boundary. */
	m_adj(m_new, ETHER_ALIGN);
	sc->ti_cdata.ti_rx_std_chain[i] = m_new;
	r = &sc->ti_rdata->ti_rx_std_ring[i];
	map = sc->ti_cdata.ti_rx_std_maps[i];
	if (bus_dmamap_load_mbuf_sg(sc->ti_mbufrx_dmat, map, m_new, &segs,
	    &nsegs, 0))
		return (ENOBUFS);
	/* A single cluster must map to exactly one DMA segment. */
	if (nsegs != 1)
		return (ENOBUFS);
	/* Hand the buffer's bus address and length to the NIC. */
	ti_hostaddr64(&r->ti_addr, segs.ds_addr);
	r->ti_len = segs.ds_len;
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = 0;
	/* Ask the NIC to checksum inbound frames if offload is enabled. */
	if (sc->ti_ifp->if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;

	bus_dmamap_sync(sc->ti_mbufrx_dmat, map, BUS_DMASYNC_PREREAD);
	return (0);
}
/*
 * Initialize slot i of the mini receive ring (Tigon 2 only).
 *
 * Mini-ring buffers are small: the packet data lives directly in the
 * mbuf header's internal data area (MHLEN bytes), with no cluster.
 * If m is NULL a fresh header mbuf is allocated; otherwise the
 * caller's mbuf is recycled with its data pointer reset to m_pktdat.
 *
 * Returns 0 on success, ENOBUFS on allocation or DMA-load failure.
 */
static int
ti_newbuf_mini(struct ti_softc *sc, int i, struct mbuf *m)
{
	bus_dma_segment_t segs;
	bus_dmamap_t map;
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;
	int nsegs;

	nsegs = 0;
	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
	} else {
		/* Recycle: point data back at the mbuf's internal buffer. */
		m_new = m;
		m_new->m_data = m_new->m_pktdat;
		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
	}

	/* Shift data up so the IP header lands on a natural boundary. */
	m_adj(m_new, ETHER_ALIGN);
	r = &sc->ti_rdata->ti_rx_mini_ring[i];
	sc->ti_cdata.ti_rx_mini_chain[i] = m_new;
	map = sc->ti_cdata.ti_rx_mini_maps[i];
	if (bus_dmamap_load_mbuf_sg(sc->ti_mbufrx_dmat, map, m_new, &segs,
	    &nsegs, 0))
		return (ENOBUFS);
	/* The MHLEN data area must map to exactly one DMA segment. */
	if (nsegs != 1)
		return (ENOBUFS);
	ti_hostaddr64(&r->ti_addr, segs.ds_addr);
	r->ti_len = segs.ds_len;
	r->ti_type = TI_BDTYPE_RECV_BD;
	/* Mark the descriptor as belonging to the mini ring. */
	r->ti_flags = TI_BDFLAG_MINI_RING;
	if (sc->ti_ifp->if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;

	bus_dmamap_sync(sc->ti_mbufrx_dmat, map, BUS_DMASYNC_PREREAD);
	return (0);
}
/*
 * Initialize a jumbo receive ring descriptor (TI_PRIVATE_JUMBOS
 * version).  This allocates a jumbo buffer from the pool managed
 * internally by the driver (see ti_alloc_jumbo_mem/ti_jalloc) and
 * attaches it to a fresh header mbuf as external storage, with
 * ti_jfree() installed as the free routine to return the slot to
 * the pool.  If m is non-NULL the caller's mbuf (and its already-
 * attached jumbo buffer) is recycled instead.
 *
 * Returns 0 on success, ENOBUFS on allocation or DMA-load failure.
 */
static int
ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *m)
{
	bus_dmamap_t map;
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;
	int nsegs;
	bus_dma_segment_t segs;

	if (m == NULL) {
		caddr_t *buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return (ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = ti_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			device_printf(sc->ti_dev, "jumbo allocation failed "
			    "-- packet dropped!\n");
			return (ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_data = (void *) buf;
		m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
		/* ti_jfree() returns the slot to the pool on last unref. */
		MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, ti_jfree,
		    (struct ti_softc *)sc, 0, EXT_NET_DRV);
	} else {
		/* Recycle: rewind data to the start of the jumbo buffer. */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN;
	}

	/* Shift data up so the IP header lands on a natural boundary. */
	m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->ti_rdata->ti_rx_jumbo_ring[i];
	sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new;
	map = sc->ti_cdata.ti_rx_jumbo_maps[i];
	if (bus_dmamap_load_mbuf_sg(sc->ti_jumbo_dmat, map, m_new, &segs,
	    &nsegs, 0))
		return (ENOBUFS);
	/* The pool buffer is contiguous, so expect one DMA segment. */
	if (nsegs != 1)
		return (ENOBUFS);
	ti_hostaddr64(&r->ti_addr, segs.ds_addr);
	r->ti_len = segs.ds_len;
	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
	r->ti_flags = TI_BDFLAG_JUMBO_RING;
	if (sc->ti_ifp->if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_idx = i;

	bus_dmamap_sync(sc->ti_jumbo_dmat, map, BUS_DMASYNC_PREREAD);
	return (0);
}
1376 */ 1377static int 1378ti_newbuf_jumbo(struct ti_softc *sc, int idx, struct mbuf *m_old) 1379{ 1380 bus_dmamap_t map; 1381 struct mbuf *cur, *m_new = NULL; 1382 struct mbuf *m[3] = {NULL, NULL, NULL}; 1383 struct ti_rx_desc_ext *r; 1384 vm_page_t frame; 1385 static int color; 1386 /* 1 extra buf to make nobufs easy*/ 1387 struct sf_buf *sf[3] = {NULL, NULL, NULL}; 1388 int i; 1389 bus_dma_segment_t segs[4]; 1390 int nsegs; 1391 1392 if (m_old != NULL) { 1393 m_new = m_old; 1394 cur = m_old->m_next; 1395 for (i = 0; i <= NPAYLOAD; i++){ 1396 m[i] = cur; 1397 cur = cur->m_next; 1398 } 1399 } else { 1400 /* Allocate the mbufs. */ 1401 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1402 if (m_new == NULL) { 1403 device_printf(sc->ti_dev, "mbuf allocation failed " 1404 "-- packet dropped!\n"); 1405 goto nobufs; 1406 } 1407 MGET(m[NPAYLOAD], M_DONTWAIT, MT_DATA); 1408 if (m[NPAYLOAD] == NULL) { 1409 device_printf(sc->ti_dev, "cluster mbuf allocation " 1410 "failed -- packet dropped!\n"); 1411 goto nobufs; 1412 } 1413 MCLGET(m[NPAYLOAD], M_DONTWAIT); 1414 if ((m[NPAYLOAD]->m_flags & M_EXT) == 0) { 1415 device_printf(sc->ti_dev, "mbuf allocation failed " 1416 "-- packet dropped!\n"); 1417 goto nobufs; 1418 } 1419 m[NPAYLOAD]->m_len = MCLBYTES; 1420 1421 for (i = 0; i < NPAYLOAD; i++){ 1422 MGET(m[i], M_DONTWAIT, MT_DATA); 1423 if (m[i] == NULL) { 1424 device_printf(sc->ti_dev, "mbuf allocation " 1425 "failed -- packet dropped!\n"); 1426 goto nobufs; 1427 } 1428 frame = vm_page_alloc(NULL, color++, 1429 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | 1430 VM_ALLOC_WIRED); 1431 if (frame == NULL) { 1432 device_printf(sc->ti_dev, "buffer allocation " 1433 "failed -- packet dropped!\n"); 1434 printf(" index %d page %d\n", idx, i); 1435 goto nobufs; 1436 } 1437 sf[i] = sf_buf_alloc(frame, SFB_NOWAIT); 1438 if (sf[i] == NULL) { 1439 vm_page_unwire(frame, 0); 1440 vm_page_free(frame); 1441 device_printf(sc->ti_dev, "buffer allocation " 1442 "failed -- packet dropped!\n"); 1443 printf(" index %d 
page %d\n", idx, i); 1444 goto nobufs; 1445 } 1446 } 1447 for (i = 0; i < NPAYLOAD; i++){ 1448 /* Attach the buffer to the mbuf. */ 1449 m[i]->m_data = (void *)sf_buf_kva(sf[i]); 1450 m[i]->m_len = PAGE_SIZE; 1451 MEXTADD(m[i], sf_buf_kva(sf[i]), PAGE_SIZE, 1452 sf_buf_mext, (void*)sf_buf_kva(sf[i]), sf[i], 1453 0, EXT_DISPOSABLE); 1454 m[i]->m_next = m[i+1]; 1455 } 1456 /* link the buffers to the header */ 1457 m_new->m_next = m[0]; 1458 m_new->m_data += ETHER_ALIGN; 1459 if (sc->ti_hdrsplit) 1460 m_new->m_len = MHLEN - ETHER_ALIGN; 1461 else 1462 m_new->m_len = HDR_LEN; 1463 m_new->m_pkthdr.len = NPAYLOAD * PAGE_SIZE + m_new->m_len; 1464 } 1465 1466 /* Set up the descriptor. */ 1467 r = &sc->ti_rdata->ti_rx_jumbo_ring[idx]; 1468 sc->ti_cdata.ti_rx_jumbo_chain[idx] = m_new; 1469 map = sc->ti_cdata.ti_rx_jumbo_maps[i]; 1470 if (bus_dmamap_load_mbuf_sg(sc->ti_jumbo_dmat, map, m_new, segs, 1471 &nsegs, 0)) 1472 return (ENOBUFS); 1473 if ((nsegs < 1) || (nsegs > 4)) 1474 return (ENOBUFS); 1475 ti_hostaddr64(&r->ti_addr0, segs[0].ds_addr); 1476 r->ti_len0 = m_new->m_len; 1477 1478 ti_hostaddr64(&r->ti_addr1, segs[1].ds_addr); 1479 r->ti_len1 = PAGE_SIZE; 1480 1481 ti_hostaddr64(&r->ti_addr2, segs[2].ds_addr); 1482 r->ti_len2 = m[1]->m_ext.ext_size; /* could be PAGE_SIZE or MCLBYTES */ 1483 1484 if (PAGE_SIZE == 4096) { 1485 ti_hostaddr64(&r->ti_addr3, segs[3].ds_addr); 1486 r->ti_len3 = MCLBYTES; 1487 } else { 1488 r->ti_len3 = 0; 1489 } 1490 r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; 1491 1492 r->ti_flags = TI_BDFLAG_JUMBO_RING|TI_RCB_FLAG_USE_EXT_RX_BD; 1493 1494 if (sc->ti_ifp->if_hwassist) 1495 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM; 1496 1497 r->ti_idx = idx; 1498 1499 bus_dmamap_sync(sc->ti_jumbo_dmat, map, BUS_DMASYNC_PREREAD); 1500 return (0); 1501 1502nobufs: 1503 1504 /* 1505 * Warning! : 1506 * This can only be called before the mbufs are strung together. 
1507 * If the mbufs are strung together, m_freem() will free the chain, 1508 * so that the later mbufs will be freed multiple times. 1509 */ 1510 if (m_new) 1511 m_freem(m_new); 1512 1513 for (i = 0; i < 3; i++) { 1514 if (m[i]) 1515 m_freem(m[i]); 1516 if (sf[i]) 1517 sf_buf_mext((void *)sf_buf_kva(sf[i]), sf[i]); 1518 } 1519 return (ENOBUFS); 1520} 1521#endif 1522 1523 1524 1525/* 1526 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 1527 * that's 1MB or memory, which is a lot. For now, we fill only the first 1528 * 256 ring entries and hope that our CPU is fast enough to keep up with 1529 * the NIC. 1530 */ 1531static int 1532ti_init_rx_ring_std(struct ti_softc *sc) 1533{ 1534 int i; 1535 struct ti_cmd_desc cmd; 1536 1537 for (i = 0; i < TI_SSLOTS; i++) { 1538 if (ti_newbuf_std(sc, i, NULL) == ENOBUFS) 1539 return (ENOBUFS); 1540 }; 1541 1542 TI_UPDATE_STDPROD(sc, i - 1); 1543 sc->ti_std = i - 1; 1544 1545 return (0); 1546} 1547 1548static void 1549ti_free_rx_ring_std(struct ti_softc *sc) 1550{ 1551 bus_dmamap_t map; 1552 int i; 1553 1554 for (i = 0; i < TI_STD_RX_RING_CNT; i++) { 1555 if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) { 1556 map = sc->ti_cdata.ti_rx_std_maps[i]; 1557 bus_dmamap_sync(sc->ti_mbufrx_dmat, map, 1558 BUS_DMASYNC_POSTREAD); 1559 bus_dmamap_unload(sc->ti_mbufrx_dmat, map); 1560 m_freem(sc->ti_cdata.ti_rx_std_chain[i]); 1561 sc->ti_cdata.ti_rx_std_chain[i] = NULL; 1562 } 1563 bzero((char *)&sc->ti_rdata->ti_rx_std_ring[i], 1564 sizeof(struct ti_rx_desc)); 1565 } 1566} 1567 1568static int 1569ti_init_rx_ring_jumbo(struct ti_softc *sc) 1570{ 1571 int i; 1572 struct ti_cmd_desc cmd; 1573 1574 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 1575 if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 1576 return (ENOBUFS); 1577 }; 1578 1579 TI_UPDATE_JUMBOPROD(sc, i - 1); 1580 sc->ti_jumbo = i - 1; 1581 1582 return (0); 1583} 1584 1585static void 1586ti_free_rx_ring_jumbo(struct ti_softc *sc) 1587{ 1588 bus_dmamap_t map; 1589 int i; 
/*
 * Tear down the transmit ring: for every slot with an outstanding
 * mbuf, complete the DMA transaction (sync POSTWRITE, then unload),
 * free the mbuf chain, and zero the hardware descriptor.
 */
static void
ti_free_tx_ring(struct ti_softc *sc)
{
	struct ti_txdesc *txd;
	int i;

	/* Nothing to do if the ring was never allocated. */
	if (sc->ti_rdata->ti_tx_ring == NULL)
		return;

	for (i = 0; i < TI_TX_RING_CNT; i++) {
		txd = &sc->ti_cdata.ti_txdesc[i];
		if (txd->tx_m != NULL) {
			/* Finish the DMA write before releasing the map. */
			bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
		/* Clear the descriptor so the NIC sees an empty slot. */
		bzero((char *)&sc->ti_rdata->ti_tx_ring[i],
		    sizeof(struct ti_tx_desc));
	}
}
ti_softc *sc) 1666{ 1667 struct ti_txdesc *txd; 1668 int i; 1669 1670 STAILQ_INIT(&sc->ti_cdata.ti_txfreeq); 1671 STAILQ_INIT(&sc->ti_cdata.ti_txbusyq); 1672 for (i = 0; i < TI_TX_RING_CNT; i++) { 1673 txd = &sc->ti_cdata.ti_txdesc[i]; 1674 STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q); 1675 } 1676 sc->ti_txcnt = 0; 1677 sc->ti_tx_saved_considx = 0; 1678 sc->ti_tx_saved_prodidx = 0; 1679 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0); 1680 return (0); 1681} 1682 1683/* 1684 * The Tigon 2 firmware has a new way to add/delete multicast addresses, 1685 * but we have to support the old way too so that Tigon 1 cards will 1686 * work. 1687 */ 1688static void 1689ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr) 1690{ 1691 struct ti_cmd_desc cmd; 1692 u_int16_t *m; 1693 u_int32_t ext[2] = {0, 0}; 1694 1695 m = (u_int16_t *)&addr->octet[0]; 1696 1697 switch (sc->ti_hwrev) { 1698 case TI_HWREV_TIGON: 1699 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1700 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1701 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0); 1702 break; 1703 case TI_HWREV_TIGON_II: 1704 ext[0] = htons(m[0]); 1705 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1706 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2); 1707 break; 1708 default: 1709 device_printf(sc->ti_dev, "unknown hwrev\n"); 1710 break; 1711 } 1712} 1713 1714static void 1715ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr) 1716{ 1717 struct ti_cmd_desc cmd; 1718 u_int16_t *m; 1719 u_int32_t ext[2] = {0, 0}; 1720 1721 m = (u_int16_t *)&addr->octet[0]; 1722 1723 switch (sc->ti_hwrev) { 1724 case TI_HWREV_TIGON: 1725 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1726 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1727 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0); 1728 break; 1729 case TI_HWREV_TIGON_II: 1730 ext[0] = htons(m[0]); 1731 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1732 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2); 1733 break; 
/*
 * Configure the Tigon's multicast address filter.
 *
 * Because the stack only tells us the *current* set of multicast
 * addresses (not the delta), and the firmware offers no "delete all"
 * command, we keep our own shadow list (ti_mc_listhead): first delete
 * every address we previously programmed, then re-program the NIC
 * from the interface's current address list.
 *
 * Called with the driver lock held (asserted below).  NIC interrupts
 * are masked around the reprogramming to keep the command sequence
 * atomic with respect to the firmware.
 */
static void
ti_setmulti(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct ti_cmd_desc cmd;
	struct ti_mc_entry *mc;
	u_int32_t intrs;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/* ALLMULTI short-circuits the whole filter. */
	if (ifp->if_flags & IFF_ALLMULTI) {
		TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0);
		return;
	} else {
		TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0);
	}

	/* Disable interrupts. */
	intrs = CSR_READ_4(sc, TI_MB_HOSTINTR);
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/* First, zot all the existing filters. */
	while (SLIST_FIRST(&sc->ti_mc_listhead) != NULL) {
		mc = SLIST_FIRST(&sc->ti_mc_listhead);
		ti_del_mcast(sc, &mc->mc_addr);
		SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries);
		free(mc, M_DEVBUF);
	}

	/* Now program new ones. */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT);
		if (mc == NULL) {
			/* Best effort: skip this address, keep going. */
			device_printf(sc->ti_dev,
			    "no memory for mcast filter entry\n");
			continue;
		}
		/* Shadow the address so we can delete it next time. */
		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    (char *)&mc->mc_addr, ETHER_ADDR_LEN);
		SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries);
		ti_add_mcast(sc, &mc->mc_addr);
	}
	if_maddr_runlock(ifp);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
}
*/ 1787 if_maddr_rlock(ifp); 1788 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1789 if (ifma->ifma_addr->sa_family != AF_LINK) 1790 continue; 1791 mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT); 1792 if (mc == NULL) { 1793 device_printf(sc->ti_dev, 1794 "no memory for mcast filter entry\n"); 1795 continue; 1796 } 1797 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1798 (char *)&mc->mc_addr, ETHER_ADDR_LEN); 1799 SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries); 1800 ti_add_mcast(sc, &mc->mc_addr); 1801 } 1802 if_maddr_runlock(ifp); 1803 1804 /* Re-enable interrupts. */ 1805 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); 1806} 1807 1808/* 1809 * Check to see if the BIOS has configured us for a 64 bit slot when 1810 * we aren't actually in one. If we detect this condition, we can work 1811 * around it on the Tigon 2 by setting a bit in the PCI state register, 1812 * but for the Tigon 1 we must give up and abort the interface attach. 1813 */ 1814static int ti_64bitslot_war(struct ti_softc *sc) 1815{ 1816 if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) { 1817 CSR_WRITE_4(sc, 0x600, 0); 1818 CSR_WRITE_4(sc, 0x604, 0); 1819 CSR_WRITE_4(sc, 0x600, 0x5555AAAA); 1820 if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) { 1821 if (sc->ti_hwrev == TI_HWREV_TIGON) 1822 return (EINVAL); 1823 else { 1824 TI_SETBIT(sc, TI_PCI_STATE, 1825 TI_PCISTATE_32BIT_BUS); 1826 return (0); 1827 } 1828 } 1829 } 1830 1831 return (0); 1832} 1833 1834/* 1835 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1836 * self-test results. 1837 */ 1838static int 1839ti_chipinit(struct ti_softc *sc) 1840{ 1841 u_int32_t cacheline; 1842 u_int32_t pci_writemax = 0; 1843 u_int32_t hdrsplit; 1844 1845 /* Initialize link to down state. 
/*
 * Do endian, PCI and DMA initialization.  Also check the on-board ROM
 * self-test results.  Order matters throughout: the endianness mode
 * must be set before any non-PCI register access, and the CPU must be
 * halted before further configuration.
 *
 * Returns 0 on success, ENODEV if self-test failed or the chip
 * revision is unknown, EINVAL on an unfixable 64-bit-slot mismatch.
 */
static int
ti_chipinit(struct ti_softc *sc)
{
	u_int32_t cacheline;
	u_int32_t pci_writemax = 0;
	u_int32_t hdrsplit;

	/* Initialize link to down state. */
	sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;

	/* Mirror the checksum-offload capability into if_hwassist. */
	if (sc->ti_ifp->if_capenable & IFCAP_HWCSUM)
		sc->ti_ifp->if_hwassist = TI_CSUM_FEATURES;
	else
		sc->ti_ifp->if_hwassist = 0;

	/* Set endianness before we access any non-PCI registers. */
#if 0 && BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24));
#else
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));
#endif

	/* Check the ROM failed bit to see if self-tests passed. */
	if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) {
		device_printf(sc->ti_dev, "board self-diagnostics failed!\n");
		return (ENODEV);
	}

	/* Halt the CPU. */
	TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT);

	/* Figure out the hardware revision. */
	switch (CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) {
	case TI_REV_TIGON_I:
		sc->ti_hwrev = TI_HWREV_TIGON;
		break;
	case TI_REV_TIGON_II:
		sc->ti_hwrev = TI_HWREV_TIGON_II;
		break;
	default:
		device_printf(sc->ti_dev, "unsupported chip revision\n");
		return (ENODEV);
	}

	/* Do special setup for Tigon 2: halt the second CPU, configure SRAM. */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K);
		TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS);
	}

	/*
	 * We don't have firmware source for the Tigon 1, so Tigon 1 boards
	 * can't do header splitting.
	 */
#ifdef TI_JUMBO_HDRSPLIT
	if (sc->ti_hwrev != TI_HWREV_TIGON)
		sc->ti_hdrsplit = 1;
	else
		device_printf(sc->ti_dev,
		    "can't do header splitting on a Tigon I board\n");
#endif /* TI_JUMBO_HDRSPLIT */

	/* Set up the PCI state register. */
	CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD);
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT);
	}

	/* Clear the read/write max DMA parameters. */
	TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA|
	    TI_PCISTATE_READ_MAXDMA));

	/* Get cache line size. */
	cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF;

	/*
	 * If the system has set enabled the PCI memory write
	 * and invalidate command in the command register, set
	 * the write max parameter accordingly. This is necessary
	 * to use MWI with the Tigon 2.
	 */
	if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) {
		switch (cacheline) {
		case 1:
		case 4:
		case 8:
		case 16:
		case 32:
		case 64:
			break;
		default:
			/* Disable PCI memory write and invalidate. */
			if (bootverbose)
				device_printf(sc->ti_dev, "cache line size %d"
				    " not supported; disabling PCI MWI\n",
				    cacheline);
			CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc,
			    TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN);
			break;
		}
	}

	/* NOTE(review): pci_writemax is never changed from 0 here. */
	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax);

	/* This sets the min dma param all the way up (0xff). */
	TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA);

	if (sc->ti_hdrsplit)
		hdrsplit = TI_OPMODE_JUMBO_HDRSPLIT;
	else
		hdrsplit = 0;

	/* Configure DMA variables. */
#if BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD |
	    TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD |
	    TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
	    TI_OPMODE_DONT_FRAG_JUMBO | hdrsplit);
#else /* BYTE_ORDER */
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA|
	    TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO|
	    TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB | hdrsplit);
#endif /* BYTE_ORDER */

	/*
	 * Only allow 1 DMA channel to be active at a time.
	 * I don't think this is a good idea, but without it
	 * the firmware racks up lots of nicDmaReadRingFull
	 * errors.  This is not compatible with hardware checksums.
	 */
	if (sc->ti_ifp->if_hwassist == 0)
		TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE);

	/* Recommended settings from Tigon manual. */
	CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W);
	CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W);

	if (ti_64bitslot_war(sc)) {
		device_printf(sc->ti_dev, "bios thinks we're in a 64 bit slot, "
		    "but we aren't");
		return (EINVAL);
	}

	return (0);
}
/*
 * Initialize the general information block and firmware, and
 * start the CPU(s) running.
 *
 * Fills in every ring control block (event, command, standard RX,
 * jumbo RX, mini RX, RX return, TX) in the host-resident ring data
 * area, points the NIC at it, loads the firmware, programs the
 * coalescing tuneables, enables interrupts and releases the CPU.
 * Called with the driver lock held.
 */
static int
ti_gibinit(struct ti_softc *sc)
{
	struct ti_rcb *rcb;
	int i;
	struct ifnet *ifp;
	uint32_t rdphys;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;
	/* Bus address of the ring data area; all RCB pointers are offsets
	 * from this base via TI_RD_OFF(). */
	rdphys = sc->ti_rdata_phys;

	/* Disable interrupts for now. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/*
	 * Tell the chip where to find the general information block.
	 * While this struct could go into >4GB memory, we allocate it in a
	 * single slab with the other descriptors, and those don't seem to
	 * support being located in a 64-bit region.
	 */
	CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0);
	CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, rdphys + TI_RD_OFF(ti_info));

	/* Load the firmware into SRAM. */
	ti_loadfw(sc);

	/* Set up the contents of the general info and ring control blocks. */

	/* Set up the event ring and producer pointer. */
	rcb = &sc->ti_rdata->ti_info.ti_ev_rcb;

	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_event_ring);
	rcb->ti_flags = 0;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) =
	    rdphys + TI_RD_OFF(ti_ev_prodidx_r);
	sc->ti_ev_prodidx.ti_idx = 0;
	CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
	sc->ti_ev_saved_considx = 0;

	/* Set up the command ring and producer mailbox. */
	rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb;

	/* The command ring lives in NIC memory, not host memory. */
	TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING);
	rcb->ti_flags = 0;
	rcb->ti_max_len = 0;
	for (i = 0; i < TI_CMD_RING_CNT; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
	}
	CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
	sc->ti_cmd_saved_prodidx = 0;

	/*
	 * Assign the address of the stats refresh buffer.
	 * We re-use the current stats buffer for this to
	 * conserve memory.
	 */
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) =
	    rdphys + TI_RD_OFF(ti_info.ti_stats);

	/* Set up the standard receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_std_ring);
	rcb->ti_max_len = TI_FRAMELEN;
	rcb->ti_flags = 0;
	if (sc->ti_ifp->if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		    TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/* Set up the jumbo receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_jumbo_ring);

	/* Private jumbos are one contiguous buffer; otherwise page-sized
	 * extended BDs are used (see ti_newbuf_jumbo). */
#ifdef TI_PRIVATE_JUMBOS
	rcb->ti_max_len = TI_JUMBO_FRAMELEN;
	rcb->ti_flags = 0;
#else
	rcb->ti_max_len = PAGE_SIZE;
	rcb->ti_flags = TI_RCB_FLAG_USE_EXT_RX_BD;
#endif
	if (sc->ti_ifp->if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		    TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/*
	 * Set up the mini ring. Only activated on the
	 * Tigon 2 but the slot in the config block is
	 * still there on the Tigon 1.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_mini_ring);
	rcb->ti_max_len = MHLEN - ETHER_ALIGN;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
	else
		rcb->ti_flags = 0;
	if (sc->ti_ifp->if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		    TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/*
	 * Set up the receive return ring.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_return_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_return_ring);
	rcb->ti_flags = 0;
	rcb->ti_max_len = TI_RETURN_RING_CNT;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) =
	    rdphys + TI_RD_OFF(ti_return_prodidx_r);

	/*
	 * Set up the tx ring. Note: for the Tigon 2, we have the option
	 * of putting the transmit ring in the host's address space and
	 * letting the chip DMA it instead of leaving the ring in the NIC's
	 * memory and accessing it through the shared memory region. We
	 * do this for the Tigon 2, but it doesn't work on the Tigon 1,
	 * so we have to revert to the shared memory scheme if we detect
	 * a Tigon 1 chip.
	 */
	CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
	bzero((char *)sc->ti_rdata->ti_tx_ring,
	    TI_TX_RING_CNT * sizeof(struct ti_tx_desc));
	rcb = &sc->ti_rdata->ti_info.ti_tx_rcb;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = 0;
	else
		rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
	if (sc->ti_ifp->if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		    TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	rcb->ti_max_len = TI_TX_RING_CNT;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE;
	else
		TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_tx_ring);
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) =
	    rdphys + TI_RD_OFF(ti_tx_considx_r);

	/* Flush the ring data area before the NIC starts reading it. */
	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Set up tuneables */
#if 0
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
		    (sc->ti_rx_coal_ticks / 10));
	else
#endif
		CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
	CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);

	/* Turn interrupts on. */
	CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	/* Start CPU. */
	TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP));

	return (0);
}
*/ 2157 TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP)); 2158 2159 return (0); 2160} 2161 2162static void 2163ti_rdata_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 2164{ 2165 struct ti_softc *sc; 2166 2167 sc = arg; 2168 if (error || nseg != 1) 2169 return; 2170 2171 /* 2172 * All of the Tigon data structures need to live at <4GB. This 2173 * cast is fine since busdma was told about this constraint. 2174 */ 2175 sc->ti_rdata_phys = segs[0].ds_addr; 2176 return; 2177} 2178 2179/* 2180 * Probe for a Tigon chip. Check the PCI vendor and device IDs 2181 * against our list and return its name if we find a match. 2182 */ 2183static int 2184ti_probe(device_t dev) 2185{ 2186 const struct ti_type *t; 2187 2188 t = ti_devs; 2189 2190 while (t->ti_name != NULL) { 2191 if ((pci_get_vendor(dev) == t->ti_vid) && 2192 (pci_get_device(dev) == t->ti_did)) { 2193 device_set_desc(dev, t->ti_name); 2194 return (BUS_PROBE_DEFAULT); 2195 } 2196 t++; 2197 } 2198 2199 return (ENXIO); 2200} 2201 2202static int 2203ti_attach(device_t dev) 2204{ 2205 struct ifnet *ifp; 2206 struct ti_softc *sc; 2207 int error = 0, rid; 2208 u_char eaddr[6]; 2209 2210 sc = device_get_softc(dev); 2211 sc->ti_unit = device_get_unit(dev); 2212 sc->ti_dev = dev; 2213 2214 mtx_init(&sc->ti_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 2215 MTX_DEF); 2216 callout_init_mtx(&sc->ti_watchdog, &sc->ti_mtx, 0); 2217 ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts); 2218 ifp = sc->ti_ifp = if_alloc(IFT_ETHER); 2219 if (ifp == NULL) { 2220 device_printf(dev, "can not if_alloc()\n"); 2221 error = ENOSPC; 2222 goto fail; 2223 } 2224 sc->ti_ifp->if_capabilities = IFCAP_HWCSUM | 2225 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2226 sc->ti_ifp->if_capenable = sc->ti_ifp->if_capabilities; 2227 2228 /* 2229 * Map control/status registers. 
 */
	pci_enable_busmaster(dev);

	rid = TI_PCI_LOMEM;
	sc->ti_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (sc->ti_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->ti_btag = rman_get_bustag(sc->ti_res);
	sc->ti_bhandle = rman_get_bushandle(sc->ti_res);

	/* Allocate interrupt */
	rid = 0;

	sc->ti_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->ti_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	if (ti_chipinit(sc)) {
		device_printf(dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

	/* Zero out the NIC's on-board SRAM. */
	ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000);

	/* Init again -- zeroing memory may have clobbered some registers. */
	if (ti_chipinit(sc)) {
		device_printf(dev, "chip initialization failed\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address from the EEPROM. Note: the manual states
	 * that the MAC address is at offset 0x8c, however the data is
	 * stored as two longwords (since that's how it's loaded into
	 * the NIC). This means the MAC address is actually preceded
	 * by two zero bytes. We need to skip over those.
	 */
	if (ti_read_eeprom(sc, eaddr,
	    TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate the general information block and ring buffers. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ti_parent_dmat) != 0) {
		device_printf(dev, "Failed to allocate parent dmat\n");
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Ring data tag: lowaddr is 32-bit because the Tigon's data
	 * structures must live below 4GB (see ti_rdata_cb above).
	 */
	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
	    PAGE_SIZE, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sizeof(struct ti_ring_data),	/* maxsize */
	    1,				/* nsegments */
	    sizeof(struct ti_ring_data),	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ti_rdata_dmat) != 0) {
		device_printf(dev, "Failed to allocate rdata dmat\n");
		error = ENOMEM;
		goto fail;
	}

	if (bus_dmamem_alloc(sc->ti_rdata_dmat, (void **)&sc->ti_rdata,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
	    &sc->ti_rdata_dmamap) != 0) {
		device_printf(dev, "Failed to allocate rdata memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* ti_rdata_cb stashes the segment's bus address in ti_rdata_phys. */
	if (bus_dmamap_load(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
	    sc->ti_rdata, sizeof(struct ti_ring_data),
	    ti_rdata_cb, sc, BUS_DMA_NOWAIT) != 0) {
		device_printf(dev, "Failed to load rdata segments\n");
		error = ENOMEM;
		goto fail;
	}

	bzero(sc->ti_rdata, sizeof(struct ti_ring_data));

	/* Try to allocate memory for jumbo buffers.
*/ 2340 if (ti_alloc_jumbo_mem(sc)) { 2341 device_printf(dev, "jumbo buffer allocation failed\n"); 2342 error = ENXIO; 2343 goto fail; 2344 } 2345 2346 if (bus_dma_tag_create(sc->ti_parent_dmat, /* parent */ 2347 1, 0, /* algnmnt, boundary */ 2348 BUS_SPACE_MAXADDR, /* lowaddr */ 2349 BUS_SPACE_MAXADDR, /* highaddr */ 2350 NULL, NULL, /* filter, filterarg */ 2351 MCLBYTES * TI_MAXTXSEGS,/* maxsize */ 2352 TI_MAXTXSEGS, /* nsegments */ 2353 MCLBYTES, /* maxsegsize */ 2354 0, /* flags */ 2355 NULL, NULL, /* lockfunc, lockarg */ 2356 &sc->ti_mbuftx_dmat) != 0) { 2357 device_printf(dev, "Failed to allocate rdata dmat\n"); 2358 error = ENOMEM; 2359 goto fail; 2360 } 2361 2362 if (bus_dma_tag_create(sc->ti_parent_dmat, /* parent */ 2363 1, 0, /* algnmnt, boundary */ 2364 BUS_SPACE_MAXADDR, /* lowaddr */ 2365 BUS_SPACE_MAXADDR, /* highaddr */ 2366 NULL, NULL, /* filter, filterarg */ 2367 MCLBYTES, /* maxsize */ 2368 1, /* nsegments */ 2369 MCLBYTES, /* maxsegsize */ 2370 0, /* flags */ 2371 NULL, NULL, /* lockfunc, lockarg */ 2372 &sc->ti_mbufrx_dmat) != 0) { 2373 device_printf(dev, "Failed to allocate rdata dmat\n"); 2374 error = ENOMEM; 2375 goto fail; 2376 } 2377 2378 if (ti_alloc_dmamaps(sc)) { 2379 device_printf(dev, "dma map creation failed\n"); 2380 error = ENXIO; 2381 goto fail; 2382 } 2383 2384 /* 2385 * We really need a better way to tell a 1000baseTX card 2386 * from a 1000baseSX one, since in theory there could be 2387 * OEMed 1000baseTX cards from lame vendors who aren't 2388 * clever enough to change the PCI ID. For the moment 2389 * though, the AceNIC is the only copper card available. 2390 */ 2391 if (pci_get_vendor(dev) == ALT_VENDORID && 2392 pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER) 2393 sc->ti_copper = 1; 2394 /* Ok, it's not the only copper card available. */ 2395 if (pci_get_vendor(dev) == NG_VENDORID && 2396 pci_get_device(dev) == NG_DEVICEID_GA620T) 2397 sc->ti_copper = 1; 2398 2399 /* Set default tuneable values. 
*/ 2400 sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC; 2401#if 0 2402 sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000; 2403#endif 2404 sc->ti_rx_coal_ticks = 170; 2405 sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500; 2406 sc->ti_rx_max_coal_bds = 64; 2407#if 0 2408 sc->ti_tx_max_coal_bds = 128; 2409#endif 2410 sc->ti_tx_max_coal_bds = 32; 2411 sc->ti_tx_buf_ratio = 21; 2412 2413 /* Set up ifnet structure */ 2414 ifp->if_softc = sc; 2415 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2416 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2417 ifp->if_ioctl = ti_ioctl; 2418 ifp->if_start = ti_start; 2419 ifp->if_init = ti_init; 2420 ifp->if_baudrate = 1000000000; 2421 ifp->if_snd.ifq_maxlen = TI_TX_RING_CNT - 1; 2422 2423 /* Set up ifmedia support. */ 2424 if (sc->ti_copper) { 2425 /* 2426 * Copper cards allow manual 10/100 mode selection, 2427 * but not manual 1000baseTX mode selection. Why? 2428 * Becuase currently there's no way to specify the 2429 * master/slave setting through the firmware interface, 2430 * so Alteon decided to just bag it and handle it 2431 * via autonegotiation. 2432 */ 2433 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 2434 ifmedia_add(&sc->ifmedia, 2435 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 2436 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); 2437 ifmedia_add(&sc->ifmedia, 2438 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 2439 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL); 2440 ifmedia_add(&sc->ifmedia, 2441 IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL); 2442 } else { 2443 /* Fiber cards don't support 10/100 modes. */ 2444 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 2445 ifmedia_add(&sc->ifmedia, 2446 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 2447 } 2448 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 2449 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO); 2450 2451 /* 2452 * We're assuming here that card initialization is a sequential 2453 * thing. 
 * If it isn't, multiple cards probing at the same time
 * could stomp on the list of softcs here.
 */

	/* Register the device */
	sc->dev = make_dev(&ti_cdevsw, sc->ti_unit, UID_ROOT, GID_OPERATOR,
	    0600, "ti%d", sc->ti_unit);
	sc->dev->si_drv1 = sc;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->ti_irq, INTR_TYPE_NET|INTR_MPSAFE,
	    NULL, ti_intr, sc, &sc->ti_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

fail:
	/* On error, ti_detach() frees everything allocated above. */
	if (error)
		ti_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
ti_detach(device_t dev)
{
	struct ti_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	if (sc->dev)
		destroy_dev(sc->dev);
	KASSERT(mtx_initialized(&sc->ti_mtx), ("ti mutex not initialized"));
	ifp = sc->ti_ifp;
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		TI_LOCK(sc);
		ti_stop(sc);
		TI_UNLOCK(sc);
	}

	/* These should only be active if attach succeeded */
	callout_drain(&sc->ti_watchdog);
	bus_generic_detach(dev);
	ti_free_dmamaps(sc);
	ifmedia_removeall(&sc->ifmedia);

	/* Each resource is checked before release; attach may have failed. */
#ifdef TI_PRIVATE_JUMBOS
	if (sc->ti_cdata.ti_jumbo_buf)
		bus_dmamem_free(sc->ti_jumbo_dmat, sc->ti_cdata.ti_jumbo_buf,
		    sc->ti_jumbo_dmamap);
#endif
	if (sc->ti_jumbo_dmat)
		bus_dma_tag_destroy(sc->ti_jumbo_dmat);
	if (sc->ti_mbuftx_dmat)
		bus_dma_tag_destroy(sc->ti_mbuftx_dmat);
	if (sc->ti_mbufrx_dmat)
		bus_dma_tag_destroy(sc->ti_mbufrx_dmat);
	if (sc->ti_rdata)
		bus_dmamem_free(sc->ti_rdata_dmat, sc->ti_rdata,
		    sc->ti_rdata_dmamap);
	if (sc->ti_rdata_dmat)
		bus_dma_tag_destroy(sc->ti_rdata_dmat);
	if (sc->ti_parent_dmat)
		bus_dma_tag_destroy(sc->ti_parent_dmat);
	if (sc->ti_intrhand)
		bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
	if (sc->ti_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
	if (sc->ti_res) {
		bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM,
		    sc->ti_res);
	}
	if (ifp)
		if_free(ifp);

	mtx_destroy(&sc->ti_mtx);

	return (0);
}

#ifdef TI_JUMBO_HDRSPLIT
/*
 * If hdr_len is 0, that means that header splitting wasn't done on
 * this packet for some reason. The two most likely reasons are that
 * the protocol isn't a supported protocol for splitting, or this
 * packet had a fragment offset that wasn't 0.
2554 * 2555 * The header length, if it is non-zero, will always be the length of 2556 * the headers on the packet, but that length could be longer than the 2557 * first mbuf. So we take the minimum of the two as the actual 2558 * length. 2559 */ 2560static __inline void 2561ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len, int idx) 2562{ 2563 int i = 0; 2564 int lengths[4] = {0, 0, 0, 0}; 2565 struct mbuf *m, *mp; 2566 2567 if (hdr_len != 0) 2568 top->m_len = min(hdr_len, top->m_len); 2569 pkt_len -= top->m_len; 2570 lengths[i++] = top->m_len; 2571 2572 mp = top; 2573 for (m = top->m_next; m && pkt_len; m = m->m_next) { 2574 m->m_len = m->m_ext.ext_size = min(m->m_len, pkt_len); 2575 pkt_len -= m->m_len; 2576 lengths[i++] = m->m_len; 2577 mp = m; 2578 } 2579 2580#if 0 2581 if (hdr_len != 0) 2582 printf("got split packet: "); 2583 else 2584 printf("got non-split packet: "); 2585 2586 printf("%d,%d,%d,%d = %d\n", lengths[0], 2587 lengths[1], lengths[2], lengths[3], 2588 lengths[0] + lengths[1] + lengths[2] + 2589 lengths[3]); 2590#endif 2591 2592 if (pkt_len) 2593 panic("header splitting didn't"); 2594 2595 if (m) { 2596 m_freem(m); 2597 mp->m_next = NULL; 2598 2599 } 2600 if (mp->m_next != NULL) 2601 panic("ti_hdr_split: last mbuf in chain should be null"); 2602} 2603#endif /* TI_JUMBO_HDRSPLIT */ 2604 2605/* 2606 * Frame reception handling. This is called if there's a frame 2607 * on the receive return list. 
2608 * 2609 * Note: we have to be able to handle three possibilities here: 2610 * 1) the frame is from the mini receive ring (can only happen) 2611 * on Tigon 2 boards) 2612 * 2) the frame is from the jumbo recieve ring 2613 * 3) the frame is from the standard receive ring 2614 */ 2615 2616static void 2617ti_rxeof(struct ti_softc *sc) 2618{ 2619 bus_dmamap_t map; 2620 struct ifnet *ifp; 2621 struct ti_cmd_desc cmd; 2622 2623 TI_LOCK_ASSERT(sc); 2624 2625 ifp = sc->ti_ifp; 2626 2627 while (sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) { 2628 struct ti_rx_desc *cur_rx; 2629 u_int32_t rxidx; 2630 struct mbuf *m = NULL; 2631 u_int16_t vlan_tag = 0; 2632 int have_tag = 0; 2633 2634 cur_rx = 2635 &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx]; 2636 rxidx = cur_rx->ti_idx; 2637 TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT); 2638 2639 if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) { 2640 have_tag = 1; 2641 vlan_tag = cur_rx->ti_vlan_tag & 0xfff; 2642 } 2643 2644 if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) { 2645 2646 TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT); 2647 m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx]; 2648 sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL; 2649 map = sc->ti_cdata.ti_rx_jumbo_maps[rxidx]; 2650 bus_dmamap_sync(sc->ti_jumbo_dmat, map, 2651 BUS_DMASYNC_POSTREAD); 2652 bus_dmamap_unload(sc->ti_jumbo_dmat, map); 2653 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 2654 ifp->if_ierrors++; 2655 ti_newbuf_jumbo(sc, sc->ti_jumbo, m); 2656 continue; 2657 } 2658 if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) { 2659 ifp->if_ierrors++; 2660 ti_newbuf_jumbo(sc, sc->ti_jumbo, m); 2661 continue; 2662 } 2663#ifdef TI_PRIVATE_JUMBOS 2664 m->m_len = cur_rx->ti_len; 2665#else /* TI_PRIVATE_JUMBOS */ 2666#ifdef TI_JUMBO_HDRSPLIT 2667 if (sc->ti_hdrsplit) 2668 ti_hdr_split(m, TI_HOSTADDR(cur_rx->ti_addr), 2669 cur_rx->ti_len, rxidx); 2670 else 2671#endif /* TI_JUMBO_HDRSPLIT */ 2672 m_adj(m, cur_rx->ti_len - m->m_pkthdr.len); 2673#endif /* 
TI_PRIVATE_JUMBOS */ 2674 } else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) { 2675 TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT); 2676 m = sc->ti_cdata.ti_rx_mini_chain[rxidx]; 2677 sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL; 2678 map = sc->ti_cdata.ti_rx_mini_maps[rxidx]; 2679 bus_dmamap_sync(sc->ti_mbufrx_dmat, map, 2680 BUS_DMASYNC_POSTREAD); 2681 bus_dmamap_unload(sc->ti_mbufrx_dmat, map); 2682 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 2683 ifp->if_ierrors++; 2684 ti_newbuf_mini(sc, sc->ti_mini, m); 2685 continue; 2686 } 2687 if (ti_newbuf_mini(sc, sc->ti_mini, NULL) == ENOBUFS) { 2688 ifp->if_ierrors++; 2689 ti_newbuf_mini(sc, sc->ti_mini, m); 2690 continue; 2691 } 2692 m->m_len = cur_rx->ti_len; 2693 } else { 2694 TI_INC(sc->ti_std, TI_STD_RX_RING_CNT); 2695 m = sc->ti_cdata.ti_rx_std_chain[rxidx]; 2696 sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL; 2697 map = sc->ti_cdata.ti_rx_std_maps[rxidx]; 2698 bus_dmamap_sync(sc->ti_mbufrx_dmat, map, 2699 BUS_DMASYNC_POSTREAD); 2700 bus_dmamap_unload(sc->ti_mbufrx_dmat, map); 2701 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 2702 ifp->if_ierrors++; 2703 ti_newbuf_std(sc, sc->ti_std, m); 2704 continue; 2705 } 2706 if (ti_newbuf_std(sc, sc->ti_std, NULL) == ENOBUFS) { 2707 ifp->if_ierrors++; 2708 ti_newbuf_std(sc, sc->ti_std, m); 2709 continue; 2710 } 2711 m->m_len = cur_rx->ti_len; 2712 } 2713 2714 m->m_pkthdr.len = cur_rx->ti_len; 2715 ifp->if_ipackets++; 2716 m->m_pkthdr.rcvif = ifp; 2717 2718 if (ifp->if_hwassist) { 2719 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | 2720 CSUM_DATA_VALID; 2721 if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0) 2722 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2723 m->m_pkthdr.csum_data = cur_rx->ti_tcp_udp_cksum; 2724 } 2725 2726 /* 2727 * If we received a packet with a vlan tag, 2728 * tag it before passing the packet upward. 
2729 */ 2730 if (have_tag) { 2731 m->m_pkthdr.ether_vtag = vlan_tag; 2732 m->m_flags |= M_VLANTAG; 2733 } 2734 TI_UNLOCK(sc); 2735 (*ifp->if_input)(ifp, m); 2736 TI_LOCK(sc); 2737 } 2738 2739 /* Only necessary on the Tigon 1. */ 2740 if (sc->ti_hwrev == TI_HWREV_TIGON) 2741 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 2742 sc->ti_rx_saved_considx); 2743 2744 TI_UPDATE_STDPROD(sc, sc->ti_std); 2745 TI_UPDATE_MINIPROD(sc, sc->ti_mini); 2746 TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo); 2747} 2748 2749static void 2750ti_txeof(struct ti_softc *sc) 2751{ 2752 struct ti_txdesc *txd; 2753 struct ti_tx_desc txdesc; 2754 struct ti_tx_desc *cur_tx = NULL; 2755 struct ifnet *ifp; 2756 int idx; 2757 2758 ifp = sc->ti_ifp; 2759 2760 txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq); 2761 if (txd == NULL) 2762 return; 2763 /* 2764 * Go through our tx ring and free mbufs for those 2765 * frames that have been sent. 2766 */ 2767 for (idx = sc->ti_tx_saved_considx; idx != sc->ti_tx_considx.ti_idx; 2768 TI_INC(idx, TI_TX_RING_CNT)) { 2769 if (sc->ti_hwrev == TI_HWREV_TIGON) { 2770 ti_mem_read(sc, TI_TX_RING_BASE + idx * sizeof(txdesc), 2771 sizeof(txdesc), &txdesc); 2772 cur_tx = &txdesc; 2773 } else 2774 cur_tx = &sc->ti_rdata->ti_tx_ring[idx]; 2775 sc->ti_txcnt--; 2776 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2777 if ((cur_tx->ti_flags & TI_BDFLAG_END) == 0) 2778 continue; 2779 bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap, 2780 BUS_DMASYNC_POSTWRITE); 2781 bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap); 2782 2783 ifp->if_opackets++; 2784 m_freem(txd->tx_m); 2785 txd->tx_m = NULL; 2786 STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txbusyq, tx_q); 2787 STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txfreeq, txd, tx_q); 2788 txd = STAILQ_FIRST(&sc->ti_cdata.ti_txbusyq); 2789 } 2790 sc->ti_tx_saved_considx = idx; 2791 2792 sc->ti_timer = sc->ti_txcnt > 0 ? 
	    5 : 0;
}

/*
 * Interrupt handler: ack the interrupt via the mailbox, service the RX
 * return ring and TX ring, process firmware events, then re-enable
 * interrupts and restart transmission if frames are queued.
 */
static void
ti_intr(void *xsc)
{
	struct ti_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	TI_LOCK(sc);
	ifp = sc->ti_ifp;

/*#ifdef notdef*/
	/* Avoid this for now -- checking this register is expensive. */
	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) {
		TI_UNLOCK(sc);
		return;
	}
/*#endif*/

	/* Ack interrupt and stop others from occuring. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/* Check RX return ring producer/consumer */
		ti_rxeof(sc);

		/* Check TX ring producer/consumer */
		ti_txeof(sc);
	}

	ti_handle_events(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    ifp->if_snd.ifq_head != NULL)
		ti_start_locked(ifp);

	TI_UNLOCK(sc);
}

/*
 * Fold the NIC-maintained collision counters into if_collisions.
 * The counters in the shared stats block are cumulative, so the
 * current if_collisions value is subtracted to add only the delta.
 */
static void
ti_stats_update(struct ti_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ti_ifp;

	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
	    BUS_DMASYNC_POSTREAD);

	ifp->if_collisions +=
	    (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames +
	    sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames +
	    sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions +
	    sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) -
	    ifp->if_collisions;

	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
	    BUS_DMASYNC_PREREAD);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
ti_encap(struct ti_softc *sc, struct mbuf **m_head)
{
	struct ti_txdesc *txd;
	struct ti_tx_desc *f;
	struct ti_tx_desc txdesc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[TI_MAXTXSEGS];
	u_int16_t csum_flags;
	int error, frag, i, nseg;

	if ((txd = STAILQ_FIRST(&sc->ti_cdata.ti_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc->ti_mbuftx_dmat, txd->tx_dmamap,
	    *m_head, txsegs, &nseg, 0);
	if (error == EFBIG) {
		/* Too many segments: defragment the chain and retry once. */
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->ti_mbuftx_dmat,
		    txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
		if (error) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nseg == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Not enough free descriptors for the whole chain; try later. */
	if (sc->ti_txcnt + nseg >= TI_TX_RING_CNT) {
		bus_dmamap_unload(sc->ti_mbuftx_dmat, txd->tx_dmamap);
		return (ENOBUFS);
	}

	m = *m_head;
	csum_flags = 0;
	if (m->m_pkthdr.csum_flags) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TI_BDFLAG_IP_CKSUM;
		if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM;
		if (m->m_flags & M_LASTFRAG)
			csum_flags |= TI_BDFLAG_IP_FRAG_END;
		else if (m->m_flags & M_FRAG)
			csum_flags |= TI_BDFLAG_IP_FRAG;
	}

	bus_dmamap_sync(sc->ti_mbuftx_dmat, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Fill one descriptor per DMA segment.  On the Tigon 1 the ring
	 * lives in NIC memory, so each descriptor is staged in txdesc and
	 * copied in with ti_mem_write(); on the Tigon 2 the host-memory
	 * ring is written directly.
	 */
	frag = sc->ti_tx_saved_prodidx;
	for (i = 0; i < nseg; i++) {
		if (sc->ti_hwrev == TI_HWREV_TIGON) {
			bzero(&txdesc, sizeof(txdesc));
			f = &txdesc;
		} else
			f = &sc->ti_rdata->ti_tx_ring[frag];
		ti_hostaddr64(&f->ti_addr, txsegs[i].ds_addr);
		f->ti_len = txsegs[i].ds_len;
		f->ti_flags = csum_flags;
		if (m->m_flags & M_VLANTAG) {
			f->ti_flags |= TI_BDFLAG_VLAN_TAG;
			f->ti_vlan_tag = m->m_pkthdr.ether_vtag & 0xfff;
		} else {
			f->ti_vlan_tag = 0;
		}

		if (sc->ti_hwrev == TI_HWREV_TIGON)
			ti_mem_write(sc, TI_TX_RING_BASE + frag *
			    sizeof(txdesc), sizeof(txdesc), &txdesc);
		TI_INC(frag, TI_TX_RING_CNT);
	}

	sc->ti_tx_saved_prodidx = frag;
	/* set TI_BDFLAG_END on the last descriptor */
	frag = (frag + TI_TX_RING_CNT - 1) % TI_TX_RING_CNT;
	if (sc->ti_hwrev == TI_HWREV_TIGON) {
		txdesc.ti_flags |= TI_BDFLAG_END;
		ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc),
		    sizeof(txdesc), &txdesc);
	} else
		sc->ti_rdata->ti_tx_ring[frag].ti_flags |= TI_BDFLAG_END;

	STAILQ_REMOVE_HEAD(&sc->ti_cdata.ti_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->ti_cdata.ti_txbusyq, txd, tx_q);
	txd->tx_m = m;
	sc->ti_txcnt += nseg;

	return (0);
}

/* Unlocked if_start entry point; takes the softc lock and dispatches. */
static void
ti_start(struct ifnet *ifp)
{
	struct ti_softc *sc;

	sc = ifp->if_softc;
	TI_LOCK(sc);
	ti_start_locked(ifp);
	TI_UNLOCK(sc);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
ti_start_locked(struct ifnet *ifp)
{
	struct ti_softc *sc;
	struct mbuf *m_head = NULL;
	int enq = 0;

	sc = ifp->if_softc;

	for (; ifp->if_snd.ifq_head != NULL &&
	    sc->ti_txcnt < (TI_TX_RING_CNT - 16);) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * safety overkill. If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((TI_TX_RING_CNT - sc->ti_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				IF_PREPEND(&ifp->if_snd, m_head);
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				break;
			}
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (ti_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, sc->ti_tx_saved_prodidx);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->ti_timer = 5;
	}
}

/* Unlocked if_init entry point; takes the softc lock and dispatches. */
static void
ti_init(void *xsc)
{
	struct ti_softc *sc;

	sc = xsc;
	TI_LOCK(sc);
	ti_init_locked(sc);
	TI_UNLOCK(sc);
}

/*
 * Stop the chip and reload the general information block, rings and
 * firmware.
 *
 * NOTE(review): nothing in this function calls ti_init2() below;
 * presumably it runs later, once the firmware signals it is up --
 * confirm against the event handler.
 */
static void
ti_init_locked(void *xsc)
{
	struct ti_softc *sc = xsc;

	/* Cancel pending I/O and flush buffers. */
	ti_stop(sc);

	/* Init the gen info block, ring control blocks and firmware. */
	if (ti_gibinit(sc)) {
		device_printf(sc->ti_dev, "initialization failure\n");
		return;
	}
}

/*
 * Second-stage initialization: issue firmware commands to program the
 * MTU, MAC address, promiscuous mode and multicast filter, set up the
 * RX/TX rings, and bring the interface to RUNNING.
 */
static void ti_init2(struct ti_softc *sc)
{
	struct ti_cmd_desc cmd;
	struct ifnet *ifp;
	u_int8_t *ea;
	struct ifmedia *ifm;
	int tmp;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/* Specify MTU and interface index. */
	CSR_WRITE_4(sc, TI_GCR_IFINDEX, sc->ti_unit);
	CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
	TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);

	/* Load our MAC address. */
	ea = IF_LLADDR(sc->ti_ifp);
	CSR_WRITE_4(sc, TI_GCR_PAR0, (ea[0] << 8) | ea[1]);
	CSR_WRITE_4(sc, TI_GCR_PAR1,
	    (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5]);
	TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0);
	} else {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
	}

	/* Program multicast filter. */
	ti_setmulti(sc);

	/*
	 * If this is a Tigon 1, we should tell the
	 * firmware to use software packet filtering.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON) {
		TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);
	}

	/* Init RX ring. */
	ti_init_rx_ring_std(sc);

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		ti_init_rx_ring_jumbo(sc);

	/*
	 * If this is a Tigon 2, we can also configure the
	 * mini ring.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II)
		ti_init_rx_ring_mini(sc);

	CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
	sc->ti_rx_saved_considx = 0;

	/* Init TX ring. */
	ti_init_tx_ring(sc);

	/* Tell firmware we're alive. */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);

	/* Enable host interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc);

	/*
	 * Make sure to set media properly. We have to do this
	 * here since we have to issue commands in order to set
	 * the link negotiation and we can't issue commands until
	 * the firmware is running.
	 */
	ifm = &sc->ifmedia;
	tmp = ifm->ifm_media;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;
	ti_ifmedia_upd(ifp);
	ifm->ifm_media = tmp;
}

/*
 * Set media options.
 */
static int
ti_ifmedia_upd(struct ifnet *ifp)
{
	struct ti_softc *sc;
	struct ifmedia *ifm;
	struct ti_cmd_desc cmd;
	u_int32_t flowctl;

	sc = ifp->if_softc;
	ifm = &sc->ifmedia;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	flowctl = 0;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/*
		 * Transmit flow control doesn't work on the Tigon 1.
		 */
		flowctl = TI_GLNK_RX_FLOWCTL_Y;

		/*
		 * Transmit flow control can also cause problems on the
		 * Tigon 2, apparantly with both the copper and fiber
		 * boards. The symptom is that the interface will just
		 * hang. This was reproduced with Alteon 180 switches.
		 */
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_GLNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    TI_GLNK_FULL_DUPLEX| flowctl |
		    TI_GLNK_AUTONEGENB|TI_GLNK_ENB);

		flowctl = TI_LNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_LNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
		    TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX| flowctl |
		    TI_LNK_AUTONEGENB|TI_LNK_ENB);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_BOTH, 0);
		break;
	case IFM_1000_SX:
	case IFM_1000_T:
		/* Gigabit only: program the gigabit link register. */
		flowctl = TI_GLNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_GLNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    flowctl |TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, 0);
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
		break;
	case IFM_100_FX:
	case IFM_10_FL:
	case IFM_100_TX:
	case IFM_10_T:
		/* 10/100 only: disable the gigabit link register. */
		flowctl = TI_LNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_LNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF|flowctl);
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
		}
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_10_100, 0);
		break;
	}

	return (0);
}

/*
 * Report current media status.
 */
static void
ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ti_softc *sc;
	u_int32_t media = 0;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Link down: report no active media. */
	if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
		if (sc->ti_copper)
			ifmr->ifm_active |= IFM_1000_T;
		else
			ifmr->ifm_active |= IFM_1000_SX;
		if (media & TI_GLNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	} else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
		if (sc->ti_copper) {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_TX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_T;
		} else {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_FX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_FL;
		}
		if (media & TI_LNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		if (media & TI_LNK_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
	}
}

/* Network-interface ioctl handler (if_ioctl). */
static int
ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ti_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int mask, error = 0;
	struct ti_cmd_desc cmd;

	switch (command) {
	case SIOCSIFMTU:
		TI_LOCK(sc);
		if (ifr->ifr_mtu > TI_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			ti_init_locked(sc);
		}
		TI_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		TI_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
instead of reinitializing the entire NIC. Doing 3333 * a full re-init means reloading the firmware and 3334 * waiting for it to start up, which may take a 3335 * second or two. 3336 */ 3337 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 3338 ifp->if_flags & IFF_PROMISC && 3339 !(sc->ti_if_flags & IFF_PROMISC)) { 3340 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, 3341 TI_CMD_CODE_PROMISC_ENB, 0); 3342 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 3343 !(ifp->if_flags & IFF_PROMISC) && 3344 sc->ti_if_flags & IFF_PROMISC) { 3345 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, 3346 TI_CMD_CODE_PROMISC_DIS, 0); 3347 } else 3348 ti_init_locked(sc); 3349 } else { 3350 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 3351 ti_stop(sc); 3352 } 3353 } 3354 sc->ti_if_flags = ifp->if_flags; 3355 TI_UNLOCK(sc); 3356 break; 3357 case SIOCADDMULTI: 3358 case SIOCDELMULTI: 3359 TI_LOCK(sc); 3360 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3361 ti_setmulti(sc); 3362 TI_UNLOCK(sc); 3363 break; 3364 case SIOCSIFMEDIA: 3365 case SIOCGIFMEDIA: 3366 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 3367 break; 3368 case SIOCSIFCAP: 3369 TI_LOCK(sc); 3370 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3371 if (mask & IFCAP_HWCSUM) { 3372 if (IFCAP_HWCSUM & ifp->if_capenable) 3373 ifp->if_capenable &= ~IFCAP_HWCSUM; 3374 else 3375 ifp->if_capenable |= IFCAP_HWCSUM; 3376 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3377 ti_init_locked(sc); 3378 } 3379 TI_UNLOCK(sc); 3380 break; 3381 default: 3382 error = ether_ioctl(ifp, command, data); 3383 break; 3384 } 3385 3386 return (error); 3387} 3388 3389static int 3390ti_open(struct cdev *dev, int flags, int fmt, struct thread *td) 3391{ 3392 struct ti_softc *sc; 3393 3394 sc = dev->si_drv1; 3395 if (sc == NULL) 3396 return (ENODEV); 3397 3398 TI_LOCK(sc); 3399 sc->ti_flags |= TI_FLAG_DEBUGING; 3400 TI_UNLOCK(sc); 3401 3402 return (0); 3403} 3404 3405static int 3406ti_close(struct cdev *dev, int flag, int fmt, struct thread *td) 3407{ 3408 struct ti_softc *sc; 3409 3410 sc = 
dev->si_drv1; 3411 if (sc == NULL) 3412 return (ENODEV); 3413 3414 TI_LOCK(sc); 3415 sc->ti_flags &= ~TI_FLAG_DEBUGING; 3416 TI_UNLOCK(sc); 3417 3418 return (0); 3419} 3420 3421/* 3422 * This ioctl routine goes along with the Tigon character device. 3423 */ 3424static int 3425ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 3426 struct thread *td) 3427{ 3428 int error; 3429 struct ti_softc *sc; 3430 3431 sc = dev->si_drv1; 3432 if (sc == NULL) 3433 return (ENODEV); 3434 3435 error = 0; 3436 3437 switch (cmd) { 3438 case TIIOCGETSTATS: 3439 { 3440 struct ti_stats *outstats; 3441 3442 outstats = (struct ti_stats *)addr; 3443 3444 TI_LOCK(sc); 3445 bcopy(&sc->ti_rdata->ti_info.ti_stats, outstats, 3446 sizeof(struct ti_stats)); 3447 TI_UNLOCK(sc); 3448 break; 3449 } 3450 case TIIOCGETPARAMS: 3451 { 3452 struct ti_params *params; 3453 3454 params = (struct ti_params *)addr; 3455 3456 TI_LOCK(sc); 3457 params->ti_stat_ticks = sc->ti_stat_ticks; 3458 params->ti_rx_coal_ticks = sc->ti_rx_coal_ticks; 3459 params->ti_tx_coal_ticks = sc->ti_tx_coal_ticks; 3460 params->ti_rx_max_coal_bds = sc->ti_rx_max_coal_bds; 3461 params->ti_tx_max_coal_bds = sc->ti_tx_max_coal_bds; 3462 params->ti_tx_buf_ratio = sc->ti_tx_buf_ratio; 3463 params->param_mask = TI_PARAM_ALL; 3464 TI_UNLOCK(sc); 3465 3466 error = 0; 3467 3468 break; 3469 } 3470 case TIIOCSETPARAMS: 3471 { 3472 struct ti_params *params; 3473 3474 params = (struct ti_params *)addr; 3475 3476 TI_LOCK(sc); 3477 if (params->param_mask & TI_PARAM_STAT_TICKS) { 3478 sc->ti_stat_ticks = params->ti_stat_ticks; 3479 CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks); 3480 } 3481 3482 if (params->param_mask & TI_PARAM_RX_COAL_TICKS) { 3483 sc->ti_rx_coal_ticks = params->ti_rx_coal_ticks; 3484 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, 3485 sc->ti_rx_coal_ticks); 3486 } 3487 3488 if (params->param_mask & TI_PARAM_TX_COAL_TICKS) { 3489 sc->ti_tx_coal_ticks = params->ti_tx_coal_ticks; 3490 CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, 
3491 sc->ti_tx_coal_ticks); 3492 } 3493 3494 if (params->param_mask & TI_PARAM_RX_COAL_BDS) { 3495 sc->ti_rx_max_coal_bds = params->ti_rx_max_coal_bds; 3496 CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, 3497 sc->ti_rx_max_coal_bds); 3498 } 3499 3500 if (params->param_mask & TI_PARAM_TX_COAL_BDS) { 3501 sc->ti_tx_max_coal_bds = params->ti_tx_max_coal_bds; 3502 CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, 3503 sc->ti_tx_max_coal_bds); 3504 } 3505 3506 if (params->param_mask & TI_PARAM_TX_BUF_RATIO) { 3507 sc->ti_tx_buf_ratio = params->ti_tx_buf_ratio; 3508 CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, 3509 sc->ti_tx_buf_ratio); 3510 } 3511 TI_UNLOCK(sc); 3512 3513 error = 0; 3514 3515 break; 3516 } 3517 case TIIOCSETTRACE: { 3518 ti_trace_type trace_type; 3519 3520 trace_type = *(ti_trace_type *)addr; 3521 3522 /* 3523 * Set tracing to whatever the user asked for. Setting 3524 * this register to 0 should have the effect of disabling 3525 * tracing. 3526 */ 3527 CSR_WRITE_4(sc, TI_GCR_NIC_TRACING, trace_type); 3528 3529 error = 0; 3530 3531 break; 3532 } 3533 case TIIOCGETTRACE: { 3534 struct ti_trace_buf *trace_buf; 3535 u_int32_t trace_start, cur_trace_ptr, trace_len; 3536 3537 trace_buf = (struct ti_trace_buf *)addr; 3538 3539 TI_LOCK(sc); 3540 trace_start = CSR_READ_4(sc, TI_GCR_NICTRACE_START); 3541 cur_trace_ptr = CSR_READ_4(sc, TI_GCR_NICTRACE_PTR); 3542 trace_len = CSR_READ_4(sc, TI_GCR_NICTRACE_LEN); 3543 3544#if 0 3545 if_printf(sc->ti_ifp, "trace_start = %#x, cur_trace_ptr = %#x, " 3546 "trace_len = %d\n", trace_start, 3547 cur_trace_ptr, trace_len); 3548 if_printf(sc->ti_ifp, "trace_buf->buf_len = %d\n", 3549 trace_buf->buf_len); 3550#endif 3551 3552 error = ti_copy_mem(sc, trace_start, min(trace_len, 3553 trace_buf->buf_len), 3554 (caddr_t)trace_buf->buf, 1, 1); 3555 3556 if (error == 0) { 3557 trace_buf->fill_len = min(trace_len, 3558 trace_buf->buf_len); 3559 if (cur_trace_ptr < trace_start) 3560 trace_buf->cur_trace_ptr = 3561 trace_start - cur_trace_ptr; 3562 else 3563 
trace_buf->cur_trace_ptr = 3564 cur_trace_ptr - trace_start; 3565 } else 3566 trace_buf->fill_len = 0; 3567 TI_UNLOCK(sc); 3568 3569 break; 3570 } 3571 3572 /* 3573 * For debugging, five ioctls are needed: 3574 * ALT_ATTACH 3575 * ALT_READ_TG_REG 3576 * ALT_WRITE_TG_REG 3577 * ALT_READ_TG_MEM 3578 * ALT_WRITE_TG_MEM 3579 */ 3580 case ALT_ATTACH: 3581 /* 3582 * From what I can tell, Alteon's Solaris Tigon driver 3583 * only has one character device, so you have to attach 3584 * to the Tigon board you're interested in. This seems 3585 * like a not-so-good way to do things, since unless you 3586 * subsequently specify the unit number of the device 3587 * you're interested in every ioctl, you'll only be 3588 * able to debug one board at a time. 3589 */ 3590 error = 0; 3591 break; 3592 case ALT_READ_TG_MEM: 3593 case ALT_WRITE_TG_MEM: 3594 { 3595 struct tg_mem *mem_param; 3596 u_int32_t sram_end, scratch_end; 3597 3598 mem_param = (struct tg_mem *)addr; 3599 3600 if (sc->ti_hwrev == TI_HWREV_TIGON) { 3601 sram_end = TI_END_SRAM_I; 3602 scratch_end = TI_END_SCRATCH_I; 3603 } else { 3604 sram_end = TI_END_SRAM_II; 3605 scratch_end = TI_END_SCRATCH_II; 3606 } 3607 3608 /* 3609 * For now, we'll only handle accessing regular SRAM, 3610 * nothing else. 3611 */ 3612 TI_LOCK(sc); 3613 if ((mem_param->tgAddr >= TI_BEG_SRAM) 3614 && ((mem_param->tgAddr + mem_param->len) <= sram_end)) { 3615 /* 3616 * In this instance, we always copy to/from user 3617 * space, so the user space argument is set to 1. 3618 */ 3619 error = ti_copy_mem(sc, mem_param->tgAddr, 3620 mem_param->len, 3621 mem_param->userAddr, 1, 3622 (cmd == ALT_READ_TG_MEM) ? 1 : 0); 3623 } else if ((mem_param->tgAddr >= TI_BEG_SCRATCH) 3624 && (mem_param->tgAddr <= scratch_end)) { 3625 error = ti_copy_scratch(sc, mem_param->tgAddr, 3626 mem_param->len, 3627 mem_param->userAddr, 1, 3628 (cmd == ALT_READ_TG_MEM) ? 
3629 1 : 0, TI_PROCESSOR_A); 3630 } else if ((mem_param->tgAddr >= TI_BEG_SCRATCH_B_DEBUG) 3631 && (mem_param->tgAddr <= TI_BEG_SCRATCH_B_DEBUG)) { 3632 if (sc->ti_hwrev == TI_HWREV_TIGON) { 3633 if_printf(sc->ti_ifp, 3634 "invalid memory range for Tigon I\n"); 3635 error = EINVAL; 3636 break; 3637 } 3638 error = ti_copy_scratch(sc, mem_param->tgAddr - 3639 TI_SCRATCH_DEBUG_OFF, 3640 mem_param->len, 3641 mem_param->userAddr, 1, 3642 (cmd == ALT_READ_TG_MEM) ? 3643 1 : 0, TI_PROCESSOR_B); 3644 } else { 3645 if_printf(sc->ti_ifp, "memory address %#x len %d is " 3646 "out of supported range\n", 3647 mem_param->tgAddr, mem_param->len); 3648 error = EINVAL; 3649 } 3650 TI_UNLOCK(sc); 3651 3652 break; 3653 } 3654 case ALT_READ_TG_REG: 3655 case ALT_WRITE_TG_REG: 3656 { 3657 struct tg_reg *regs; 3658 u_int32_t tmpval; 3659 3660 regs = (struct tg_reg *)addr; 3661 3662 /* 3663 * Make sure the address in question isn't out of range. 3664 */ 3665 if (regs->addr > TI_REG_MAX) { 3666 error = EINVAL; 3667 break; 3668 } 3669 TI_LOCK(sc); 3670 if (cmd == ALT_READ_TG_REG) { 3671 bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle, 3672 regs->addr, &tmpval, 1); 3673 regs->data = ntohl(tmpval); 3674#if 0 3675 if ((regs->addr == TI_CPU_STATE) 3676 || (regs->addr == TI_CPU_CTL_B)) { 3677 if_printf(sc->ti_ifp, "register %#x = %#x\n", 3678 regs->addr, tmpval); 3679 } 3680#endif 3681 } else { 3682 tmpval = htonl(regs->data); 3683 bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, 3684 regs->addr, &tmpval, 1); 3685 } 3686 TI_UNLOCK(sc); 3687 3688 break; 3689 } 3690 default: 3691 error = ENOTTY; 3692 break; 3693 } 3694 return (error); 3695} 3696 3697static void 3698ti_watchdog(void *arg) 3699{ 3700 struct ti_softc *sc; 3701 struct ifnet *ifp; 3702 3703 sc = arg; 3704 TI_LOCK_ASSERT(sc); 3705 callout_reset(&sc->ti_watchdog, hz, ti_watchdog, sc); 3706 if (sc->ti_timer == 0 || --sc->ti_timer > 0) 3707 return; 3708 3709 /* 3710 * When we're debugging, the chip is often stopped for long 
periods 3711 * of time, and that would normally cause the watchdog timer to fire. 3712 * Since that impedes debugging, we don't want to do that. 3713 */ 3714 if (sc->ti_flags & TI_FLAG_DEBUGING) 3715 return; 3716 3717 ifp = sc->ti_ifp; 3718 if_printf(ifp, "watchdog timeout -- resetting\n"); 3719 ti_stop(sc); 3720 ti_init_locked(sc); 3721 3722 ifp->if_oerrors++; 3723} 3724 3725/* 3726 * Stop the adapter and free any mbufs allocated to the 3727 * RX and TX lists. 3728 */ 3729static void 3730ti_stop(struct ti_softc *sc) 3731{ 3732 struct ifnet *ifp; 3733 struct ti_cmd_desc cmd; 3734 3735 TI_LOCK_ASSERT(sc); 3736 3737 ifp = sc->ti_ifp; 3738 3739 /* Disable host interrupts. */ 3740 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 3741 /* 3742 * Tell firmware we're shutting down. 3743 */ 3744 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0); 3745 3746 /* Halt and reinitialize. */ 3747 if (ti_chipinit(sc) != 0) 3748 return; 3749 ti_mem_zero(sc, 0x2000, 0x100000 - 0x2000); 3750 if (ti_chipinit(sc) != 0) 3751 return; 3752 3753 /* Free the RX lists. */ 3754 ti_free_rx_ring_std(sc); 3755 3756 /* Free jumbo RX list. */ 3757 ti_free_rx_ring_jumbo(sc); 3758 3759 /* Free mini RX list. */ 3760 ti_free_rx_ring_mini(sc); 3761 3762 /* Free TX buffers. */ 3763 ti_free_tx_ring(sc); 3764 3765 sc->ti_ev_prodidx.ti_idx = 0; 3766 sc->ti_return_prodidx.ti_idx = 0; 3767 sc->ti_tx_considx.ti_idx = 0; 3768 sc->ti_tx_saved_considx = TI_TXCONS_UNSET; 3769 3770 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 3771 callout_stop(&sc->ti_watchdog); 3772} 3773 3774/* 3775 * Stop all chip I/O so that the kernel's probe routines don't 3776 * get confused by errant DMAs when rebooting. 3777 */ 3778static int 3779ti_shutdown(device_t dev) 3780{ 3781 struct ti_softc *sc; 3782 3783 sc = device_get_softc(dev); 3784 TI_LOCK(sc); 3785 ti_chipinit(sc); 3786 TI_UNLOCK(sc); 3787 3788 return (0); 3789} 3790