if_nve.c revision 150220
/*
 * Copyright (c) 2005 by David E. O'Brien <obrien@FreeBSD.org>.
 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $
 */

/*
 * NVIDIA nForce MCP Networking Adapter driver
 *
 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by
 * NVIDIA through their web site.
 *
 * All mainstream nForce and nForce2 motherboards are supported. This module
 * is as stable as, and sometimes more stable than, the Linux version.
 * (Recent Linux stability issues seem to be related to problems with newer
 * distributions using GCC 3.x; however, this doesn't appear to affect
 * FreeBSD 5.x.)
 *
 * In accordance with the NVIDIA distribution license it is necessary to
 * link this module against the nvlibnet.o binary object included in the
 * Linux driver source distribution. The binary component is not modified in
 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c
 * Linux kernel module "wrapper".
 *
 * The Linux driver uses a common code API that is shared between Win32 and
 * i386 Linux. This abstracts the low-level driver functions and uses
 * callbacks and hooks to access the underlying hardware device. By using
 * this same API in a FreeBSD kernel module it is possible to support the
 * hardware without breaching the Linux source distribution's licensing
 * requirements, or obtaining the hardware programming specifications.
 *
 * Although not conventional, it works, and given the relatively small
 * amount of hardware-centric code, it is hopefully no more buggy than its
 * Linux counterpart.
 *
 * NVIDIA now supports the nForce3 AMD64 platform, but I have been unable
 * to access such a system to verify support. However, the code is reported
 * to work with little modification when compiled with the AMD64 version of
 * the NVIDIA Linux library. All that should be necessary to make the
 * driver work is to link it directly into the kernel, instead of as a
 * module, and apply the docs/amd64.diff patch in this source distribution
 * to the NVIDIA Linux driver source.
 *
 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well
 * as recent versions of DragonFly.
 *
 * Written by Quinton Dolan <q@onthenet.com.au>
 * Portions based on existing FreeBSD network drivers.
 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nve/if_nve.c 150220 2005-09-16 12:49:06Z ru $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/module.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miibus_if.h"

/* Include NVIDIA Linux driver header files */
#define linux
#include <contrib/dev/nve/basetype.h>
#include <contrib/dev/nve/phy.h>
#include "os+%DIKED-nve.h"
#include <contrib/dev/nve/drvinfo.h>
#include <contrib/dev/nve/adapter.h>
#undef linux

#include <dev/nve/if_nvereg.h>

MODULE_DEPEND(nve, pci, 1, 1, 1);
MODULE_DEPEND(nve, ether, 1, 1, 1);
MODULE_DEPEND(nve, miibus, 1, 1, 1);

static int nve_probe(device_t);
static int nve_attach(device_t);
static int nve_detach(device_t);
static void nve_init(void *);
static void nve_stop(struct nve_softc *);
static void nve_shutdown(device_t);
static int nve_init_rings(struct nve_softc *);
static void nve_free_rings(struct nve_softc *);

static void nve_ifstart(struct ifnet *);
static int nve_ioctl(struct ifnet *, u_long, caddr_t);
static void nve_intr(void *);
static void nve_tick(void *);
static void nve_setmulti(struct nve_softc *);
static void nve_watchdog(struct ifnet *);
static void nve_update_stats(struct nve_softc *);

static int nve_ifmedia_upd(struct ifnet *);
static void nve_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int nve_miibus_readreg(device_t, int, int);
static void nve_miibus_writereg(device_t, int, int, int);

static void nve_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);

static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8);
static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *);
static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID);
static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID);

static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8);
static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32);
static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *);
static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID);
static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID);
static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID);

static device_method_t nve_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nve_probe),
	DEVMETHOD(device_attach, nve_attach),
	DEVMETHOD(device_detach, nve_detach),
	DEVMETHOD(device_shutdown, nve_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, nve_miibus_readreg),
	DEVMETHOD(miibus_writereg, nve_miibus_writereg),

	{0, 0}
};

static driver_t nve_driver = {
	"nve",
	nve_methods,
	sizeof(struct nve_softc)
};

static devclass_t nve_devclass;

static int nve_pollinterval = 0;
SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW,
    &nve_pollinterval, 0, "delay between interface polls");

DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0);
DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0);

static struct nve_type nve_devs[] = {
	{NVIDIA_VENDORID, NFORCE_MCPNET1_DEVICEID,
	    "NVIDIA nForce MCP Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET2_DEVICEID,
	    "NVIDIA nForce MCP2 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET3_DEVICEID,
	    "NVIDIA nForce MCP3 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET4_DEVICEID,
	    "NVIDIA nForce MCP4 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET5_DEVICEID,
	    "NVIDIA nForce MCP5 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET6_DEVICEID,
	    "NVIDIA nForce MCP6 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET7_DEVICEID,
	    "NVIDIA nForce MCP7 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET8_DEVICEID,
	    "NVIDIA nForce MCP8 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET9_DEVICEID,
	    "NVIDIA nForce MCP9 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET10_DEVICEID,
	    "NVIDIA nForce MCP10 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET11_DEVICEID,
	    "NVIDIA nForce MCP11 Networking Adapter"},
	{0, 0, NULL}
};

/* DMA MEM map callback function to get data segment physical address */
static void
nve_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nsegs, int error)
{
	if (error)
		return;

	KASSERT(nsegs == 1,
	    ("Too many DMA segments returned when mapping DMA memory"));
	*(bus_addr_t *)arg = segs->ds_addr;
}

/* DMA RX map callback function to get data segment physical address */
static void
nve_dmamap_rx_cb(void *arg, bus_dma_segment_t * segs, int nsegs,
    bus_size_t mapsize, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

/*
 * DMA TX buffer callback function to allocate fragment data segment
 * addresses
 */
264static void 265nve_dmamap_tx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, bus_size_t mapsize, int error) 266{ 267 struct nve_tx_desc *info; 268 269 info = arg; 270 if (error) 271 return; 272 KASSERT(nsegs < NV_MAX_FRAGS, 273 ("Too many DMA segments returned when mapping mbuf")); 274 info->numfrags = nsegs; 275 bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t)); 276} 277 278/* Probe for supported hardware ID's */ 279static int 280nve_probe(device_t dev) 281{ 282 struct nve_type *t; 283 284 t = nve_devs; 285 /* Check for matching PCI DEVICE ID's */ 286 while (t->name != NULL) { 287 if ((pci_get_vendor(dev) == t->vid_id) && 288 (pci_get_device(dev) == t->dev_id)) { 289 device_set_desc(dev, t->name); 290 return (0); 291 } 292 t++; 293 } 294 295 return (ENXIO); 296} 297 298/* Attach driver and initialise hardware for use */ 299static int 300nve_attach(device_t dev) 301{ 302 u_char eaddr[ETHER_ADDR_LEN]; 303 struct nve_softc *sc; 304 struct ifnet *ifp; 305 OS_API *osapi; 306 ADAPTER_OPEN_PARAMS OpenParams; 307 int error = 0, i, rid, unit; 308 309 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n"); 310 311 sc = device_get_softc(dev); 312 unit = device_get_unit(dev); 313 314 /* Allocate mutex */ 315 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 316 MTX_DEF | MTX_RECURSE); 317 mtx_init(&sc->osmtx, device_get_nameunit(dev), NULL, MTX_SPIN); 318 319 sc->dev = dev; 320 sc->unit = unit; 321 322 /* Preinitialize data structures */ 323 bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS)); 324 325 /* Enable bus mastering */ 326 pci_enable_busmaster(dev); 327 328 /* Allocate memory mapped address space */ 329 rid = NV_RID; 330 sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, 331 RF_ACTIVE); 332 333 if (sc->res == NULL) { 334 device_printf(dev, "couldn't map memory\n"); 335 error = ENXIO; 336 goto fail; 337 } 338 sc->sc_st = rman_get_bustag(sc->res); 339 sc->sc_sh = rman_get_bushandle(sc->res); 340 341 /* Allocate interrupt */ 342 rid = 0; 343 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 344 RF_SHAREABLE | RF_ACTIVE); 345 346 if (sc->irq == NULL) { 347 device_printf(dev, "couldn't map interrupt\n"); 348 error = ENXIO; 349 goto fail; 350 } 351 /* Allocate DMA tags */ 352 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 353 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS, 354 NV_MAX_FRAGS, MCLBYTES, 0, 355 busdma_lock_mutex, &Giant, 356 &sc->mtag); 357 if (error) { 358 device_printf(dev, "couldn't allocate dma tag\n"); 359 goto fail; 360 } 361 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 362 BUS_SPACE_MAXADDR, NULL, NULL, 363 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1, 364 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0, 365 busdma_lock_mutex, &Giant, 366 &sc->rtag); 367 if (error) { 368 device_printf(dev, "couldn't allocate dma tag\n"); 369 goto fail; 370 } 371 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 372 BUS_SPACE_MAXADDR, NULL, NULL, 373 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1, 374 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0, 375 busdma_lock_mutex, &Giant, 376 &sc->ttag); 377 if (error) { 378 device_printf(dev, "couldn't allocate dma tag\n"); 379 goto fail; 380 } 381 /* Allocate DMA safe memory and get the DMA addresses. 
*/ 382 error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc, 383 BUS_DMA_WAITOK, &sc->tmap); 384 if (error) { 385 device_printf(dev, "couldn't allocate dma memory\n"); 386 goto fail; 387 } 388 bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE); 389 error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc, 390 sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb, 391 &sc->tx_addr, 0); 392 if (error) { 393 device_printf(dev, "couldn't map dma memory\n"); 394 goto fail; 395 } 396 error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc, 397 BUS_DMA_WAITOK, &sc->rmap); 398 if (error) { 399 device_printf(dev, "couldn't allocate dma memory\n"); 400 goto fail; 401 } 402 bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE); 403 error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc, 404 sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb, 405 &sc->rx_addr, 0); 406 if (error) { 407 device_printf(dev, "couldn't map dma memory\n"); 408 goto fail; 409 } 410 /* Initialize rings. */ 411 if (nve_init_rings(sc)) { 412 device_printf(dev, "failed to init rings\n"); 413 error = ENXIO; 414 goto fail; 415 } 416 /* Setup NVIDIA API callback routines */ 417 osapi = &sc->osapi; 418 osapi->pOSCX = sc; 419 osapi->pfnAllocMemory = nve_osalloc; 420 osapi->pfnFreeMemory = nve_osfree; 421 osapi->pfnAllocMemoryEx = nve_osallocex; 422 osapi->pfnFreeMemoryEx = nve_osfreeex; 423 osapi->pfnClearMemory = nve_osclear; 424 osapi->pfnStallExecution = nve_osdelay; 425 osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf; 426 osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf; 427 osapi->pfnPacketWasSent = nve_ospackettx; 428 osapi->pfnPacketWasReceived = nve_ospacketrx; 429 osapi->pfnLinkStateHasChanged = nve_oslinkchg; 430 osapi->pfnAllocTimer = nve_osalloctimer; 431 osapi->pfnFreeTimer = nve_osfreetimer; 432 osapi->pfnInitializeTimer = nve_osinittimer; 433 osapi->pfnSetTimer = nve_ossettimer; 434 osapi->pfnCancelTimer = nve_oscanceltimer; 435 osapi->pfnPreprocessPacket = nve_ospreprocpkt; 436 osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq; 437 osapi->pfnIndicatePackets = nve_osindicatepkt; 438 osapi->pfnLockAlloc = nve_oslockalloc; 439 osapi->pfnLockAcquire = nve_oslockacquire; 440 osapi->pfnLockRelease = nve_oslockrelease; 441 osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt; 442 443 sc->linkup = FALSE; 444 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN; 445 446 /* TODO - We don't support hardware offload yet */ 447 sc->hwmode = 1; 448 sc->media = 0; 449 450 /* Set NVIDIA API startup parameters */ 451 OpenParams.MaxDpcLoop = 2; 452 OpenParams.MaxRxPkt = RX_RING_SIZE; 453 OpenParams.MaxTxPkt = TX_RING_SIZE; 454 OpenParams.SentPacketStatusSuccess = 1; 455 OpenParams.SentPacketStatusFailure = 0; 456 OpenParams.MaxRxPktToAccumulate = 6; 457 OpenParams.ulPollInterval = nve_pollinterval; 458 OpenParams.SetForcedModeEveryNthRxPacket = 0; 459 OpenParams.SetForcedModeEveryNthTxPacket = 0; 460 OpenParams.RxForcedInterrupt = 0; 461 OpenParams.TxForcedInterrupt = 0; 462 OpenParams.pOSApi = osapi; 463 OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res); 464 OpenParams.bASFEnabled = 0; 465 OpenParams.ulDescriptorVersion = sc->hwmode; 466 OpenParams.ulMaxPacketSize = sc->max_frame_size; 467 OpenParams.DeviceId = pci_get_device(dev); 468 469 /* Open NVIDIA Hardware API */ 470 error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr); 471 if (error) { 472 device_printf(dev, 473 "failed to open NVIDIA Hardware API: 0x%x\n", error); 474 goto fail; 475 } 476 477 /* TODO - Add 
support for MODE2 hardware offload */ 478 479 bzero(&sc->adapterdata, sizeof(sc->adapterdata)); 480 481 sc->adapterdata.ulMediaIF = sc->media; 482 sc->adapterdata.ulModeRegTxReadCompleteEnable = 1; 483 sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata); 484 485 /* MAC is loaded backwards into h/w reg */ 486 sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr); 487 for (i = 0; i < 6; i++) { 488 eaddr[i] = sc->original_mac_addr[5 - i]; 489 } 490 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr); 491 492 /* Display ethernet address ,... */ 493 device_printf(dev, "Ethernet address %6D\n", eaddr, ":"); 494 495 /* Allocate interface structures */ 496 ifp = sc->ifp = if_alloc(IFT_ETHER); 497 if (ifp == NULL) { 498 device_printf(dev, "can not if_alloc()\n"); 499 error = ENOSPC; 500 goto fail; 501 } 502 503 /* Probe device for MII interface to PHY */ 504 DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_phy_probe\n"); 505 if (mii_phy_probe(dev, &sc->miibus, nve_ifmedia_upd, nve_ifmedia_sts)) { 506 device_printf(dev, "MII without any phy!\n"); 507 error = ENXIO; 508 goto fail; 509 } 510 511 /* Setup interface parameters */ 512 ifp->if_softc = sc; 513 if_initname(ifp, "nve", unit); 514 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 515 ifp->if_ioctl = nve_ioctl; 516 ifp->if_output = ether_output; 517 ifp->if_start = nve_ifstart; 518 ifp->if_watchdog = nve_watchdog; 519 ifp->if_timer = 0; 520 ifp->if_init = nve_init; 521 ifp->if_mtu = ETHERMTU; 522 ifp->if_baudrate = IF_Mbps(100); 523 ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1; 524 ifp->if_capabilities |= IFCAP_VLAN_MTU; 525 526 /* Attach to OS's managers. */ 527 ether_ifattach(ifp, eaddr); 528 callout_handle_init(&sc->stat_ch); 529 530 /* Activate our interrupt handler. - attach last to avoid lock */ 531 error = bus_setup_intr(sc->dev, sc->irq, INTR_TYPE_NET, nve_intr, 532 sc, &sc->sc_ih); 533 if (error) { 534 device_printf(sc->dev, "couldn't set up interrupt handler\n"); 535 goto fail; 536 } 537 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n"); 538 539fail: 540 if (error) 541 nve_detach(dev); 542 543 return (error); 544} 545 546/* Detach interface for module unload */ 547static int 548nve_detach(device_t dev) 549{ 550 struct nve_softc *sc = device_get_softc(dev); 551 struct ifnet *ifp; 552 553 KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized")); 554 NVE_LOCK(sc); 555 556 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n"); 557 558 ifp = sc->ifp; 559 560 if (device_is_attached(dev)) { 561 nve_stop(sc); 562 ether_ifdetach(ifp); 563 } 564 565 if (ifp) 566 if_free(ifp); 567 568 if (sc->miibus) 569 device_delete_child(dev, sc->miibus); 570 bus_generic_detach(dev); 571 572 /* Reload unreversed address back into MAC in original state */ 573 if (sc->original_mac_addr) 574 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, 575 sc->original_mac_addr); 576 577 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n"); 578 /* Detach from NVIDIA hardware API */ 579 if (sc->hwapi->pfnClose) 580 sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE); 581 /* Release resources */ 582 if (sc->sc_ih) 583 bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih); 584 if (sc->irq) 585 bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq); 586 if (sc->res) 587 bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res); 588 589 nve_free_rings(sc); 590 591 if (sc->tx_desc) { 592 bus_dmamap_unload(sc->rtag, sc->rmap); 593 bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap); 594 bus_dmamap_destroy(sc->rtag, sc->rmap); 595 } 596 if (sc->mtag) 597 
bus_dma_tag_destroy(sc->mtag); 598 if (sc->ttag) 599 bus_dma_tag_destroy(sc->ttag); 600 if (sc->rtag) 601 bus_dma_tag_destroy(sc->rtag); 602 603 NVE_UNLOCK(sc); 604 mtx_destroy(&sc->mtx); 605 mtx_destroy(&sc->osmtx); 606 607 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n"); 608 609 return (0); 610} 611 612/* Initialise interface and start it "RUNNING" */ 613static void 614nve_init(void *xsc) 615{ 616 struct nve_softc *sc = xsc; 617 struct ifnet *ifp; 618 int error; 619 620 NVE_LOCK(sc); 621 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup); 622 623 ifp = sc->ifp; 624 625 /* Do nothing if already running */ 626 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 627 goto fail; 628 629 nve_stop(sc); 630 DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n"); 631 632 /* Setup Hardware interface and allocate memory structures */ 633 error = sc->hwapi->pfnInit(sc->hwapi->pADCX, 634 0, /* force speed */ 635 0, /* force full duplex */ 636 0, /* force mode */ 637 0, /* force async mode */ 638 &sc->linkup); 639 640 if (error) { 641 device_printf(sc->dev, 642 "failed to start NVIDIA Hardware interface\n"); 643 goto fail; 644 } 645 /* Set the MAC address */ 646 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IFP2ENADDR(sc->ifp)); 647 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 648 sc->hwapi->pfnStart(sc->hwapi->pADCX); 649 650 /* Setup multicast filter */ 651 nve_setmulti(sc); 652 nve_ifmedia_upd(ifp); 653 654 /* Update interface parameters */ 655 ifp->if_drv_flags |= IFF_DRV_RUNNING; 656 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 657 658 sc->stat_ch = timeout(nve_tick, sc, hz); 659 660 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n"); 661 662fail: 663 NVE_UNLOCK(sc); 664 665 return; 666} 667 668/* Stop interface activity ie. not "RUNNING" */ 669static void 670nve_stop(struct nve_softc *sc) 671{ 672 struct ifnet *ifp; 673 674 NVE_LOCK(sc); 675 676 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n"); 677 678 ifp = sc->ifp; 679 ifp->if_timer = 0; 680 681 /* Cancel tick timer */ 682 untimeout(nve_tick, sc, sc->stat_ch); 683 684 /* Stop hardware activity */ 685 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); 686 sc->hwapi->pfnStop(sc->hwapi->pADCX, 0); 687 688 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n"); 689 /* Shutdown interface and deallocate memory buffers */ 690 if (sc->hwapi->pfnDeinit) 691 sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0); 692 693 sc->linkup = 0; 694 sc->cur_rx = 0; 695 sc->pending_rxs = 0; 696 697 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 698 699 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n"); 700 701 NVE_UNLOCK(sc); 702 703 return; 704} 705 706/* Shutdown interface for unload/reboot */ 707static void 708nve_shutdown(device_t dev) 709{ 710 struct nve_softc *sc; 711 712 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n"); 713 714 sc = device_get_softc(dev); 715 716 /* Stop hardware activity */ 717 nve_stop(sc); 718} 719 720/* Allocate TX ring buffers */ 721static int 722nve_init_rings(struct nve_softc *sc) 723{ 724 int error, i; 725 726 NVE_LOCK(sc); 727 728 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n"); 729 730 sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0; 731 /* Initialise RX ring */ 732 for (i = 0; i < RX_RING_SIZE; i++) { 733 struct nve_rx_desc *desc = sc->rx_desc + i; 734 struct nve_map_buffer *buf = &desc->buf; 735 736 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 737 if (buf->mbuf == NULL) { 738 device_printf(sc->dev, "couldn't allocate mbuf\n"); 739 nve_free_rings(sc); 740 error = ENOBUFS; 741 goto fail; 
742 } 743 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; 744 m_adj(buf->mbuf, ETHER_ALIGN); 745 746 error = bus_dmamap_create(sc->mtag, 0, &buf->map); 747 if (error) { 748 device_printf(sc->dev, "couldn't create dma map\n"); 749 nve_free_rings(sc); 750 goto fail; 751 } 752 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, 753 nve_dmamap_rx_cb, &desc->paddr, 0); 754 if (error) { 755 device_printf(sc->dev, "couldn't dma map mbuf\n"); 756 nve_free_rings(sc); 757 goto fail; 758 } 759 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); 760 761 desc->buflength = buf->mbuf->m_len; 762 desc->vaddr = mtod(buf->mbuf, caddr_t); 763 } 764 bus_dmamap_sync(sc->rtag, sc->rmap, 765 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 766 767 /* Initialize TX ring */ 768 for (i = 0; i < TX_RING_SIZE; i++) { 769 struct nve_tx_desc *desc = sc->tx_desc + i; 770 struct nve_map_buffer *buf = &desc->buf; 771 772 buf->mbuf = NULL; 773 774 error = bus_dmamap_create(sc->mtag, 0, &buf->map); 775 if (error) { 776 device_printf(sc->dev, "couldn't create dma map\n"); 777 nve_free_rings(sc); 778 goto fail; 779 } 780 } 781 bus_dmamap_sync(sc->ttag, sc->tmap, 782 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 783 784 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n"); 785 786fail: 787 NVE_UNLOCK(sc); 788 789 return (error); 790} 791 792/* Free the TX ring buffers */ 793static void 794nve_free_rings(struct nve_softc *sc) 795{ 796 int i; 797 798 NVE_LOCK(sc); 799 800 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n"); 801 802 for (i = 0; i < RX_RING_SIZE; i++) { 803 struct nve_rx_desc *desc = sc->rx_desc + i; 804 struct nve_map_buffer *buf = &desc->buf; 805 806 if (buf->mbuf) { 807 bus_dmamap_unload(sc->mtag, buf->map); 808 bus_dmamap_destroy(sc->mtag, buf->map); 809 m_freem(buf->mbuf); 810 } 811 buf->mbuf = NULL; 812 } 813 814 for (i = 0; i < TX_RING_SIZE; i++) { 815 struct nve_tx_desc *desc = sc->tx_desc + i; 816 struct nve_map_buffer *buf = &desc->buf; 817 818 if (buf->mbuf) { 819 bus_dmamap_unload(sc->mtag, buf->map); 820 bus_dmamap_destroy(sc->mtag, buf->map); 821 m_freem(buf->mbuf); 822 } 823 buf->mbuf = NULL; 824 } 825 826 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n"); 827 828 NVE_UNLOCK(sc); 829} 830 831/* Main loop for sending packets from OS to interface */ 832static void 833nve_ifstart(struct ifnet *ifp) 834{ 835 struct nve_softc *sc = ifp->if_softc; 836 struct nve_map_buffer *buf; 837 struct mbuf *m0, *m; 838 struct nve_tx_desc *desc; 839 ADAPTER_WRITE_DATA txdata; 840 int error, i; 841 842 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n"); 843 844 /* If link is down/busy or queue is empty do nothing */ 845 if (ifp->if_drv_flags & IFF_DRV_OACTIVE || ifp->if_snd.ifq_head == NULL) 846 return; 847 848 /* Transmit queued packets until sent or TX ring is full */ 849 while (sc->pending_txs < TX_RING_SIZE) { 850 desc = sc->tx_desc + sc->cur_tx; 851 buf = &desc->buf; 852 853 /* Get next packet to send. */ 854 IF_DEQUEUE(&ifp->if_snd, m0); 855 856 /* If nothing to send, return. 
*/ 857 if (m0 == NULL) 858 return; 859 860 /* Map MBUF for DMA access */ 861 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0, 862 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); 863 864 if (error && error != EFBIG) { 865 m_freem(m0); 866 sc->tx_errors++; 867 continue; 868 } 869 /* 870 * Packet has too many fragments - defrag into new mbuf 871 * cluster 872 */ 873 if (error) { 874 m = m_defrag(m0, M_DONTWAIT); 875 if (m == NULL) { 876 m_freem(m0); 877 sc->tx_errors++; 878 continue; 879 } 880 m0 = m; 881 882 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m, 883 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); 884 if (error) { 885 m_freem(m); 886 sc->tx_errors++; 887 continue; 888 } 889 } 890 /* Do sync on DMA bounce buffer */ 891 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE); 892 893 buf->mbuf = m0; 894 txdata.ulNumberOfElements = desc->numfrags; 895 txdata.pvID = (PVOID)desc; 896 897 /* Put fragments into API element list */ 898 txdata.ulTotalLength = buf->mbuf->m_len; 899 for (i = 0; i < desc->numfrags; i++) { 900 txdata.sElement[i].ulLength = 901 (ulong)desc->frags[i].ds_len; 902 txdata.sElement[i].pPhysical = 903 (PVOID)desc->frags[i].ds_addr; 904 } 905 906 /* Send packet to Nvidia API for transmission */ 907 error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata); 908 909 switch (error) { 910 case ADAPTERERR_NONE: 911 /* Packet was queued in API TX queue successfully */ 912 sc->pending_txs++; 913 sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE; 914 break; 915 916 case ADAPTERERR_TRANSMIT_QUEUE_FULL: 917 /* The API TX queue is full - requeue the packet */ 918 device_printf(sc->dev, 919 "nve_ifstart: transmit queue is full\n"); 920 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 921 bus_dmamap_unload(sc->mtag, buf->map); 922 IF_PREPEND(&ifp->if_snd, buf->mbuf); 923 buf->mbuf = NULL; 924 return; 925 926 default: 927 /* The API failed to queue/send the packet so dump it */ 928 device_printf(sc->dev, "nve_ifstart: transmit error\n"); 929 bus_dmamap_unload(sc->mtag, buf->map); 930 m_freem(buf->mbuf); 931 buf->mbuf = NULL; 932 sc->tx_errors++; 933 return; 934 } 935 /* Set watchdog timer. */ 936 ifp->if_timer = 8; 937 938 /* Copy packet to BPF tap */ 939 BPF_MTAP(ifp, m0); 940 } 941 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 942 943 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n"); 944} 945 946/* Handle IOCTL events */ 947static int 948nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 949{ 950 struct nve_softc *sc = ifp->if_softc; 951 struct ifreq *ifr = (struct ifreq *) data; 952 struct mii_data *mii; 953 int error = 0; 954 955 NVE_LOCK(sc); 956 957 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n"); 958 959 switch (command) { 960 case SIOCSIFMTU: 961 /* Set MTU size */ 962 if (ifp->if_mtu == ifr->ifr_mtu) 963 break; 964 if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) { 965 ifp->if_mtu = ifr->ifr_mtu; 966 nve_stop(sc); 967 nve_init(sc); 968 } else 969 error = EINVAL; 970 break; 971 972 case SIOCSIFFLAGS: 973 /* Setup interface flags */ 974 if (ifp->if_flags & IFF_UP) { 975 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 976 nve_init(sc); 977 break; 978 } 979 } else { 980 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 981 nve_stop(sc); 982 break; 983 } 984 } 985 /* Handle IFF_PROMISC and IFF_ALLMULTI flags. 
*/ 986 nve_setmulti(sc); 987 break; 988 989 case SIOCADDMULTI: 990 case SIOCDELMULTI: 991 /* Setup multicast filter */ 992 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 993 nve_setmulti(sc); 994 } 995 break; 996 997 case SIOCGIFMEDIA: 998 case SIOCSIFMEDIA: 999 /* Get/Set interface media parameters */ 1000 mii = device_get_softc(sc->miibus); 1001 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1002 break; 1003 1004 default: 1005 /* Everything else we forward to generic ether ioctl */ 1006 error = ether_ioctl(ifp, (int)command, data); 1007 break; 1008 } 1009 1010 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n"); 1011 1012 NVE_UNLOCK(sc); 1013 1014 return (error); 1015} 1016 1017/* Interrupt service routine */ 1018static void 1019nve_intr(void *arg) 1020{ 1021 struct nve_softc *sc = arg; 1022 struct ifnet *ifp = sc->ifp; 1023 1024 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n"); 1025 1026 if (!ifp->if_flags & IFF_UP) { 1027 nve_stop(sc); 1028 return; 1029 } 1030 /* Handle interrupt event */ 1031 if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) { 1032 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX); 1033 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 1034 } 1035 if (ifp->if_snd.ifq_head != NULL) 1036 nve_ifstart(ifp); 1037 1038 /* If no pending packets we don't need a timeout */ 1039 if (sc->pending_txs == 0) 1040 sc->ifp->if_timer = 0; 1041 1042 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n"); 1043 1044 return; 1045} 1046 1047/* Setup multicast filters */ 1048static void 1049nve_setmulti(struct nve_softc *sc) 1050{ 1051 struct ifnet *ifp; 1052 struct ifmultiaddr *ifma; 1053 PACKET_FILTER hwfilter; 1054 int i; 1055 u_int8_t andaddr[6], oraddr[6]; 1056 1057 NVE_LOCK(sc); 1058 1059 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n"); 1060 1061 ifp = sc->ifp; 1062 1063 /* Initialize filter */ 1064 hwfilter.ulFilterFlags = 0; 1065 for (i = 0; i < 6; i++) { 1066 hwfilter.acMulticastAddress[i] = 0; 1067 hwfilter.acMulticastMask[i] = 0; 1068 } 1069 1070 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1071 /* Accept all packets */ 1072 hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS; 1073 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); 1074 NVE_UNLOCK(sc); 1075 return; 1076 } 1077 /* Setup multicast filter */ 1078 IF_ADDR_LOCK(ifp); 1079 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1080 u_char *addrp; 1081 1082 if (ifma->ifma_addr->sa_family != AF_LINK) 1083 continue; 1084 1085 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 1086 for (i = 0; i < 6; i++) { 1087 u_int8_t mcaddr = addrp[i]; 1088 andaddr[i] &= mcaddr; 1089 oraddr[i] |= mcaddr; 1090 } 1091 } 1092 IF_ADDR_UNLOCK(ifp); 1093 for (i = 0; i < 6; i++) { 1094 hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i]; 1095 hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]); 1096 } 1097 1098 /* Send filter to NVIDIA API */ 1099 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); 1100 1101 NVE_UNLOCK(sc); 1102 1103 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n"); 1104 1105 return; 1106} 1107 1108/* Change the current media/mediaopts */ 1109static int 1110nve_ifmedia_upd(struct ifnet *ifp) 1111{ 1112 struct nve_softc *sc = ifp->if_softc; 1113 struct mii_data *mii; 1114 1115 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n"); 1116 1117 mii = device_get_softc(sc->miibus); 1118 1119 if (mii->mii_instance) { 1120 struct mii_softc *miisc; 1121 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 1122 miisc = LIST_NEXT(miisc, mii_list)) { 1123 
mii_phy_reset(miisc); 1124 } 1125 } 1126 mii_mediachg(mii); 1127 1128 return (0); 1129} 1130 1131/* Update current miibus PHY status of media */ 1132static void 1133nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1134{ 1135 struct nve_softc *sc; 1136 struct mii_data *mii; 1137 1138 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n"); 1139 1140 sc = ifp->if_softc; 1141 mii = device_get_softc(sc->miibus); 1142 mii_pollstat(mii); 1143 1144 ifmr->ifm_active = mii->mii_media_active; 1145 ifmr->ifm_status = mii->mii_media_status; 1146 1147 return; 1148} 1149 1150/* miibus tick timer - maintain link status */ 1151static void 1152nve_tick(void *xsc) 1153{ 1154 struct nve_softc *sc = xsc; 1155 struct mii_data *mii; 1156 struct ifnet *ifp; 1157 1158 NVE_LOCK(sc); 1159 1160 ifp = sc->ifp; 1161 nve_update_stats(sc); 1162 1163 mii = device_get_softc(sc->miibus); 1164 mii_tick(mii); 1165 1166 if (mii->mii_media_status & IFM_ACTIVE && 1167 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1168 if (ifp->if_snd.ifq_head != NULL) 1169 nve_ifstart(ifp); 1170 } 1171 sc->stat_ch = timeout(nve_tick, sc, hz); 1172 1173 NVE_UNLOCK(sc); 1174 1175 return; 1176} 1177 1178/* Update ifnet data structure with collected interface stats from API */ 1179static void 1180nve_update_stats(struct nve_softc *sc) 1181{ 1182 struct ifnet *ifp = sc->ifp; 1183 ADAPTER_STATS stats; 1184 1185 NVE_LOCK(sc); 1186 1187 if (sc->hwapi) { 1188 sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats); 1189 1190 ifp->if_ipackets = stats.ulSuccessfulReceptions; 1191 ifp->if_ierrors = stats.ulMissedFrames + 1192 stats.ulFailedReceptions + 1193 stats.ulCRCErrors + 1194 stats.ulFramingErrors + 1195 stats.ulOverFlowErrors; 1196 1197 ifp->if_opackets = stats.ulSuccessfulTransmissions; 1198 ifp->if_oerrors = sc->tx_errors + 1199 stats.ulFailedTransmissions + 1200 stats.ulRetryErrors + 1201 stats.ulUnderflowErrors + 1202 stats.ulLossOfCarrierErrors + 1203 stats.ulLateCollisionErrors; 1204 1205 ifp->if_collisions = stats.ulLateCollisionErrors; 1206 } 1207 NVE_UNLOCK(sc); 1208 1209 return; 1210} 1211 1212/* miibus Read PHY register wrapper - calls Nvidia API entry point */ 1213static int 1214nve_miibus_readreg(device_t dev, int phy, int reg) 1215{ 1216 struct nve_softc *sc = device_get_softc(dev); 1217 ULONG data; 1218 1219 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n"); 1220 1221 ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data); 1222 1223 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n"); 1224 1225 return (data); 1226} 1227 1228/* miibus Write PHY register wrapper - calls Nvidia API entry point */ 1229static void 1230nve_miibus_writereg(device_t dev, int phy, int reg, int data) 1231{ 1232 struct nve_softc *sc = device_get_softc(dev); 1233 1234 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n"); 1235 1236 ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data); 1237 1238 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n"); 1239 1240 return; 1241} 1242 1243/* Watchdog timer to prevent PHY lockups */ 1244static void 1245nve_watchdog(struct ifnet *ifp) 1246{ 1247 struct nve_softc *sc = ifp->if_softc; 1248 1249 device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs); 1250 1251 sc->tx_errors++; 1252 1253 nve_stop(sc); 1254 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1255 nve_init(sc); 1256 1257 if (ifp->if_snd.ifq_head != NULL) 1258 nve_ifstart(ifp); 1259 1260 return; 1261} 1262 1263/* --- Start of NVOSAPI interface --- */ 1264 1265/* Allocate DMA enabled general use memory for API */ 1266static 
NV_SINT32 1267nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem) 1268{ 1269 struct nve_softc *sc; 1270 bus_addr_t mem_physical; 1271 1272 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength); 1273 1274 sc = (struct nve_softc *)ctx; 1275 1276 mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF, 1277 M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0); 1278 1279 if (!mem->pLogical) { 1280 device_printf(sc->dev, "memory allocation failed\n"); 1281 return (0); 1282 } 1283 memset(mem->pLogical, 0, (ulong)mem->uiLength); 1284 mem_physical = vtophys(mem->pLogical); 1285 mem->pPhysical = (PVOID)mem_physical; 1286 1287 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n", 1288 (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength); 1289 1290 return (1); 1291} 1292 1293/* Free allocated memory */ 1294static NV_SINT32 1295nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem) 1296{ 1297 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n", 1298 (uint)mem->pLogical, (uint) mem->uiLength); 1299 1300 contigfree(mem->pLogical, PAGE_SIZE, M_DEVBUF); 1301 return (1); 1302} 1303 1304/* Copied directly from nvnet.c */ 1305static NV_SINT32 1306nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) 1307{ 1308 MEMORY_BLOCK mem_block; 1309 1310 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n"); 1311 1312 mem_block_ex->pLogical = NULL; 1313 mem_block_ex->uiLengthOrig = mem_block_ex->uiLength; 1314 1315 if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) && 1316 (mem_block_ex->AlignmentSize > 1)) { 1317 DEBUGOUT(NVE_DEBUG_API, " aligning on %d\n", 1318 mem_block_ex->AlignmentSize); 1319 mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize; 1320 } 1321 mem_block.uiLength = mem_block_ex->uiLengthOrig; 1322 1323 if (nve_osalloc(ctx, &mem_block) == 0) { 1324 return (0); 1325 } 1326 mem_block_ex->pLogicalOrig = mem_block.pLogical; 1327 mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical; 1328 mem_block_ex->pPhysicalOrigHigh = 0; 1329 1330 mem_block_ex->pPhysical = mem_block.pPhysical; 1331 mem_block_ex->pLogical = mem_block.pLogical; 1332 1333 if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) { 1334 unsigned int offset; 1335 offset = mem_block_ex->pPhysicalOrigLow & 1336 (mem_block_ex->AlignmentSize - 1); 1337 1338 if (offset) { 1339 mem_block_ex->pPhysical = 1340 (PVOID)((ulong)mem_block_ex->pPhysical + 1341 mem_block_ex->AlignmentSize - offset); 1342 mem_block_ex->pLogical = 1343 (PVOID)((ulong)mem_block_ex->pLogical + 1344 mem_block_ex->AlignmentSize - offset); 1345 } /* if (offset) */ 1346 } /* if (mem_block_ex->uiLength != *mem_block_ex->uiLengthOrig) */ 1347 return (1); 1348} 1349 1350/* Copied directly from nvnet.c */ 1351static NV_SINT32 1352nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) 1353{ 1354 MEMORY_BLOCK mem_block; 1355 1356 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n"); 1357 1358 mem_block.pLogical = mem_block_ex->pLogicalOrig; 1359 mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow); 1360 mem_block.uiLength = mem_block_ex->uiLengthOrig; 1361 1362 return (nve_osfree(ctx, &mem_block)); 1363} 1364 1365/* Clear memory region */ 1366static NV_SINT32 1367nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length) 1368{ 1369 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n"); 1370 memset(mem, 0, length); 1371 return (1); 1372} 1373 1374/* Sleep for a tick */ 1375static NV_SINT32 1376nve_osdelay(PNV_VOID ctx, NV_UINT32 usec) 1377{ 1378 DELAY(usec); 1379 return (1); 1380} 1381 1382/* Allocate memory for rx buffer */ 1383static NV_SINT32 
1384nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id) 1385{ 1386 struct nve_softc *sc = ctx; 1387 struct nve_rx_desc *desc; 1388 struct nve_map_buffer *buf; 1389 int error; 1390 1391 NVE_LOCK(sc); 1392 1393 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n"); 1394 1395 if (sc->pending_rxs == RX_RING_SIZE) { 1396 device_printf(sc->dev, "rx ring buffer is full\n"); 1397 goto fail; 1398 } 1399 desc = sc->rx_desc + sc->cur_rx; 1400 buf = &desc->buf; 1401 1402 if (buf->mbuf == NULL) { 1403 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1404 if (buf->mbuf == NULL) { 1405 device_printf(sc->dev, "failed to allocate memory\n"); 1406 goto fail; 1407 } 1408 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; 1409 m_adj(buf->mbuf, ETHER_ALIGN); 1410 1411 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, 1412 nve_dmamap_rx_cb, &desc->paddr, 0); 1413 if (error) { 1414 device_printf(sc->dev, "failed to dmamap mbuf\n"); 1415 m_freem(buf->mbuf); 1416 buf->mbuf = NULL; 1417 goto fail; 1418 } 1419 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); 1420 desc->buflength = buf->mbuf->m_len; 1421 desc->vaddr = mtod(buf->mbuf, caddr_t); 1422 } 1423 sc->pending_rxs++; 1424 sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE; 1425 1426 mem->pLogical = (void *)desc->vaddr; 1427 mem->pPhysical = (void *)desc->paddr; 1428 mem->uiLength = desc->buflength; 1429 *id = (void *)desc; 1430 1431 NVE_UNLOCK(sc); 1432 return (1); 1433 1434fail: 1435 NVE_UNLOCK(sc); 1436 return (0); 1437} 1438 1439/* Free the rx buffer */ 1440static NV_SINT32 1441nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id) 1442{ 1443 struct nve_softc *sc = ctx; 1444 struct nve_rx_desc *desc; 1445 struct nve_map_buffer *buf; 1446 1447 NVE_LOCK(sc); 1448 1449 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n"); 1450 1451 desc = (struct nve_rx_desc *) id; 1452 buf = &desc->buf; 1453 1454 if (buf->mbuf) { 1455 bus_dmamap_unload(sc->mtag, buf->map); 1456 bus_dmamap_destroy(sc->mtag, buf->map); 1457 m_freem(buf->mbuf); 1458 } 1459 sc->pending_rxs--; 1460 buf->mbuf = NULL; 1461 1462 NVE_UNLOCK(sc); 1463 1464 return (1); 1465} 1466 1467/* This gets called by the Nvidia API after our TX packet has been sent */ 1468static NV_SINT32 1469nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success) 1470{ 1471 struct nve_softc *sc = ctx; 1472 struct nve_map_buffer *buf; 1473 struct nve_tx_desc *desc = (struct nve_tx_desc *) id; 1474 struct ifnet *ifp; 1475 1476 NVE_LOCK(sc); 1477 1478 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n"); 1479 1480 ifp = sc->ifp; 1481 buf = &desc->buf; 1482 sc->pending_txs--; 1483 1484 /* Unload and free mbuf cluster */ 1485 if (buf->mbuf == NULL) 1486 goto fail; 1487 1488 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE); 1489 bus_dmamap_unload(sc->mtag, buf->map); 1490 m_freem(buf->mbuf); 1491 buf->mbuf = NULL; 1492 1493 /* Send more packets if we have them */ 1494 if (sc->pending_txs < TX_RING_SIZE) 1495 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1496 1497 if (ifp->if_snd.ifq_head != NULL && sc->pending_txs < TX_RING_SIZE) 1498 nve_ifstart(ifp); 1499 1500fail: 1501 NVE_UNLOCK(sc); 1502 1503 return (1); 1504} 1505 1506/* This gets called by the Nvidia API when a new packet has been received */ 1507/* XXX What is newbuf used for? 
XXX */ 1508static NV_SINT32 1509nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf, 1510 NV_UINT8 priority) 1511{ 1512 struct nve_softc *sc = ctx; 1513 struct ifnet *ifp; 1514 struct nve_rx_desc *desc; 1515 struct nve_map_buffer *buf; 1516 ADAPTER_READ_DATA *readdata; 1517 1518 NVE_LOCK(sc); 1519 1520 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n"); 1521 1522 ifp = sc->ifp; 1523 1524 readdata = (ADAPTER_READ_DATA *) data; 1525 desc = readdata->pvID; 1526 buf = &desc->buf; 1527 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1528 1529 if (success) { 1530 /* Sync DMA bounce buffer. */ 1531 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1532 1533 /* First mbuf in packet holds the ethernet and packet headers */ 1534 buf->mbuf->m_pkthdr.rcvif = ifp; 1535 buf->mbuf->m_pkthdr.len = buf->mbuf->m_len = 1536 readdata->ulTotalLength; 1537 1538 bus_dmamap_unload(sc->mtag, buf->map); 1539 1540 /* Give mbuf to OS. */ 1541 (*ifp->if_input) (ifp, buf->mbuf); 1542 if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH) 1543 ifp->if_imcasts++; 1544 1545 /* Blat the mbuf pointer, kernel will free the mbuf cluster */ 1546 buf->mbuf = NULL; 1547 } else { 1548 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1549 bus_dmamap_unload(sc->mtag, buf->map); 1550 m_freem(buf->mbuf); 1551 buf->mbuf = NULL; 1552 } 1553 1554 sc->cur_rx = desc - sc->rx_desc; 1555 sc->pending_rxs--; 1556 1557 NVE_UNLOCK(sc); 1558 1559 return (1); 1560} 1561 1562/* This gets called by NVIDIA API when the PHY link state changes */ 1563static NV_SINT32 1564nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled) 1565{ 1566 struct nve_softc *sc = (struct nve_softc *)ctx; 1567 struct ifnet *ifp; 1568 1569 DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n"); 1570 1571 ifp = sc->ifp; 1572 1573 if (enabled) 1574 ifp->if_flags |= IFF_UP; 1575 else 1576 ifp->if_flags &= ~IFF_UP; 1577 1578 return (1); 1579} 1580 1581/* Setup a watchdog timer */ 1582static NV_SINT32 1583nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer) 1584{ 1585 struct nve_softc *sc = (struct nve_softc *)ctx; 1586 1587 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n"); 1588 1589 callout_handle_init(&sc->ostimer); 1590 *timer = &sc->ostimer; 1591 1592 return (1); 1593} 1594 1595/* Free the timer */ 1596static NV_SINT32 1597nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer) 1598{ 1599 1600 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n"); 1601 1602 return (1); 1603} 1604 1605/* Setup timer parameters */ 1606static NV_SINT32 1607nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters) 1608{ 1609 struct nve_softc *sc = (struct nve_softc *)ctx; 1610 1611 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n"); 1612 1613 sc->ostimer_func = func; 1614 sc->ostimer_params = parameters; 1615 1616 return (1); 1617} 1618 1619/* Set the timer to go off */ 1620static NV_SINT32 1621nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay) 1622{ 1623 struct nve_softc *sc = ctx; 1624 1625 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n"); 1626 1627 *(struct callout_handle *)timer = timeout(sc->ostimer_func, 1628 sc->ostimer_params, delay); 1629 1630 return (1); 1631} 1632 1633/* Cancel the timer */ 1634static NV_SINT32 1635nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer) 1636{ 1637 struct nve_softc *sc = ctx; 1638 1639 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n"); 1640 1641 untimeout(sc->ostimer_func, sc->ostimer_params, 1642 *(struct callout_handle *)timer); 1643 1644 return (1); 1645} 1646 1647static NV_SINT32 
nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id,
    NV_UINT8 *newbuffer, NV_UINT8 priority)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n");

	return (1);
}

static PNV_VOID
nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpktnopq\n");

	return (NULL);
}

static NV_SINT32
nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n");

	return (1);
}

/* Allocate mutex context (already done in nve_attach) */
static NV_SINT32
nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n");

	*pLock = (void **)sc;

	return (1);
}

/* Obtain a spin lock */
static NV_SINT32
nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n");

	NVE_OSLOCK((struct nve_softc *)lock);

	return (1);
}

/* Release lock */
static NV_SINT32
nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n");

	NVE_OSUNLOCK((struct nve_softc *)lock);

	return (1);
}

/* I have no idea what this is for */
static PNV_VOID
nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n");
	panic("nve: nve_osreturnbufvirtual not implemented\n");

	return (NULL);
}

/* --- End of NVOSAPI interface --- */