if_nve.c revision 158651
1/*- 2 * Copyright (c) 2005 by David E. O'Brien <obrien@FreeBSD.org>. 3 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY 16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $ 28 */ 29/* 30 * NVIDIA nForce MCP Networking Adapter driver 31 * 32 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by NVIDIA 33 * through their web site. 34 * 35 * All mainstream nForce and nForce2 motherboards are supported. This module 36 * is as stable, sometimes more stable, than the linux version. (Recent 37 * Linux stability issues seem to be related to some issues with newer 38 * distributions using GCC 3.x, however this don't appear to effect FreeBSD 39 * 5.x). 40 * 41 * In accordance with the NVIDIA distribution license it is necessary to 42 * link this module against the nvlibnet.o binary object included in the 43 * Linux driver source distribution. The binary component is not modified in 44 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c 45 * linux kernel module "wrapper". 46 * 47 * The Linux driver uses a common code API that is shared between Win32 and 48 * i386 Linux. This abstracts the low level driver functions and uses 49 * callbacks and hooks to access the underlying hardware device. By using 50 * this same API in a FreeBSD kernel module it is possible to support the 51 * hardware without breaching the Linux source distributions licensing 52 * requirements, or obtaining the hardware programming specifications. 53 * 54 * Although not conventional, it works, and given the relatively small 55 * amount of hardware centric code, it's hopefully no more buggy than its 56 * linux counterpart. 57 * 58 * NVIDIA now support the nForce3 AMD64 platform, however I have been 59 * unable to access such a system to verify support. However, the code is 60 * reported to work with little modification when compiled with the AMD64 61 * version of the NVIDIA Linux library. All that should be necessary to make 62 * the driver work is to link it directly into the kernel, instead of as a 63 * module, and apply the docs/amd64.diff patch in this source distribution to 64 * the NVIDIA Linux driver source. 65 * 66 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well 67 * as recent versions of DragonFly. 68 * 69 * Written by Quinton Dolan <q@onthenet.com.au> 70 * Portions based on existing FreeBSD network drivers. 71 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files. 72 */ 73 74#include <sys/cdefs.h> 75__FBSDID("$FreeBSD: head/sys/dev/nve/if_nve.c 158651 2006-05-16 14:37:58Z phk $"); 76 77#include <sys/param.h> 78#include <sys/systm.h> 79#include <sys/sockio.h> 80#include <sys/mbuf.h> 81#include <sys/malloc.h> 82#include <sys/kernel.h> 83#include <sys/socket.h> 84#include <sys/sysctl.h> 85#include <sys/queue.h> 86#include <sys/module.h> 87 88#include <net/if.h> 89#include <net/if_arp.h> 90#include <net/ethernet.h> 91#include <net/if_dl.h> 92#include <net/if_media.h> 93#include <net/if_types.h> 94#include <net/bpf.h> 95#include <net/if_vlan_var.h> 96 97#include <machine/bus.h> 98#include <machine/resource.h> 99 100#include <vm/vm.h> /* for vtophys */ 101#include <vm/pmap.h> /* for vtophys */ 102#include <sys/bus.h> 103#include <sys/rman.h> 104 105#include <dev/pci/pcireg.h> 106#include <dev/pci/pcivar.h> 107#include <dev/mii/mii.h> 108#include <dev/mii/miivar.h> 109#include "miibus_if.h" 110 111/* Include NVIDIA Linux driver header files */ 112#include <contrib/dev/nve/nvenet_version.h> 113#define linux 114#include <contrib/dev/nve/basetype.h> 115#include <contrib/dev/nve/phy.h> 116#include "os+%DIKED-nve.h" 117#include <contrib/dev/nve/drvinfo.h> 118#include <contrib/dev/nve/adapter.h> 119#undef linux 120 121#include <dev/nve/if_nvereg.h> 122 123MODULE_DEPEND(nve, pci, 1, 1, 1); 124MODULE_DEPEND(nve, ether, 1, 1, 1); 125MODULE_DEPEND(nve, miibus, 1, 1, 1); 126 127static int nve_probe(device_t); 128static int nve_attach(device_t); 129static int nve_detach(device_t); 130static void nve_init(void *); 131static void nve_init_locked(struct nve_softc *); 132static void nve_stop(struct nve_softc *); 133static void nve_shutdown(device_t); 134static int nve_init_rings(struct nve_softc *); 135static void nve_free_rings(struct nve_softc *); 136 137static void nve_ifstart(struct ifnet *); 138static void nve_ifstart_locked(struct ifnet *); 139static int nve_ioctl(struct ifnet *, u_long, caddr_t); 140static void nve_intr(void *); 141static void nve_tick(void *); 142static void nve_setmulti(struct nve_softc *); 143static void nve_watchdog(struct ifnet *); 144static void nve_update_stats(struct nve_softc *); 145 146static int nve_ifmedia_upd(struct ifnet *); 147static void nve_ifmedia_upd_locked(struct ifnet *); 148static void nve_ifmedia_sts(struct ifnet *, struct ifmediareq *); 149static int nve_miibus_readreg(device_t, int, int); 150static void nve_miibus_writereg(device_t, int, int, int); 151 152static void nve_dmamap_cb(void *, bus_dma_segment_t *, int, int); 153static void nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int); 154 155static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK); 156static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK); 157static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX); 158static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX); 159static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32); 160static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32); 161static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *); 162static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID); 163static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32); 164static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8); 165static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32); 166static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *); 167static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID); 168static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID); 169static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32); 170static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID); 171 172static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8); 173static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID); 174static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32); 175static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *); 176static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID); 177static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID); 178static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID); 179 180static device_method_t nve_methods[] = { 181 /* Device interface */ 182 DEVMETHOD(device_probe, nve_probe), 183 DEVMETHOD(device_attach, nve_attach), 184 DEVMETHOD(device_detach, nve_detach), 185 DEVMETHOD(device_shutdown, nve_shutdown), 186 187 /* Bus interface */ 188 DEVMETHOD(bus_print_child, bus_generic_print_child), 189 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 190 191 /* MII interface */ 192 DEVMETHOD(miibus_readreg, nve_miibus_readreg), 193 DEVMETHOD(miibus_writereg, nve_miibus_writereg), 194 195 {0, 0} 196}; 197 198static driver_t nve_driver = { 199 "nve", 200 nve_methods, 201 sizeof(struct nve_softc) 202}; 203 204static devclass_t nve_devclass; 205 206static int nve_pollinterval = 0; 207SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW, 208 &nve_pollinterval, 0, "delay between interface polls"); 209 210DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0); 211DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0); 212 213static struct nve_type nve_devs[] = { 214 {NVIDIA_VENDORID, NFORCE_MCPNET1_DEVICEID, 215 "NVIDIA nForce MCP Networking Adapter"}, 216 {NVIDIA_VENDORID, NFORCE_MCPNET2_DEVICEID, 217 "NVIDIA nForce MCP2 Networking Adapter"}, 218 {NVIDIA_VENDORID, NFORCE_MCPNET3_DEVICEID, 219 "NVIDIA nForce MCP3 Networking Adapter"}, 220 {NVIDIA_VENDORID, NFORCE_MCPNET4_DEVICEID, 221 "NVIDIA nForce MCP4 Networking Adapter"}, 222 {NVIDIA_VENDORID, NFORCE_MCPNET5_DEVICEID, 223 "NVIDIA nForce MCP5 Networking Adapter"}, 224 {NVIDIA_VENDORID, NFORCE_MCPNET6_DEVICEID, 225 "NVIDIA nForce MCP6 Networking Adapter"}, 226 {NVIDIA_VENDORID, NFORCE_MCPNET7_DEVICEID, 227 "NVIDIA nForce MCP7 Networking Adapter"}, 228 {NVIDIA_VENDORID, NFORCE_MCPNET8_DEVICEID, 229 "NVIDIA nForce MCP8 Networking Adapter"}, 230 {NVIDIA_VENDORID, NFORCE_MCPNET9_DEVICEID, 231 "NVIDIA nForce MCP9 Networking Adapter"}, 232 {NVIDIA_VENDORID, NFORCE_MCPNET10_DEVICEID, 233 "NVIDIA nForce MCP10 Networking Adapter"}, 234 {NVIDIA_VENDORID, NFORCE_MCPNET11_DEVICEID, 235 "NVIDIA nForce MCP11 Networking Adapter"}, 236 {NVIDIA_VENDORID, NFORCE_MCPNET12_DEVICEID, 237 "NVIDIA nForce MCP12 Networking Adapter"}, 238 {NVIDIA_VENDORID, NFORCE_MCPNET13_DEVICEID, 239 "NVIDIA nForce MCP13 Networking Adapter"}, 240 {0, 0, NULL} 241}; 242 243/* DMA MEM map callback function to get data segment physical address */ 244static void 245nve_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nsegs, int error) 246{ 247 if (error) 248 return; 249 250 KASSERT(nsegs == 1, 251 ("Too many DMA segments returned when mapping DMA memory")); 252 *(bus_addr_t *)arg = segs->ds_addr; 253} 254 255/* DMA RX map callback function to get data segment physical address */ 256static void 257nve_dmamap_rx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, 258 bus_size_t mapsize, int error) 259{ 260 if (error) 261 return; 262 *(bus_addr_t *)arg = segs->ds_addr; 263} 264 265/* 266 * DMA TX buffer callback function to allocate fragment data segment 267 * addresses 268 */ 269static void 270nve_dmamap_tx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, bus_size_t mapsize, int error) 271{ 272 struct nve_tx_desc *info; 273 274 info = arg; 275 if (error) 276 return; 277 KASSERT(nsegs < NV_MAX_FRAGS, 278 ("Too many DMA segments returned when mapping mbuf")); 279 info->numfrags = nsegs; 280 bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t)); 281} 282 283/* Probe for supported hardware ID's */ 284static int 285nve_probe(device_t dev) 286{ 287 struct nve_type *t; 288 289 t = nve_devs; 290 /* Check for matching PCI DEVICE ID's */ 291 while (t->name != NULL) { 292 if ((pci_get_vendor(dev) == t->vid_id) && 293 (pci_get_device(dev) == t->dev_id)) { 294 device_set_desc(dev, t->name); 295 return (0); 296 } 297 t++; 298 } 299 300 return (ENXIO); 301} 302 303/* Attach driver and initialise hardware for use */ 304static int 305nve_attach(device_t dev) 306{ 307 u_char eaddr[ETHER_ADDR_LEN]; 308 struct nve_softc *sc; 309 struct ifnet *ifp; 310 OS_API *osapi; 311 ADAPTER_OPEN_PARAMS OpenParams; 312 int error = 0, i, rid; 313 314 if (bootverbose) 315 device_printf(dev, "nvenetlib.o version %s\n", DRIVER_VERSION); 316 317 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n"); 318 319 sc = device_get_softc(dev); 320 321 /* Allocate mutex */ 322 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 323 MTX_DEF); 324 callout_init_mtx(&sc->stat_callout, &sc->mtx, 0); 325 326 sc->dev = dev; 327 328 /* Preinitialize data structures */ 329 bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS)); 330 331 /* Enable bus mastering */ 332 pci_enable_busmaster(dev); 333 334 /* Allocate memory mapped address space */ 335 rid = NV_RID; 336 sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, 337 RF_ACTIVE); 338 339 if (sc->res == NULL) { 340 device_printf(dev, "couldn't map memory\n"); 341 error = ENXIO; 342 goto fail; 343 } 344 sc->sc_st = rman_get_bustag(sc->res); 345 sc->sc_sh = rman_get_bushandle(sc->res); 346 347 /* Allocate interrupt */ 348 rid = 0; 349 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 350 RF_SHAREABLE | RF_ACTIVE); 351 352 if (sc->irq == NULL) { 353 device_printf(dev, "couldn't map interrupt\n"); 354 error = ENXIO; 355 goto fail; 356 } 357 /* Allocate DMA tags */ 358 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 359 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS, 360 NV_MAX_FRAGS, MCLBYTES, 0, 361 busdma_lock_mutex, &Giant, 362 &sc->mtag); 363 if (error) { 364 device_printf(dev, "couldn't allocate dma tag\n"); 365 goto fail; 366 } 367 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 368 BUS_SPACE_MAXADDR, NULL, NULL, 369 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1, 370 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0, 371 busdma_lock_mutex, &Giant, 372 &sc->rtag); 373 if (error) { 374 device_printf(dev, "couldn't allocate dma tag\n"); 375 goto fail; 376 } 377 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 378 BUS_SPACE_MAXADDR, NULL, NULL, 379 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1, 380 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0, 381 busdma_lock_mutex, &Giant, 382 &sc->ttag); 383 if (error) { 384 device_printf(dev, "couldn't allocate dma tag\n"); 385 goto fail; 386 } 387 /* Allocate DMA safe memory and get the DMA addresses. */ 388 error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc, 389 BUS_DMA_WAITOK, &sc->tmap); 390 if (error) { 391 device_printf(dev, "couldn't allocate dma memory\n"); 392 goto fail; 393 } 394 bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE); 395 error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc, 396 sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb, 397 &sc->tx_addr, 0); 398 if (error) { 399 device_printf(dev, "couldn't map dma memory\n"); 400 goto fail; 401 } 402 error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc, 403 BUS_DMA_WAITOK, &sc->rmap); 404 if (error) { 405 device_printf(dev, "couldn't allocate dma memory\n"); 406 goto fail; 407 } 408 bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE); 409 error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc, 410 sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb, 411 &sc->rx_addr, 0); 412 if (error) { 413 device_printf(dev, "couldn't map dma memory\n"); 414 goto fail; 415 } 416 /* Initialize rings. */ 417 if (nve_init_rings(sc)) { 418 device_printf(dev, "failed to init rings\n"); 419 error = ENXIO; 420 goto fail; 421 } 422 /* Setup NVIDIA API callback routines */ 423 osapi = &sc->osapi; 424 osapi->pOSCX = sc; 425 osapi->pfnAllocMemory = nve_osalloc; 426 osapi->pfnFreeMemory = nve_osfree; 427 osapi->pfnAllocMemoryEx = nve_osallocex; 428 osapi->pfnFreeMemoryEx = nve_osfreeex; 429 osapi->pfnClearMemory = nve_osclear; 430 osapi->pfnStallExecution = nve_osdelay; 431 osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf; 432 osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf; 433 osapi->pfnPacketWasSent = nve_ospackettx; 434 osapi->pfnPacketWasReceived = nve_ospacketrx; 435 osapi->pfnLinkStateHasChanged = nve_oslinkchg; 436 osapi->pfnAllocTimer = nve_osalloctimer; 437 osapi->pfnFreeTimer = nve_osfreetimer; 438 osapi->pfnInitializeTimer = nve_osinittimer; 439 osapi->pfnSetTimer = nve_ossettimer; 440 osapi->pfnCancelTimer = nve_oscanceltimer; 441 osapi->pfnPreprocessPacket = nve_ospreprocpkt; 442 osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq; 443 osapi->pfnIndicatePackets = nve_osindicatepkt; 444 osapi->pfnLockAlloc = nve_oslockalloc; 445 osapi->pfnLockAcquire = nve_oslockacquire; 446 osapi->pfnLockRelease = nve_oslockrelease; 447 osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt; 448 449 sc->linkup = FALSE; 450 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN; 451 452 /* TODO - We don't support hardware offload yet */ 453 sc->hwmode = 1; 454 sc->media = 0; 455 456 /* Set NVIDIA API startup parameters */ 457 OpenParams.MaxDpcLoop = 2; 458 OpenParams.MaxRxPkt = RX_RING_SIZE; 459 OpenParams.MaxTxPkt = TX_RING_SIZE; 460 OpenParams.SentPacketStatusSuccess = 1; 461 OpenParams.SentPacketStatusFailure = 0; 462 OpenParams.MaxRxPktToAccumulate = 6; 463 OpenParams.ulPollInterval = nve_pollinterval; 464 OpenParams.SetForcedModeEveryNthRxPacket = 0; 465 OpenParams.SetForcedModeEveryNthTxPacket = 0; 466 OpenParams.RxForcedInterrupt = 0; 467 OpenParams.TxForcedInterrupt = 0; 468 OpenParams.pOSApi = osapi; 469 OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res); 470 OpenParams.bASFEnabled = 0; 471 OpenParams.ulDescriptorVersion = sc->hwmode; 472 OpenParams.ulMaxPacketSize = sc->max_frame_size; 473 OpenParams.DeviceId = pci_get_device(dev); 474 475 /* Open NVIDIA Hardware API */ 476 error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr); 477 if (error) { 478 device_printf(dev, 479 "failed to open NVIDIA Hardware API: 0x%x\n", error); 480 goto fail; 481 } 482 483 /* TODO - Add support for MODE2 hardware offload */ 484 485 bzero(&sc->adapterdata, sizeof(sc->adapterdata)); 486 487 sc->adapterdata.ulMediaIF = sc->media; 488 sc->adapterdata.ulModeRegTxReadCompleteEnable = 1; 489 sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata); 490 491 /* MAC is loaded backwards into h/w reg */ 492 sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr); 493 for (i = 0; i < 6; i++) { 494 eaddr[i] = sc->original_mac_addr[5 - i]; 495 } 496 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr); 497 498 /* Display ethernet address ,... */ 499 device_printf(dev, "Ethernet address %6D\n", eaddr, ":"); 500 501 /* Allocate interface structures */ 502 ifp = sc->ifp = if_alloc(IFT_ETHER); 503 if (ifp == NULL) { 504 device_printf(dev, "can not if_alloc()\n"); 505 error = ENOSPC; 506 goto fail; 507 } 508 509 /* Probe device for MII interface to PHY */ 510 DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_phy_probe\n"); 511 if (mii_phy_probe(dev, &sc->miibus, nve_ifmedia_upd, nve_ifmedia_sts)) { 512 device_printf(dev, "MII without any phy!\n"); 513 error = ENXIO; 514 goto fail; 515 } 516 517 /* Setup interface parameters */ 518 ifp->if_softc = sc; 519 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 520 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 521 ifp->if_ioctl = nve_ioctl; 522 ifp->if_output = ether_output; 523 ifp->if_start = nve_ifstart; 524 ifp->if_watchdog = nve_watchdog; 525 ifp->if_timer = 0; 526 ifp->if_init = nve_init; 527 ifp->if_mtu = ETHERMTU; 528 ifp->if_baudrate = IF_Mbps(100); 529 ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1; 530 ifp->if_capabilities |= IFCAP_VLAN_MTU; 531 532 /* Attach to OS's managers. */ 533 ether_ifattach(ifp, eaddr); 534 535 /* Activate our interrupt handler. - attach last to avoid lock */ 536 error = bus_setup_intr(sc->dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 537 nve_intr, sc, &sc->sc_ih); 538 if (error) { 539 device_printf(sc->dev, "couldn't set up interrupt handler\n"); 540 goto fail; 541 } 542 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n"); 543 544fail: 545 if (error) 546 nve_detach(dev); 547 548 return (error); 549} 550 551/* Detach interface for module unload */ 552static int 553nve_detach(device_t dev) 554{ 555 struct nve_softc *sc = device_get_softc(dev); 556 struct ifnet *ifp; 557 558 KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized")); 559 560 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n"); 561 562 ifp = sc->ifp; 563 564 if (device_is_attached(dev)) { 565 NVE_LOCK(sc); 566 nve_stop(sc); 567 NVE_UNLOCK(sc); 568 callout_drain(&sc->stat_callout); 569 ether_ifdetach(ifp); 570 } 571 572 if (sc->miibus) 573 device_delete_child(dev, sc->miibus); 574 bus_generic_detach(dev); 575 576 /* Reload unreversed address back into MAC in original state */ 577 if (sc->original_mac_addr) 578 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, 579 sc->original_mac_addr); 580 581 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n"); 582 /* Detach from NVIDIA hardware API */ 583 if (sc->hwapi->pfnClose) 584 sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE); 585 /* Release resources */ 586 if (sc->sc_ih) 587 bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih); 588 if (sc->irq) 589 bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq); 590 if (sc->res) 591 bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res); 592 593 nve_free_rings(sc); 594 595 if (sc->tx_desc) { 596 bus_dmamap_unload(sc->rtag, sc->rmap); 597 bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap); 598 bus_dmamap_destroy(sc->rtag, sc->rmap); 599 } 600 if (sc->mtag) 601 bus_dma_tag_destroy(sc->mtag); 602 if (sc->ttag) 603 bus_dma_tag_destroy(sc->ttag); 604 if (sc->rtag) 605 bus_dma_tag_destroy(sc->rtag); 606 607 if (ifp) 608 if_free(ifp); 609 mtx_destroy(&sc->mtx); 610 611 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n"); 612 613 return (0); 614} 615 616/* Initialise interface and start it "RUNNING" */ 617static void 618nve_init(void *xsc) 619{ 620 struct nve_softc *sc = xsc; 621 622 NVE_LOCK(sc); 623 nve_init_locked(sc); 624 NVE_UNLOCK(sc); 625} 626 627static void 628nve_init_locked(struct nve_softc *sc) 629{ 630 struct ifnet *ifp; 631 int error; 632 633 NVE_LOCK_ASSERT(sc); 634 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup); 635 636 ifp = sc->ifp; 637 638 /* Do nothing if already running */ 639 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 640 return; 641 642 nve_stop(sc); 643 DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n"); 644 645 nve_ifmedia_upd_locked(ifp); 646 647 /* Setup Hardware interface and allocate memory structures */ 648 error = sc->hwapi->pfnInit(sc->hwapi->pADCX, 649 0, /* force speed */ 650 0, /* force full duplex */ 651 0, /* force mode */ 652 0, /* force async mode */ 653 &sc->linkup); 654 655 if (error) { 656 device_printf(sc->dev, 657 "failed to start NVIDIA Hardware interface\n"); 658 return; 659 } 660 /* Set the MAC address */ 661 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IF_LLADDR(sc->ifp)); 662 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 663 sc->hwapi->pfnStart(sc->hwapi->pADCX); 664 665 /* Setup multicast filter */ 666 nve_setmulti(sc); 667 668 /* Update interface parameters */ 669 ifp->if_drv_flags |= IFF_DRV_RUNNING; 670 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 671 672 callout_reset(&sc->stat_callout, hz, nve_tick, sc); 673 674 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n"); 675 676 return; 677} 678 679/* Stop interface activity ie. not "RUNNING" */ 680static void 681nve_stop(struct nve_softc *sc) 682{ 683 struct ifnet *ifp; 684 685 NVE_LOCK_ASSERT(sc); 686 687 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n"); 688 689 ifp = sc->ifp; 690 ifp->if_timer = 0; 691 692 /* Cancel tick timer */ 693 callout_stop(&sc->stat_callout); 694 695 /* Stop hardware activity */ 696 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); 697 sc->hwapi->pfnStop(sc->hwapi->pADCX, 0); 698 699 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n"); 700 /* Shutdown interface and deallocate memory buffers */ 701 if (sc->hwapi->pfnDeinit) 702 sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0); 703 704 sc->linkup = 0; 705 sc->cur_rx = 0; 706 sc->pending_rxs = 0; 707 sc->pending_txs = 0; 708 709 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 710 711 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n"); 712 713 return; 714} 715 716/* Shutdown interface for unload/reboot */ 717static void 718nve_shutdown(device_t dev) 719{ 720 struct nve_softc *sc; 721 722 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n"); 723 724 sc = device_get_softc(dev); 725 726 /* Stop hardware activity */ 727 NVE_LOCK(sc); 728 nve_stop(sc); 729 NVE_UNLOCK(sc); 730} 731 732/* Allocate TX ring buffers */ 733static int 734nve_init_rings(struct nve_softc *sc) 735{ 736 int error, i; 737 738 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n"); 739 740 sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0; 741 /* Initialise RX ring */ 742 for (i = 0; i < RX_RING_SIZE; i++) { 743 struct nve_rx_desc *desc = sc->rx_desc + i; 744 struct nve_map_buffer *buf = &desc->buf; 745 746 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 747 if (buf->mbuf == NULL) { 748 device_printf(sc->dev, "couldn't allocate mbuf\n"); 749 nve_free_rings(sc); 750 return (ENOBUFS); 751 } 752 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; 753 m_adj(buf->mbuf, ETHER_ALIGN); 754 755 error = bus_dmamap_create(sc->mtag, 0, &buf->map); 756 if (error) { 757 device_printf(sc->dev, "couldn't create dma map\n"); 758 nve_free_rings(sc); 759 return (error); 760 } 761 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, 762 nve_dmamap_rx_cb, &desc->paddr, 0); 763 if (error) { 764 device_printf(sc->dev, "couldn't dma map mbuf\n"); 765 nve_free_rings(sc); 766 return (error); 767 } 768 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); 769 770 desc->buflength = buf->mbuf->m_len; 771 desc->vaddr = mtod(buf->mbuf, caddr_t); 772 } 773 bus_dmamap_sync(sc->rtag, sc->rmap, 774 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 775 776 /* Initialize TX ring */ 777 for (i = 0; i < TX_RING_SIZE; i++) { 778 struct nve_tx_desc *desc = sc->tx_desc + i; 779 struct nve_map_buffer *buf = &desc->buf; 780 781 buf->mbuf = NULL; 782 783 error = bus_dmamap_create(sc->mtag, 0, &buf->map); 784 if (error) { 785 device_printf(sc->dev, "couldn't create dma map\n"); 786 nve_free_rings(sc); 787 return (error); 788 } 789 } 790 bus_dmamap_sync(sc->ttag, sc->tmap, 791 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 792 793 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n"); 794 795 return (error); 796} 797 798/* Free the TX ring buffers */ 799static void 800nve_free_rings(struct nve_softc *sc) 801{ 802 int i; 803 804 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n"); 805 806 for (i = 0; i < RX_RING_SIZE; i++) { 807 struct nve_rx_desc *desc = sc->rx_desc + i; 808 struct nve_map_buffer *buf = &desc->buf; 809 810 if (buf->mbuf) { 811 bus_dmamap_unload(sc->mtag, buf->map); 812 bus_dmamap_destroy(sc->mtag, buf->map); 813 m_freem(buf->mbuf); 814 } 815 buf->mbuf = NULL; 816 } 817 818 for (i = 0; i < TX_RING_SIZE; i++) { 819 struct nve_tx_desc *desc = sc->tx_desc + i; 820 struct nve_map_buffer *buf = &desc->buf; 821 822 if (buf->mbuf) { 823 bus_dmamap_unload(sc->mtag, buf->map); 824 bus_dmamap_destroy(sc->mtag, buf->map); 825 m_freem(buf->mbuf); 826 } 827 buf->mbuf = NULL; 828 } 829 830 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n"); 831} 832 833/* Main loop for sending packets from OS to interface */ 834static void 835nve_ifstart(struct ifnet *ifp) 836{ 837 struct nve_softc *sc = ifp->if_softc; 838 839 NVE_LOCK(sc); 840 nve_ifstart_locked(ifp); 841 NVE_UNLOCK(sc); 842} 843 844static void 845nve_ifstart_locked(struct ifnet *ifp) 846{ 847 struct nve_softc *sc = ifp->if_softc; 848 struct nve_map_buffer *buf; 849 struct mbuf *m0, *m; 850 struct nve_tx_desc *desc; 851 ADAPTER_WRITE_DATA txdata; 852 int error, i; 853 854 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n"); 855 856 NVE_LOCK_ASSERT(sc); 857 858 /* If link is down/busy or queue is empty do nothing */ 859 if (ifp->if_drv_flags & IFF_DRV_OACTIVE || 860 IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 861 return; 862 863 /* Transmit queued packets until sent or TX ring is full */ 864 while (sc->pending_txs < TX_RING_SIZE) { 865 desc = sc->tx_desc + sc->cur_tx; 866 buf = &desc->buf; 867 868 /* Get next packet to send. */ 869 IF_DEQUEUE(&ifp->if_snd, m0); 870 871 /* If nothing to send, return. */ 872 if (m0 == NULL) 873 return; 874 875 /* Map MBUF for DMA access */ 876 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0, 877 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); 878 879 if (error && error != EFBIG) { 880 m_freem(m0); 881 sc->tx_errors++; 882 continue; 883 } 884 /* 885 * Packet has too many fragments - defrag into new mbuf 886 * cluster 887 */ 888 if (error) { 889 m = m_defrag(m0, M_DONTWAIT); 890 if (m == NULL) { 891 m_freem(m0); 892 sc->tx_errors++; 893 continue; 894 } 895 m0 = m; 896 897 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m, 898 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); 899 if (error) { 900 m_freem(m); 901 sc->tx_errors++; 902 continue; 903 } 904 } 905 /* Do sync on DMA bounce buffer */ 906 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE); 907 908 buf->mbuf = m0; 909 txdata.ulNumberOfElements = desc->numfrags; 910 txdata.pvID = (PVOID)desc; 911 912 /* Put fragments into API element list */ 913 txdata.ulTotalLength = buf->mbuf->m_len; 914 for (i = 0; i < desc->numfrags; i++) { 915 txdata.sElement[i].ulLength = 916 (ulong)desc->frags[i].ds_len; 917 txdata.sElement[i].pPhysical = 918 (PVOID)desc->frags[i].ds_addr; 919 } 920 921 /* Send packet to Nvidia API for transmission */ 922 error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata); 923 924 switch (error) { 925 case ADAPTERERR_NONE: 926 /* Packet was queued in API TX queue successfully */ 927 sc->pending_txs++; 928 sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE; 929 break; 930 931 case ADAPTERERR_TRANSMIT_QUEUE_FULL: 932 /* The API TX queue is full - requeue the packet */ 933 device_printf(sc->dev, 934 "nve_ifstart: transmit queue is full\n"); 935 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 936 bus_dmamap_unload(sc->mtag, buf->map); 937 IF_PREPEND(&ifp->if_snd, buf->mbuf); 938 buf->mbuf = NULL; 939 return; 940 941 default: 942 /* The API failed to queue/send the packet so dump it */ 943 device_printf(sc->dev, "nve_ifstart: transmit error\n"); 944 bus_dmamap_unload(sc->mtag, buf->map); 945 m_freem(buf->mbuf); 946 buf->mbuf = NULL; 947 sc->tx_errors++; 948 return; 949 } 950 /* Set watchdog timer. */ 951 ifp->if_timer = 8; 952 953 /* Copy packet to BPF tap */ 954 BPF_MTAP(ifp, m0); 955 } 956 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 957 958 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n"); 959} 960 961/* Handle IOCTL events */ 962static int 963nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 964{ 965 struct nve_softc *sc = ifp->if_softc; 966 struct ifreq *ifr = (struct ifreq *) data; 967 struct mii_data *mii; 968 int error = 0; 969 970 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n"); 971 972 switch (command) { 973 case SIOCSIFMTU: 974 /* Set MTU size */ 975 NVE_LOCK(sc); 976 if (ifp->if_mtu == ifr->ifr_mtu) { 977 NVE_UNLOCK(sc); 978 break; 979 } 980 if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) { 981 ifp->if_mtu = ifr->ifr_mtu; 982 nve_stop(sc); 983 nve_init_locked(sc); 984 } else 985 error = EINVAL; 986 NVE_UNLOCK(sc); 987 break; 988 989 case SIOCSIFFLAGS: 990 /* Setup interface flags */ 991 NVE_LOCK(sc); 992 if (ifp->if_flags & IFF_UP) { 993 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 994 nve_init_locked(sc); 995 NVE_UNLOCK(sc); 996 break; 997 } 998 } else { 999 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1000 nve_stop(sc); 1001 NVE_UNLOCK(sc); 1002 break; 1003 } 1004 } 1005 /* Handle IFF_PROMISC and IFF_ALLMULTI flags. */ 1006 nve_setmulti(sc); 1007 NVE_UNLOCK(sc); 1008 break; 1009 1010 case SIOCADDMULTI: 1011 case SIOCDELMULTI: 1012 /* Setup multicast filter */ 1013 NVE_LOCK(sc); 1014 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1015 nve_setmulti(sc); 1016 } 1017 NVE_UNLOCK(sc); 1018 break; 1019 1020 case SIOCGIFMEDIA: 1021 case SIOCSIFMEDIA: 1022 /* Get/Set interface media parameters */ 1023 mii = device_get_softc(sc->miibus); 1024 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1025 break; 1026 1027 default: 1028 /* Everything else we forward to generic ether ioctl */ 1029 error = ether_ioctl(ifp, (int)command, data); 1030 break; 1031 } 1032 1033 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n"); 1034 1035 return (error); 1036} 1037 1038/* Interrupt service routine */ 1039static void 1040nve_intr(void *arg) 1041{ 1042 struct nve_softc *sc = arg; 1043 struct ifnet *ifp = sc->ifp; 1044 1045 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n"); 1046 1047 NVE_LOCK(sc); 1048 if (!ifp->if_flags & IFF_UP) { 1049 nve_stop(sc); 1050 NVE_UNLOCK(sc); 1051 return; 1052 } 1053 /* Handle interrupt event */ 1054 if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) { 1055 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX); 1056 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 1057 } 1058 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1059 nve_ifstart_locked(ifp); 1060 1061 /* If no pending packets we don't need a timeout */ 1062 if (sc->pending_txs == 0) 1063 sc->ifp->if_timer = 0; 1064 NVE_UNLOCK(sc); 1065 1066 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n"); 1067 1068 return; 1069} 1070 1071/* Setup multicast filters */ 1072static void 1073nve_setmulti(struct nve_softc *sc) 1074{ 1075 struct ifnet *ifp; 1076 struct ifmultiaddr *ifma; 1077 PACKET_FILTER hwfilter; 1078 int i; 1079 u_int8_t andaddr[6], oraddr[6]; 1080 1081 NVE_LOCK_ASSERT(sc); 1082 1083 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n"); 1084 1085 ifp = sc->ifp; 1086 1087 /* Initialize filter */ 1088 hwfilter.ulFilterFlags = 0; 1089 for (i = 0; i < 6; i++) { 1090 hwfilter.acMulticastAddress[i] = 0; 1091 hwfilter.acMulticastMask[i] = 0; 1092 } 1093 1094 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1095 /* Accept all packets */ 1096 hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS; 1097 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); 1098 return; 1099 } 1100 /* Setup multicast filter */ 1101 IF_ADDR_LOCK(ifp); 1102 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1103 u_char *addrp; 1104 1105 if (ifma->ifma_addr->sa_family != AF_LINK) 1106 continue; 1107 1108 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 1109 for (i = 0; i < 6; i++) { 1110 u_int8_t mcaddr = addrp[i]; 1111 andaddr[i] &= mcaddr; 1112 oraddr[i] |= mcaddr; 1113 } 1114 } 1115 IF_ADDR_UNLOCK(ifp); 1116 for (i = 0; i < 6; i++) { 1117 hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i]; 1118 hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]); 1119 } 1120 1121 /* Send filter to NVIDIA API */ 1122 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); 1123 1124 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n"); 1125 1126 return; 1127} 1128 1129/* Change the current media/mediaopts */ 1130static int 1131nve_ifmedia_upd(struct ifnet *ifp) 1132{ 1133 struct nve_softc *sc = ifp->if_softc; 1134 1135 NVE_LOCK(sc); 1136 nve_ifmedia_upd_locked(ifp); 1137 NVE_UNLOCK(sc); 1138 return (0); 1139} 1140 1141static void 1142nve_ifmedia_upd_locked(struct ifnet *ifp) 1143{ 1144 struct nve_softc *sc = ifp->if_softc; 1145 struct mii_data *mii; 1146 1147 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n"); 1148 1149 NVE_LOCK_ASSERT(sc); 1150 mii = device_get_softc(sc->miibus); 1151 1152 if (mii->mii_instance) { 1153 struct mii_softc *miisc; 1154 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 1155 miisc = LIST_NEXT(miisc, mii_list)) { 1156 mii_phy_reset(miisc); 1157 } 1158 } 1159 mii_mediachg(mii); 1160} 1161 1162/* Update current miibus PHY status of media */ 1163static void 1164nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1165{ 1166 struct nve_softc *sc; 1167 struct mii_data *mii; 1168 1169 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n"); 1170 1171 sc = ifp->if_softc; 1172 NVE_LOCK(sc); 1173 mii = device_get_softc(sc->miibus); 1174 mii_pollstat(mii); 1175 NVE_UNLOCK(sc); 1176 1177 ifmr->ifm_active = mii->mii_media_active; 1178 ifmr->ifm_status = mii->mii_media_status; 1179 1180 return; 1181} 1182 1183/* miibus tick timer - maintain link status */ 1184static void 1185nve_tick(void *xsc) 1186{ 1187 struct nve_softc *sc = xsc; 1188 struct mii_data *mii; 1189 struct ifnet *ifp; 1190 1191 NVE_LOCK_ASSERT(sc); 1192 1193 ifp = sc->ifp; 1194 nve_update_stats(sc); 1195 1196 mii = device_get_softc(sc->miibus); 1197 mii_tick(mii); 1198 1199 if (mii->mii_media_status & IFM_ACTIVE && 1200 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1201 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1202 nve_ifstart_locked(ifp); 1203 } 1204 callout_reset(&sc->stat_callout, hz, nve_tick, sc); 1205 1206 return; 1207} 1208 1209/* Update ifnet data structure with collected interface stats from API */ 1210static void 1211nve_update_stats(struct nve_softc *sc) 1212{ 1213 struct ifnet *ifp = sc->ifp; 1214 ADAPTER_STATS stats; 1215 1216 NVE_LOCK_ASSERT(sc); 1217 1218 if (sc->hwapi) { 1219 sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats); 1220 1221 ifp->if_ipackets = stats.ulSuccessfulReceptions; 1222 ifp->if_ierrors = stats.ulMissedFrames + 1223 stats.ulFailedReceptions + 1224 stats.ulCRCErrors + 1225 stats.ulFramingErrors + 1226 stats.ulOverFlowErrors; 1227 1228 ifp->if_opackets = stats.ulSuccessfulTransmissions; 1229 ifp->if_oerrors = sc->tx_errors + 1230 stats.ulFailedTransmissions + 1231 stats.ulRetryErrors + 1232 stats.ulUnderflowErrors + 1233 stats.ulLossOfCarrierErrors + 1234 stats.ulLateCollisionErrors; 1235 1236 ifp->if_collisions = stats.ulLateCollisionErrors; 1237 } 1238 1239 return; 1240} 1241 1242/* miibus Read PHY register wrapper - calls Nvidia API entry point */ 1243static int 1244nve_miibus_readreg(device_t dev, int phy, int reg) 1245{ 1246 struct nve_softc *sc = device_get_softc(dev); 1247 ULONG data; 1248 1249 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n"); 1250 1251 ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data); 1252 1253 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n"); 1254 1255 return (data); 1256} 1257 1258/* miibus Write PHY register wrapper - calls Nvidia API entry point */ 1259static void 1260nve_miibus_writereg(device_t dev, int phy, int reg, int data) 1261{ 1262 struct nve_softc *sc = device_get_softc(dev); 1263 1264 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n"); 1265 1266 ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data); 1267 1268 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n"); 1269 1270 return; 1271} 1272 1273/* Watchdog timer to prevent PHY lockups */ 1274static void 1275nve_watchdog(struct ifnet *ifp) 1276{ 1277 struct nve_softc *sc = ifp->if_softc; 1278 1279 NVE_LOCK(sc); 1280 1281 /* 1282 * The nvidia driver blob defers tx completion notifications. 1283 * Thus, sometimes the watchdog timer will go off when the 1284 * tx engine is fine, but the tx completions are just deferred. 1285 * Try kicking the driver blob to clear out any pending tx 1286 * completions. If that clears up all the pending tx 1287 * operations, then just return without printing the warning 1288 * message or resetting the adapter. 1289 */ 1290 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); 1291 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX); 1292 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 1293 if (sc->pending_txs == 0) { 1294 NVE_UNLOCK(sc); 1295 return; 1296 } 1297 1298 device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs); 1299 1300 sc->tx_errors++; 1301 1302 nve_stop(sc); 1303 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1304 nve_init_locked(sc); 1305 1306 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1307 nve_ifstart_locked(ifp); 1308 NVE_UNLOCK(sc); 1309 1310 return; 1311} 1312 1313/* --- Start of NVOSAPI interface --- */ 1314 1315/* Allocate DMA enabled general use memory for API */ 1316static NV_SINT32 1317nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem) 1318{ 1319 struct nve_softc *sc; 1320 bus_addr_t mem_physical; 1321 1322 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength); 1323 1324 sc = (struct nve_softc *)ctx; 1325 1326 mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF, 1327 M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0); 1328 1329 if (!mem->pLogical) { 1330 device_printf(sc->dev, "memory allocation failed\n"); 1331 return (0); 1332 } 1333 memset(mem->pLogical, 0, (ulong)mem->uiLength); 1334 mem_physical = vtophys(mem->pLogical); 1335 mem->pPhysical = (PVOID)mem_physical; 1336 1337 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n", 1338 (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength); 1339 1340 return (1); 1341} 1342 1343/* Free allocated memory */ 1344static NV_SINT32 1345nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem) 1346{ 1347 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n", 1348 (uint)mem->pLogical, (uint) mem->uiLength); 1349 1350 contigfree(mem->pLogical, PAGE_SIZE, M_DEVBUF); 1351 return (1); 1352} 1353 1354/* Copied directly from nvnet.c */ 1355static NV_SINT32 1356nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) 1357{ 1358 MEMORY_BLOCK mem_block; 1359 1360 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n"); 1361 1362 mem_block_ex->pLogical = NULL; 1363 mem_block_ex->uiLengthOrig = mem_block_ex->uiLength; 1364 1365 if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) && 1366 (mem_block_ex->AlignmentSize > 1)) { 1367 DEBUGOUT(NVE_DEBUG_API, " aligning on %d\n", 1368 mem_block_ex->AlignmentSize); 1369 mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize; 1370 } 1371 mem_block.uiLength = mem_block_ex->uiLengthOrig; 1372 1373 if (nve_osalloc(ctx, &mem_block) == 0) { 1374 return (0); 1375 } 1376 mem_block_ex->pLogicalOrig = mem_block.pLogical; 1377 mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical; 1378 mem_block_ex->pPhysicalOrigHigh = 0; 1379 1380 mem_block_ex->pPhysical = mem_block.pPhysical; 1381 mem_block_ex->pLogical = mem_block.pLogical; 1382 1383 if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) { 1384 unsigned int offset; 1385 offset = mem_block_ex->pPhysicalOrigLow & 1386 (mem_block_ex->AlignmentSize - 1); 1387 1388 if (offset) { 1389 mem_block_ex->pPhysical = 1390 (PVOID)((ulong)mem_block_ex->pPhysical + 1391 mem_block_ex->AlignmentSize - offset); 1392 mem_block_ex->pLogical = 1393 (PVOID)((ulong)mem_block_ex->pLogical + 1394 mem_block_ex->AlignmentSize - offset); 1395 } /* if (offset) */ 1396 } /* if (mem_block_ex->uiLength != *mem_block_ex->uiLengthOrig) */ 1397 return (1); 1398} 1399 1400/* Copied directly from nvnet.c */ 1401static NV_SINT32 1402nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) 1403{ 1404 MEMORY_BLOCK mem_block; 1405 1406 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n"); 1407 1408 mem_block.pLogical = mem_block_ex->pLogicalOrig; 1409 mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow); 1410 mem_block.uiLength = mem_block_ex->uiLengthOrig; 1411 1412 return (nve_osfree(ctx, &mem_block)); 1413} 1414 1415/* Clear memory region */ 1416static NV_SINT32 1417nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length) 1418{ 1419 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n"); 1420 memset(mem, 0, length); 1421 return (1); 1422} 1423 1424/* Sleep for a tick */ 1425static NV_SINT32 1426nve_osdelay(PNV_VOID ctx, NV_UINT32 usec) 1427{ 1428 DELAY(usec); 1429 return (1); 1430} 1431 1432/* Allocate memory for rx buffer */ 1433static NV_SINT32 1434nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id) 1435{ 1436 struct nve_softc *sc = ctx; 1437 struct nve_rx_desc *desc; 1438 struct nve_map_buffer *buf; 1439 int error; 1440 1441 if (device_is_attached(sc->dev)) 1442 NVE_LOCK_ASSERT(sc); 1443 1444 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n"); 1445 1446 if (sc->pending_rxs == RX_RING_SIZE) { 1447 device_printf(sc->dev, "rx ring buffer is full\n"); 1448 goto fail; 1449 } 1450 desc = sc->rx_desc + sc->cur_rx; 1451 buf = &desc->buf; 1452 1453 if (buf->mbuf == NULL) { 1454 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1455 if (buf->mbuf == NULL) { 1456 device_printf(sc->dev, "failed to allocate memory\n"); 1457 goto fail; 1458 } 1459 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; 1460 m_adj(buf->mbuf, ETHER_ALIGN); 1461 1462 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, 1463 nve_dmamap_rx_cb, &desc->paddr, 0); 1464 if (error) { 1465 device_printf(sc->dev, "failed to dmamap mbuf\n"); 1466 m_freem(buf->mbuf); 1467 buf->mbuf = NULL; 1468 goto fail; 1469 } 1470 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); 1471 desc->buflength = buf->mbuf->m_len; 1472 desc->vaddr = mtod(buf->mbuf, caddr_t); 1473 } 1474 sc->pending_rxs++; 1475 sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE; 1476 1477 mem->pLogical = (void *)desc->vaddr; 1478 mem->pPhysical = (void *)desc->paddr; 1479 mem->uiLength = desc->buflength; 1480 *id = (void *)desc; 1481 1482 return (1); 1483 1484fail: 1485 return (0); 1486} 1487 1488/* Free the rx buffer */ 1489static NV_SINT32 1490nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id) 1491{ 1492 struct nve_softc *sc = ctx; 1493 struct nve_rx_desc *desc; 1494 struct nve_map_buffer *buf; 1495 1496 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n"); 1497 1498 desc = (struct nve_rx_desc *) id; 1499 buf = &desc->buf; 1500 1501 if (buf->mbuf) { 1502 bus_dmamap_unload(sc->mtag, buf->map); 1503 bus_dmamap_destroy(sc->mtag, buf->map); 1504 m_freem(buf->mbuf); 1505 } 1506 sc->pending_rxs--; 1507 buf->mbuf = NULL; 1508 1509 return (1); 1510} 1511 1512/* This gets called by the Nvidia API after our TX packet has been sent */ 1513static NV_SINT32 1514nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success) 1515{ 1516 struct nve_softc *sc = ctx; 1517 struct nve_map_buffer *buf; 1518 struct nve_tx_desc *desc = (struct nve_tx_desc *) id; 1519 struct ifnet *ifp; 1520 1521 NVE_LOCK_ASSERT(sc); 1522 1523 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n"); 1524 1525 ifp = sc->ifp; 1526 buf = &desc->buf; 1527 sc->pending_txs--; 1528 1529 /* Unload and free mbuf cluster */ 1530 if (buf->mbuf == NULL) 1531 goto fail; 1532 1533 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE); 1534 bus_dmamap_unload(sc->mtag, buf->map); 1535 m_freem(buf->mbuf); 1536 buf->mbuf = NULL; 1537 1538 /* Send more packets if we have them */ 1539 if (sc->pending_txs < TX_RING_SIZE) 1540 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1541 1542 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->pending_txs < TX_RING_SIZE) 1543 nve_ifstart_locked(ifp); 1544 1545fail: 1546 1547 return (1); 1548} 1549 1550/* This gets called by the Nvidia API when a new packet has been received */ 1551/* XXX What is newbuf used for? XXX */ 1552static NV_SINT32 1553nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf, 1554 NV_UINT8 priority) 1555{ 1556 struct nve_softc *sc = ctx; 1557 struct ifnet *ifp; 1558 struct nve_rx_desc *desc; 1559 struct nve_map_buffer *buf; 1560 ADAPTER_READ_DATA *readdata; 1561 struct mbuf *m; 1562 1563 NVE_LOCK_ASSERT(sc); 1564 1565 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n"); 1566 1567 ifp = sc->ifp; 1568 1569 readdata = (ADAPTER_READ_DATA *) data; 1570 desc = readdata->pvID; 1571 buf = &desc->buf; 1572 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1573 1574 if (success) { 1575 /* Sync DMA bounce buffer. */ 1576 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1577 1578 /* First mbuf in packet holds the ethernet and packet headers */ 1579 buf->mbuf->m_pkthdr.rcvif = ifp; 1580 buf->mbuf->m_pkthdr.len = buf->mbuf->m_len = 1581 readdata->ulTotalLength; 1582 1583 bus_dmamap_unload(sc->mtag, buf->map); 1584 1585 /* Blat the mbuf pointer, kernel will free the mbuf cluster */ 1586 m = buf->mbuf; 1587 buf->mbuf = NULL; 1588 1589 /* Give mbuf to OS. */ 1590 NVE_UNLOCK(sc); 1591 (*ifp->if_input)(ifp, m); 1592 NVE_LOCK(sc); 1593 if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH) 1594 ifp->if_imcasts++; 1595 1596 } else { 1597 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1598 bus_dmamap_unload(sc->mtag, buf->map); 1599 m_freem(buf->mbuf); 1600 buf->mbuf = NULL; 1601 } 1602 1603 sc->cur_rx = desc - sc->rx_desc; 1604 sc->pending_rxs--; 1605 1606 return (1); 1607} 1608 1609/* This gets called by NVIDIA API when the PHY link state changes */ 1610static NV_SINT32 1611nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled) 1612{ 1613 1614 DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n"); 1615 1616 return (1); 1617} 1618 1619/* Setup a watchdog timer */ 1620static NV_SINT32 1621nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer) 1622{ 1623 struct nve_softc *sc = (struct nve_softc *)ctx; 1624 1625 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n"); 1626 1627 callout_init(&sc->ostimer, CALLOUT_MPSAFE); 1628 *timer = &sc->ostimer; 1629 1630 return (1); 1631} 1632 1633/* Free the timer */ 1634static NV_SINT32 1635nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer) 1636{ 1637 1638 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n"); 1639 1640 callout_drain((struct callout *)timer); 1641 1642 return (1); 1643} 1644 1645/* Setup timer parameters */ 1646static NV_SINT32 1647nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters) 1648{ 1649 struct nve_softc *sc = (struct nve_softc *)ctx; 1650 1651 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n"); 1652 1653 sc->ostimer_func = func; 1654 sc->ostimer_params = parameters; 1655 1656 return (1); 1657} 1658 1659/* Set the timer to go off */ 1660static NV_SINT32 1661nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay) 1662{ 1663 struct nve_softc *sc = ctx; 1664 1665 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n"); 1666 1667 callout_reset((struct callout *)timer, delay, sc->ostimer_func, 1668 sc->ostimer_params); 1669 1670 return (1); 1671} 1672 1673/* Cancel the timer */ 1674static NV_SINT32 1675nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer) 1676{ 1677 1678 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n"); 1679 1680 callout_stop((struct callout *)timer); 1681 1682 return (1); 1683} 1684 1685static NV_SINT32 1686nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id, 1687 NV_UINT8 *newbuffer, NV_UINT8 priority) 1688{ 1689 1690 /* Not implemented */ 1691 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n"); 1692 1693 return (1); 1694} 1695 1696static PNV_VOID 1697nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata) 1698{ 1699 1700 /* Not implemented */ 1701 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n"); 1702 1703 return (NULL); 1704} 1705 1706static NV_SINT32 1707nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno) 1708{ 1709 1710 /* Not implemented */ 1711 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n"); 1712 1713 return (1); 1714} 1715 1716/* Allocate mutex context (already done in nve_attach) */ 1717static NV_SINT32 1718nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock) 1719{ 1720 struct nve_softc *sc = (struct nve_softc *)ctx; 1721 1722 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n"); 1723 1724 *pLock = (void **)sc; 1725 1726 return (1); 1727} 1728 1729/* Obtain a spin lock */ 1730static NV_SINT32 1731nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock) 1732{ 1733 1734 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n"); 1735 1736 return (1); 1737} 1738 1739/* Release lock */ 1740static NV_SINT32 1741nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock) 1742{ 1743 1744 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n"); 1745 1746 return (1); 1747} 1748 1749/* I have no idea what this is for */ 1750static PNV_VOID 1751nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata) 1752{ 1753 1754 /* Not implemented */ 1755 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n"); 1756 panic("nve: nve_osreturnbufvirtual not implemented\n"); 1757 1758 return (NULL); 1759} 1760 1761/* --- End on NVOSAPI interface --- */ 1762