if_nve.c revision 158123
/*-
 * Copyright (c) 2005 by David E. O'Brien <obrien@FreeBSD.org>.
 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $
 */

/*
 * NVIDIA nForce MCP Networking Adapter driver
 *
 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by
 * NVIDIA through their web site.
 *
 * All mainstream nForce and nForce2 motherboards are supported. This module
 * is as stable as, and sometimes more stable than, the Linux version.
 * (Recent Linux stability issues seem to be related to some issues with
 * newer distributions using GCC 3.x; however, this does not appear to
 * affect FreeBSD 5.x.)
 *
 * In accordance with the NVIDIA distribution license it is necessary to
 * link this module against the nvlibnet.o binary object included in the
 * Linux driver source distribution. The binary component is not modified in
 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c
 * Linux kernel module "wrapper".
 *
 * The Linux driver uses a common code API that is shared between Win32 and
 * i386 Linux. This abstracts the low-level driver functions and uses
 * callbacks and hooks to access the underlying hardware device. By using
 * this same API in a FreeBSD kernel module it is possible to support the
 * hardware without breaching the Linux source distribution's licensing
 * requirements, or obtaining the hardware programming specifications.
 *
 * Although not conventional, it works, and given the relatively small
 * amount of hardware-centric code, it is hopefully no more buggy than its
 * Linux counterpart.
 *
 * NVIDIA now supports the nForce3 AMD64 platform; however, I have been
 * unable to access such a system to verify support. However, the code is
 * reported to work with little modification when compiled with the AMD64
 * version of the NVIDIA Linux library. All that should be necessary to make
 * the driver work is to link it directly into the kernel, instead of as a
 * module, and apply the docs/amd64.diff patch in this source distribution to
 * the NVIDIA Linux driver source.
 *
 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well
 * as recent versions of DragonFly.
 *
 * Written by Quinton Dolan <q@onthenet.com.au>
 * Portions based on existing FreeBSD network drivers.
 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nve/if_nve.c 158123 2006-04-28 20:08:16Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/module.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miibus_if.h"

/* Include NVIDIA Linux driver header files */
#include <contrib/dev/nve/nvenet_version.h>
#define linux
#include <contrib/dev/nve/basetype.h>
#include <contrib/dev/nve/phy.h>
#include "os+%DIKED-nve.h"
#include <contrib/dev/nve/drvinfo.h>
#include <contrib/dev/nve/adapter.h>
#undef linux

#include <dev/nve/if_nvereg.h>

MODULE_DEPEND(nve, pci, 1, 1, 1);
MODULE_DEPEND(nve, ether, 1, 1, 1);
MODULE_DEPEND(nve, miibus, 1, 1, 1);

static int	nve_probe(device_t);
static int	nve_attach(device_t);
static int	nve_detach(device_t);
static void	nve_init(void *);
static void	nve_init_locked(struct nve_softc *);
static void	nve_stop(struct nve_softc *);
static void	nve_shutdown(device_t);
static int	nve_init_rings(struct nve_softc *);
static void	nve_free_rings(struct nve_softc *);

static void	nve_ifstart(struct ifnet *);
static void	nve_ifstart_locked(struct ifnet *);
static int	nve_ioctl(struct ifnet *, u_long, caddr_t);
static void	nve_intr(void *);
static void	nve_tick(void *);
static void	nve_setmulti(struct nve_softc *);
static void	nve_watchdog(struct ifnet *);
static void	nve_update_stats(struct nve_softc *);

static int	nve_ifmedia_upd(struct ifnet *);
static void	nve_ifmedia_upd_locked(struct ifnet *);
static void	nve_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	nve_miibus_readreg(device_t, int, int);
static void	nve_miibus_writereg(device_t, int, int, int);

static void	nve_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);

static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
165static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8); 166static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32); 167static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *); 168static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID); 169static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID); 170static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32); 171static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID); 172 173static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8); 174static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID); 175static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32); 176static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *); 177static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID); 178static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID); 179static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID); 180 181static device_method_t nve_methods[] = { 182 /* Device interface */ 183 DEVMETHOD(device_probe, nve_probe), 184 DEVMETHOD(device_attach, nve_attach), 185 DEVMETHOD(device_detach, nve_detach), 186 DEVMETHOD(device_shutdown, nve_shutdown), 187 188 /* Bus interface */ 189 DEVMETHOD(bus_print_child, bus_generic_print_child), 190 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 191 192 /* MII interface */ 193 DEVMETHOD(miibus_readreg, nve_miibus_readreg), 194 DEVMETHOD(miibus_writereg, nve_miibus_writereg), 195 196 {0, 0} 197}; 198 199static driver_t nve_driver = { 200 "nve", 201 nve_methods, 202 sizeof(struct nve_softc) 203}; 204 205static devclass_t nve_devclass; 206 207static int nve_pollinterval = 0; 208SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW, 209 &nve_pollinterval, 0, "delay between interface polls"); 210 211DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0); 212DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0); 213 214static struct nve_type nve_devs[] = { 215 {NVIDIA_VENDORID, NFORCE_MCPNET1_DEVICEID, 216 "NVIDIA nForce MCP Networking Adapter"}, 217 {NVIDIA_VENDORID, NFORCE_MCPNET2_DEVICEID, 218 "NVIDIA nForce MCP2 Networking Adapter"}, 219 {NVIDIA_VENDORID, NFORCE_MCPNET3_DEVICEID, 220 "NVIDIA nForce MCP3 Networking Adapter"}, 221 {NVIDIA_VENDORID, NFORCE_MCPNET4_DEVICEID, 222 "NVIDIA nForce MCP4 Networking Adapter"}, 223 {NVIDIA_VENDORID, NFORCE_MCPNET5_DEVICEID, 224 "NVIDIA nForce MCP5 Networking Adapter"}, 225 {NVIDIA_VENDORID, NFORCE_MCPNET6_DEVICEID, 226 "NVIDIA nForce MCP6 Networking Adapter"}, 227 {NVIDIA_VENDORID, NFORCE_MCPNET7_DEVICEID, 228 "NVIDIA nForce MCP7 Networking Adapter"}, 229 {NVIDIA_VENDORID, NFORCE_MCPNET8_DEVICEID, 230 "NVIDIA nForce MCP8 Networking Adapter"}, 231 {NVIDIA_VENDORID, NFORCE_MCPNET9_DEVICEID, 232 "NVIDIA nForce MCP9 Networking Adapter"}, 233 {NVIDIA_VENDORID, NFORCE_MCPNET10_DEVICEID, 234 "NVIDIA nForce MCP10 Networking Adapter"}, 235 {NVIDIA_VENDORID, NFORCE_MCPNET11_DEVICEID, 236 "NVIDIA nForce MCP11 Networking Adapter"}, 237 {NVIDIA_VENDORID, NFORCE_MCPNET12_DEVICEID, 238 "NVIDIA nForce MCP12 Networking Adapter"}, 239 {NVIDIA_VENDORID, NFORCE_MCPNET13_DEVICEID, 240 "NVIDIA nForce MCP13 Networking Adapter"}, 241 {0, 0, NULL} 242}; 243 244/* DMA MEM map callback function to get data segment physical address */ 245static void 246nve_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nsegs, int error) 247{ 248 if (error) 249 return; 250 251 KASSERT(nsegs == 1, 252 ("Too many DMA segments returned when mapping DMA memory")); 253 *(bus_addr_t *)arg = 
segs->ds_addr; 254} 255 256/* DMA RX map callback function to get data segment physical address */ 257static void 258nve_dmamap_rx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, 259 bus_size_t mapsize, int error) 260{ 261 if (error) 262 return; 263 *(bus_addr_t *)arg = segs->ds_addr; 264} 265 266/* 267 * DMA TX buffer callback function to allocate fragment data segment 268 * addresses 269 */ 270static void 271nve_dmamap_tx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, bus_size_t mapsize, int error) 272{ 273 struct nve_tx_desc *info; 274 275 info = arg; 276 if (error) 277 return; 278 KASSERT(nsegs < NV_MAX_FRAGS, 279 ("Too many DMA segments returned when mapping mbuf")); 280 info->numfrags = nsegs; 281 bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t)); 282} 283 284/* Probe for supported hardware ID's */ 285static int 286nve_probe(device_t dev) 287{ 288 struct nve_type *t; 289 290 t = nve_devs; 291 /* Check for matching PCI DEVICE ID's */ 292 while (t->name != NULL) { 293 if ((pci_get_vendor(dev) == t->vid_id) && 294 (pci_get_device(dev) == t->dev_id)) { 295 device_set_desc(dev, t->name); 296 return (0); 297 } 298 t++; 299 } 300 301 return (ENXIO); 302} 303 304/* Attach driver and initialise hardware for use */ 305static int 306nve_attach(device_t dev) 307{ 308 u_char eaddr[ETHER_ADDR_LEN]; 309 struct nve_softc *sc; 310 struct ifnet *ifp; 311 OS_API *osapi; 312 ADAPTER_OPEN_PARAMS OpenParams; 313 int error = 0, i, rid; 314 315 if (bootverbose) 316 device_printf(dev, "nvenetlib.o version %s\n", DRIVER_VERSION); 317 318 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n"); 319 320 sc = device_get_softc(dev); 321 322 /* Allocate mutex */ 323 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 324 MTX_DEF); 325 callout_init_mtx(&sc->stat_callout, &sc->mtx, 0); 326 327 sc->dev = dev; 328 329 /* Preinitialize data structures */ 330 bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS)); 331 332 /* Enable bus mastering */ 333 pci_enable_busmaster(dev); 334 335 /* Allocate memory mapped address space */ 336 rid = NV_RID; 337 sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, 338 RF_ACTIVE); 339 340 if (sc->res == NULL) { 341 device_printf(dev, "couldn't map memory\n"); 342 error = ENXIO; 343 goto fail; 344 } 345 sc->sc_st = rman_get_bustag(sc->res); 346 sc->sc_sh = rman_get_bushandle(sc->res); 347 348 /* Allocate interrupt */ 349 rid = 0; 350 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 351 RF_SHAREABLE | RF_ACTIVE); 352 353 if (sc->irq == NULL) { 354 device_printf(dev, "couldn't map interrupt\n"); 355 error = ENXIO; 356 goto fail; 357 } 358 /* Allocate DMA tags */ 359 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 360 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS, 361 NV_MAX_FRAGS, MCLBYTES, 0, 362 busdma_lock_mutex, &Giant, 363 &sc->mtag); 364 if (error) { 365 device_printf(dev, "couldn't allocate dma tag\n"); 366 goto fail; 367 } 368 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 369 BUS_SPACE_MAXADDR, NULL, NULL, 370 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1, 371 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0, 372 busdma_lock_mutex, &Giant, 373 &sc->rtag); 374 if (error) { 375 device_printf(dev, "couldn't allocate dma tag\n"); 376 goto fail; 377 } 378 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 379 BUS_SPACE_MAXADDR, NULL, NULL, 380 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1, 381 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0, 382 busdma_lock_mutex, &Giant, 383 &sc->ttag); 384 
if (error) { 385 device_printf(dev, "couldn't allocate dma tag\n"); 386 goto fail; 387 } 388 /* Allocate DMA safe memory and get the DMA addresses. */ 389 error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc, 390 BUS_DMA_WAITOK, &sc->tmap); 391 if (error) { 392 device_printf(dev, "couldn't allocate dma memory\n"); 393 goto fail; 394 } 395 bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE); 396 error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc, 397 sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb, 398 &sc->tx_addr, 0); 399 if (error) { 400 device_printf(dev, "couldn't map dma memory\n"); 401 goto fail; 402 } 403 error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc, 404 BUS_DMA_WAITOK, &sc->rmap); 405 if (error) { 406 device_printf(dev, "couldn't allocate dma memory\n"); 407 goto fail; 408 } 409 bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE); 410 error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc, 411 sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb, 412 &sc->rx_addr, 0); 413 if (error) { 414 device_printf(dev, "couldn't map dma memory\n"); 415 goto fail; 416 } 417 /* Initialize rings. */ 418 if (nve_init_rings(sc)) { 419 device_printf(dev, "failed to init rings\n"); 420 error = ENXIO; 421 goto fail; 422 } 423 /* Setup NVIDIA API callback routines */ 424 osapi = &sc->osapi; 425 osapi->pOSCX = sc; 426 osapi->pfnAllocMemory = nve_osalloc; 427 osapi->pfnFreeMemory = nve_osfree; 428 osapi->pfnAllocMemoryEx = nve_osallocex; 429 osapi->pfnFreeMemoryEx = nve_osfreeex; 430 osapi->pfnClearMemory = nve_osclear; 431 osapi->pfnStallExecution = nve_osdelay; 432 osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf; 433 osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf; 434 osapi->pfnPacketWasSent = nve_ospackettx; 435 osapi->pfnPacketWasReceived = nve_ospacketrx; 436 osapi->pfnLinkStateHasChanged = nve_oslinkchg; 437 osapi->pfnAllocTimer = nve_osalloctimer; 438 osapi->pfnFreeTimer = nve_osfreetimer; 439 osapi->pfnInitializeTimer = nve_osinittimer; 440 osapi->pfnSetTimer = nve_ossettimer; 441 osapi->pfnCancelTimer = nve_oscanceltimer; 442 osapi->pfnPreprocessPacket = nve_ospreprocpkt; 443 osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq; 444 osapi->pfnIndicatePackets = nve_osindicatepkt; 445 osapi->pfnLockAlloc = nve_oslockalloc; 446 osapi->pfnLockAcquire = nve_oslockacquire; 447 osapi->pfnLockRelease = nve_oslockrelease; 448 osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt; 449 450 sc->linkup = FALSE; 451 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN; 452 453 /* TODO - We don't support hardware offload yet */ 454 sc->hwmode = 1; 455 sc->media = 0; 456 457 /* Set NVIDIA API startup parameters */ 458 OpenParams.MaxDpcLoop = 2; 459 OpenParams.MaxRxPkt = RX_RING_SIZE; 460 OpenParams.MaxTxPkt = TX_RING_SIZE; 461 OpenParams.SentPacketStatusSuccess = 1; 462 OpenParams.SentPacketStatusFailure = 0; 463 OpenParams.MaxRxPktToAccumulate = 6; 464 OpenParams.ulPollInterval = nve_pollinterval; 465 OpenParams.SetForcedModeEveryNthRxPacket = 0; 466 OpenParams.SetForcedModeEveryNthTxPacket = 0; 467 OpenParams.RxForcedInterrupt = 0; 468 OpenParams.TxForcedInterrupt = 0; 469 OpenParams.pOSApi = osapi; 470 OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res); 471 OpenParams.bASFEnabled = 0; 472 OpenParams.ulDescriptorVersion = sc->hwmode; 473 OpenParams.ulMaxPacketSize = sc->max_frame_size; 474 OpenParams.DeviceId = pci_get_device(dev); 475 476 /* Open NVIDIA Hardware API */ 477 error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr); 
478 if (error) { 479 device_printf(dev, 480 "failed to open NVIDIA Hardware API: 0x%x\n", error); 481 goto fail; 482 } 483 484 /* TODO - Add support for MODE2 hardware offload */ 485 486 bzero(&sc->adapterdata, sizeof(sc->adapterdata)); 487 488 sc->adapterdata.ulMediaIF = sc->media; 489 sc->adapterdata.ulModeRegTxReadCompleteEnable = 1; 490 sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata); 491 492 /* MAC is loaded backwards into h/w reg */ 493 sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr); 494 for (i = 0; i < 6; i++) { 495 eaddr[i] = sc->original_mac_addr[5 - i]; 496 } 497 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr); 498 499 /* Display ethernet address ,... */ 500 device_printf(dev, "Ethernet address %6D\n", eaddr, ":"); 501 502 /* Allocate interface structures */ 503 ifp = sc->ifp = if_alloc(IFT_ETHER); 504 if (ifp == NULL) { 505 device_printf(dev, "can not if_alloc()\n"); 506 error = ENOSPC; 507 goto fail; 508 } 509 510 /* Probe device for MII interface to PHY */ 511 DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_phy_probe\n"); 512 if (mii_phy_probe(dev, &sc->miibus, nve_ifmedia_upd, nve_ifmedia_sts)) { 513 device_printf(dev, "MII without any phy!\n"); 514 error = ENXIO; 515 goto fail; 516 } 517 518 /* Setup interface parameters */ 519 ifp->if_softc = sc; 520 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 521 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 522 ifp->if_ioctl = nve_ioctl; 523 ifp->if_output = ether_output; 524 ifp->if_start = nve_ifstart; 525 ifp->if_watchdog = nve_watchdog; 526 ifp->if_timer = 0; 527 ifp->if_init = nve_init; 528 ifp->if_mtu = ETHERMTU; 529 ifp->if_baudrate = IF_Mbps(100); 530 ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1; 531 ifp->if_capabilities |= IFCAP_VLAN_MTU; 532 533 /* Attach to OS's managers. */ 534 ether_ifattach(ifp, eaddr); 535 536 /* Activate our interrupt handler. 
- attach last to avoid lock */ 537 error = bus_setup_intr(sc->dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 538 nve_intr, sc, &sc->sc_ih); 539 if (error) { 540 device_printf(sc->dev, "couldn't set up interrupt handler\n"); 541 goto fail; 542 } 543 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n"); 544 545fail: 546 if (error) 547 nve_detach(dev); 548 549 return (error); 550} 551 552/* Detach interface for module unload */ 553static int 554nve_detach(device_t dev) 555{ 556 struct nve_softc *sc = device_get_softc(dev); 557 struct ifnet *ifp; 558 559 KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized")); 560 561 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n"); 562 563 ifp = sc->ifp; 564 565 if (device_is_attached(dev)) { 566 NVE_LOCK(sc); 567 nve_stop(sc); 568 NVE_UNLOCK(sc); 569 callout_drain(&sc->stat_callout); 570 ether_ifdetach(ifp); 571 } 572 573 if (sc->miibus) 574 device_delete_child(dev, sc->miibus); 575 bus_generic_detach(dev); 576 577 /* Reload unreversed address back into MAC in original state */ 578 if (sc->original_mac_addr) 579 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, 580 sc->original_mac_addr); 581 582 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n"); 583 /* Detach from NVIDIA hardware API */ 584 if (sc->hwapi->pfnClose) 585 sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE); 586 /* Release resources */ 587 if (sc->sc_ih) 588 bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih); 589 if (sc->irq) 590 bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq); 591 if (sc->res) 592 bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res); 593 594 nve_free_rings(sc); 595 596 if (sc->tx_desc) { 597 bus_dmamap_unload(sc->rtag, sc->rmap); 598 bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap); 599 bus_dmamap_destroy(sc->rtag, sc->rmap); 600 } 601 if (sc->mtag) 602 bus_dma_tag_destroy(sc->mtag); 603 if (sc->ttag) 604 bus_dma_tag_destroy(sc->ttag); 605 if (sc->rtag) 606 bus_dma_tag_destroy(sc->rtag); 607 608 if (ifp) 609 if_free(ifp); 610 mtx_destroy(&sc->mtx); 611 612 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n"); 613 614 return (0); 615} 616 617/* Initialise interface and start it "RUNNING" */ 618static void 619nve_init(void *xsc) 620{ 621 struct nve_softc *sc = xsc; 622 623 NVE_LOCK(sc); 624 nve_init_locked(sc); 625 NVE_UNLOCK(sc); 626} 627 628static void 629nve_init_locked(struct nve_softc *sc) 630{ 631 struct ifnet *ifp; 632 int error; 633 634 NVE_LOCK_ASSERT(sc); 635 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup); 636 637 ifp = sc->ifp; 638 639 /* Do nothing if already running */ 640 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 641 return; 642 643 nve_stop(sc); 644 DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n"); 645 646 nve_ifmedia_upd_locked(ifp); 647 648 /* Setup Hardware interface and allocate memory structures */ 649 error = sc->hwapi->pfnInit(sc->hwapi->pADCX, 650 0, /* force speed */ 651 0, /* force full duplex */ 652 0, /* force mode */ 653 0, /* force async mode */ 654 &sc->linkup); 655 656 if (error) { 657 device_printf(sc->dev, 658 "failed to start NVIDIA Hardware interface\n"); 659 return; 660 } 661 /* Set the MAC address */ 662 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IF_LLADDR(sc->ifp)); 663 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 664 sc->hwapi->pfnStart(sc->hwapi->pADCX); 665 666 /* Setup multicast filter */ 667 nve_setmulti(sc); 668 669 /* Update interface parameters */ 670 ifp->if_drv_flags |= IFF_DRV_RUNNING; 671 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 672 673 callout_reset(&sc->stat_callout, hz, nve_tick, sc); 674 
675 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n"); 676 677 return; 678} 679 680/* Stop interface activity ie. not "RUNNING" */ 681static void 682nve_stop(struct nve_softc *sc) 683{ 684 struct ifnet *ifp; 685 686 NVE_LOCK_ASSERT(sc); 687 688 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n"); 689 690 ifp = sc->ifp; 691 ifp->if_timer = 0; 692 693 /* Cancel tick timer */ 694 callout_stop(&sc->stat_callout); 695 696 /* Stop hardware activity */ 697 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); 698 sc->hwapi->pfnStop(sc->hwapi->pADCX, 0); 699 700 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n"); 701 /* Shutdown interface and deallocate memory buffers */ 702 if (sc->hwapi->pfnDeinit) 703 sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0); 704 705 sc->linkup = 0; 706 sc->cur_rx = 0; 707 sc->pending_rxs = 0; 708 sc->pending_txs = 0; 709 710 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 711 712 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n"); 713 714 return; 715} 716 717/* Shutdown interface for unload/reboot */ 718static void 719nve_shutdown(device_t dev) 720{ 721 struct nve_softc *sc; 722 723 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n"); 724 725 sc = device_get_softc(dev); 726 727 /* Stop hardware activity */ 728 NVE_LOCK(sc); 729 nve_stop(sc); 730 NVE_UNLOCK(sc); 731} 732 733/* Allocate TX ring buffers */ 734static int 735nve_init_rings(struct nve_softc *sc) 736{ 737 int error, i; 738 739 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n"); 740 741 sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0; 742 /* Initialise RX ring */ 743 for (i = 0; i < RX_RING_SIZE; i++) { 744 struct nve_rx_desc *desc = sc->rx_desc + i; 745 struct nve_map_buffer *buf = &desc->buf; 746 747 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 748 if (buf->mbuf == NULL) { 749 device_printf(sc->dev, "couldn't allocate mbuf\n"); 750 nve_free_rings(sc); 751 return (ENOBUFS); 752 } 753 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; 754 m_adj(buf->mbuf, ETHER_ALIGN); 755 756 error = bus_dmamap_create(sc->mtag, 0, &buf->map); 757 if (error) { 758 device_printf(sc->dev, "couldn't create dma map\n"); 759 nve_free_rings(sc); 760 return (error); 761 } 762 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, 763 nve_dmamap_rx_cb, &desc->paddr, 0); 764 if (error) { 765 device_printf(sc->dev, "couldn't dma map mbuf\n"); 766 nve_free_rings(sc); 767 return (error); 768 } 769 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); 770 771 desc->buflength = buf->mbuf->m_len; 772 desc->vaddr = mtod(buf->mbuf, caddr_t); 773 } 774 bus_dmamap_sync(sc->rtag, sc->rmap, 775 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 776 777 /* Initialize TX ring */ 778 for (i = 0; i < TX_RING_SIZE; i++) { 779 struct nve_tx_desc *desc = sc->tx_desc + i; 780 struct nve_map_buffer *buf = &desc->buf; 781 782 buf->mbuf = NULL; 783 784 error = bus_dmamap_create(sc->mtag, 0, &buf->map); 785 if (error) { 786 device_printf(sc->dev, "couldn't create dma map\n"); 787 nve_free_rings(sc); 788 return (error); 789 } 790 } 791 bus_dmamap_sync(sc->ttag, sc->tmap, 792 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 793 794 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n"); 795 796 return (error); 797} 798 799/* Free the TX ring buffers */ 800static void 801nve_free_rings(struct nve_softc *sc) 802{ 803 int i; 804 805 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n"); 806 807 for (i = 0; i < RX_RING_SIZE; i++) { 808 struct nve_rx_desc *desc = sc->rx_desc + i; 809 struct nve_map_buffer *buf = 
&desc->buf; 810 811 if (buf->mbuf) { 812 bus_dmamap_unload(sc->mtag, buf->map); 813 bus_dmamap_destroy(sc->mtag, buf->map); 814 m_freem(buf->mbuf); 815 } 816 buf->mbuf = NULL; 817 } 818 819 for (i = 0; i < TX_RING_SIZE; i++) { 820 struct nve_tx_desc *desc = sc->tx_desc + i; 821 struct nve_map_buffer *buf = &desc->buf; 822 823 if (buf->mbuf) { 824 bus_dmamap_unload(sc->mtag, buf->map); 825 bus_dmamap_destroy(sc->mtag, buf->map); 826 m_freem(buf->mbuf); 827 } 828 buf->mbuf = NULL; 829 } 830 831 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n"); 832} 833 834/* Main loop for sending packets from OS to interface */ 835static void 836nve_ifstart(struct ifnet *ifp) 837{ 838 struct nve_softc *sc = ifp->if_softc; 839 840 NVE_LOCK(sc); 841 nve_ifstart_locked(ifp); 842 NVE_UNLOCK(sc); 843} 844 845static void 846nve_ifstart_locked(struct ifnet *ifp) 847{ 848 struct nve_softc *sc = ifp->if_softc; 849 struct nve_map_buffer *buf; 850 struct mbuf *m0, *m; 851 struct nve_tx_desc *desc; 852 ADAPTER_WRITE_DATA txdata; 853 int error, i; 854 855 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n"); 856 857 NVE_LOCK_ASSERT(sc); 858 859 /* If link is down/busy or queue is empty do nothing */ 860 if (ifp->if_drv_flags & IFF_DRV_OACTIVE || 861 IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 862 return; 863 864 /* Transmit queued packets until sent or TX ring is full */ 865 while (sc->pending_txs < TX_RING_SIZE) { 866 desc = sc->tx_desc + sc->cur_tx; 867 buf = &desc->buf; 868 869 /* Get next packet to send. */ 870 IF_DEQUEUE(&ifp->if_snd, m0); 871 872 /* If nothing to send, return. */ 873 if (m0 == NULL) 874 return; 875 876 /* Map MBUF for DMA access */ 877 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0, 878 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); 879 880 if (error && error != EFBIG) { 881 m_freem(m0); 882 sc->tx_errors++; 883 continue; 884 } 885 /* 886 * Packet has too many fragments - defrag into new mbuf 887 * cluster 888 */ 889 if (error) { 890 m = m_defrag(m0, M_DONTWAIT); 891 if (m == NULL) { 892 m_freem(m0); 893 sc->tx_errors++; 894 continue; 895 } 896 m0 = m; 897 898 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m, 899 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); 900 if (error) { 901 m_freem(m); 902 sc->tx_errors++; 903 continue; 904 } 905 } 906 /* Do sync on DMA bounce buffer */ 907 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE); 908 909 buf->mbuf = m0; 910 txdata.ulNumberOfElements = desc->numfrags; 911 txdata.pvID = (PVOID)desc; 912 913 /* Put fragments into API element list */ 914 txdata.ulTotalLength = buf->mbuf->m_len; 915 for (i = 0; i < desc->numfrags; i++) { 916 txdata.sElement[i].ulLength = 917 (ulong)desc->frags[i].ds_len; 918 txdata.sElement[i].pPhysical = 919 (PVOID)desc->frags[i].ds_addr; 920 } 921 922 /* Send packet to Nvidia API for transmission */ 923 error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata); 924 925 switch (error) { 926 case ADAPTERERR_NONE: 927 /* Packet was queued in API TX queue successfully */ 928 sc->pending_txs++; 929 sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE; 930 break; 931 932 case ADAPTERERR_TRANSMIT_QUEUE_FULL: 933 /* The API TX queue is full - requeue the packet */ 934 device_printf(sc->dev, 935 "nve_ifstart: transmit queue is full\n"); 936 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 937 bus_dmamap_unload(sc->mtag, buf->map); 938 IF_PREPEND(&ifp->if_snd, buf->mbuf); 939 buf->mbuf = NULL; 940 return; 941 942 default: 943 /* The API failed to queue/send the packet so dump it */ 944 device_printf(sc->dev, "nve_ifstart: transmit error\n"); 945 
			bus_dmamap_unload(sc->mtag, buf->map);
			m_freem(buf->mbuf);
			buf->mbuf = NULL;
			sc->tx_errors++;
			return;
		}
		/* Set watchdog timer. */
		ifp->if_timer = 8;

		/* Copy packet to BPF tap */
		BPF_MTAP(ifp, m0);
	}
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n");
}

/* Handle IOCTL events */
static int
nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct nve_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n");

	switch (command) {
	case SIOCSIFMTU:
		/* Set MTU size */
		NVE_LOCK(sc);
		if (ifp->if_mtu == ifr->ifr_mtu) {
			NVE_UNLOCK(sc);
			break;
		}
		if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) {
			ifp->if_mtu = ifr->ifr_mtu;
			nve_stop(sc);
			nve_init_locked(sc);
		} else
			error = EINVAL;
		NVE_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		/* Setup interface flags */
		NVE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
				nve_init_locked(sc);
				NVE_UNLOCK(sc);
				break;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				nve_stop(sc);
				NVE_UNLOCK(sc);
				break;
			}
		}
		/* Handle IFF_PROMISC and IFF_ALLMULTI flags. */
		nve_setmulti(sc);
		NVE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Setup multicast filter */
		NVE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			nve_setmulti(sc);
		}
		NVE_UNLOCK(sc);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Get/Set interface media parameters */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		/* Everything else we forward to generic ether ioctl */
		error = ether_ioctl(ifp, (int)command, data);
		break;
	}

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n");

	return (error);
}

/* Interrupt service routine */
static void
nve_intr(void *arg)
{
	struct nve_softc *sc = arg;
	struct ifnet *ifp = sc->ifp;

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n");

	NVE_LOCK(sc);
	/* Only service interrupts while the interface is up */
	if ((ifp->if_flags & IFF_UP) == 0) {
		nve_stop(sc);
		NVE_UNLOCK(sc);
		return;
	}
	/* Handle interrupt event */
	if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) {
		sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
		sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	}
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nve_ifstart_locked(ifp);

	/* If no pending packets we don't need a timeout */
	if (sc->pending_txs == 0)
		sc->ifp->if_timer = 0;
	NVE_UNLOCK(sc);

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n");

	return;
}

/* Setup multicast filters */
static void
nve_setmulti(struct nve_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	PACKET_FILTER hwfilter;
	int i;
	u_int8_t andaddr[6], oraddr[6];

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n");

	ifp = sc->ifp;

	/* Initialize filter */
	hwfilter.ulFilterFlags = 0;
	for (i = 0; i < 6; i++) {
		hwfilter.acMulticastAddress[i] = 0;
hwfilter.acMulticastMask[i] = 0; 1093 } 1094 1095 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1096 /* Accept all packets */ 1097 hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS; 1098 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); 1099 return; 1100 } 1101 /* Setup multicast filter */ 1102 IF_ADDR_LOCK(ifp); 1103 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1104 u_char *addrp; 1105 1106 if (ifma->ifma_addr->sa_family != AF_LINK) 1107 continue; 1108 1109 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 1110 for (i = 0; i < 6; i++) { 1111 u_int8_t mcaddr = addrp[i]; 1112 andaddr[i] &= mcaddr; 1113 oraddr[i] |= mcaddr; 1114 } 1115 } 1116 IF_ADDR_UNLOCK(ifp); 1117 for (i = 0; i < 6; i++) { 1118 hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i]; 1119 hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]); 1120 } 1121 1122 /* Send filter to NVIDIA API */ 1123 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); 1124 1125 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n"); 1126 1127 return; 1128} 1129 1130/* Change the current media/mediaopts */ 1131static int 1132nve_ifmedia_upd(struct ifnet *ifp) 1133{ 1134 struct nve_softc *sc = ifp->if_softc; 1135 1136 NVE_LOCK(sc); 1137 nve_ifmedia_upd_locked(ifp); 1138 NVE_UNLOCK(sc); 1139 return (0); 1140} 1141 1142static void 1143nve_ifmedia_upd_locked(struct ifnet *ifp) 1144{ 1145 struct nve_softc *sc = ifp->if_softc; 1146 struct mii_data *mii; 1147 1148 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n"); 1149 1150 NVE_LOCK_ASSERT(sc); 1151 mii = device_get_softc(sc->miibus); 1152 1153 if (mii->mii_instance) { 1154 struct mii_softc *miisc; 1155 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 1156 miisc = LIST_NEXT(miisc, mii_list)) { 1157 mii_phy_reset(miisc); 1158 } 1159 } 1160 mii_mediachg(mii); 1161} 1162 1163/* Update current miibus PHY status of media */ 1164static void 1165nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1166{ 1167 struct nve_softc *sc; 1168 struct mii_data *mii; 1169 1170 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n"); 1171 1172 sc = ifp->if_softc; 1173 NVE_LOCK(sc); 1174 mii = device_get_softc(sc->miibus); 1175 mii_pollstat(mii); 1176 NVE_UNLOCK(sc); 1177 1178 ifmr->ifm_active = mii->mii_media_active; 1179 ifmr->ifm_status = mii->mii_media_status; 1180 1181 return; 1182} 1183 1184/* miibus tick timer - maintain link status */ 1185static void 1186nve_tick(void *xsc) 1187{ 1188 struct nve_softc *sc = xsc; 1189 struct mii_data *mii; 1190 struct ifnet *ifp; 1191 1192 NVE_LOCK_ASSERT(sc); 1193 1194 ifp = sc->ifp; 1195 nve_update_stats(sc); 1196 1197 mii = device_get_softc(sc->miibus); 1198 mii_tick(mii); 1199 1200 if (mii->mii_media_status & IFM_ACTIVE && 1201 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1202 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1203 nve_ifstart_locked(ifp); 1204 } 1205 callout_reset(&sc->stat_callout, hz, nve_tick, sc); 1206 1207 return; 1208} 1209 1210/* Update ifnet data structure with collected interface stats from API */ 1211static void 1212nve_update_stats(struct nve_softc *sc) 1213{ 1214 struct ifnet *ifp = sc->ifp; 1215 ADAPTER_STATS stats; 1216 1217 NVE_LOCK_ASSERT(sc); 1218 1219 if (sc->hwapi) { 1220 sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats); 1221 1222 ifp->if_ipackets = stats.ulSuccessfulReceptions; 1223 ifp->if_ierrors = stats.ulMissedFrames + 1224 stats.ulFailedReceptions + 1225 stats.ulCRCErrors + 1226 stats.ulFramingErrors + 1227 stats.ulOverFlowErrors; 1228 1229 ifp->if_opackets = stats.ulSuccessfulTransmissions; 
1230 ifp->if_oerrors = sc->tx_errors + 1231 stats.ulFailedTransmissions + 1232 stats.ulRetryErrors + 1233 stats.ulUnderflowErrors + 1234 stats.ulLossOfCarrierErrors + 1235 stats.ulLateCollisionErrors; 1236 1237 ifp->if_collisions = stats.ulLateCollisionErrors; 1238 } 1239 1240 return; 1241} 1242 1243/* miibus Read PHY register wrapper - calls Nvidia API entry point */ 1244static int 1245nve_miibus_readreg(device_t dev, int phy, int reg) 1246{ 1247 struct nve_softc *sc = device_get_softc(dev); 1248 ULONG data; 1249 1250 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n"); 1251 1252 ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data); 1253 1254 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n"); 1255 1256 return (data); 1257} 1258 1259/* miibus Write PHY register wrapper - calls Nvidia API entry point */ 1260static void 1261nve_miibus_writereg(device_t dev, int phy, int reg, int data) 1262{ 1263 struct nve_softc *sc = device_get_softc(dev); 1264 1265 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n"); 1266 1267 ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data); 1268 1269 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n"); 1270 1271 return; 1272} 1273 1274/* Watchdog timer to prevent PHY lockups */ 1275static void 1276nve_watchdog(struct ifnet *ifp) 1277{ 1278 struct nve_softc *sc = ifp->if_softc; 1279 1280 NVE_LOCK(sc); 1281 1282 /* 1283 * The nvidia driver blob defers tx completion notifications. 1284 * Thus, sometimes the watchdog timer will go off when the 1285 * tx engine is fine, but the tx completions are just deferred. 1286 * Try kicking the driver blob to clear out any pending tx 1287 * completions. If that clears up all the pending tx 1288 * operations, then just return without printing the warning 1289 * message or resetting the adapter. 
1290 */ 1291 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); 1292 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX); 1293 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 1294 if (sc->pending_txs == 0) { 1295 NVE_UNLOCK(sc); 1296 return; 1297 } 1298 1299 device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs); 1300 1301 sc->tx_errors++; 1302 1303 nve_stop(sc); 1304 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1305 nve_init_locked(sc); 1306 1307 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1308 nve_ifstart_locked(ifp); 1309 NVE_UNLOCK(sc); 1310 1311 return; 1312} 1313 1314/* --- Start of NVOSAPI interface --- */ 1315 1316/* Allocate DMA enabled general use memory for API */ 1317static NV_SINT32 1318nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem) 1319{ 1320 struct nve_softc *sc; 1321 bus_addr_t mem_physical; 1322 1323 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength); 1324 1325 sc = (struct nve_softc *)ctx; 1326 1327 mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF, 1328 M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0); 1329 1330 if (!mem->pLogical) { 1331 device_printf(sc->dev, "memory allocation failed\n"); 1332 return (0); 1333 } 1334 memset(mem->pLogical, 0, (ulong)mem->uiLength); 1335 mem_physical = vtophys(mem->pLogical); 1336 mem->pPhysical = (PVOID)mem_physical; 1337 1338 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n", 1339 (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength); 1340 1341 return (1); 1342} 1343 1344/* Free allocated memory */ 1345static NV_SINT32 1346nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem) 1347{ 1348 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n", 1349 (uint)mem->pLogical, (uint) mem->uiLength); 1350 1351 contigfree(mem->pLogical, PAGE_SIZE, M_DEVBUF); 1352 return (1); 1353} 1354 1355/* Copied directly from nvnet.c */ 1356static NV_SINT32 1357nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) 1358{ 1359 MEMORY_BLOCK mem_block; 1360 1361 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n"); 1362 1363 mem_block_ex->pLogical = NULL; 1364 mem_block_ex->uiLengthOrig = mem_block_ex->uiLength; 1365 1366 if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) && 1367 (mem_block_ex->AlignmentSize > 1)) { 1368 DEBUGOUT(NVE_DEBUG_API, " aligning on %d\n", 1369 mem_block_ex->AlignmentSize); 1370 mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize; 1371 } 1372 mem_block.uiLength = mem_block_ex->uiLengthOrig; 1373 1374 if (nve_osalloc(ctx, &mem_block) == 0) { 1375 return (0); 1376 } 1377 mem_block_ex->pLogicalOrig = mem_block.pLogical; 1378 mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical; 1379 mem_block_ex->pPhysicalOrigHigh = 0; 1380 1381 mem_block_ex->pPhysical = mem_block.pPhysical; 1382 mem_block_ex->pLogical = mem_block.pLogical; 1383 1384 if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) { 1385 unsigned int offset; 1386 offset = mem_block_ex->pPhysicalOrigLow & 1387 (mem_block_ex->AlignmentSize - 1); 1388 1389 if (offset) { 1390 mem_block_ex->pPhysical = 1391 (PVOID)((ulong)mem_block_ex->pPhysical + 1392 mem_block_ex->AlignmentSize - offset); 1393 mem_block_ex->pLogical = 1394 (PVOID)((ulong)mem_block_ex->pLogical + 1395 mem_block_ex->AlignmentSize - offset); 1396 } /* if (offset) */ 1397 } /* if (mem_block_ex->uiLength != *mem_block_ex->uiLengthOrig) */ 1398 return (1); 1399} 1400 1401/* Copied directly from nvnet.c */ 1402static NV_SINT32 1403nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) 1404{ 1405 MEMORY_BLOCK mem_block; 1406 1407 DEBUGOUT(NVE_DEBUG_API, "nve: 
nve_osfreeex\n"); 1408 1409 mem_block.pLogical = mem_block_ex->pLogicalOrig; 1410 mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow); 1411 mem_block.uiLength = mem_block_ex->uiLengthOrig; 1412 1413 return (nve_osfree(ctx, &mem_block)); 1414} 1415 1416/* Clear memory region */ 1417static NV_SINT32 1418nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length) 1419{ 1420 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n"); 1421 memset(mem, 0, length); 1422 return (1); 1423} 1424 1425/* Sleep for a tick */ 1426static NV_SINT32 1427nve_osdelay(PNV_VOID ctx, NV_UINT32 usec) 1428{ 1429 DELAY(usec); 1430 return (1); 1431} 1432 1433/* Allocate memory for rx buffer */ 1434static NV_SINT32 1435nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id) 1436{ 1437 struct nve_softc *sc = ctx; 1438 struct nve_rx_desc *desc; 1439 struct nve_map_buffer *buf; 1440 int error; 1441 1442 if (device_is_attached(sc->dev)) 1443 NVE_LOCK_ASSERT(sc); 1444 1445 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n"); 1446 1447 if (sc->pending_rxs == RX_RING_SIZE) { 1448 device_printf(sc->dev, "rx ring buffer is full\n"); 1449 goto fail; 1450 } 1451 desc = sc->rx_desc + sc->cur_rx; 1452 buf = &desc->buf; 1453 1454 if (buf->mbuf == NULL) { 1455 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1456 if (buf->mbuf == NULL) { 1457 device_printf(sc->dev, "failed to allocate memory\n"); 1458 goto fail; 1459 } 1460 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; 1461 m_adj(buf->mbuf, ETHER_ALIGN); 1462 1463 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, 1464 nve_dmamap_rx_cb, &desc->paddr, 0); 1465 if (error) { 1466 device_printf(sc->dev, "failed to dmamap mbuf\n"); 1467 m_freem(buf->mbuf); 1468 buf->mbuf = NULL; 1469 goto fail; 1470 } 1471 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); 1472 desc->buflength = buf->mbuf->m_len; 1473 desc->vaddr = mtod(buf->mbuf, caddr_t); 1474 } 1475 sc->pending_rxs++; 1476 sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE; 1477 1478 mem->pLogical = (void *)desc->vaddr; 1479 mem->pPhysical = (void *)desc->paddr; 1480 mem->uiLength = desc->buflength; 1481 *id = (void *)desc; 1482 1483 return (1); 1484 1485fail: 1486 return (0); 1487} 1488 1489/* Free the rx buffer */ 1490static NV_SINT32 1491nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id) 1492{ 1493 struct nve_softc *sc = ctx; 1494 struct nve_rx_desc *desc; 1495 struct nve_map_buffer *buf; 1496 1497 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n"); 1498 1499 desc = (struct nve_rx_desc *) id; 1500 buf = &desc->buf; 1501 1502 if (buf->mbuf) { 1503 bus_dmamap_unload(sc->mtag, buf->map); 1504 bus_dmamap_destroy(sc->mtag, buf->map); 1505 m_freem(buf->mbuf); 1506 } 1507 sc->pending_rxs--; 1508 buf->mbuf = NULL; 1509 1510 return (1); 1511} 1512 1513/* This gets called by the Nvidia API after our TX packet has been sent */ 1514static NV_SINT32 1515nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success) 1516{ 1517 struct nve_softc *sc = ctx; 1518 struct nve_map_buffer *buf; 1519 struct nve_tx_desc *desc = (struct nve_tx_desc *) id; 1520 struct ifnet *ifp; 1521 1522 NVE_LOCK_ASSERT(sc); 1523 1524 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n"); 1525 1526 ifp = sc->ifp; 1527 buf = &desc->buf; 1528 sc->pending_txs--; 1529 1530 /* Unload and free mbuf cluster */ 1531 if (buf->mbuf == NULL) 1532 goto fail; 1533 1534 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE); 1535 bus_dmamap_unload(sc->mtag, buf->map); 1536 m_freem(buf->mbuf); 1537 buf->mbuf = NULL; 1538 1539 /* Send more packets 
if we have them */ 1540 if (sc->pending_txs < TX_RING_SIZE) 1541 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1542 1543 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->pending_txs < TX_RING_SIZE) 1544 nve_ifstart_locked(ifp); 1545 1546fail: 1547 1548 return (1); 1549} 1550 1551/* This gets called by the Nvidia API when a new packet has been received */ 1552/* XXX What is newbuf used for? XXX */ 1553static NV_SINT32 1554nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf, 1555 NV_UINT8 priority) 1556{ 1557 struct nve_softc *sc = ctx; 1558 struct ifnet *ifp; 1559 struct nve_rx_desc *desc; 1560 struct nve_map_buffer *buf; 1561 ADAPTER_READ_DATA *readdata; 1562 struct mbuf *m; 1563 1564 NVE_LOCK_ASSERT(sc); 1565 1566 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n"); 1567 1568 ifp = sc->ifp; 1569 1570 readdata = (ADAPTER_READ_DATA *) data; 1571 desc = readdata->pvID; 1572 buf = &desc->buf; 1573 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1574 1575 if (success) { 1576 /* Sync DMA bounce buffer. */ 1577 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1578 1579 /* First mbuf in packet holds the ethernet and packet headers */ 1580 buf->mbuf->m_pkthdr.rcvif = ifp; 1581 buf->mbuf->m_pkthdr.len = buf->mbuf->m_len = 1582 readdata->ulTotalLength; 1583 1584 bus_dmamap_unload(sc->mtag, buf->map); 1585 1586 /* Blat the mbuf pointer, kernel will free the mbuf cluster */ 1587 m = buf->mbuf; 1588 buf->mbuf = NULL; 1589 1590 /* Give mbuf to OS. */ 1591 NVE_UNLOCK(sc); 1592 (*ifp->if_input)(ifp, m); 1593 NVE_LOCK(sc); 1594 if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH) 1595 ifp->if_imcasts++; 1596 1597 } else { 1598 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1599 bus_dmamap_unload(sc->mtag, buf->map); 1600 m_freem(buf->mbuf); 1601 buf->mbuf = NULL; 1602 } 1603 1604 sc->cur_rx = desc - sc->rx_desc; 1605 sc->pending_rxs--; 1606 1607 return (1); 1608} 1609 1610/* This gets called by NVIDIA API when the PHY link state changes */ 1611static NV_SINT32 1612nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled) 1613{ 1614 1615 DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n"); 1616 1617 return (1); 1618} 1619 1620/* Setup a watchdog timer */ 1621static NV_SINT32 1622nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer) 1623{ 1624 struct nve_softc *sc = (struct nve_softc *)ctx; 1625 1626 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n"); 1627 1628 callout_init(&sc->ostimer, CALLOUT_MPSAFE); 1629 *timer = &sc->ostimer; 1630 1631 return (1); 1632} 1633 1634/* Free the timer */ 1635static NV_SINT32 1636nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer) 1637{ 1638 1639 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n"); 1640 1641 callout_drain((struct callout *)timer); 1642 1643 return (1); 1644} 1645 1646/* Setup timer parameters */ 1647static NV_SINT32 1648nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters) 1649{ 1650 struct nve_softc *sc = (struct nve_softc *)ctx; 1651 1652 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n"); 1653 1654 sc->ostimer_func = func; 1655 sc->ostimer_params = parameters; 1656 1657 return (1); 1658} 1659 1660/* Set the timer to go off */ 1661static NV_SINT32 1662nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay) 1663{ 1664 struct nve_softc *sc = ctx; 1665 1666 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n"); 1667 1668 callout_reset((struct callout *)timer, delay, sc->ostimer_func, 1669 sc->ostimer_params); 1670 1671 return (1); 1672} 1673 1674/* Cancel the timer */ 1675static NV_SINT32 
nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer)
{

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n");

	callout_stop((struct callout *)timer);

	return (1);
}

static NV_SINT32
nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id,
    NV_UINT8 *newbuffer, NV_UINT8 priority)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n");

	return (1);
}

static PNV_VOID
nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpktnopq\n");

	return (NULL);
}

static NV_SINT32
nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n");

	return (1);
}

/* Allocate mutex context (already done in nve_attach) */
static NV_SINT32
nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n");

	*pLock = (void **)sc;

	return (1);
}

/* Obtain a spin lock */
static NV_SINT32
nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n");

	return (1);
}

/* Release lock */
static NV_SINT32
nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n");

	return (1);
}

/* I have no idea what this is for */
static PNV_VOID
nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n");
	panic("nve: nve_osreturnbufvirtual not implemented\n");

	return (NULL);
}

/* --- End of NVOSAPI interface --- */