if_nve.c revision 173839
/*-
 * Copyright (c) 2005 by David E. O'Brien <obrien@FreeBSD.org>.
 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $
 */

/*
 * NVIDIA nForce MCP Networking Adapter driver
 *
 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by
 * NVIDIA through their web site.
 *
 * All mainstream nForce and nForce2 motherboards are supported. This module
 * is as stable as, and sometimes more stable than, the Linux version.
 * (Recent Linux stability issues seem to be related to newer distributions
 * using GCC 3.x; however, this doesn't appear to affect FreeBSD 5.x.)
 *
 * In accordance with the NVIDIA distribution license it is necessary to
 * link this module against the nvlibnet.o binary object included in the
 * Linux driver source distribution. The binary component is not modified in
 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c
 * Linux kernel module "wrapper".
 *
 * The Linux driver uses a common code API that is shared between Win32 and
 * i386 Linux. This abstracts the low level driver functions and uses
 * callbacks and hooks to access the underlying hardware device. By using
 * this same API in a FreeBSD kernel module it is possible to support the
 * hardware without breaching the Linux source distribution's licensing
 * requirements, or obtaining the hardware programming specifications. (A
 * minimal illustration of this callback-table pattern is appended at the
 * end of this listing.)
 *
 * Although not conventional, it works, and given the relatively small
 * amount of hardware-centric code, it's hopefully no more buggy than its
 * Linux counterpart.
 *
 * NVIDIA now supports the nForce3 AMD64 platform; however, I have been
 * unable to access such a system to verify support. The code is reported
 * to work with little modification when compiled with the AMD64 version of
 * the NVIDIA Linux library. All that should be necessary to make the
 * driver work is to link it directly into the kernel, instead of as a
 * module, and apply the docs/amd64.diff patch in this source distribution
 * to the NVIDIA Linux driver source.
 *
 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well
 * as recent versions of DragonFly.
 *
 * Written by Quinton Dolan <q@onthenet.com.au>
 * Portions based on existing FreeBSD network drivers.
 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nve/if_nve.c 173839 2007-11-22 02:45:00Z yongari $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/module.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miibus_if.h"

/* Include NVIDIA Linux driver header files */
#include <contrib/dev/nve/nvenet_version.h>
#define	linux
#include <contrib/dev/nve/basetype.h>
#include <contrib/dev/nve/phy.h>
#include "os+%DIKED-nve.h"
#include <contrib/dev/nve/drvinfo.h>
#include <contrib/dev/nve/adapter.h>
#undef	linux

#include <dev/nve/if_nvereg.h>

MODULE_DEPEND(nve, pci, 1, 1, 1);
MODULE_DEPEND(nve, ether, 1, 1, 1);
MODULE_DEPEND(nve, miibus, 1, 1, 1);

static int	nve_probe(device_t);
static int	nve_attach(device_t);
static int	nve_detach(device_t);
static void	nve_init(void *);
static void	nve_init_locked(struct nve_softc *);
static void	nve_stop(struct nve_softc *);
static int	nve_shutdown(device_t);
static int	nve_init_rings(struct nve_softc *);
static void	nve_free_rings(struct nve_softc *);

static void	nve_ifstart(struct ifnet *);
static void	nve_ifstart_locked(struct ifnet *);
static int	nve_ioctl(struct ifnet *, u_long, caddr_t);
static void	nve_intr(void *);
static void	nve_tick(void *);
static void	nve_setmulti(struct nve_softc *);
static void	nve_watchdog(struct ifnet *);
static void	nve_update_stats(struct nve_softc *);

static int	nve_ifmedia_upd(struct ifnet *);
static void	nve_ifmedia_upd_locked(struct ifnet *);
static void	nve_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	nve_miibus_readreg(device_t, int, int);
static void	nve_miibus_writereg(device_t, int, int, int);

static void	nve_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);

static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *,
		    NV_UINT8);
static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *);
static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID);
static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID);

static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *,
		    NV_UINT8);
static PNV_VOID	 nve_ospreprocpktnopq(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32);
static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *);
static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID);
static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID);
static PNV_VOID	 nve_osreturnbufvirt(PNV_VOID, PNV_VOID);

static device_method_t nve_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nve_probe),
	DEVMETHOD(device_attach, nve_attach),
	DEVMETHOD(device_detach, nve_detach),
	DEVMETHOD(device_shutdown, nve_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, nve_miibus_readreg),
	DEVMETHOD(miibus_writereg, nve_miibus_writereg),

	{0, 0}
};

static driver_t nve_driver = {
	"nve",
	nve_methods,
	sizeof(struct nve_softc)
};

static devclass_t nve_devclass;

static int	nve_pollinterval = 0;
SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW,
    &nve_pollinterval, 0, "delay between interface polls");

DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0);
DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0);

static struct nve_type nve_devs[] = {
	{NVIDIA_VENDORID, NFORCE_MCPNET1_DEVICEID,
	    "NVIDIA nForce MCP Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET2_DEVICEID,
	    "NVIDIA nForce MCP2 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET3_DEVICEID,
	    "NVIDIA nForce MCP3 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET4_DEVICEID,
	    "NVIDIA nForce MCP4 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET5_DEVICEID,
	    "NVIDIA nForce MCP5 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET6_DEVICEID,
	    "NVIDIA nForce MCP6 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET7_DEVICEID,
	    "NVIDIA nForce MCP7 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET8_DEVICEID,
	    "NVIDIA nForce MCP8 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET9_DEVICEID,
	    "NVIDIA nForce MCP9 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET10_DEVICEID,
	    "NVIDIA nForce MCP10 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET11_DEVICEID,
	    "NVIDIA nForce MCP11 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET12_DEVICEID,
	    "NVIDIA nForce MCP12 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET13_DEVICEID,
	    "NVIDIA nForce MCP13 Networking Adapter"},
	{0, 0, NULL}
};

/* DMA MEM map callback function to get data segment physical address */
static void
nve_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nsegs, int error)
{
	if (error)
		return;

	KASSERT(nsegs == 1,
	    ("Too many DMA segments returned when mapping DMA memory"));
	*(bus_addr_t *)arg = segs->ds_addr;
}
/* DMA RX map callback function to get data segment physical address */
static void
nve_dmamap_rx_cb(void *arg, bus_dma_segment_t * segs, int nsegs,
    bus_size_t mapsize, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

/*
 * DMA TX buffer callback function to allocate fragment data segment
 * addresses
 */
static void
nve_dmamap_tx_cb(void *arg, bus_dma_segment_t * segs, int nsegs,
    bus_size_t mapsize, int error)
{
	struct nve_tx_desc *info;

	info = arg;
	if (error)
		return;
	KASSERT(nsegs < NV_MAX_FRAGS,
	    ("Too many DMA segments returned when mapping mbuf"));
	info->numfrags = nsegs;
	bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t));
}

/* Probe for supported hardware IDs */
static int
nve_probe(device_t dev)
{
	struct nve_type *t;

	t = nve_devs;
	/* Check for matching PCI device IDs */
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->vid_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			device_set_desc(dev, t->name);
			return (BUS_PROBE_LOW_PRIORITY);
		}
		t++;
	}

	return (ENXIO);
}

/* Attach driver and initialise hardware for use */
static int
nve_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct nve_softc *sc;
	struct ifnet *ifp;
	OS_API *osapi;
	ADAPTER_OPEN_PARAMS OpenParams;
	int error = 0, i, rid;

	if (bootverbose)
		device_printf(dev, "nvenetlib.o version %s\n", DRIVER_VERSION);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n");

	sc = device_get_softc(dev);

	/* Allocate mutex */
	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->stat_callout, &sc->mtx, 0);

	sc->dev = dev;

	/* Preinitialize data structures */
	bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS));

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate memory mapped address space */
	rid = NV_RID;
	sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1,
	    RF_ACTIVE);

	if (sc->res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->sc_st = rman_get_bustag(sc->res);
	sc->sc_sh = rman_get_bushandle(sc->res);

	/* Allocate interrupt */
	rid = 0;
	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}
	/* Allocate DMA tags */
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS,
	    NV_MAX_FRAGS, MCLBYTES, 0,
	    busdma_lock_mutex, &Giant,
	    &sc->mtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0,
	    busdma_lock_mutex, &Giant,
	    &sc->rtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0,
	    busdma_lock_mutex, &Giant,
	    &sc->ttag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	/* Allocate DMA safe memory and get the DMA addresses. */
	error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc,
	    BUS_DMA_WAITOK, &sc->tmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE);
	error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb,
	    &sc->tx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc,
	    BUS_DMA_WAITOK, &sc->rmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE);
	error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb,
	    &sc->rx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	/* Initialize rings. */
	if (nve_init_rings(sc)) {
		device_printf(dev, "failed to init rings\n");
		error = ENXIO;
		goto fail;
	}
	/* Setup NVIDIA API callback routines */
	osapi = &sc->osapi;
	osapi->pOSCX = sc;
	osapi->pfnAllocMemory = nve_osalloc;
	osapi->pfnFreeMemory = nve_osfree;
	osapi->pfnAllocMemoryEx = nve_osallocex;
	osapi->pfnFreeMemoryEx = nve_osfreeex;
	osapi->pfnClearMemory = nve_osclear;
	osapi->pfnStallExecution = nve_osdelay;
	osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf;
	osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf;
	osapi->pfnPacketWasSent = nve_ospackettx;
	osapi->pfnPacketWasReceived = nve_ospacketrx;
	osapi->pfnLinkStateHasChanged = nve_oslinkchg;
	osapi->pfnAllocTimer = nve_osalloctimer;
	osapi->pfnFreeTimer = nve_osfreetimer;
	osapi->pfnInitializeTimer = nve_osinittimer;
	osapi->pfnSetTimer = nve_ossettimer;
	osapi->pfnCancelTimer = nve_oscanceltimer;
	osapi->pfnPreprocessPacket = nve_ospreprocpkt;
	osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq;
	osapi->pfnIndicatePackets = nve_osindicatepkt;
	osapi->pfnLockAlloc = nve_oslockalloc;
	osapi->pfnLockAcquire = nve_oslockacquire;
	osapi->pfnLockRelease = nve_oslockrelease;
	osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt;

	sc->linkup = FALSE;
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN;

	/* TODO - We don't support hardware offload yet */
	sc->hwmode = 1;
	sc->media = 0;

	/* Set NVIDIA API startup parameters */
	OpenParams.MaxDpcLoop = 2;
	OpenParams.MaxRxPkt = RX_RING_SIZE;
	OpenParams.MaxTxPkt = TX_RING_SIZE;
	OpenParams.SentPacketStatusSuccess = 1;
	OpenParams.SentPacketStatusFailure = 0;
	OpenParams.MaxRxPktToAccumulate = 6;
	OpenParams.ulPollInterval = nve_pollinterval;
	OpenParams.SetForcedModeEveryNthRxPacket = 0;
	OpenParams.SetForcedModeEveryNthTxPacket = 0;
	OpenParams.RxForcedInterrupt = 0;
	OpenParams.TxForcedInterrupt = 0;
	OpenParams.pOSApi = osapi;
	OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res);
	OpenParams.bASFEnabled = 0;
	OpenParams.ulDescriptorVersion = sc->hwmode;
	OpenParams.ulMaxPacketSize = sc->max_frame_size;
	OpenParams.DeviceId = pci_get_device(dev);

	/* Open NVIDIA Hardware API */
	error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr);
	if (error) {
		device_printf(dev,
		    "failed to open NVIDIA Hardware API: 0x%x\n", error);
		goto fail;
	}

	/* TODO - Add support for MODE2 hardware offload */

	bzero(&sc->adapterdata, sizeof(sc->adapterdata));

	sc->adapterdata.ulMediaIF = sc->media;
	sc->adapterdata.ulModeRegTxReadCompleteEnable = 1;
	sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata);

	/* MAC is loaded backwards into h/w reg */
	sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr);
	for (i = 0; i < 6; i++) {
		eaddr[i] = sc->original_mac_addr[5 - i];
	}
	sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr);

	/* Display ethernet address. */
	device_printf(dev, "Ethernet address %6D\n", eaddr, ":");

	/* Allocate interface structures */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Probe device for MII interface to PHY */
	DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_phy_probe\n");
	if (mii_phy_probe(dev, &sc->miibus, nve_ifmedia_upd, nve_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	/* Setup interface parameters */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nve_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = nve_ifstart;
	ifp->if_watchdog = nve_watchdog;
	ifp->if_timer = 0;
	ifp->if_init = nve_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Mbps(100);
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_RING_SIZE - 1);
	ifp->if_snd.ifq_drv_maxlen = TX_RING_SIZE - 1;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

	/* Attach to OS's managers. */
	ether_ifattach(ifp, eaddr);
	/* Activate our interrupt handler. - attach last to avoid lock */
	error = bus_setup_intr(sc->dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, nve_intr, sc, &sc->sc_ih);
	if (error) {
		device_printf(sc->dev, "couldn't set up interrupt handler\n");
		goto fail;
	}
	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n");

fail:
	if (error)
		nve_detach(dev);

	return (error);
}

/* Detach interface for module unload */
static int
nve_detach(device_t dev)
{
	struct nve_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;

	KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized"));

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n");

	ifp = sc->ifp;

	if (device_is_attached(dev)) {
		NVE_LOCK(sc);
		nve_stop(sc);
		NVE_UNLOCK(sc);
		callout_drain(&sc->stat_callout);
		ether_ifdetach(ifp);
	}

	if (sc->miibus)
		device_delete_child(dev, sc->miibus);
	bus_generic_detach(dev);

	/*
	 * sc->hwapi may still be NULL if attach failed before ADAPTER_Open,
	 * so guard the hardware API calls.
	 */
	/* Reload unreversed address back into MAC in original state */
	if (sc->hwapi != NULL && sc->original_mac_addr)
		sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX,
		    sc->original_mac_addr);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n");
	/* Detach from NVIDIA hardware API */
	if (sc->hwapi != NULL && sc->hwapi->pfnClose)
		sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE);
	/* Release resources */
	if (sc->sc_ih)
		bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih);
	if (sc->irq)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->res)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res);

	nve_free_rings(sc);

	/* Free descriptor memory under the matching tag/map for each ring. */
	if (sc->tx_desc) {
		bus_dmamap_unload(sc->ttag, sc->tmap);
		bus_dmamem_free(sc->ttag, sc->tx_desc, sc->tmap);
	}
	if (sc->rx_desc) {
		bus_dmamap_unload(sc->rtag, sc->rmap);
		bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
	}
	if (sc->mtag)
		bus_dma_tag_destroy(sc->mtag);
	if (sc->ttag)
		bus_dma_tag_destroy(sc->ttag);
	if (sc->rtag)
		bus_dma_tag_destroy(sc->rtag);

	if (ifp)
		if_free(ifp);
	mtx_destroy(&sc->mtx);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n");

	return (0);
}

/* Initialise interface and start it "RUNNING" */
static void
nve_init(void *xsc)
{
	struct nve_softc *sc = xsc;

	NVE_LOCK(sc);
	nve_init_locked(sc);
	NVE_UNLOCK(sc);
}

static void
nve_init_locked(struct nve_softc *sc)
{
	struct ifnet *ifp;
	int error;

	NVE_LOCK_ASSERT(sc);
	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup);

	ifp = sc->ifp;

	/* Do nothing if already running */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	nve_stop(sc);
	DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n");

	nve_ifmedia_upd_locked(ifp);

	/* Setup Hardware interface and allocate memory structures */
	error = sc->hwapi->pfnInit(sc->hwapi->pADCX,
	    0,			/* force speed */
	    0,			/* force full duplex */
	    0,			/* force mode */
	    0,			/* force async mode */
	    &sc->linkup);

	if (error) {
		device_printf(sc->dev,
		    "failed to start NVIDIA Hardware interface\n");
		return;
	}
	/* Set the MAC address */
	sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IF_LLADDR(sc->ifp));
	sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnStart(sc->hwapi->pADCX);

	/* Setup multicast filter */
	nve_setmulti(sc);

	/* Update interface parameters */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->stat_callout, hz, nve_tick, sc);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n");

	return;
}

/* Stop interface activity, i.e. not "RUNNING" */
static void
nve_stop(struct nve_softc *sc)
{
	struct ifnet *ifp;

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n");

	ifp = sc->ifp;
	ifp->if_timer = 0;

	/* Cancel tick timer */
	callout_stop(&sc->stat_callout);

	/* Stop hardware activity */
	sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnStop(sc->hwapi->pADCX, 0);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n");
	/* Shutdown interface and deallocate memory buffers */
	if (sc->hwapi->pfnDeinit)
		sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0);

	sc->linkup = 0;
	sc->cur_rx = 0;
	sc->pending_rxs = 0;
	sc->pending_txs = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n");

	return;
}

/* Shutdown interface for unload/reboot */
static int
nve_shutdown(device_t dev)
{
	struct nve_softc *sc;

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n");

	sc = device_get_softc(dev);

	/* Stop hardware activity */
	NVE_LOCK(sc);
	nve_stop(sc);
	NVE_UNLOCK(sc);

	return (0);
}

/* Allocate RX and TX ring buffers */
static int
nve_init_rings(struct nve_softc *sc)
{
	int error, i;

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n");

	sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0;
	/* Initialise RX ring */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct nve_rx_desc *desc = sc->rx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			device_printf(sc->dev, "couldn't allocate mbuf\n");
			nve_free_rings(sc);
			return (ENOBUFS);
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			device_printf(sc->dev, "couldn't create dma map\n");
			nve_free_rings(sc);
			return (error);
		}
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    nve_dmamap_rx_cb, &desc->paddr, 0);
		if (error) {
			device_printf(sc->dev, "couldn't dma map mbuf\n");
			nve_free_rings(sc);
			return (error);
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

		desc->buflength = buf->mbuf->m_len;
		desc->vaddr = mtod(buf->mbuf, caddr_t);
	}
	bus_dmamap_sync(sc->rtag, sc->rmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Initialize TX ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct nve_tx_desc *desc = sc->tx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		buf->mbuf = NULL;

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			device_printf(sc->dev, "couldn't create dma map\n");
			nve_free_rings(sc);
			return (error);
		}
	}
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n");

	return (error);
}

/* Free the RX and TX ring buffers */
static void
nve_free_rings(struct nve_softc *sc)
{
	int i;

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n");

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct nve_rx_desc *desc = sc->rx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct nve_tx_desc *desc = sc->tx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n");
}

/* Main loop for sending packets from OS to interface */
static void
nve_ifstart(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;

	NVE_LOCK(sc);
	nve_ifstart_locked(ifp);
	NVE_UNLOCK(sc);
}

static void
nve_ifstart_locked(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;
	struct nve_map_buffer *buf;
	struct mbuf *m0, *m;
	struct nve_tx_desc *desc;
	ADAPTER_WRITE_DATA txdata;
	int error, i;

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n");

	NVE_LOCK_ASSERT(sc);

	/* If link is down/busy or queue is empty do nothing */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE ||
	    IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		return;

	/* Transmit queued packets until sent or TX ring is full */
	while (sc->pending_txs < TX_RING_SIZE) {
		desc = sc->tx_desc + sc->cur_tx;
		buf = &desc->buf;

		/* Get next packet to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);

		/* If nothing to send, return. */
		if (m0 == NULL)
			return;

		/*
		 * On nForce4, the chip doesn't interrupt on transmit,
		 * so try to flush transmitted packets from the queue
		 * if it's getting large (see note in nve_watchdog).
		 */
		if (sc->pending_txs > TX_RING_SIZE / 2) {
			sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
			sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
			sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
		}

		/* Map MBUF for DMA access */
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
		    nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);

		if (error && error != EFBIG) {
			m_freem(m0);
			sc->tx_errors++;
			continue;
		}
		/*
		 * Packet has too many fragments - defrag into new mbuf
		 * cluster
		 */
		if (error) {
			m = m_defrag(m0, M_DONTWAIT);
			if (m == NULL) {
				m_freem(m0);
				sc->tx_errors++;
				continue;
			}
			m0 = m;

			error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m,
			    nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);
			if (error) {
				m_freem(m);
				sc->tx_errors++;
				continue;
			}
		}
		/* Do sync on DMA bounce buffer */
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

		buf->mbuf = m0;
		txdata.ulNumberOfElements = desc->numfrags;
		txdata.pvID = (PVOID)desc;

		/* Put fragments into API element list */
		txdata.ulTotalLength = buf->mbuf->m_len;
		for (i = 0; i < desc->numfrags; i++) {
			txdata.sElement[i].ulLength =
			    (ulong)desc->frags[i].ds_len;
			txdata.sElement[i].pPhysical =
			    (PVOID)desc->frags[i].ds_addr;
		}

		/* Send packet to Nvidia API for transmission */
		error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata);

		switch (error) {
		case ADAPTERERR_NONE:
			/* Packet was queued in API TX queue successfully */
			sc->pending_txs++;
			sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE;
			break;

		case ADAPTERERR_TRANSMIT_QUEUE_FULL:
			/* The API TX queue is full - requeue the packet */
			device_printf(sc->dev,
			    "nve_ifstart: transmit queue is full\n");
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			bus_dmamap_unload(sc->mtag, buf->map);
			IFQ_DRV_PREPEND(&ifp->if_snd, buf->mbuf);
			buf->mbuf = NULL;
			return;

		default:
			/* The API failed to queue/send the packet so dump it */
			device_printf(sc->dev, "nve_ifstart: transmit error\n");
			bus_dmamap_unload(sc->mtag, buf->map);
			m_freem(buf->mbuf);
			buf->mbuf = NULL;
			sc->tx_errors++;
			return;
		}
		/* Set watchdog timer. */
		ifp->if_timer = 8;

		/* Copy packet to BPF tap */
		BPF_MTAP(ifp, m0);
	}
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n");
}

/* Handle IOCTL events */
static int
nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct nve_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n");

	switch (command) {
	case SIOCSIFMTU:
		/* Set MTU size */
		NVE_LOCK(sc);
		if (ifp->if_mtu == ifr->ifr_mtu) {
			NVE_UNLOCK(sc);
			break;
		}
		if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) {
			ifp->if_mtu = ifr->ifr_mtu;
			nve_stop(sc);
			nve_init_locked(sc);
		} else
			error = EINVAL;
		NVE_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		/* Setup interface flags */
		NVE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
				nve_init_locked(sc);
				NVE_UNLOCK(sc);
				break;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				nve_stop(sc);
				NVE_UNLOCK(sc);
				break;
			}
		}
		/* Handle IFF_PROMISC and IFF_ALLMULTI flags. */
		nve_setmulti(sc);
		NVE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Setup multicast filter */
		NVE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			nve_setmulti(sc);
		}
		NVE_UNLOCK(sc);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Get/Set interface media parameters */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		/* Everything else we forward to generic ether ioctl */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n");

	return (error);
}

/* Interrupt service routine */
static void
nve_intr(void *arg)
{
	struct nve_softc *sc = arg;
	struct ifnet *ifp = sc->ifp;

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n");

	NVE_LOCK(sc);
	/* Note: "!" binds tighter than "&", so test the flag explicitly. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		nve_stop(sc);
		NVE_UNLOCK(sc);
		return;
	}
	/* Handle interrupt event */
	if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) {
		sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
		sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	}
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nve_ifstart_locked(ifp);

	/* If no pending packets we don't need a timeout */
	if (sc->pending_txs == 0)
		sc->ifp->if_timer = 0;
	NVE_UNLOCK(sc);

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n");

	return;
}

/* Setup multicast filters */
static void
nve_setmulti(struct nve_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	PACKET_FILTER hwfilter;
	int i;
	u_int8_t andaddr[6], oraddr[6];

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n");

	ifp = sc->ifp;

	/* Initialize filter and the AND/OR accumulators */
	hwfilter.ulFilterFlags = 0;
	for (i = 0; i < 6; i++) {
		hwfilter.acMulticastAddress[i] = 0;
		hwfilter.acMulticastMask[i] = 0;
		andaddr[i] = 0xff;	/* identity for the &= below */
		oraddr[i] = 0;		/* identity for the |= below */
	}

	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Accept all packets */
		hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS;
		sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);
		return;
	}
	/* Setup multicast filter */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		u_char *addrp;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
		for (i = 0; i < 6; i++) {
			u_int8_t mcaddr = addrp[i];
			andaddr[i] &= mcaddr;
			oraddr[i] |= mcaddr;
		}
	}
	IF_ADDR_UNLOCK(ifp);
	/*
	 * A mask bit is significant where all multicast addresses agree on
	 * it; the address supplies the agreed-upon value for those bits.
	 */
	for (i = 0; i < 6; i++) {
		hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i];
		hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]);
	}

	/* Send filter to NVIDIA API */
	sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n");

	return;
}

/* Change the current media/mediaopts */
static int
nve_ifmedia_upd(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;

	NVE_LOCK(sc);
	nve_ifmedia_upd_locked(ifp);
	NVE_UNLOCK(sc);
	return (0);
}

static void
nve_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n");

	NVE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->miibus);

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list)) {
			mii_phy_reset(miisc);
		}
	}
	mii_mediachg(mii);
}

/* Update current miibus PHY status of media */
static void
nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nve_softc *sc;
	struct mii_data *mii;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n");

	sc = ifp->if_softc;
	NVE_LOCK(sc);
	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	NVE_UNLOCK(sc);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/* miibus tick timer - maintain link status */
static void
nve_tick(void *xsc)
{
	struct nve_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp;

	NVE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	nve_update_stats(sc);

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);

	if (mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			nve_ifstart_locked(ifp);
	}
	callout_reset(&sc->stat_callout, hz, nve_tick, sc);

	return;
}

/* Update ifnet data structure with collected interface stats from API */
static void
nve_update_stats(struct nve_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	ADAPTER_STATS stats;

	NVE_LOCK_ASSERT(sc);

	if (sc->hwapi) {
		sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats);

		ifp->if_ipackets = stats.ulSuccessfulReceptions;
		ifp->if_ierrors = stats.ulMissedFrames +
		    stats.ulFailedReceptions +
		    stats.ulCRCErrors +
		    stats.ulFramingErrors +
		    stats.ulOverFlowErrors;

		ifp->if_opackets = stats.ulSuccessfulTransmissions;
		ifp->if_oerrors = sc->tx_errors +
		    stats.ulFailedTransmissions +
		    stats.ulRetryErrors +
		    stats.ulUnderflowErrors +
		    stats.ulLossOfCarrierErrors +
		    stats.ulLateCollisionErrors;

		ifp->if_collisions = stats.ulLateCollisionErrors;
	}

	return;
}

/* miibus Read PHY register wrapper - calls Nvidia API entry point */
static int
nve_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nve_softc *sc = device_get_softc(dev);
	ULONG data;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n");

	ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n");

	return (data);
}

/* miibus Write PHY register wrapper - calls Nvidia API entry point */
static void
nve_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct nve_softc *sc = device_get_softc(dev);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n");

	ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n");

	return;
}

/* Watchdog timer to prevent PHY lockups */
static void
nve_watchdog(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;
	int pending_txs_start;

	NVE_LOCK(sc);

	/*
	 * The nvidia driver blob defers tx completion notifications.
	 * Thus, sometimes the watchdog timer will go off when the
	 * tx engine is fine, but the tx completions are just deferred.
	 * Try kicking the driver blob to clear out any pending tx
	 * completions. If that clears up any of the pending tx
	 * operations, then just return without printing the warning
	 * message or resetting the adapter, as we can then conclude
	 * the chip hasn't actually crashed (it's still sending packets).
	 */
	pending_txs_start = sc->pending_txs;
	sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
	sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	if (sc->pending_txs < pending_txs_start) {
		NVE_UNLOCK(sc);
		return;
	}

	device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs);

	sc->tx_errors++;

	nve_stop(sc);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	nve_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nve_ifstart_locked(ifp);
	NVE_UNLOCK(sc);

	return;
}

/* --- Start of NVOSAPI interface --- */

/* Allocate DMA enabled general use memory for API */
static NV_SINT32
nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem)
{
	struct nve_softc *sc;
	bus_addr_t mem_physical;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength);

	sc = (struct nve_softc *)ctx;

	mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (!mem->pLogical) {
		device_printf(sc->dev, "memory allocation failed\n");
		return (0);
	}
	memset(mem->pLogical, 0, (ulong)mem->uiLength);
	mem_physical = vtophys(mem->pLogical);
	mem->pPhysical = (PVOID)mem_physical;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n",
	    (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength);

	return (1);
}

/* Free allocated memory */
static NV_SINT32
nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem)
{
	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n",
	    (uint)mem->pLogical, (uint) mem->uiLength);

	/* Free the full allocation made in nve_osalloc, not just one page. */
	contigfree(mem->pLogical, (ulong)mem->uiLength, M_DEVBUF);
	return (1);
}

/* Copied directly from nvnet.c */
static NV_SINT32
nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
{
	MEMORY_BLOCK mem_block;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n");

	mem_block_ex->pLogical = NULL;
	mem_block_ex->uiLengthOrig = mem_block_ex->uiLength;

	if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) &&
	    (mem_block_ex->AlignmentSize > 1)) {
		DEBUGOUT(NVE_DEBUG_API, " aligning on %d\n",
		    mem_block_ex->AlignmentSize);
		mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize;
	}
	mem_block.uiLength = mem_block_ex->uiLengthOrig;

	if (nve_osalloc(ctx, &mem_block) == 0) {
		return (0);
	}
	mem_block_ex->pLogicalOrig = mem_block.pLogical;
	mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical;
	mem_block_ex->pPhysicalOrigHigh = 0;

	mem_block_ex->pPhysical = mem_block.pPhysical;
	mem_block_ex->pLogical = mem_block.pLogical;

	if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) {
		unsigned int offset;
		offset = mem_block_ex->pPhysicalOrigLow &
		    (mem_block_ex->AlignmentSize - 1);

		if (offset) {
			mem_block_ex->pPhysical =
			    (PVOID)((ulong)mem_block_ex->pPhysical +
			    mem_block_ex->AlignmentSize - offset);
			mem_block_ex->pLogical =
			    (PVOID)((ulong)mem_block_ex->pLogical +
			    mem_block_ex->AlignmentSize - offset);
		} /* if (offset) */
	} /* if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) */
	return (1);
}

/* Copied directly from nvnet.c */
static NV_SINT32
nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
{
	MEMORY_BLOCK mem_block;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n");

	mem_block.pLogical = mem_block_ex->pLogicalOrig;
	mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow);
	mem_block.uiLength = mem_block_ex->uiLengthOrig;

	return (nve_osfree(ctx, &mem_block));
}

/* Clear memory region */
static NV_SINT32
nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length)
{
	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n");
	memset(mem, 0, length);
	return (1);
}

/* Spin-wait for the requested number of microseconds */
static NV_SINT32
nve_osdelay(PNV_VOID ctx, NV_UINT32 usec)
{
	DELAY(usec);
	return (1);
}

/* Allocate memory for rx buffer */
static NV_SINT32
nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id)
{
	struct nve_softc *sc = ctx;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;
	int error;

	if (device_is_attached(sc->dev))
		NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n");

	if (sc->pending_rxs == RX_RING_SIZE) {
		device_printf(sc->dev, "rx ring buffer is full\n");
		goto fail;
	}
	desc = sc->rx_desc + sc->cur_rx;
	buf = &desc->buf;

	if (buf->mbuf == NULL) {
		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			device_printf(sc->dev, "failed to allocate memory\n");
			goto fail;
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    nve_dmamap_rx_cb, &desc->paddr, 0);
		if (error) {
			device_printf(sc->dev, "failed to dmamap mbuf\n");
			m_freem(buf->mbuf);
			buf->mbuf = NULL;
			goto fail;
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);
		desc->buflength = buf->mbuf->m_len;
		desc->vaddr = mtod(buf->mbuf, caddr_t);
	}
	sc->pending_rxs++;
	sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE;

	mem->pLogical = (void *)desc->vaddr;
	mem->pPhysical = (void *)desc->paddr;
	mem->uiLength = desc->buflength;
	*id = (void *)desc;

	return (1);

fail:
	return (0);
}

/* Free the rx buffer */
static NV_SINT32
nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id)
{
	struct nve_softc *sc = ctx;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n");

	desc = (struct nve_rx_desc *) id;
	buf = &desc->buf;

	if (buf->mbuf) {
		bus_dmamap_unload(sc->mtag, buf->map);
		bus_dmamap_destroy(sc->mtag, buf->map);
		m_freem(buf->mbuf);
	}
	sc->pending_rxs--;
	buf->mbuf = NULL;

	return (1);
}

/* This gets called by the Nvidia API after our TX packet has been sent */
static NV_SINT32
nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success)
{
	struct nve_softc *sc = ctx;
	struct nve_map_buffer *buf;
	struct nve_tx_desc *desc = (struct nve_tx_desc *) id;
	struct ifnet *ifp;

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n");

	ifp = sc->ifp;
	buf = &desc->buf;
	sc->pending_txs--;

	/* Unload and free mbuf cluster */
	if (buf->mbuf == NULL)
		goto fail;

	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mtag, buf->map);
	m_freem(buf->mbuf);
	buf->mbuf = NULL;

	/* Send more packets if we have them */
	if (sc->pending_txs < TX_RING_SIZE)
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->pending_txs < TX_RING_SIZE)
		nve_ifstart_locked(ifp);

fail:

	return (1);
}

/* This gets called by the Nvidia API when a new packet has been received */
/* XXX What is newbuf used for? XXX */
static NV_SINT32
nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success,
    NV_UINT8 *newbuf, NV_UINT8 priority)
{
	struct nve_softc *sc = ctx;
	struct ifnet *ifp;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;
	ADAPTER_READ_DATA *readdata;
	struct mbuf *m;

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n");

	ifp = sc->ifp;

	readdata = (ADAPTER_READ_DATA *) data;
	desc = readdata->pvID;
	buf = &desc->buf;
	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);

	if (success) {
		/* Sync DMA bounce buffer. */
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);

		/* First mbuf in packet holds the ethernet and packet headers */
		buf->mbuf->m_pkthdr.rcvif = ifp;
		buf->mbuf->m_pkthdr.len = buf->mbuf->m_len =
		    readdata->ulTotalLength;

		bus_dmamap_unload(sc->mtag, buf->map);

		/* Blat the mbuf pointer, kernel will free the mbuf cluster */
		m = buf->mbuf;
		buf->mbuf = NULL;

		/* Give mbuf to OS. */
		NVE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NVE_LOCK(sc);
		if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH)
			ifp->if_imcasts++;

	} else {
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mtag, buf->map);
		m_freem(buf->mbuf);
		buf->mbuf = NULL;
	}

	sc->cur_rx = desc - sc->rx_desc;
	sc->pending_rxs--;

	return (1);
}

/* This gets called by NVIDIA API when the PHY link state changes */
static NV_SINT32
nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled)
{

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n");

	return (1);
}

/* Setup a watchdog timer */
static NV_SINT32
nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n");

	callout_init(&sc->ostimer, CALLOUT_MPSAFE);
	*timer = &sc->ostimer;

	return (1);
}

/* Free the timer */
static NV_SINT32
nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer)
{

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n");

	callout_drain((struct callout *)timer);

	return (1);
}

/* Setup timer parameters */
static NV_SINT32
nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func,
    PNV_VOID parameters)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n");

	sc->ostimer_func = func;
	sc->ostimer_params = parameters;

	return (1);
}

/* Set the timer to go off */
static NV_SINT32
nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay)
{
	struct nve_softc *sc = ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n");

	callout_reset((struct callout *)timer, delay, sc->ostimer_func,
	    sc->ostimer_params);

	return (1);
}

/* Cancel the timer */
static NV_SINT32
nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer)
{

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n");

	callout_stop((struct callout *)timer);

	return (1);
}

static NV_SINT32
nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id,
    NV_UINT8 *newbuffer, NV_UINT8 priority)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n");

	return (1);
}

static PNV_VOID
nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpktnopq\n");

	return (NULL);
}

static NV_SINT32
nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n");

	return (1);
}

/* Allocate mutex context (already done in nve_attach) */
static NV_SINT32
nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n");

	*pLock = (void **)sc;

	return (1);
}

/* Obtain a spin lock */
static NV_SINT32
nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n");

	return (1);
}

/* Release lock */
static NV_SINT32
nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n");

	return (1);
}

/* I have no idea what this is for */
static PNV_VOID
nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n");
	panic("nve: nve_osreturnbufvirt not implemented\n");

	return (NULL);
}

/* --- End of NVOSAPI interface --- */
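/*
 * Illustrative sketch (an editorial addition, not part of the original
 * driver): the header comment at the top of this file describes how the
 * nvlibnet.o binary component talks to the host OS purely through a table
 * of function pointers (OS_API) handed to ADAPTER_Open(). The fragment
 * below shows the same pattern in miniature. All demo_* names are
 * hypothetical; it is a standalone user-space program, compiled only when
 * NVE_OSAPI_DEMO is defined, and does not use the real NVIDIA API.
 */
#ifdef NVE_OSAPI_DEMO
#include <stdio.h>
#include <stdlib.h>

/* A cut-down analogue of OS_API: the "blob" only ever sees these pointers. */
struct demo_os_api {
	void *ctx;				/* like OS_API.pOSCX */
	void *(*alloc)(void *ctx, size_t len);	/* like pfnAllocMemory */
	void (*dealloc)(void *ctx, void *p);	/* like pfnFreeMemory */
};

/* OS-side implementations, analogous to nve_osalloc()/nve_osfree(). */
static void *
demo_alloc(void *ctx, size_t len)
{
	(void)ctx;
	return (calloc(1, len));
}

static void
demo_dealloc(void *ctx, void *p)
{
	(void)ctx;
	free(p);
}

/* Stands in for the binary component: it only calls through the table. */
static void
demo_blob_run(struct demo_os_api *api)
{
	char *buf = api->alloc(api->ctx, 64);

	if (buf != NULL) {
		snprintf(buf, 64, "hello from the blob");
		printf("%s\n", buf);
		api->dealloc(api->ctx, buf);
	}
}

int
main(void)
{
	/* Fill in the callback table, as nve_attach() does for OS_API. */
	struct demo_os_api api = { NULL, demo_alloc, demo_dealloc };

	demo_blob_run(&api);
	return (0);
}
#endif /* NVE_OSAPI_DEMO */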