if_nve.c revision 229767
/*-
 * Copyright (c) 2005 by David E. O'Brien <obrien@FreeBSD.org>.
 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $
 */

/*
 * NVIDIA nForce MCP Networking Adapter driver
 *
 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by
 * NVIDIA through their web site.
 *
 * All mainstream nForce and nForce2 motherboards are supported. This module
 * is as stable as, and sometimes more stable than, the Linux version.
 * (Recent Linux stability issues seem to be related to issues with newer
 * distributions using GCC 3.x; however, this does not appear to affect
 * FreeBSD 5.x.)
 *
 * In accordance with the NVIDIA distribution license it is necessary to
 * link this module against the nvlibnet.o binary object included in the
 * Linux driver source distribution. The binary component is not modified in
 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c
 * Linux kernel module "wrapper".
 *
 * The Linux driver uses a common code API that is shared between Win32 and
 * i386 Linux. This abstracts the low-level driver functions and uses
 * callbacks and hooks to access the underlying hardware device. By using
 * this same API in a FreeBSD kernel module it is possible to support the
 * hardware without breaching the Linux source distribution's licensing
 * requirements, or obtaining the hardware programming specifications.
 *
 * Although not conventional, it works, and given the relatively small
 * amount of hardware-centric code, it is hopefully no more buggy than its
 * Linux counterpart.
 *
 * NVIDIA now supports the nForce3 AMD64 platform, but I have been unable to
 * access such a system to verify support. The code is, however, reported to
 * work with little modification when compiled with the AMD64 version of the
 * NVIDIA Linux library. All that should be necessary to make the driver
 * work is to link it directly into the kernel, instead of as a module, and
 * apply the docs/amd64.diff patch in this source distribution to the NVIDIA
 * Linux driver source.
 *
 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well
 * as recent versions of DragonFly.
 *
 * Written by Quinton Dolan <q@onthenet.com.au>
 * Portions based on existing FreeBSD network drivers.
 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files.
 */
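/*
 * Illustrative sketch only (not part of the build): the common code API
 * described above is driven through a table of callbacks.  nve_attach()
 * below fills an OS_API structure with the FreeBSD implementations and
 * hands it to the binary component via ADAPTER_Open(), roughly:
 *
 *	OS_API *osapi = &sc->osapi;
 *	osapi->pOSCX = sc;				// driver context
 *	osapi->pfnAllocMemory = nve_osalloc;		// DMA-able memory
 *	osapi->pfnPacketWasReceived = nve_ospacketrx;	// RX upcall
 *	...
 *	OpenParams.pOSApi = osapi;
 *	error = ADAPTER_Open(&OpenParams, (void **)&sc->hwapi, &sc->phyaddr);
 *
 * See nve_attach() for the full, authoritative wiring; the fragment above
 * is abbreviated from that function.
 */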
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nve/if_nve.c 229767 2012-01-07 09:41:57Z kevlo $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/module.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miibus_if.h"

/* Include NVIDIA Linux driver header files */
#include <contrib/dev/nve/nvenet_version.h>
#define linux
#include <contrib/dev/nve/basetype.h>
#include <contrib/dev/nve/phy.h>
#include "os+%DIKED-nve.h"
#include <contrib/dev/nve/drvinfo.h>
#include <contrib/dev/nve/adapter.h>
#undef linux

#include <dev/nve/if_nvereg.h>

MODULE_DEPEND(nve, pci, 1, 1, 1);
MODULE_DEPEND(nve, ether, 1, 1, 1);
MODULE_DEPEND(nve, miibus, 1, 1, 1);

static int nve_probe(device_t);
static int nve_attach(device_t);
static int nve_detach(device_t);
static void nve_init(void *);
static void nve_init_locked(struct nve_softc *);
static void nve_stop(struct nve_softc *);
static int nve_shutdown(device_t);
static int nve_init_rings(struct nve_softc *);
static void nve_free_rings(struct nve_softc *);

static void nve_ifstart(struct ifnet *);
static void nve_ifstart_locked(struct ifnet *);
static int nve_ioctl(struct ifnet *, u_long, caddr_t);
static void nve_intr(void *);
static void nve_tick(void *);
static void nve_setmulti(struct nve_softc *);
static void nve_watchdog(struct nve_softc *);
static void nve_update_stats(struct nve_softc *);

static int nve_ifmedia_upd(struct ifnet *);
static void nve_ifmedia_upd_locked(struct ifnet *);
static void nve_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int nve_miibus_readreg(device_t, int, int);
static int nve_miibus_writereg(device_t, int, int, int);

static void nve_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);

static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *,
    NV_UINT8);
static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *);
static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID);
static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID);

static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *,
    NV_UINT8);
static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32);
static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *);
static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID);
static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID);
static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID);

static device_method_t nve_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nve_probe),
	DEVMETHOD(device_attach, nve_attach),
	DEVMETHOD(device_detach, nve_detach),
	DEVMETHOD(device_shutdown, nve_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg, nve_miibus_readreg),
	DEVMETHOD(miibus_writereg, nve_miibus_writereg),

	DEVMETHOD_END
};

static driver_t nve_driver = {
	"nve",
	nve_methods,
	sizeof(struct nve_softc)
};

static devclass_t nve_devclass;

static int nve_pollinterval = 0;
SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW,
    &nve_pollinterval, 0, "delay between interface polls");

DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0);
DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0);

static struct nve_type nve_devs[] = {
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	    "NVIDIA nForce MCP Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	    "NVIDIA nForce2 MCP2 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	    "NVIDIA nForce3 MCP3 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	    "NVIDIA nForce3 MCP7 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	    "NVIDIA nForce MCP04 Networking Adapter"},	// MCP10
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	    "NVIDIA nForce MCP04 Networking Adapter"},	// MCP11
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA,
PCI_PRODUCT_NVIDIA_MCP61_LAN2, 243 "NVIDIA nForce MCP61 Networking Adapter"}, 244 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 245 "NVIDIA nForce MCP61 Networking Adapter"}, 246 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, 247 "NVIDIA nForce MCP61 Networking Adapter"}, 248 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, 249 "NVIDIA nForce MCP65 Networking Adapter"}, 250 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 251 "NVIDIA nForce MCP65 Networking Adapter"}, 252 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, 253 "NVIDIA nForce MCP65 Networking Adapter"}, 254 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, 255 "NVIDIA nForce MCP65 Networking Adapter"}, 256 {0, 0, NULL} 257}; 258 259/* DMA MEM map callback function to get data segment physical address */ 260static void 261nve_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nsegs, int error) 262{ 263 if (error) 264 return; 265 266 KASSERT(nsegs == 1, 267 ("Too many DMA segments returned when mapping DMA memory")); 268 *(bus_addr_t *)arg = segs->ds_addr; 269} 270 271/* DMA RX map callback function to get data segment physical address */ 272static void 273nve_dmamap_rx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, 274 bus_size_t mapsize, int error) 275{ 276 if (error) 277 return; 278 *(bus_addr_t *)arg = segs->ds_addr; 279} 280 281/* 282 * DMA TX buffer callback function to allocate fragment data segment 283 * addresses 284 */ 285static void 286nve_dmamap_tx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, bus_size_t mapsize, int error) 287{ 288 struct nve_tx_desc *info; 289 290 info = arg; 291 if (error) 292 return; 293 KASSERT(nsegs < NV_MAX_FRAGS, 294 ("Too many DMA segments returned when mapping mbuf")); 295 info->numfrags = nsegs; 296 bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t)); 297} 298 299/* Probe for supported hardware ID's */ 300static int 301nve_probe(device_t dev) 302{ 303 struct nve_type *t; 304 305 t = nve_devs; 306 /* Check for matching PCI DEVICE ID's */ 307 while (t->name != NULL) { 308 if ((pci_get_vendor(dev) == t->vid_id) && 309 (pci_get_device(dev) == t->dev_id)) { 310 device_set_desc(dev, t->name); 311 return (BUS_PROBE_LOW_PRIORITY); 312 } 313 t++; 314 } 315 316 return (ENXIO); 317} 318 319/* Attach driver and initialise hardware for use */ 320static int 321nve_attach(device_t dev) 322{ 323 u_char eaddr[ETHER_ADDR_LEN]; 324 struct nve_softc *sc; 325 struct ifnet *ifp; 326 OS_API *osapi; 327 ADAPTER_OPEN_PARAMS OpenParams; 328 int error = 0, i, rid; 329 330 if (bootverbose) 331 device_printf(dev, "nvenetlib.o version %s\n", DRIVER_VERSION); 332 333 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n"); 334 335 sc = device_get_softc(dev); 336 337 /* Allocate mutex */ 338 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 339 MTX_DEF); 340 callout_init_mtx(&sc->stat_callout, &sc->mtx, 0); 341 342 sc->dev = dev; 343 344 /* Preinitialize data structures */ 345 bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS)); 346 347 /* Enable bus mastering */ 348 pci_enable_busmaster(dev); 349 350 /* Allocate memory mapped address space */ 351 rid = NV_RID; 352 sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, 353 RF_ACTIVE); 354 355 if (sc->res == NULL) { 356 device_printf(dev, "couldn't map memory\n"); 357 error = ENXIO; 358 goto fail; 359 } 360 sc->sc_st = rman_get_bustag(sc->res); 361 sc->sc_sh = rman_get_bushandle(sc->res); 362 363 /* Allocate interrupt */ 364 rid = 0; 365 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 366 RF_SHAREABLE | 
RF_ACTIVE); 367 368 if (sc->irq == NULL) { 369 device_printf(dev, "couldn't map interrupt\n"); 370 error = ENXIO; 371 goto fail; 372 } 373 /* Allocate DMA tags */ 374 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 375 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS, 376 NV_MAX_FRAGS, MCLBYTES, 0, 377 busdma_lock_mutex, &Giant, 378 &sc->mtag); 379 if (error) { 380 device_printf(dev, "couldn't allocate dma tag\n"); 381 goto fail; 382 } 383 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 384 BUS_SPACE_MAXADDR, NULL, NULL, 385 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1, 386 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0, 387 busdma_lock_mutex, &Giant, 388 &sc->rtag); 389 if (error) { 390 device_printf(dev, "couldn't allocate dma tag\n"); 391 goto fail; 392 } 393 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 394 BUS_SPACE_MAXADDR, NULL, NULL, 395 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1, 396 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0, 397 busdma_lock_mutex, &Giant, 398 &sc->ttag); 399 if (error) { 400 device_printf(dev, "couldn't allocate dma tag\n"); 401 goto fail; 402 } 403 /* Allocate DMA safe memory and get the DMA addresses. */ 404 error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc, 405 BUS_DMA_WAITOK, &sc->tmap); 406 if (error) { 407 device_printf(dev, "couldn't allocate dma memory\n"); 408 goto fail; 409 } 410 bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE); 411 error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc, 412 sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb, 413 &sc->tx_addr, 0); 414 if (error) { 415 device_printf(dev, "couldn't map dma memory\n"); 416 goto fail; 417 } 418 error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc, 419 BUS_DMA_WAITOK, &sc->rmap); 420 if (error) { 421 device_printf(dev, "couldn't allocate dma memory\n"); 422 goto fail; 423 } 424 bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE); 425 error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc, 426 sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb, 427 &sc->rx_addr, 0); 428 if (error) { 429 device_printf(dev, "couldn't map dma memory\n"); 430 goto fail; 431 } 432 /* Initialize rings. 
*/ 433 if (nve_init_rings(sc)) { 434 device_printf(dev, "failed to init rings\n"); 435 error = ENXIO; 436 goto fail; 437 } 438 /* Setup NVIDIA API callback routines */ 439 osapi = &sc->osapi; 440 osapi->pOSCX = sc; 441 osapi->pfnAllocMemory = nve_osalloc; 442 osapi->pfnFreeMemory = nve_osfree; 443 osapi->pfnAllocMemoryEx = nve_osallocex; 444 osapi->pfnFreeMemoryEx = nve_osfreeex; 445 osapi->pfnClearMemory = nve_osclear; 446 osapi->pfnStallExecution = nve_osdelay; 447 osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf; 448 osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf; 449 osapi->pfnPacketWasSent = nve_ospackettx; 450 osapi->pfnPacketWasReceived = nve_ospacketrx; 451 osapi->pfnLinkStateHasChanged = nve_oslinkchg; 452 osapi->pfnAllocTimer = nve_osalloctimer; 453 osapi->pfnFreeTimer = nve_osfreetimer; 454 osapi->pfnInitializeTimer = nve_osinittimer; 455 osapi->pfnSetTimer = nve_ossettimer; 456 osapi->pfnCancelTimer = nve_oscanceltimer; 457 osapi->pfnPreprocessPacket = nve_ospreprocpkt; 458 osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq; 459 osapi->pfnIndicatePackets = nve_osindicatepkt; 460 osapi->pfnLockAlloc = nve_oslockalloc; 461 osapi->pfnLockAcquire = nve_oslockacquire; 462 osapi->pfnLockRelease = nve_oslockrelease; 463 osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt; 464 465 sc->linkup = FALSE; 466 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN; 467 468 /* TODO - We don't support hardware offload yet */ 469 sc->hwmode = 1; 470 sc->media = 0; 471 472 /* Set NVIDIA API startup parameters */ 473 OpenParams.MaxDpcLoop = 2; 474 OpenParams.MaxRxPkt = RX_RING_SIZE; 475 OpenParams.MaxTxPkt = TX_RING_SIZE; 476 OpenParams.SentPacketStatusSuccess = 1; 477 OpenParams.SentPacketStatusFailure = 0; 478 OpenParams.MaxRxPktToAccumulate = 6; 479 OpenParams.ulPollInterval = nve_pollinterval; 480 OpenParams.SetForcedModeEveryNthRxPacket = 0; 481 OpenParams.SetForcedModeEveryNthTxPacket = 0; 482 OpenParams.RxForcedInterrupt = 0; 483 OpenParams.TxForcedInterrupt = 0; 484 OpenParams.pOSApi = osapi; 485 OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res); 486 OpenParams.bASFEnabled = 0; 487 OpenParams.ulDescriptorVersion = sc->hwmode; 488 OpenParams.ulMaxPacketSize = sc->max_frame_size; 489 OpenParams.DeviceId = pci_get_device(dev); 490 491 /* Open NVIDIA Hardware API */ 492 error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr); 493 if (error) { 494 device_printf(dev, 495 "failed to open NVIDIA Hardware API: 0x%x\n", error); 496 goto fail; 497 } 498 499 /* TODO - Add support for MODE2 hardware offload */ 500 501 bzero(&sc->adapterdata, sizeof(sc->adapterdata)); 502 503 sc->adapterdata.ulMediaIF = sc->media; 504 sc->adapterdata.ulModeRegTxReadCompleteEnable = 1; 505 sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata); 506 507 /* MAC is loaded backwards into h/w reg */ 508 sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr); 509 for (i = 0; i < 6; i++) { 510 eaddr[i] = sc->original_mac_addr[5 - i]; 511 } 512 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr); 513 514 /* Display ethernet address ,... 
*/ 515 device_printf(dev, "Ethernet address %6D\n", eaddr, ":"); 516 517 /* Allocate interface structures */ 518 ifp = sc->ifp = if_alloc(IFT_ETHER); 519 if (ifp == NULL) { 520 device_printf(dev, "can not if_alloc()\n"); 521 error = ENOSPC; 522 goto fail; 523 } 524 525 /* Setup interface parameters */ 526 ifp->if_softc = sc; 527 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 528 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 529 ifp->if_ioctl = nve_ioctl; 530 ifp->if_start = nve_ifstart; 531 ifp->if_init = nve_init; 532 ifp->if_baudrate = IF_Mbps(100); 533 IFQ_SET_MAXLEN(&ifp->if_snd, TX_RING_SIZE - 1); 534 ifp->if_snd.ifq_drv_maxlen = TX_RING_SIZE - 1; 535 IFQ_SET_READY(&ifp->if_snd); 536 ifp->if_capabilities |= IFCAP_VLAN_MTU; 537 ifp->if_capenable |= IFCAP_VLAN_MTU; 538 539 /* Attach device for MII interface to PHY */ 540 DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_attach\n"); 541 error = mii_attach(dev, &sc->miibus, ifp, nve_ifmedia_upd, 542 nve_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 543 if (error != 0) { 544 device_printf(dev, "attaching PHYs failed\n"); 545 goto fail; 546 } 547 548 /* Attach to OS's managers. */ 549 ether_ifattach(ifp, eaddr); 550 551 /* Activate our interrupt handler. - attach last to avoid lock */ 552 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 553 NULL, nve_intr, sc, &sc->sc_ih); 554 if (error) { 555 device_printf(dev, "couldn't set up interrupt handler\n"); 556 goto fail; 557 } 558 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n"); 559 560fail: 561 if (error) 562 nve_detach(dev); 563 564 return (error); 565} 566 567/* Detach interface for module unload */ 568static int 569nve_detach(device_t dev) 570{ 571 struct nve_softc *sc = device_get_softc(dev); 572 struct ifnet *ifp; 573 574 KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized")); 575 576 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n"); 577 578 ifp = sc->ifp; 579 580 if (device_is_attached(dev)) { 581 ether_ifdetach(ifp); 582 NVE_LOCK(sc); 583 nve_stop(sc); 584 NVE_UNLOCK(sc); 585 callout_drain(&sc->stat_callout); 586 } 587 588 if (sc->miibus) 589 device_delete_child(dev, sc->miibus); 590 bus_generic_detach(dev); 591 592 /* Reload unreversed address back into MAC in original state */ 593 if (sc->original_mac_addr) 594 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, 595 sc->original_mac_addr); 596 597 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n"); 598 /* Detach from NVIDIA hardware API */ 599 if (sc->hwapi->pfnClose) 600 sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE); 601 /* Release resources */ 602 if (sc->sc_ih) 603 bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih); 604 if (sc->irq) 605 bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq); 606 if (sc->res) 607 bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res); 608 609 nve_free_rings(sc); 610 611 if (sc->tx_desc) { 612 bus_dmamap_unload(sc->rtag, sc->rmap); 613 bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap); 614 bus_dmamap_destroy(sc->rtag, sc->rmap); 615 } 616 if (sc->mtag) 617 bus_dma_tag_destroy(sc->mtag); 618 if (sc->ttag) 619 bus_dma_tag_destroy(sc->ttag); 620 if (sc->rtag) 621 bus_dma_tag_destroy(sc->rtag); 622 623 if (ifp) 624 if_free(ifp); 625 mtx_destroy(&sc->mtx); 626 627 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n"); 628 629 return (0); 630} 631 632/* Initialise interface and start it "RUNNING" */ 633static void 634nve_init(void *xsc) 635{ 636 struct nve_softc *sc = xsc; 637 638 NVE_LOCK(sc); 639 nve_init_locked(sc); 640 NVE_UNLOCK(sc); 
641} 642 643static void 644nve_init_locked(struct nve_softc *sc) 645{ 646 struct ifnet *ifp; 647 int error; 648 649 NVE_LOCK_ASSERT(sc); 650 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup); 651 652 ifp = sc->ifp; 653 654 /* Do nothing if already running */ 655 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 656 return; 657 658 nve_stop(sc); 659 DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n"); 660 661 nve_ifmedia_upd_locked(ifp); 662 663 /* Setup Hardware interface and allocate memory structures */ 664 error = sc->hwapi->pfnInit(sc->hwapi->pADCX, 665 0, /* force speed */ 666 0, /* force full duplex */ 667 0, /* force mode */ 668 0, /* force async mode */ 669 &sc->linkup); 670 671 if (error) { 672 device_printf(sc->dev, 673 "failed to start NVIDIA Hardware interface\n"); 674 return; 675 } 676 /* Set the MAC address */ 677 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IF_LLADDR(sc->ifp)); 678 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 679 sc->hwapi->pfnStart(sc->hwapi->pADCX); 680 681 /* Setup multicast filter */ 682 nve_setmulti(sc); 683 684 /* Update interface parameters */ 685 ifp->if_drv_flags |= IFF_DRV_RUNNING; 686 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 687 688 callout_reset(&sc->stat_callout, hz, nve_tick, sc); 689 690 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n"); 691 692 return; 693} 694 695/* Stop interface activity ie. not "RUNNING" */ 696static void 697nve_stop(struct nve_softc *sc) 698{ 699 struct ifnet *ifp; 700 701 NVE_LOCK_ASSERT(sc); 702 703 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n"); 704 705 ifp = sc->ifp; 706 sc->tx_timer = 0; 707 708 /* Cancel tick timer */ 709 callout_stop(&sc->stat_callout); 710 711 /* Stop hardware activity */ 712 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); 713 sc->hwapi->pfnStop(sc->hwapi->pADCX, 0); 714 715 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n"); 716 /* Shutdown interface and deallocate memory buffers */ 717 if (sc->hwapi->pfnDeinit) 718 sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0); 719 720 sc->linkup = 0; 721 sc->cur_rx = 0; 722 sc->pending_rxs = 0; 723 sc->pending_txs = 0; 724 725 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 726 727 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n"); 728 729 return; 730} 731 732/* Shutdown interface for unload/reboot */ 733static int 734nve_shutdown(device_t dev) 735{ 736 struct nve_softc *sc; 737 738 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n"); 739 740 sc = device_get_softc(dev); 741 742 /* Stop hardware activity */ 743 NVE_LOCK(sc); 744 nve_stop(sc); 745 NVE_UNLOCK(sc); 746 747 return (0); 748} 749 750/* Allocate TX ring buffers */ 751static int 752nve_init_rings(struct nve_softc *sc) 753{ 754 int error, i; 755 756 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n"); 757 758 sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0; 759 /* Initialise RX ring */ 760 for (i = 0; i < RX_RING_SIZE; i++) { 761 struct nve_rx_desc *desc = sc->rx_desc + i; 762 struct nve_map_buffer *buf = &desc->buf; 763 764 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 765 if (buf->mbuf == NULL) { 766 device_printf(sc->dev, "couldn't allocate mbuf\n"); 767 nve_free_rings(sc); 768 return (ENOBUFS); 769 } 770 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; 771 m_adj(buf->mbuf, ETHER_ALIGN); 772 773 error = bus_dmamap_create(sc->mtag, 0, &buf->map); 774 if (error) { 775 device_printf(sc->dev, "couldn't create dma map\n"); 776 nve_free_rings(sc); 777 return (error); 778 } 779 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, 780 
nve_dmamap_rx_cb, &desc->paddr, 0); 781 if (error) { 782 device_printf(sc->dev, "couldn't dma map mbuf\n"); 783 nve_free_rings(sc); 784 return (error); 785 } 786 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); 787 788 desc->buflength = buf->mbuf->m_len; 789 desc->vaddr = mtod(buf->mbuf, caddr_t); 790 } 791 bus_dmamap_sync(sc->rtag, sc->rmap, 792 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 793 794 /* Initialize TX ring */ 795 for (i = 0; i < TX_RING_SIZE; i++) { 796 struct nve_tx_desc *desc = sc->tx_desc + i; 797 struct nve_map_buffer *buf = &desc->buf; 798 799 buf->mbuf = NULL; 800 801 error = bus_dmamap_create(sc->mtag, 0, &buf->map); 802 if (error) { 803 device_printf(sc->dev, "couldn't create dma map\n"); 804 nve_free_rings(sc); 805 return (error); 806 } 807 } 808 bus_dmamap_sync(sc->ttag, sc->tmap, 809 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 810 811 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n"); 812 813 return (error); 814} 815 816/* Free the TX ring buffers */ 817static void 818nve_free_rings(struct nve_softc *sc) 819{ 820 int i; 821 822 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n"); 823 824 for (i = 0; i < RX_RING_SIZE; i++) { 825 struct nve_rx_desc *desc = sc->rx_desc + i; 826 struct nve_map_buffer *buf = &desc->buf; 827 828 if (buf->mbuf) { 829 bus_dmamap_unload(sc->mtag, buf->map); 830 bus_dmamap_destroy(sc->mtag, buf->map); 831 m_freem(buf->mbuf); 832 } 833 buf->mbuf = NULL; 834 } 835 836 for (i = 0; i < TX_RING_SIZE; i++) { 837 struct nve_tx_desc *desc = sc->tx_desc + i; 838 struct nve_map_buffer *buf = &desc->buf; 839 840 if (buf->mbuf) { 841 bus_dmamap_unload(sc->mtag, buf->map); 842 bus_dmamap_destroy(sc->mtag, buf->map); 843 m_freem(buf->mbuf); 844 } 845 buf->mbuf = NULL; 846 } 847 848 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n"); 849} 850 851/* Main loop for sending packets from OS to interface */ 852static void 853nve_ifstart(struct ifnet *ifp) 854{ 855 struct nve_softc *sc = ifp->if_softc; 856 857 NVE_LOCK(sc); 858 nve_ifstart_locked(ifp); 859 NVE_UNLOCK(sc); 860} 861 862static void 863nve_ifstart_locked(struct ifnet *ifp) 864{ 865 struct nve_softc *sc = ifp->if_softc; 866 struct nve_map_buffer *buf; 867 struct mbuf *m0, *m; 868 struct nve_tx_desc *desc; 869 ADAPTER_WRITE_DATA txdata; 870 int error, i; 871 872 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n"); 873 874 NVE_LOCK_ASSERT(sc); 875 876 /* If link is down/busy or queue is empty do nothing */ 877 if (ifp->if_drv_flags & IFF_DRV_OACTIVE || 878 IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 879 return; 880 881 /* Transmit queued packets until sent or TX ring is full */ 882 while (sc->pending_txs < TX_RING_SIZE) { 883 desc = sc->tx_desc + sc->cur_tx; 884 buf = &desc->buf; 885 886 /* Get next packet to send. */ 887 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); 888 889 /* If nothing to send, return. */ 890 if (m0 == NULL) 891 return; 892 893 /* 894 * On nForce4, the chip doesn't interrupt on transmit, 895 * so try to flush transmitted packets from the queue 896 * if it's getting large (see note in nve_watchdog). 
897 */ 898 if (sc->pending_txs > TX_RING_SIZE/2) { 899 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); 900 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX); 901 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 902 } 903 904 /* Map MBUF for DMA access */ 905 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0, 906 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); 907 908 if (error && error != EFBIG) { 909 m_freem(m0); 910 sc->tx_errors++; 911 continue; 912 } 913 /* 914 * Packet has too many fragments - defrag into new mbuf 915 * cluster 916 */ 917 if (error) { 918 m = m_defrag(m0, M_DONTWAIT); 919 if (m == NULL) { 920 m_freem(m0); 921 sc->tx_errors++; 922 continue; 923 } 924 m0 = m; 925 926 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m, 927 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); 928 if (error) { 929 m_freem(m); 930 sc->tx_errors++; 931 continue; 932 } 933 } 934 /* Do sync on DMA bounce buffer */ 935 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE); 936 937 buf->mbuf = m0; 938 txdata.ulNumberOfElements = desc->numfrags; 939 txdata.pvID = (PVOID)desc; 940 941 /* Put fragments into API element list */ 942 txdata.ulTotalLength = buf->mbuf->m_len; 943 for (i = 0; i < desc->numfrags; i++) { 944 txdata.sElement[i].ulLength = 945 (ulong)desc->frags[i].ds_len; 946 txdata.sElement[i].pPhysical = 947 (PVOID)desc->frags[i].ds_addr; 948 } 949 950 /* Send packet to Nvidia API for transmission */ 951 error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata); 952 953 switch (error) { 954 case ADAPTERERR_NONE: 955 /* Packet was queued in API TX queue successfully */ 956 sc->pending_txs++; 957 sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE; 958 break; 959 960 case ADAPTERERR_TRANSMIT_QUEUE_FULL: 961 /* The API TX queue is full - requeue the packet */ 962 device_printf(sc->dev, 963 "nve_ifstart: transmit queue is full\n"); 964 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 965 bus_dmamap_unload(sc->mtag, buf->map); 966 IFQ_DRV_PREPEND(&ifp->if_snd, buf->mbuf); 967 buf->mbuf = NULL; 968 return; 969 970 default: 971 /* The API failed to queue/send the packet so dump it */ 972 device_printf(sc->dev, "nve_ifstart: transmit error\n"); 973 bus_dmamap_unload(sc->mtag, buf->map); 974 m_freem(buf->mbuf); 975 buf->mbuf = NULL; 976 sc->tx_errors++; 977 return; 978 } 979 /* Set watchdog timer. 
*/ 980 sc->tx_timer = 8; 981 982 /* Copy packet to BPF tap */ 983 BPF_MTAP(ifp, m0); 984 } 985 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 986 987 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n"); 988} 989 990/* Handle IOCTL events */ 991static int 992nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 993{ 994 struct nve_softc *sc = ifp->if_softc; 995 struct ifreq *ifr = (struct ifreq *) data; 996 struct mii_data *mii; 997 int error = 0; 998 999 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n"); 1000 1001 switch (command) { 1002 case SIOCSIFMTU: 1003 /* Set MTU size */ 1004 NVE_LOCK(sc); 1005 if (ifp->if_mtu == ifr->ifr_mtu) { 1006 NVE_UNLOCK(sc); 1007 break; 1008 } 1009 if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) { 1010 ifp->if_mtu = ifr->ifr_mtu; 1011 nve_stop(sc); 1012 nve_init_locked(sc); 1013 } else 1014 error = EINVAL; 1015 NVE_UNLOCK(sc); 1016 break; 1017 1018 case SIOCSIFFLAGS: 1019 /* Setup interface flags */ 1020 NVE_LOCK(sc); 1021 if (ifp->if_flags & IFF_UP) { 1022 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1023 nve_init_locked(sc); 1024 NVE_UNLOCK(sc); 1025 break; 1026 } 1027 } else { 1028 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1029 nve_stop(sc); 1030 NVE_UNLOCK(sc); 1031 break; 1032 } 1033 } 1034 /* Handle IFF_PROMISC and IFF_ALLMULTI flags. */ 1035 nve_setmulti(sc); 1036 NVE_UNLOCK(sc); 1037 break; 1038 1039 case SIOCADDMULTI: 1040 case SIOCDELMULTI: 1041 /* Setup multicast filter */ 1042 NVE_LOCK(sc); 1043 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1044 nve_setmulti(sc); 1045 } 1046 NVE_UNLOCK(sc); 1047 break; 1048 1049 case SIOCGIFMEDIA: 1050 case SIOCSIFMEDIA: 1051 /* Get/Set interface media parameters */ 1052 mii = device_get_softc(sc->miibus); 1053 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1054 break; 1055 1056 default: 1057 /* Everything else we forward to generic ether ioctl */ 1058 error = ether_ioctl(ifp, command, data); 1059 break; 1060 } 1061 1062 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n"); 1063 1064 return (error); 1065} 1066 1067/* Interrupt service routine */ 1068static void 1069nve_intr(void *arg) 1070{ 1071 struct nve_softc *sc = arg; 1072 struct ifnet *ifp = sc->ifp; 1073 1074 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n"); 1075 1076 NVE_LOCK(sc); 1077 if (!ifp->if_flags & IFF_UP) { 1078 nve_stop(sc); 1079 NVE_UNLOCK(sc); 1080 return; 1081 } 1082 /* Handle interrupt event */ 1083 if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) { 1084 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX); 1085 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 1086 } 1087 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1088 nve_ifstart_locked(ifp); 1089 1090 /* If no pending packets we don't need a timeout */ 1091 if (sc->pending_txs == 0) 1092 sc->tx_timer = 0; 1093 NVE_UNLOCK(sc); 1094 1095 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n"); 1096 1097 return; 1098} 1099 1100/* Setup multicast filters */ 1101static void 1102nve_setmulti(struct nve_softc *sc) 1103{ 1104 struct ifnet *ifp; 1105 struct ifmultiaddr *ifma; 1106 PACKET_FILTER hwfilter; 1107 int i; 1108 u_int8_t andaddr[6], oraddr[6]; 1109 1110 NVE_LOCK_ASSERT(sc); 1111 1112 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n"); 1113 1114 ifp = sc->ifp; 1115 1116 /* Initialize filter */ 1117 hwfilter.ulFilterFlags = 0; 1118 for (i = 0; i < 6; i++) { 1119 hwfilter.acMulticastAddress[i] = 0; 1120 hwfilter.acMulticastMask[i] = 0; 1121 } 1122 1123 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1124 /* Accept all packets */ 1125 
hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS; 1126 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); 1127 return; 1128 } 1129 /* Setup multicast filter */ 1130 if_maddr_rlock(ifp); 1131 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1132 u_char *addrp; 1133 1134 if (ifma->ifma_addr->sa_family != AF_LINK) 1135 continue; 1136 1137 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 1138 for (i = 0; i < 6; i++) { 1139 u_int8_t mcaddr = addrp[i]; 1140 andaddr[i] &= mcaddr; 1141 oraddr[i] |= mcaddr; 1142 } 1143 } 1144 if_maddr_runlock(ifp); 1145 for (i = 0; i < 6; i++) { 1146 hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i]; 1147 hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]); 1148 } 1149 1150 /* Send filter to NVIDIA API */ 1151 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); 1152 1153 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n"); 1154 1155 return; 1156} 1157 1158/* Change the current media/mediaopts */ 1159static int 1160nve_ifmedia_upd(struct ifnet *ifp) 1161{ 1162 struct nve_softc *sc = ifp->if_softc; 1163 1164 NVE_LOCK(sc); 1165 nve_ifmedia_upd_locked(ifp); 1166 NVE_UNLOCK(sc); 1167 return (0); 1168} 1169 1170static void 1171nve_ifmedia_upd_locked(struct ifnet *ifp) 1172{ 1173 struct nve_softc *sc = ifp->if_softc; 1174 struct mii_data *mii; 1175 struct mii_softc *miisc; 1176 1177 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n"); 1178 1179 NVE_LOCK_ASSERT(sc); 1180 mii = device_get_softc(sc->miibus); 1181 1182 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1183 PHY_RESET(miisc); 1184 mii_mediachg(mii); 1185} 1186 1187/* Update current miibus PHY status of media */ 1188static void 1189nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1190{ 1191 struct nve_softc *sc; 1192 struct mii_data *mii; 1193 1194 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n"); 1195 1196 sc = ifp->if_softc; 1197 NVE_LOCK(sc); 1198 mii = device_get_softc(sc->miibus); 1199 mii_pollstat(mii); 1200 1201 ifmr->ifm_active = mii->mii_media_active; 1202 ifmr->ifm_status = mii->mii_media_status; 1203 NVE_UNLOCK(sc); 1204 1205 return; 1206} 1207 1208/* miibus tick timer - maintain link status */ 1209static void 1210nve_tick(void *xsc) 1211{ 1212 struct nve_softc *sc = xsc; 1213 struct mii_data *mii; 1214 struct ifnet *ifp; 1215 1216 NVE_LOCK_ASSERT(sc); 1217 1218 ifp = sc->ifp; 1219 nve_update_stats(sc); 1220 1221 mii = device_get_softc(sc->miibus); 1222 mii_tick(mii); 1223 1224 if (mii->mii_media_status & IFM_ACTIVE && 1225 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1226 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1227 nve_ifstart_locked(ifp); 1228 } 1229 1230 if (sc->tx_timer > 0 && --sc->tx_timer == 0) 1231 nve_watchdog(sc); 1232 callout_reset(&sc->stat_callout, hz, nve_tick, sc); 1233 1234 return; 1235} 1236 1237/* Update ifnet data structure with collected interface stats from API */ 1238static void 1239nve_update_stats(struct nve_softc *sc) 1240{ 1241 struct ifnet *ifp = sc->ifp; 1242 ADAPTER_STATS stats; 1243 1244 NVE_LOCK_ASSERT(sc); 1245 1246 if (sc->hwapi) { 1247 sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats); 1248 1249 ifp->if_ipackets = stats.ulSuccessfulReceptions; 1250 ifp->if_ierrors = stats.ulMissedFrames + 1251 stats.ulFailedReceptions + 1252 stats.ulCRCErrors + 1253 stats.ulFramingErrors + 1254 stats.ulOverFlowErrors; 1255 1256 ifp->if_opackets = stats.ulSuccessfulTransmissions; 1257 ifp->if_oerrors = sc->tx_errors + 1258 stats.ulFailedTransmissions + 1259 stats.ulRetryErrors + 1260 stats.ulUnderflowErrors + 1261 
stats.ulLossOfCarrierErrors + 1262 stats.ulLateCollisionErrors; 1263 1264 ifp->if_collisions = stats.ulLateCollisionErrors; 1265 } 1266 1267 return; 1268} 1269 1270/* miibus Read PHY register wrapper - calls Nvidia API entry point */ 1271static int 1272nve_miibus_readreg(device_t dev, int phy, int reg) 1273{ 1274 struct nve_softc *sc = device_get_softc(dev); 1275 ULONG data; 1276 1277 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n"); 1278 1279 ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data); 1280 1281 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n"); 1282 1283 return (data); 1284} 1285 1286/* miibus Write PHY register wrapper - calls Nvidia API entry point */ 1287static int 1288nve_miibus_writereg(device_t dev, int phy, int reg, int data) 1289{ 1290 struct nve_softc *sc = device_get_softc(dev); 1291 1292 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n"); 1293 1294 ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data); 1295 1296 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n"); 1297 1298 return 0; 1299} 1300 1301/* Watchdog timer to prevent PHY lockups */ 1302static void 1303nve_watchdog(struct nve_softc *sc) 1304{ 1305 struct ifnet *ifp; 1306 int pending_txs_start; 1307 1308 NVE_LOCK_ASSERT(sc); 1309 ifp = sc->ifp; 1310 1311 /* 1312 * The nvidia driver blob defers tx completion notifications. 1313 * Thus, sometimes the watchdog timer will go off when the 1314 * tx engine is fine, but the tx completions are just deferred. 1315 * Try kicking the driver blob to clear out any pending tx 1316 * completions. If that clears up any of the pending tx 1317 * operations, then just return without printing the warning 1318 * message or resetting the adapter, as we can then conclude 1319 * the chip hasn't actually crashed (it's still sending packets). 
1320 */ 1321 pending_txs_start = sc->pending_txs; 1322 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); 1323 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX); 1324 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 1325 if (sc->pending_txs < pending_txs_start) 1326 return; 1327 1328 device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs); 1329 1330 sc->tx_errors++; 1331 1332 nve_stop(sc); 1333 nve_init_locked(sc); 1334 1335 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1336 nve_ifstart_locked(ifp); 1337} 1338 1339/* --- Start of NVOSAPI interface --- */ 1340 1341/* Allocate DMA enabled general use memory for API */ 1342static NV_SINT32 1343nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem) 1344{ 1345 struct nve_softc *sc; 1346 bus_addr_t mem_physical; 1347 1348 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength); 1349 1350 sc = (struct nve_softc *)ctx; 1351 1352 mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF, 1353 M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0); 1354 1355 if (!mem->pLogical) { 1356 device_printf(sc->dev, "memory allocation failed\n"); 1357 return (0); 1358 } 1359 memset(mem->pLogical, 0, (ulong)mem->uiLength); 1360 mem_physical = vtophys(mem->pLogical); 1361 mem->pPhysical = (PVOID)mem_physical; 1362 1363 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n", 1364 (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength); 1365 1366 return (1); 1367} 1368 1369/* Free allocated memory */ 1370static NV_SINT32 1371nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem) 1372{ 1373 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n", 1374 (uint)mem->pLogical, (uint) mem->uiLength); 1375 1376 contigfree(mem->pLogical, PAGE_SIZE, M_DEVBUF); 1377 return (1); 1378} 1379 1380/* Copied directly from nvnet.c */ 1381static NV_SINT32 1382nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) 1383{ 1384 MEMORY_BLOCK mem_block; 1385 1386 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n"); 1387 1388 mem_block_ex->pLogical = NULL; 1389 mem_block_ex->uiLengthOrig = mem_block_ex->uiLength; 1390 1391 if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) && 1392 (mem_block_ex->AlignmentSize > 1)) { 1393 DEBUGOUT(NVE_DEBUG_API, " aligning on %d\n", 1394 mem_block_ex->AlignmentSize); 1395 mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize; 1396 } 1397 mem_block.uiLength = mem_block_ex->uiLengthOrig; 1398 1399 if (nve_osalloc(ctx, &mem_block) == 0) { 1400 return (0); 1401 } 1402 mem_block_ex->pLogicalOrig = mem_block.pLogical; 1403 mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical; 1404 mem_block_ex->pPhysicalOrigHigh = 0; 1405 1406 mem_block_ex->pPhysical = mem_block.pPhysical; 1407 mem_block_ex->pLogical = mem_block.pLogical; 1408 1409 if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) { 1410 unsigned int offset; 1411 offset = mem_block_ex->pPhysicalOrigLow & 1412 (mem_block_ex->AlignmentSize - 1); 1413 1414 if (offset) { 1415 mem_block_ex->pPhysical = 1416 (PVOID)((ulong)mem_block_ex->pPhysical + 1417 mem_block_ex->AlignmentSize - offset); 1418 mem_block_ex->pLogical = 1419 (PVOID)((ulong)mem_block_ex->pLogical + 1420 mem_block_ex->AlignmentSize - offset); 1421 } /* if (offset) */ 1422 } /* if (mem_block_ex->uiLength != *mem_block_ex->uiLengthOrig) */ 1423 return (1); 1424} 1425 1426/* Copied directly from nvnet.c */ 1427static NV_SINT32 1428nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) 1429{ 1430 MEMORY_BLOCK mem_block; 1431 1432 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n"); 1433 1434 mem_block.pLogical = 
mem_block_ex->pLogicalOrig; 1435 mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow); 1436 mem_block.uiLength = mem_block_ex->uiLengthOrig; 1437 1438 return (nve_osfree(ctx, &mem_block)); 1439} 1440 1441/* Clear memory region */ 1442static NV_SINT32 1443nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length) 1444{ 1445 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n"); 1446 memset(mem, 0, length); 1447 return (1); 1448} 1449 1450/* Sleep for a tick */ 1451static NV_SINT32 1452nve_osdelay(PNV_VOID ctx, NV_UINT32 usec) 1453{ 1454 DELAY(usec); 1455 return (1); 1456} 1457 1458/* Allocate memory for rx buffer */ 1459static NV_SINT32 1460nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id) 1461{ 1462 struct nve_softc *sc = ctx; 1463 struct nve_rx_desc *desc; 1464 struct nve_map_buffer *buf; 1465 int error; 1466 1467 if (device_is_attached(sc->dev)) 1468 NVE_LOCK_ASSERT(sc); 1469 1470 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n"); 1471 1472 if (sc->pending_rxs == RX_RING_SIZE) { 1473 device_printf(sc->dev, "rx ring buffer is full\n"); 1474 goto fail; 1475 } 1476 desc = sc->rx_desc + sc->cur_rx; 1477 buf = &desc->buf; 1478 1479 if (buf->mbuf == NULL) { 1480 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1481 if (buf->mbuf == NULL) { 1482 device_printf(sc->dev, "failed to allocate memory\n"); 1483 goto fail; 1484 } 1485 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; 1486 m_adj(buf->mbuf, ETHER_ALIGN); 1487 1488 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, 1489 nve_dmamap_rx_cb, &desc->paddr, 0); 1490 if (error) { 1491 device_printf(sc->dev, "failed to dmamap mbuf\n"); 1492 m_freem(buf->mbuf); 1493 buf->mbuf = NULL; 1494 goto fail; 1495 } 1496 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); 1497 desc->buflength = buf->mbuf->m_len; 1498 desc->vaddr = mtod(buf->mbuf, caddr_t); 1499 } 1500 sc->pending_rxs++; 1501 sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE; 1502 1503 mem->pLogical = (void *)desc->vaddr; 1504 mem->pPhysical = (void *)desc->paddr; 1505 mem->uiLength = desc->buflength; 1506 *id = (void *)desc; 1507 1508 return (1); 1509 1510fail: 1511 return (0); 1512} 1513 1514/* Free the rx buffer */ 1515static NV_SINT32 1516nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id) 1517{ 1518 struct nve_softc *sc = ctx; 1519 struct nve_rx_desc *desc; 1520 struct nve_map_buffer *buf; 1521 1522 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n"); 1523 1524 desc = (struct nve_rx_desc *) id; 1525 buf = &desc->buf; 1526 1527 if (buf->mbuf) { 1528 bus_dmamap_unload(sc->mtag, buf->map); 1529 bus_dmamap_destroy(sc->mtag, buf->map); 1530 m_freem(buf->mbuf); 1531 } 1532 sc->pending_rxs--; 1533 buf->mbuf = NULL; 1534 1535 return (1); 1536} 1537 1538/* This gets called by the Nvidia API after our TX packet has been sent */ 1539static NV_SINT32 1540nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success) 1541{ 1542 struct nve_softc *sc = ctx; 1543 struct nve_map_buffer *buf; 1544 struct nve_tx_desc *desc = (struct nve_tx_desc *) id; 1545 struct ifnet *ifp; 1546 1547 NVE_LOCK_ASSERT(sc); 1548 1549 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n"); 1550 1551 ifp = sc->ifp; 1552 buf = &desc->buf; 1553 sc->pending_txs--; 1554 1555 /* Unload and free mbuf cluster */ 1556 if (buf->mbuf == NULL) 1557 goto fail; 1558 1559 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE); 1560 bus_dmamap_unload(sc->mtag, buf->map); 1561 m_freem(buf->mbuf); 1562 buf->mbuf = NULL; 1563 1564 /* Send more packets if we have them */ 1565 if (sc->pending_txs < 
TX_RING_SIZE) 1566 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1567 1568 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->pending_txs < TX_RING_SIZE) 1569 nve_ifstart_locked(ifp); 1570 1571fail: 1572 1573 return (1); 1574} 1575 1576/* This gets called by the Nvidia API when a new packet has been received */ 1577/* XXX What is newbuf used for? XXX */ 1578static NV_SINT32 1579nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf, 1580 NV_UINT8 priority) 1581{ 1582 struct nve_softc *sc = ctx; 1583 struct ifnet *ifp; 1584 struct nve_rx_desc *desc; 1585 struct nve_map_buffer *buf; 1586 ADAPTER_READ_DATA *readdata; 1587 struct mbuf *m; 1588 1589 NVE_LOCK_ASSERT(sc); 1590 1591 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n"); 1592 1593 ifp = sc->ifp; 1594 1595 readdata = (ADAPTER_READ_DATA *) data; 1596 desc = readdata->pvID; 1597 buf = &desc->buf; 1598 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1599 1600 if (success) { 1601 /* Sync DMA bounce buffer. */ 1602 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1603 1604 /* First mbuf in packet holds the ethernet and packet headers */ 1605 buf->mbuf->m_pkthdr.rcvif = ifp; 1606 buf->mbuf->m_pkthdr.len = buf->mbuf->m_len = 1607 readdata->ulTotalLength; 1608 1609 bus_dmamap_unload(sc->mtag, buf->map); 1610 1611 /* Blat the mbuf pointer, kernel will free the mbuf cluster */ 1612 m = buf->mbuf; 1613 buf->mbuf = NULL; 1614 1615 /* Give mbuf to OS. */ 1616 NVE_UNLOCK(sc); 1617 (*ifp->if_input)(ifp, m); 1618 NVE_LOCK(sc); 1619 if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH) 1620 ifp->if_imcasts++; 1621 1622 } else { 1623 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1624 bus_dmamap_unload(sc->mtag, buf->map); 1625 m_freem(buf->mbuf); 1626 buf->mbuf = NULL; 1627 } 1628 1629 sc->cur_rx = desc - sc->rx_desc; 1630 sc->pending_rxs--; 1631 1632 return (1); 1633} 1634 1635/* This gets called by NVIDIA API when the PHY link state changes */ 1636static NV_SINT32 1637nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled) 1638{ 1639 1640 DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n"); 1641 1642 return (1); 1643} 1644 1645/* Setup a watchdog timer */ 1646static NV_SINT32 1647nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer) 1648{ 1649 struct nve_softc *sc = (struct nve_softc *)ctx; 1650 1651 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n"); 1652 1653 callout_init(&sc->ostimer, CALLOUT_MPSAFE); 1654 *timer = &sc->ostimer; 1655 1656 return (1); 1657} 1658 1659/* Free the timer */ 1660static NV_SINT32 1661nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer) 1662{ 1663 1664 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n"); 1665 1666 callout_drain((struct callout *)timer); 1667 1668 return (1); 1669} 1670 1671/* Setup timer parameters */ 1672static NV_SINT32 1673nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters) 1674{ 1675 struct nve_softc *sc = (struct nve_softc *)ctx; 1676 1677 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n"); 1678 1679 sc->ostimer_func = func; 1680 sc->ostimer_params = parameters; 1681 1682 return (1); 1683} 1684 1685/* Set the timer to go off */ 1686static NV_SINT32 1687nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay) 1688{ 1689 struct nve_softc *sc = ctx; 1690 1691 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n"); 1692 1693 callout_reset((struct callout *)timer, delay, sc->ostimer_func, 1694 sc->ostimer_params); 1695 1696 return (1); 1697} 1698 1699/* Cancel the timer */ 1700static NV_SINT32 1701nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer) 
1702{ 1703 1704 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n"); 1705 1706 callout_stop((struct callout *)timer); 1707 1708 return (1); 1709} 1710 1711static NV_SINT32 1712nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id, 1713 NV_UINT8 *newbuffer, NV_UINT8 priority) 1714{ 1715 1716 /* Not implemented */ 1717 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n"); 1718 1719 return (1); 1720} 1721 1722static PNV_VOID 1723nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata) 1724{ 1725 1726 /* Not implemented */ 1727 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n"); 1728 1729 return (NULL); 1730} 1731 1732static NV_SINT32 1733nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno) 1734{ 1735 1736 /* Not implemented */ 1737 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n"); 1738 1739 return (1); 1740} 1741 1742/* Allocate mutex context (already done in nve_attach) */ 1743static NV_SINT32 1744nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock) 1745{ 1746 struct nve_softc *sc = (struct nve_softc *)ctx; 1747 1748 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n"); 1749 1750 *pLock = (void **)sc; 1751 1752 return (1); 1753} 1754 1755/* Obtain a spin lock */ 1756static NV_SINT32 1757nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock) 1758{ 1759 1760 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n"); 1761 1762 return (1); 1763} 1764 1765/* Release lock */ 1766static NV_SINT32 1767nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock) 1768{ 1769 1770 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n"); 1771 1772 return (1); 1773} 1774 1775/* I have no idea what this is for */ 1776static PNV_VOID 1777nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata) 1778{ 1779 1780 /* Not implemented */ 1781 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n"); 1782 panic("nve: nve_osreturnbufvirtual not implemented\n"); 1783 1784 return (NULL); 1785} 1786 1787/* --- End on NVOSAPI interface --- */ 1788