/*-
 * Copyright (c) 2005 by David E. O'Brien <obrien@FreeBSD.org>.
 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $
 */
/*
 * NVIDIA nForce MCP Networking Adapter driver
 *
 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by
 * NVIDIA through their web site.
 *
 * All mainstream nForce and nForce2 motherboards are supported. This module
 * is as stable as, and sometimes more stable than, the Linux version. (Recent
 * Linux stability issues seem to be related to some issues with newer
 * distributions using GCC 3.x; however, this does not appear to affect
 * FreeBSD 5.x.)
 *
 * In accordance with the NVIDIA distribution license it is necessary to
 * link this module against the nvlibnet.o binary object included in the
 * Linux driver source distribution. The binary component is not modified in
 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c
 * Linux kernel module "wrapper".
 *
 * The Linux driver uses a common code API that is shared between Win32 and
 * i386 Linux. This abstracts the low level driver functions and uses
 * callbacks and hooks to access the underlying hardware device. By using
 * this same API in a FreeBSD kernel module it is possible to support the
 * hardware without breaching the Linux source distribution's licensing
 * requirements, or obtaining the hardware programming specifications.
 *
 * Although not conventional, it works, and given the relatively small
 * amount of hardware centric code, it's hopefully no more buggy than its
 * Linux counterpart.
 *
 * NVIDIA now supports the nForce3 AMD64 platform, however I have been
 * unable to access such a system to verify support. However, the code is
 * reported to work with little modification when compiled with the AMD64
 * version of the NVIDIA Linux library. All that should be necessary to make
 * the driver work is to link it directly into the kernel, instead of as a
 * module, and apply the docs/amd64.diff patch in this source distribution to
 * the NVIDIA Linux driver source.
 *
 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well
 * as recent versions of DragonFly.
 *
 * Written by Quinton Dolan <q@onthenet.com.au>
 * Portions based on existing FreeBSD network drivers.
 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nve/if_nve.c 195049 2009-06-26 11:45:06Z rwatson $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/module.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miibus_if.h"

/* Include NVIDIA Linux driver header files */
#include <contrib/dev/nve/nvenet_version.h>
#define	linux
#include <contrib/dev/nve/basetype.h>
#include <contrib/dev/nve/phy.h>
#include "os+%DIKED-nve.h"
#include <contrib/dev/nve/drvinfo.h>
#include <contrib/dev/nve/adapter.h>
#undef linux

#include <dev/nve/if_nvereg.h>

MODULE_DEPEND(nve, pci, 1, 1, 1);
MODULE_DEPEND(nve, ether, 1, 1, 1);
MODULE_DEPEND(nve, miibus, 1, 1, 1);
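
/*
 * Forward declarations.  The first group are the standard newbus, ifnet and
 * miibus entry points; the nve_os*() functions further down are the
 * callbacks handed to the NVIDIA binary-only API through the OS_API
 * structure that nve_attach() fills in.
 */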
static int	nve_probe(device_t);
static int	nve_attach(device_t);
static int	nve_detach(device_t);
static void	nve_init(void *);
static void	nve_init_locked(struct nve_softc *);
static void	nve_stop(struct nve_softc *);
static int	nve_shutdown(device_t);
static int	nve_init_rings(struct nve_softc *);
static void	nve_free_rings(struct nve_softc *);

static void	nve_ifstart(struct ifnet *);
static void	nve_ifstart_locked(struct ifnet *);
static int	nve_ioctl(struct ifnet *, u_long, caddr_t);
static void	nve_intr(void *);
static void	nve_tick(void *);
static void	nve_setmulti(struct nve_softc *);
static void	nve_watchdog(struct ifnet *);
static void	nve_update_stats(struct nve_softc *);

static int	nve_ifmedia_upd(struct ifnet *);
static void	nve_ifmedia_upd_locked(struct ifnet *);
static void	nve_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	nve_miibus_readreg(device_t, int, int);
static int	nve_miibus_writereg(device_t, int, int, int);

static void	nve_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);

static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8);
static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *);
static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID);
static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID);

static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8);
static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32);
static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *);
static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID);
static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID);
static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID);

static device_method_t nve_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nve_probe),
	DEVMETHOD(device_attach, nve_attach),
	DEVMETHOD(device_detach, nve_detach),
	DEVMETHOD(device_shutdown, nve_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, nve_miibus_readreg),
	DEVMETHOD(miibus_writereg, nve_miibus_writereg),

	{0, 0}
};

static driver_t nve_driver = {
	"nve",
	nve_methods,
	sizeof(struct nve_softc)
};

static devclass_t nve_devclass;

static int	nve_pollinterval = 0;
SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW,
    &nve_pollinterval, 0, "delay between interface polls");

DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0);
DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0);
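
/*
 * Supported PCI vendor/device ID pairs; nve_probe() walks this table to
 * match and name the adapter.
 */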
static struct nve_type nve_devs[] = {
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	"NVIDIA nForce MCP Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	"NVIDIA nForce2 MCP2 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
	"NVIDIA nForce2 400 MCP4 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
	"NVIDIA nForce2 400 MCP5 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	"NVIDIA nForce3 MCP3 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
	"NVIDIA nForce3 250 MCP6 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	"NVIDIA nForce3 MCP7 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
	"NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
	"NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	"NVIDIA nForce MCP04 Networking Adapter"},	// MCP10
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	"NVIDIA nForce MCP04 Networking Adapter"},	// MCP11
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
	"NVIDIA nForce 430 MCP12 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
	"NVIDIA nForce 430 MCP13 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	"NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	"NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	"NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	"NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	"NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	"NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	"NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	"NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	"NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	"NVIDIA nForce MCP65 Networking Adapter"},
	{0, 0, NULL}
};

/* DMA MEM map callback function to get data segment physical address */
static void
nve_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nsegs, int error)
{
	if (error)
		return;

	KASSERT(nsegs == 1,
	    ("Too many DMA segments returned when mapping DMA memory"));
	*(bus_addr_t *)arg = segs->ds_addr;
}

/* DMA RX map callback function to get data segment physical address */
static void
nve_dmamap_rx_cb(void *arg, bus_dma_segment_t * segs, int nsegs,
    bus_size_t mapsize, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

/*
 * DMA TX buffer callback function to allocate fragment data segment
 * addresses
 */
static void
nve_dmamap_tx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, bus_size_t mapsize, int error)
{
	struct nve_tx_desc *info;

	info = arg;
	if (error)
		return;
	KASSERT(nsegs < NV_MAX_FRAGS,
	    ("Too many DMA segments returned when mapping mbuf"));
	info->numfrags = nsegs;
	bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t));
}

/* Probe for supported hardware IDs */
static int
nve_probe(device_t dev)
{
	struct nve_type *t;

	t = nve_devs;
	/* Check for matching PCI device IDs */
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->vid_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			device_set_desc(dev, t->name);
			return (BUS_PROBE_LOW_PRIORITY);
		}
		t++;
	}

	return (ENXIO);
}
/* Attach driver and initialise hardware for use */
static int
nve_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	struct nve_softc *sc;
	struct ifnet *ifp;
	OS_API *osapi;
	ADAPTER_OPEN_PARAMS OpenParams;
	int error = 0, i, rid;

	if (bootverbose)
		device_printf(dev, "nvenetlib.o version %s\n", DRIVER_VERSION);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n");

	sc = device_get_softc(dev);

	/* Allocate mutex */
	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->stat_callout, &sc->mtx, 0);

	sc->dev = dev;

	/* Preinitialize data structures */
	bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS));

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate memory mapped address space */
	rid = NV_RID;
	sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1,
	    RF_ACTIVE);

	if (sc->res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->sc_st = rman_get_bustag(sc->res);
	sc->sc_sh = rman_get_bushandle(sc->res);

	/* Allocate interrupt */
	rid = 0;
	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}
	/* Allocate DMA tags */
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS,
	    NV_MAX_FRAGS, MCLBYTES, 0,
	    busdma_lock_mutex, &Giant,
	    &sc->mtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0,
	    busdma_lock_mutex, &Giant,
	    &sc->rtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0,
	    busdma_lock_mutex, &Giant,
	    &sc->ttag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	/* Allocate DMA safe memory and get the DMA addresses. */
	error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc,
	    BUS_DMA_WAITOK, &sc->tmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE);
	error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb,
	    &sc->tx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc,
	    BUS_DMA_WAITOK, &sc->rmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE);
	error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb,
	    &sc->rx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	/* Initialize rings. */
	if (nve_init_rings(sc)) {
		device_printf(dev, "failed to init rings\n");
		error = ENXIO;
		goto fail;
	}
	/* Setup NVIDIA API callback routines */
	osapi = &sc->osapi;
	osapi->pOSCX = sc;
	osapi->pfnAllocMemory = nve_osalloc;
	osapi->pfnFreeMemory = nve_osfree;
	osapi->pfnAllocMemoryEx = nve_osallocex;
	osapi->pfnFreeMemoryEx = nve_osfreeex;
	osapi->pfnClearMemory = nve_osclear;
	osapi->pfnStallExecution = nve_osdelay;
	osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf;
	osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf;
	osapi->pfnPacketWasSent = nve_ospackettx;
	osapi->pfnPacketWasReceived = nve_ospacketrx;
	osapi->pfnLinkStateHasChanged = nve_oslinkchg;
	osapi->pfnAllocTimer = nve_osalloctimer;
	osapi->pfnFreeTimer = nve_osfreetimer;
	osapi->pfnInitializeTimer = nve_osinittimer;
	osapi->pfnSetTimer = nve_ossettimer;
	osapi->pfnCancelTimer = nve_oscanceltimer;
	osapi->pfnPreprocessPacket = nve_ospreprocpkt;
	osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq;
	osapi->pfnIndicatePackets = nve_osindicatepkt;
	osapi->pfnLockAlloc = nve_oslockalloc;
	osapi->pfnLockAcquire = nve_oslockacquire;
	osapi->pfnLockRelease = nve_oslockrelease;
	osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt;

	sc->linkup = FALSE;
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN;

	/* TODO - We don't support hardware offload yet */
	sc->hwmode = 1;
	sc->media = 0;

	/* Set NVIDIA API startup parameters */
	OpenParams.MaxDpcLoop = 2;
	OpenParams.MaxRxPkt = RX_RING_SIZE;
	OpenParams.MaxTxPkt = TX_RING_SIZE;
	OpenParams.SentPacketStatusSuccess = 1;
	OpenParams.SentPacketStatusFailure = 0;
	OpenParams.MaxRxPktToAccumulate = 6;
	OpenParams.ulPollInterval = nve_pollinterval;
	OpenParams.SetForcedModeEveryNthRxPacket = 0;
	OpenParams.SetForcedModeEveryNthTxPacket = 0;
	OpenParams.RxForcedInterrupt = 0;
	OpenParams.TxForcedInterrupt = 0;
	OpenParams.pOSApi = osapi;
	OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res);
	OpenParams.bASFEnabled = 0;
	OpenParams.ulDescriptorVersion = sc->hwmode;
	OpenParams.ulMaxPacketSize = sc->max_frame_size;
	OpenParams.DeviceId = pci_get_device(dev);

	/* Open NVIDIA Hardware API */
	error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr);
	if (error) {
		device_printf(dev,
		    "failed to open NVIDIA Hardware API: 0x%x\n", error);
		goto fail;
	}

	/* TODO - Add support for MODE2 hardware offload */

	bzero(&sc->adapterdata, sizeof(sc->adapterdata));

	sc->adapterdata.ulMediaIF = sc->media;
	sc->adapterdata.ulModeRegTxReadCompleteEnable = 1;
	sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata);

	/* MAC is loaded backwards into h/w reg */
	sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr);
	for (i = 0; i < 6; i++) {
		eaddr[i] = sc->original_mac_addr[5 - i];
	}
	sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr);

	/* Display ethernet address. */
	device_printf(dev, "Ethernet address %6D\n", eaddr, ":");

	/* Allocate interface structures */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Probe device for MII interface to PHY */
	DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_phy_probe\n");
	if (mii_phy_probe(dev, &sc->miibus, nve_ifmedia_upd, nve_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	/* Setup interface parameters */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nve_ioctl;
	ifp->if_start = nve_ifstart;
	ifp->if_watchdog = nve_watchdog;
	ifp->if_timer = 0;
	ifp->if_init = nve_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Mbps(100);
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_RING_SIZE - 1);
	ifp->if_snd.ifq_drv_maxlen = TX_RING_SIZE - 1;
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;

	/* Attach to OS's managers. */
	ether_ifattach(ifp, eaddr);

	/* Activate our interrupt handler. - attach last to avoid lock */
	error = bus_setup_intr(sc->dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, nve_intr, sc, &sc->sc_ih);
	if (error) {
		device_printf(sc->dev, "couldn't set up interrupt handler\n");
		goto fail;
	}
	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n");

fail:
	if (error)
		nve_detach(dev);

	return (error);
}

/* Detach interface for module unload */
static int
nve_detach(device_t dev)
{
	struct nve_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;

	KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized"));

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n");

	ifp = sc->ifp;

	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		NVE_LOCK(sc);
		nve_stop(sc);
		NVE_UNLOCK(sc);
		callout_drain(&sc->stat_callout);
	}

	if (sc->miibus)
		device_delete_child(dev, sc->miibus);
	bus_generic_detach(dev);

	/* Reload unreversed address back into MAC in original state */
	if (sc->original_mac_addr)
		sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX,
		    sc->original_mac_addr);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n");
	/* Detach from NVIDIA hardware API */
	if (sc->hwapi->pfnClose)
		sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE);
	/* Release resources */
	if (sc->sc_ih)
		bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih);
	if (sc->irq)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->res)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res);

	nve_free_rings(sc);

	if (sc->tx_desc) {
		bus_dmamap_unload(sc->rtag, sc->rmap);
		bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
		bus_dmamap_destroy(sc->rtag, sc->rmap);
	}
	if (sc->mtag)
		bus_dma_tag_destroy(sc->mtag);
	if (sc->ttag)
		bus_dma_tag_destroy(sc->ttag);
	if (sc->rtag)
		bus_dma_tag_destroy(sc->rtag);

	if (ifp)
		if_free(ifp);
	mtx_destroy(&sc->mtx);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n");

	return (0);
}

/* Initialise interface and start it "RUNNING" */
static void
nve_init(void *xsc)
{
	struct nve_softc *sc = xsc;

	NVE_LOCK(sc);
	nve_init_locked(sc);
	NVE_UNLOCK(sc);
}

static void
nve_init_locked(struct nve_softc *sc)
{
	struct ifnet *ifp;
	int error;

	NVE_LOCK_ASSERT(sc);
	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup);

	ifp = sc->ifp;

	/* Do nothing if already running */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	nve_stop(sc);
	DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n");

	nve_ifmedia_upd_locked(ifp);

	/* Setup Hardware interface and allocate memory structures */
	error = sc->hwapi->pfnInit(sc->hwapi->pADCX,
	    0,			/* force speed */
	    0,			/* force full duplex */
	    0,			/* force mode */
	    0,			/* force async mode */
	    &sc->linkup);

	if (error) {
		device_printf(sc->dev,
		    "failed to start NVIDIA Hardware interface\n");
		return;
	}
	/* Set the MAC address */
	sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IF_LLADDR(sc->ifp));
	sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnStart(sc->hwapi->pADCX);

	/* Setup multicast filter */
	nve_setmulti(sc);

	/* Update interface parameters */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->stat_callout, hz, nve_tick, sc);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n");

	return;
}

/* Stop interface activity, i.e. not "RUNNING" */
static void
nve_stop(struct nve_softc *sc)
{
	struct ifnet *ifp;

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n");

	ifp = sc->ifp;
	ifp->if_timer = 0;

	/* Cancel tick timer */
	callout_stop(&sc->stat_callout);

	/* Stop hardware activity */
	sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnStop(sc->hwapi->pADCX, 0);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n");
	/* Shutdown interface and deallocate memory buffers */
	if (sc->hwapi->pfnDeinit)
		sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0);

	sc->linkup = 0;
	sc->cur_rx = 0;
	sc->pending_rxs = 0;
	sc->pending_txs = 0;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n");

	return;
}

/* Shutdown interface for unload/reboot */
static int
nve_shutdown(device_t dev)
{
	struct nve_softc *sc;

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n");

	sc = device_get_softc(dev);

	/* Stop hardware activity */
	NVE_LOCK(sc);
	nve_stop(sc);
	NVE_UNLOCK(sc);

	return (0);
}

/* Allocate RX and TX ring buffers */
static int
nve_init_rings(struct nve_softc *sc)
{
	int error, i;

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n");

	sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0;
	/* Initialise RX ring */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct nve_rx_desc *desc = sc->rx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			device_printf(sc->dev, "couldn't allocate mbuf\n");
			nve_free_rings(sc);
			return (ENOBUFS);
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			device_printf(sc->dev, "couldn't create dma map\n");
			nve_free_rings(sc);
			return (error);
		}
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    nve_dmamap_rx_cb, &desc->paddr, 0);
		if (error) {
			device_printf(sc->dev, "couldn't dma map mbuf\n");
			nve_free_rings(sc);
			return (error);
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

		desc->buflength = buf->mbuf->m_len;
		desc->vaddr = mtod(buf->mbuf, caddr_t);
	}
	bus_dmamap_sync(sc->rtag, sc->rmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Initialize TX ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct nve_tx_desc *desc = sc->tx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		buf->mbuf = NULL;

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			device_printf(sc->dev, "couldn't create dma map\n");
			nve_free_rings(sc);
			return (error);
		}
	}
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n");

	return (error);
}

/* Free the RX and TX ring buffers */
static void
nve_free_rings(struct nve_softc *sc)
{
	int i;

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n");

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct nve_rx_desc *desc = sc->rx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct nve_tx_desc *desc = sc->tx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n");
}

/* Main loop for sending packets from OS to interface */
static void
nve_ifstart(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;

	NVE_LOCK(sc);
	nve_ifstart_locked(ifp);
	NVE_UNLOCK(sc);
}
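
/*
 * nve_ifstart() above is the if_start entry point; it only takes the softc
 * lock and defers to nve_ifstart_locked(), which does the real work.
 */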
static void
nve_ifstart_locked(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;
	struct nve_map_buffer *buf;
	struct mbuf *m0, *m;
	struct nve_tx_desc *desc;
	ADAPTER_WRITE_DATA txdata;
	int error, i;

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n");

	NVE_LOCK_ASSERT(sc);

	/* If link is down/busy or queue is empty do nothing */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE ||
	    IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		return;

	/* Transmit queued packets until sent or TX ring is full */
	while (sc->pending_txs < TX_RING_SIZE) {
		desc = sc->tx_desc + sc->cur_tx;
		buf = &desc->buf;

		/* Get next packet to send. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);

		/* If nothing to send, return. */
		if (m0 == NULL)
			return;

		/*
		 * On nForce4, the chip doesn't interrupt on transmit,
		 * so try to flush transmitted packets from the queue
		 * if it's getting large (see note in nve_watchdog).
		 */
		if (sc->pending_txs > TX_RING_SIZE/2) {
			sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
			sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
			sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
		}

		/* Map MBUF for DMA access */
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
		    nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);

		if (error && error != EFBIG) {
			m_freem(m0);
			sc->tx_errors++;
			continue;
		}
		/*
		 * Packet has too many fragments - defrag into new mbuf
		 * cluster
		 */
		if (error) {
			m = m_defrag(m0, M_DONTWAIT);
			if (m == NULL) {
				m_freem(m0);
				sc->tx_errors++;
				continue;
			}
			m0 = m;

			error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m,
			    nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);
			if (error) {
				m_freem(m);
				sc->tx_errors++;
				continue;
			}
		}
		/* Do sync on DMA bounce buffer */
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

		buf->mbuf = m0;
		txdata.ulNumberOfElements = desc->numfrags;
		txdata.pvID = (PVOID)desc;

		/* Put fragments into API element list */
		txdata.ulTotalLength = buf->mbuf->m_len;
		for (i = 0; i < desc->numfrags; i++) {
			txdata.sElement[i].ulLength =
			    (ulong)desc->frags[i].ds_len;
			txdata.sElement[i].pPhysical =
			    (PVOID)desc->frags[i].ds_addr;
		}

		/* Send packet to Nvidia API for transmission */
		error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata);

		switch (error) {
		case ADAPTERERR_NONE:
			/* Packet was queued in API TX queue successfully */
			sc->pending_txs++;
			sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE;
			break;

		case ADAPTERERR_TRANSMIT_QUEUE_FULL:
			/* The API TX queue is full - requeue the packet */
			device_printf(sc->dev,
			    "nve_ifstart: transmit queue is full\n");
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			bus_dmamap_unload(sc->mtag, buf->map);
			IFQ_DRV_PREPEND(&ifp->if_snd, buf->mbuf);
			buf->mbuf = NULL;
			return;

		default:
			/* The API failed to queue/send the packet so dump it */
			device_printf(sc->dev, "nve_ifstart: transmit error\n");
			bus_dmamap_unload(sc->mtag, buf->map);
			m_freem(buf->mbuf);
			buf->mbuf = NULL;
			sc->tx_errors++;
			return;
		}
		/* Set watchdog timer. */
		ifp->if_timer = 8;

		/* Copy packet to BPF tap */
		BPF_MTAP(ifp, m0);
	}
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n");
}

/* Handle IOCTL events */
static int
nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct nve_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n");

	switch (command) {
	case SIOCSIFMTU:
		/* Set MTU size */
		NVE_LOCK(sc);
		if (ifp->if_mtu == ifr->ifr_mtu) {
			NVE_UNLOCK(sc);
			break;
		}
		if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) {
			ifp->if_mtu = ifr->ifr_mtu;
			nve_stop(sc);
			nve_init_locked(sc);
		} else
			error = EINVAL;
		NVE_UNLOCK(sc);
		break;

	case SIOCSIFFLAGS:
		/* Setup interface flags */
		NVE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
				nve_init_locked(sc);
				NVE_UNLOCK(sc);
				break;
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				nve_stop(sc);
				NVE_UNLOCK(sc);
				break;
			}
		}
		/* Handle IFF_PROMISC and IFF_ALLMULTI flags. */
		nve_setmulti(sc);
		NVE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Setup multicast filter */
		NVE_LOCK(sc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			nve_setmulti(sc);
		}
		NVE_UNLOCK(sc);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Get/Set interface media parameters */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		/* Everything else we forward to generic ether ioctl */
		error = ether_ioctl(ifp, command, data);
		break;
	}

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n");

	return (error);
}

/* Interrupt service routine */
static void
nve_intr(void *arg)
{
	struct nve_softc *sc = arg;
	struct ifnet *ifp = sc->ifp;

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n");

	NVE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		nve_stop(sc);
		NVE_UNLOCK(sc);
		return;
	}
	/* Handle interrupt event */
	if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) {
		sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
		sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	}
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nve_ifstart_locked(ifp);

	/* If no pending packets we don't need a timeout */
	if (sc->pending_txs == 0)
		sc->ifp->if_timer = 0;
	NVE_UNLOCK(sc);

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n");

	return;
}

/* Setup multicast filters */
static void
nve_setmulti(struct nve_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	PACKET_FILTER hwfilter;
	int i;
	u_int8_t andaddr[6], oraddr[6];

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n");

	ifp = sc->ifp;

	/* Initialize filter */
	hwfilter.ulFilterFlags = 0;
	for (i = 0; i < 6; i++) {
		hwfilter.acMulticastAddress[i] = 0;
		hwfilter.acMulticastMask[i] = 0;
	}

	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Accept all packets */
		hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS;
		sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);
		return;
	}
	/* Setup multicast filter */
	for (i = 0; i < 6; i++) {
		/* Start from all-ones AND / all-zeroes OR accumulators. */
		andaddr[i] = 0xff;
		oraddr[i] = 0;
	}
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		u_char *addrp;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
		for (i = 0; i < 6; i++) {
			u_int8_t mcaddr = addrp[i];
			andaddr[i] &= mcaddr;
			oraddr[i] |= mcaddr;
		}
	}
	if_maddr_runlock(ifp);
	for (i = 0; i < 6; i++) {
		hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i];
		hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]);
	}

	/* Send filter to NVIDIA API */
	sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n");

	return;
}

/* Change the current media/mediaopts */
static int
nve_ifmedia_upd(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;

	NVE_LOCK(sc);
	nve_ifmedia_upd_locked(ifp);
	NVE_UNLOCK(sc);
	return (0);
}

static void
nve_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n");

	NVE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->miibus);

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list)) {
			mii_phy_reset(miisc);
		}
	}
	mii_mediachg(mii);
}

/* Update current miibus PHY status of media */
static void
nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nve_softc *sc;
	struct mii_data *mii;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n");

	sc = ifp->if_softc;
	NVE_LOCK(sc);
	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	NVE_UNLOCK(sc);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/* miibus tick timer - maintain link status */
static void
nve_tick(void *xsc)
{
	struct nve_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp;

	NVE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	nve_update_stats(sc);

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);

	if (mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			nve_ifstart_locked(ifp);
	}
	callout_reset(&sc->stat_callout, hz, nve_tick, sc);

	return;
}

/* Update ifnet data structure with collected interface stats from API */
static void
nve_update_stats(struct nve_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	ADAPTER_STATS stats;

	NVE_LOCK_ASSERT(sc);

	if (sc->hwapi) {
		sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats);

		ifp->if_ipackets = stats.ulSuccessfulReceptions;
		ifp->if_ierrors = stats.ulMissedFrames +
		    stats.ulFailedReceptions +
		    stats.ulCRCErrors +
		    stats.ulFramingErrors +
		    stats.ulOverFlowErrors;

		ifp->if_opackets = stats.ulSuccessfulTransmissions;
		ifp->if_oerrors = sc->tx_errors +
		    stats.ulFailedTransmissions +
		    stats.ulRetryErrors +
		    stats.ulUnderflowErrors +
		    stats.ulLossOfCarrierErrors +
		    stats.ulLateCollisionErrors;

		ifp->if_collisions = stats.ulLateCollisionErrors;
	}

	return;
}

/* miibus Read PHY register wrapper - calls Nvidia API entry point */
static int
nve_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nve_softc *sc = device_get_softc(dev);
	ULONG data;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n");

	ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n");

	return (data);
}

/* miibus Write PHY register wrapper - calls Nvidia API entry point */
static int
nve_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct nve_softc *sc = device_get_softc(dev);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n");

	ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n");

	return 0;
}

/* Watchdog timer to prevent PHY lockups */
static void
nve_watchdog(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;
	int pending_txs_start;

	NVE_LOCK(sc);

	/*
	 * The nvidia driver blob defers tx completion notifications.
	 * Thus, sometimes the watchdog timer will go off when the
	 * tx engine is fine, but the tx completions are just deferred.
	 * Try kicking the driver blob to clear out any pending tx
	 * completions. If that clears up any of the pending tx
	 * operations, then just return without printing the warning
	 * message or resetting the adapter, as we can then conclude
	 * the chip hasn't actually crashed (it's still sending packets).
	 */
	pending_txs_start = sc->pending_txs;
	sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
	sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	if (sc->pending_txs < pending_txs_start) {
		NVE_UNLOCK(sc);
		return;
	}

	device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs);

	sc->tx_errors++;

	nve_stop(sc);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	nve_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nve_ifstart_locked(ifp);
	NVE_UNLOCK(sc);

	return;
}

/* --- Start of NVOSAPI interface --- */

/* Allocate DMA enabled general use memory for API */
static NV_SINT32
nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem)
{
	struct nve_softc *sc;
	bus_addr_t mem_physical;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength);

	sc = (struct nve_softc *)ctx;

	mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);

	if (!mem->pLogical) {
		device_printf(sc->dev, "memory allocation failed\n");
		return (0);
	}
	memset(mem->pLogical, 0, (ulong)mem->uiLength);
	mem_physical = vtophys(mem->pLogical);
	mem->pPhysical = (PVOID)mem_physical;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n",
	    (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength);

	return (1);
}

/* Free allocated memory */
static NV_SINT32
nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem)
{
	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n",
	    (uint)mem->pLogical, (uint) mem->uiLength);

	contigfree(mem->pLogical, PAGE_SIZE, M_DEVBUF);
	return (1);
}

/* Copied directly from nvnet.c */
static NV_SINT32
nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
{
	MEMORY_BLOCK mem_block;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n");

	mem_block_ex->pLogical = NULL;
	mem_block_ex->uiLengthOrig = mem_block_ex->uiLength;

	if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) &&
	    (mem_block_ex->AlignmentSize > 1)) {
		DEBUGOUT(NVE_DEBUG_API, " aligning on %d\n",
		    mem_block_ex->AlignmentSize);
		mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize;
	}
	mem_block.uiLength = mem_block_ex->uiLengthOrig;

	if (nve_osalloc(ctx, &mem_block) == 0) {
		return (0);
	}
	mem_block_ex->pLogicalOrig = mem_block.pLogical;
	mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical;
	mem_block_ex->pPhysicalOrigHigh = 0;

	mem_block_ex->pPhysical = mem_block.pPhysical;
	mem_block_ex->pLogical = mem_block.pLogical;

	if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) {
		unsigned int offset;
		offset = mem_block_ex->pPhysicalOrigLow &
		    (mem_block_ex->AlignmentSize - 1);

		if (offset) {
			mem_block_ex->pPhysical =
			    (PVOID)((ulong)mem_block_ex->pPhysical +
			    mem_block_ex->AlignmentSize - offset);
			mem_block_ex->pLogical =
			    (PVOID)((ulong)mem_block_ex->pLogical +
			    mem_block_ex->AlignmentSize - offset);
		} /* if (offset) */
	} /* if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) */
	return (1);
}

/* Copied directly from nvnet.c */
static NV_SINT32
nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
{
	MEMORY_BLOCK mem_block;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n");

	mem_block.pLogical = mem_block_ex->pLogicalOrig;
	mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow);
	mem_block.uiLength = mem_block_ex->uiLengthOrig;

	return (nve_osfree(ctx, &mem_block));
}

/* Clear memory region */
static NV_SINT32
nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length)
{
	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n");
	memset(mem, 0, length);
	return (1);
}

/* Stall execution for the requested number of microseconds */
static NV_SINT32
nve_osdelay(PNV_VOID ctx, NV_UINT32 usec)
{
	DELAY(usec);
	return (1);
}

/* Allocate memory for rx buffer */
static NV_SINT32
nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id)
{
	struct nve_softc *sc = ctx;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;
	int error;

	if (device_is_attached(sc->dev))
		NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n");

	if (sc->pending_rxs == RX_RING_SIZE) {
		device_printf(sc->dev, "rx ring buffer is full\n");
		goto fail;
	}
	desc = sc->rx_desc + sc->cur_rx;
	buf = &desc->buf;

	if (buf->mbuf == NULL) {
		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			device_printf(sc->dev, "failed to allocate memory\n");
			goto fail;
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    nve_dmamap_rx_cb, &desc->paddr, 0);
		if (error) {
			device_printf(sc->dev, "failed to dmamap mbuf\n");
			m_freem(buf->mbuf);
			buf->mbuf = NULL;
			goto fail;
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);
		desc->buflength = buf->mbuf->m_len;
		desc->vaddr = mtod(buf->mbuf, caddr_t);
	}
	sc->pending_rxs++;
	sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE;

	mem->pLogical = (void *)desc->vaddr;
	mem->pPhysical = (void *)desc->paddr;
	mem->uiLength = desc->buflength;
	*id = (void *)desc;

	return (1);

fail:
	return (0);
}

/* Free the rx buffer */
static NV_SINT32
nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id)
{
	struct nve_softc *sc = ctx;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n");

	desc = (struct nve_rx_desc *) id;
	buf = &desc->buf;

	if (buf->mbuf) {
		bus_dmamap_unload(sc->mtag, buf->map);
		bus_dmamap_destroy(sc->mtag, buf->map);
		m_freem(buf->mbuf);
	}
	sc->pending_rxs--;
	buf->mbuf = NULL;

	return (1);
}

/* This gets called by the Nvidia API after our TX packet has been sent */
static NV_SINT32
nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success)
{
	struct nve_softc *sc = ctx;
	struct nve_map_buffer *buf;
	struct nve_tx_desc *desc = (struct nve_tx_desc *) id;
	struct ifnet *ifp;

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n");

	ifp = sc->ifp;
	buf = &desc->buf;
	sc->pending_txs--;

	/* Unload and free mbuf cluster */
	if (buf->mbuf == NULL)
		goto fail;

	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mtag, buf->map);
	m_freem(buf->mbuf);
	buf->mbuf = NULL;

	/* Send more packets if we have them */
	if (sc->pending_txs < TX_RING_SIZE)
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->pending_txs < TX_RING_SIZE)
		nve_ifstart_locked(ifp);

fail:

	return (1);
}

/* This gets called by the Nvidia API when a new packet has been received */
/* XXX What is newbuf used for? XXX */
static NV_SINT32
nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf,
    NV_UINT8 priority)
{
	struct nve_softc *sc = ctx;
	struct ifnet *ifp;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;
	ADAPTER_READ_DATA *readdata;
	struct mbuf *m;

	NVE_LOCK_ASSERT(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n");

	ifp = sc->ifp;

	readdata = (ADAPTER_READ_DATA *) data;
	desc = readdata->pvID;
	buf = &desc->buf;
	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);

	if (success) {
		/* Sync DMA bounce buffer. */
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);

		/* First mbuf in packet holds the ethernet and packet headers */
		buf->mbuf->m_pkthdr.rcvif = ifp;
		buf->mbuf->m_pkthdr.len = buf->mbuf->m_len =
		    readdata->ulTotalLength;

		bus_dmamap_unload(sc->mtag, buf->map);

		/* Blat the mbuf pointer, kernel will free the mbuf cluster */
		m = buf->mbuf;
		buf->mbuf = NULL;

		/* Give mbuf to OS. */
		NVE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NVE_LOCK(sc);
		if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH)
			ifp->if_imcasts++;

	} else {
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mtag, buf->map);
		m_freem(buf->mbuf);
		buf->mbuf = NULL;
	}

	sc->cur_rx = desc - sc->rx_desc;
	sc->pending_rxs--;

	return (1);
}

/* This gets called by NVIDIA API when the PHY link state changes */
static NV_SINT32
nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled)
{

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n");

	return (1);
}

/* Setup a watchdog timer */
static NV_SINT32
nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n");

	callout_init(&sc->ostimer, CALLOUT_MPSAFE);
	*timer = &sc->ostimer;

	return (1);
}

/* Free the timer */
static NV_SINT32
nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer)
{

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n");

	callout_drain((struct callout *)timer);

	return (1);
}

/* Setup timer parameters */
static NV_SINT32
nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n");

	sc->ostimer_func = func;
	sc->ostimer_params = parameters;

	return (1);
}

/* Set the timer to go off */
static NV_SINT32
nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay)
{
	struct nve_softc *sc = ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n");

	callout_reset((struct callout *)timer, delay, sc->ostimer_func,
	    sc->ostimer_params);

	return (1);
}

/* Cancel the timer */
static NV_SINT32
nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer)
{

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n");

	callout_stop((struct callout *)timer);

	return (1);
}

static NV_SINT32
nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id,
    NV_UINT8 *newbuffer, NV_UINT8 priority)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n");

	return (1);
}

static PNV_VOID
nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpktnopq\n");

	return (NULL);
}

static NV_SINT32
nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n");

	return (1);
}

/* Allocate mutex context (already done in nve_attach) */
static NV_SINT32
nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n");

	*pLock = (void **)sc;

	return (1);
}

/* Obtain a spin lock */
static NV_SINT32
nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n");

	return (1);
}

/* Release lock */
static NV_SINT32
nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n");

	return (1);
}

/* I have no idea what this is for */
static PNV_VOID
nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n");
	panic("nve: nve_osreturnbufvirtual not implemented\n");

	return (NULL);
}

/* --- End of NVOSAPI interface --- */