if_nve.c revision 158773
1/*- 2 * Copyright (c) 2005 by David E. O'Brien <obrien@FreeBSD.org>. 3 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY 16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $ 28 */ 29/* 30 * NVIDIA nForce MCP Networking Adapter driver 31 * 32 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by NVIDIA 33 * through their web site. 34 * 35 * All mainstream nForce and nForce2 motherboards are supported. This module 36 * is as stable, sometimes more stable, than the linux version. (Recent 37 * Linux stability issues seem to be related to some issues with newer 38 * distributions using GCC 3.x, however this don't appear to effect FreeBSD 39 * 5.x). 40 * 41 * In accordance with the NVIDIA distribution license it is necessary to 42 * link this module against the nvlibnet.o binary object included in the 43 * Linux driver source distribution. The binary component is not modified in 44 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c 45 * linux kernel module "wrapper". 46 * 47 * The Linux driver uses a common code API that is shared between Win32 and 48 * i386 Linux. This abstracts the low level driver functions and uses 49 * callbacks and hooks to access the underlying hardware device. By using 50 * this same API in a FreeBSD kernel module it is possible to support the 51 * hardware without breaching the Linux source distributions licensing 52 * requirements, or obtaining the hardware programming specifications. 53 * 54 * Although not conventional, it works, and given the relatively small 55 * amount of hardware centric code, it's hopefully no more buggy than its 56 * linux counterpart. 57 * 58 * NVIDIA now support the nForce3 AMD64 platform, however I have been 59 * unable to access such a system to verify support. However, the code is 60 * reported to work with little modification when compiled with the AMD64 61 * version of the NVIDIA Linux library. All that should be necessary to make 62 * the driver work is to link it directly into the kernel, instead of as a 63 * module, and apply the docs/amd64.diff patch in this source distribution to 64 * the NVIDIA Linux driver source. 65 * 66 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well 67 * as recent versions of DragonFly. 68 * 69 * Written by Quinton Dolan <q@onthenet.com.au> 70 * Portions based on existing FreeBSD network drivers. 71 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files. 72 */ 73 74#include <sys/cdefs.h> 75__FBSDID("$FreeBSD: head/sys/dev/nve/if_nve.c 158773 2006-05-20 21:08:09Z mlaier $"); 76 77#include <sys/param.h> 78#include <sys/systm.h> 79#include <sys/sockio.h> 80#include <sys/mbuf.h> 81#include <sys/malloc.h> 82#include <sys/kernel.h> 83#include <sys/socket.h> 84#include <sys/sysctl.h> 85#include <sys/queue.h> 86#include <sys/module.h> 87 88#include <net/if.h> 89#include <net/if_arp.h> 90#include <net/ethernet.h> 91#include <net/if_dl.h> 92#include <net/if_media.h> 93#include <net/if_types.h> 94#include <net/bpf.h> 95#include <net/if_vlan_var.h> 96 97#include <machine/bus.h> 98#include <machine/resource.h> 99 100#include <vm/vm.h> /* for vtophys */ 101#include <vm/pmap.h> /* for vtophys */ 102#include <sys/bus.h> 103#include <sys/rman.h> 104 105#include <dev/pci/pcireg.h> 106#include <dev/pci/pcivar.h> 107#include <dev/mii/mii.h> 108#include <dev/mii/miivar.h> 109#include "miibus_if.h" 110 111/* Include NVIDIA Linux driver header files */ 112#include <contrib/dev/nve/nvenet_version.h> 113#define linux 114#include <contrib/dev/nve/basetype.h> 115#include <contrib/dev/nve/phy.h> 116#include "os+%DIKED-nve.h" 117#include <contrib/dev/nve/drvinfo.h> 118#include <contrib/dev/nve/adapter.h> 119#undef linux 120 121#include <dev/nve/if_nvereg.h> 122 123MODULE_DEPEND(nve, pci, 1, 1, 1); 124MODULE_DEPEND(nve, ether, 1, 1, 1); 125MODULE_DEPEND(nve, miibus, 1, 1, 1); 126 127static int nve_probe(device_t); 128static int nve_attach(device_t); 129static int nve_detach(device_t); 130static void nve_init(void *); 131static void nve_init_locked(struct nve_softc *); 132static void nve_stop(struct nve_softc *); 133static void nve_shutdown(device_t); 134static int nve_init_rings(struct nve_softc *); 135static void nve_free_rings(struct nve_softc *); 136 137static void nve_ifstart(struct ifnet *); 138static void nve_ifstart_locked(struct ifnet *); 139static int nve_ioctl(struct ifnet *, u_long, caddr_t); 140static void nve_intr(void *); 141static void nve_tick(void *); 142static void nve_setmulti(struct nve_softc *); 143static void nve_watchdog(struct ifnet *); 144static void nve_update_stats(struct nve_softc *); 145 146static int nve_ifmedia_upd(struct ifnet *); 147static void nve_ifmedia_upd_locked(struct ifnet *); 148static void nve_ifmedia_sts(struct ifnet *, struct ifmediareq *); 149static int nve_miibus_readreg(device_t, int, int); 150static void nve_miibus_writereg(device_t, int, int, int); 151 152static void nve_dmamap_cb(void *, bus_dma_segment_t *, int, int); 153static void nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int); 154 155static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK); 156static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK); 157static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX); 158static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX); 159static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32); 160static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32); 161static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *); 162static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID); 163static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32); 164static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8); 165static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32); 166static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *); 167static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID); 168static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID); 169static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32); 170static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID); 171 172static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8); 173static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID); 174static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32); 175static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *); 176static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID); 177static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID); 178static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID); 179 180static device_method_t nve_methods[] = { 181 /* Device interface */ 182 DEVMETHOD(device_probe, nve_probe), 183 DEVMETHOD(device_attach, nve_attach), 184 DEVMETHOD(device_detach, nve_detach), 185 DEVMETHOD(device_shutdown, nve_shutdown), 186 187 /* Bus interface */ 188 DEVMETHOD(bus_print_child, bus_generic_print_child), 189 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 190 191 /* MII interface */ 192 DEVMETHOD(miibus_readreg, nve_miibus_readreg), 193 DEVMETHOD(miibus_writereg, nve_miibus_writereg), 194 195 {0, 0} 196}; 197 198static driver_t nve_driver = { 199 "nve", 200 nve_methods, 201 sizeof(struct nve_softc) 202}; 203 204static devclass_t nve_devclass; 205 206static int nve_pollinterval = 0; 207SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW, 208 &nve_pollinterval, 0, "delay between interface polls"); 209 210DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0); 211DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0); 212 213static struct nve_type nve_devs[] = { 214 {NVIDIA_VENDORID, NFORCE_MCPNET1_DEVICEID, 215 "NVIDIA nForce MCP Networking Adapter"}, 216 {NVIDIA_VENDORID, NFORCE_MCPNET2_DEVICEID, 217 "NVIDIA nForce MCP2 Networking Adapter"}, 218 {NVIDIA_VENDORID, NFORCE_MCPNET3_DEVICEID, 219 "NVIDIA nForce MCP3 Networking Adapter"}, 220 {NVIDIA_VENDORID, NFORCE_MCPNET4_DEVICEID, 221 "NVIDIA nForce MCP4 Networking Adapter"}, 222 {NVIDIA_VENDORID, NFORCE_MCPNET5_DEVICEID, 223 "NVIDIA nForce MCP5 Networking Adapter"}, 224 {NVIDIA_VENDORID, NFORCE_MCPNET6_DEVICEID, 225 "NVIDIA nForce MCP6 Networking Adapter"}, 226 {NVIDIA_VENDORID, NFORCE_MCPNET7_DEVICEID, 227 "NVIDIA nForce MCP7 Networking Adapter"}, 228 {NVIDIA_VENDORID, NFORCE_MCPNET8_DEVICEID, 229 "NVIDIA nForce MCP8 Networking Adapter"}, 230 {NVIDIA_VENDORID, NFORCE_MCPNET9_DEVICEID, 231 "NVIDIA nForce MCP9 Networking Adapter"}, 232 {NVIDIA_VENDORID, NFORCE_MCPNET10_DEVICEID, 233 "NVIDIA nForce MCP10 Networking Adapter"}, 234 {NVIDIA_VENDORID, NFORCE_MCPNET11_DEVICEID, 235 "NVIDIA nForce MCP11 Networking Adapter"}, 236 {NVIDIA_VENDORID, NFORCE_MCPNET12_DEVICEID, 237 "NVIDIA nForce MCP12 Networking Adapter"}, 238 {NVIDIA_VENDORID, NFORCE_MCPNET13_DEVICEID, 239 "NVIDIA nForce MCP13 Networking Adapter"}, 240 {0, 0, NULL} 241}; 242 243/* DMA MEM map callback function to get data segment physical address */ 244static void 245nve_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nsegs, int error) 246{ 247 if (error) 248 return; 249 250 KASSERT(nsegs == 1, 251 ("Too many DMA segments returned when mapping DMA memory")); 252 *(bus_addr_t *)arg = segs->ds_addr; 253} 254 255/* DMA RX map callback function to get data segment physical address */ 256static void 257nve_dmamap_rx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, 258 bus_size_t mapsize, int error) 259{ 260 if (error) 261 return; 262 *(bus_addr_t *)arg = segs->ds_addr; 263} 264 265/* 266 * DMA TX buffer callback function to allocate fragment data segment 267 * addresses 268 */ 269static void 270nve_dmamap_tx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, bus_size_t mapsize, int error) 271{ 272 struct nve_tx_desc *info; 273 274 info = arg; 275 if (error) 276 return; 277 KASSERT(nsegs < NV_MAX_FRAGS, 278 ("Too many DMA segments returned when mapping mbuf")); 279 info->numfrags = nsegs; 280 bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t)); 281} 282 283/* Probe for supported hardware ID's */ 284static int 285nve_probe(device_t dev) 286{ 287 struct nve_type *t; 288 289 t = nve_devs; 290 /* Check for matching PCI DEVICE ID's */ 291 while (t->name != NULL) { 292 if ((pci_get_vendor(dev) == t->vid_id) && 293 (pci_get_device(dev) == t->dev_id)) { 294 device_set_desc(dev, t->name); 295 return (0); 296 } 297 t++; 298 } 299 300 return (ENXIO); 301} 302 303/* Attach driver and initialise hardware for use */ 304static int 305nve_attach(device_t dev) 306{ 307 u_char eaddr[ETHER_ADDR_LEN]; 308 struct nve_softc *sc; 309 struct ifnet *ifp; 310 OS_API *osapi; 311 ADAPTER_OPEN_PARAMS OpenParams; 312 int error = 0, i, rid; 313 314 if (bootverbose) 315 device_printf(dev, "nvenetlib.o version %s\n", DRIVER_VERSION); 316 317 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n"); 318 319 sc = device_get_softc(dev); 320 321 /* Allocate mutex */ 322 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 323 MTX_DEF); 324 callout_init_mtx(&sc->stat_callout, &sc->mtx, 0); 325 326 sc->dev = dev; 327 328 /* Preinitialize data structures */ 329 bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS)); 330 331 /* Enable bus mastering */ 332 pci_enable_busmaster(dev); 333 334 /* Allocate memory mapped address space */ 335 rid = NV_RID; 336 sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, 337 RF_ACTIVE); 338 339 if (sc->res == NULL) { 340 device_printf(dev, "couldn't map memory\n"); 341 error = ENXIO; 342 goto fail; 343 } 344 sc->sc_st = rman_get_bustag(sc->res); 345 sc->sc_sh = rman_get_bushandle(sc->res); 346 347 /* Allocate interrupt */ 348 rid = 0; 349 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 350 RF_SHAREABLE | RF_ACTIVE); 351 352 if (sc->irq == NULL) { 353 device_printf(dev, "couldn't map interrupt\n"); 354 error = ENXIO; 355 goto fail; 356 } 357 /* Allocate DMA tags */ 358 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 359 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS, 360 NV_MAX_FRAGS, MCLBYTES, 0, 361 busdma_lock_mutex, &Giant, 362 &sc->mtag); 363 if (error) { 364 device_printf(dev, "couldn't allocate dma tag\n"); 365 goto fail; 366 } 367 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 368 BUS_SPACE_MAXADDR, NULL, NULL, 369 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1, 370 sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0, 371 busdma_lock_mutex, &Giant, 372 &sc->rtag); 373 if (error) { 374 device_printf(dev, "couldn't allocate dma tag\n"); 375 goto fail; 376 } 377 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, 378 BUS_SPACE_MAXADDR, NULL, NULL, 379 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1, 380 sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0, 381 busdma_lock_mutex, &Giant, 382 &sc->ttag); 383 if (error) { 384 device_printf(dev, "couldn't allocate dma tag\n"); 385 goto fail; 386 } 387 /* Allocate DMA safe memory and get the DMA addresses. */ 388 error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc, 389 BUS_DMA_WAITOK, &sc->tmap); 390 if (error) { 391 device_printf(dev, "couldn't allocate dma memory\n"); 392 goto fail; 393 } 394 bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE); 395 error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc, 396 sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb, 397 &sc->tx_addr, 0); 398 if (error) { 399 device_printf(dev, "couldn't map dma memory\n"); 400 goto fail; 401 } 402 error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc, 403 BUS_DMA_WAITOK, &sc->rmap); 404 if (error) { 405 device_printf(dev, "couldn't allocate dma memory\n"); 406 goto fail; 407 } 408 bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE); 409 error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc, 410 sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb, 411 &sc->rx_addr, 0); 412 if (error) { 413 device_printf(dev, "couldn't map dma memory\n"); 414 goto fail; 415 } 416 /* Initialize rings. */ 417 if (nve_init_rings(sc)) { 418 device_printf(dev, "failed to init rings\n"); 419 error = ENXIO; 420 goto fail; 421 } 422 /* Setup NVIDIA API callback routines */ 423 osapi = &sc->osapi; 424 osapi->pOSCX = sc; 425 osapi->pfnAllocMemory = nve_osalloc; 426 osapi->pfnFreeMemory = nve_osfree; 427 osapi->pfnAllocMemoryEx = nve_osallocex; 428 osapi->pfnFreeMemoryEx = nve_osfreeex; 429 osapi->pfnClearMemory = nve_osclear; 430 osapi->pfnStallExecution = nve_osdelay; 431 osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf; 432 osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf; 433 osapi->pfnPacketWasSent = nve_ospackettx; 434 osapi->pfnPacketWasReceived = nve_ospacketrx; 435 osapi->pfnLinkStateHasChanged = nve_oslinkchg; 436 osapi->pfnAllocTimer = nve_osalloctimer; 437 osapi->pfnFreeTimer = nve_osfreetimer; 438 osapi->pfnInitializeTimer = nve_osinittimer; 439 osapi->pfnSetTimer = nve_ossettimer; 440 osapi->pfnCancelTimer = nve_oscanceltimer; 441 osapi->pfnPreprocessPacket = nve_ospreprocpkt; 442 osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq; 443 osapi->pfnIndicatePackets = nve_osindicatepkt; 444 osapi->pfnLockAlloc = nve_oslockalloc; 445 osapi->pfnLockAcquire = nve_oslockacquire; 446 osapi->pfnLockRelease = nve_oslockrelease; 447 osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt; 448 449 sc->linkup = FALSE; 450 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN; 451 452 /* TODO - We don't support hardware offload yet */ 453 sc->hwmode = 1; 454 sc->media = 0; 455 456 /* Set NVIDIA API startup parameters */ 457 OpenParams.MaxDpcLoop = 2; 458 OpenParams.MaxRxPkt = RX_RING_SIZE; 459 OpenParams.MaxTxPkt = TX_RING_SIZE; 460 OpenParams.SentPacketStatusSuccess = 1; 461 OpenParams.SentPacketStatusFailure = 0; 462 OpenParams.MaxRxPktToAccumulate = 6; 463 OpenParams.ulPollInterval = nve_pollinterval; 464 OpenParams.SetForcedModeEveryNthRxPacket = 0; 465 OpenParams.SetForcedModeEveryNthTxPacket = 0; 466 OpenParams.RxForcedInterrupt = 0; 467 OpenParams.TxForcedInterrupt = 0; 468 OpenParams.pOSApi = osapi; 469 OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res); 470 OpenParams.bASFEnabled = 0; 471 OpenParams.ulDescriptorVersion = sc->hwmode; 472 OpenParams.ulMaxPacketSize = sc->max_frame_size; 473 OpenParams.DeviceId = pci_get_device(dev); 474 475 /* Open NVIDIA Hardware API */ 476 error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr); 477 if (error) { 478 device_printf(dev, 479 "failed to open NVIDIA Hardware API: 0x%x\n", error); 480 goto fail; 481 } 482 483 /* TODO - Add support for MODE2 hardware offload */ 484 485 bzero(&sc->adapterdata, sizeof(sc->adapterdata)); 486 487 sc->adapterdata.ulMediaIF = sc->media; 488 sc->adapterdata.ulModeRegTxReadCompleteEnable = 1; 489 sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata); 490 491 /* MAC is loaded backwards into h/w reg */ 492 sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr); 493 for (i = 0; i < 6; i++) { 494 eaddr[i] = sc->original_mac_addr[5 - i]; 495 } 496 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr); 497 498 /* Display ethernet address ,... */ 499 device_printf(dev, "Ethernet address %6D\n", eaddr, ":"); 500 501 /* Allocate interface structures */ 502 ifp = sc->ifp = if_alloc(IFT_ETHER); 503 if (ifp == NULL) { 504 device_printf(dev, "can not if_alloc()\n"); 505 error = ENOSPC; 506 goto fail; 507 } 508 509 /* Probe device for MII interface to PHY */ 510 DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_phy_probe\n"); 511 if (mii_phy_probe(dev, &sc->miibus, nve_ifmedia_upd, nve_ifmedia_sts)) { 512 device_printf(dev, "MII without any phy!\n"); 513 error = ENXIO; 514 goto fail; 515 } 516 517 /* Setup interface parameters */ 518 ifp->if_softc = sc; 519 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 520 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 521 ifp->if_ioctl = nve_ioctl; 522 ifp->if_output = ether_output; 523 ifp->if_start = nve_ifstart; 524 ifp->if_watchdog = nve_watchdog; 525 ifp->if_timer = 0; 526 ifp->if_init = nve_init; 527 ifp->if_mtu = ETHERMTU; 528 ifp->if_baudrate = IF_Mbps(100); 529 IFQ_SET_MAXLEN(&ifp->if_snd, TX_RING_SIZE - 1); 530 ifp->if_snd.ifq_drv_maxlen = TX_RING_SIZE - 1; 531 IFQ_SET_READY(&ifp->if_snd); 532 ifp->if_capabilities |= IFCAP_VLAN_MTU; 533 534 /* Attach to OS's managers. */ 535 ether_ifattach(ifp, eaddr); 536 537 /* Activate our interrupt handler. - attach last to avoid lock */ 538 error = bus_setup_intr(sc->dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 539 nve_intr, sc, &sc->sc_ih); 540 if (error) { 541 device_printf(sc->dev, "couldn't set up interrupt handler\n"); 542 goto fail; 543 } 544 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n"); 545 546fail: 547 if (error) 548 nve_detach(dev); 549 550 return (error); 551} 552 553/* Detach interface for module unload */ 554static int 555nve_detach(device_t dev) 556{ 557 struct nve_softc *sc = device_get_softc(dev); 558 struct ifnet *ifp; 559 560 KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized")); 561 562 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n"); 563 564 ifp = sc->ifp; 565 566 if (device_is_attached(dev)) { 567 NVE_LOCK(sc); 568 nve_stop(sc); 569 NVE_UNLOCK(sc); 570 callout_drain(&sc->stat_callout); 571 ether_ifdetach(ifp); 572 } 573 574 if (sc->miibus) 575 device_delete_child(dev, sc->miibus); 576 bus_generic_detach(dev); 577 578 /* Reload unreversed address back into MAC in original state */ 579 if (sc->original_mac_addr) 580 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, 581 sc->original_mac_addr); 582 583 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n"); 584 /* Detach from NVIDIA hardware API */ 585 if (sc->hwapi->pfnClose) 586 sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE); 587 /* Release resources */ 588 if (sc->sc_ih) 589 bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih); 590 if (sc->irq) 591 bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq); 592 if (sc->res) 593 bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res); 594 595 nve_free_rings(sc); 596 597 if (sc->tx_desc) { 598 bus_dmamap_unload(sc->rtag, sc->rmap); 599 bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap); 600 bus_dmamap_destroy(sc->rtag, sc->rmap); 601 } 602 if (sc->mtag) 603 bus_dma_tag_destroy(sc->mtag); 604 if (sc->ttag) 605 bus_dma_tag_destroy(sc->ttag); 606 if (sc->rtag) 607 bus_dma_tag_destroy(sc->rtag); 608 609 if (ifp) 610 if_free(ifp); 611 mtx_destroy(&sc->mtx); 612 613 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n"); 614 615 return (0); 616} 617 618/* Initialise interface and start it "RUNNING" */ 619static void 620nve_init(void *xsc) 621{ 622 struct nve_softc *sc = xsc; 623 624 NVE_LOCK(sc); 625 nve_init_locked(sc); 626 NVE_UNLOCK(sc); 627} 628 629static void 630nve_init_locked(struct nve_softc *sc) 631{ 632 struct ifnet *ifp; 633 int error; 634 635 NVE_LOCK_ASSERT(sc); 636 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup); 637 638 ifp = sc->ifp; 639 640 /* Do nothing if already running */ 641 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 642 return; 643 644 nve_stop(sc); 645 DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n"); 646 647 nve_ifmedia_upd_locked(ifp); 648 649 /* Setup Hardware interface and allocate memory structures */ 650 error = sc->hwapi->pfnInit(sc->hwapi->pADCX, 651 0, /* force speed */ 652 0, /* force full duplex */ 653 0, /* force mode */ 654 0, /* force async mode */ 655 &sc->linkup); 656 657 if (error) { 658 device_printf(sc->dev, 659 "failed to start NVIDIA Hardware interface\n"); 660 return; 661 } 662 /* Set the MAC address */ 663 sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IF_LLADDR(sc->ifp)); 664 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 665 sc->hwapi->pfnStart(sc->hwapi->pADCX); 666 667 /* Setup multicast filter */ 668 nve_setmulti(sc); 669 670 /* Update interface parameters */ 671 ifp->if_drv_flags |= IFF_DRV_RUNNING; 672 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 673 674 callout_reset(&sc->stat_callout, hz, nve_tick, sc); 675 676 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n"); 677 678 return; 679} 680 681/* Stop interface activity ie. not "RUNNING" */ 682static void 683nve_stop(struct nve_softc *sc) 684{ 685 struct ifnet *ifp; 686 687 NVE_LOCK_ASSERT(sc); 688 689 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n"); 690 691 ifp = sc->ifp; 692 ifp->if_timer = 0; 693 694 /* Cancel tick timer */ 695 callout_stop(&sc->stat_callout); 696 697 /* Stop hardware activity */ 698 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); 699 sc->hwapi->pfnStop(sc->hwapi->pADCX, 0); 700 701 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n"); 702 /* Shutdown interface and deallocate memory buffers */ 703 if (sc->hwapi->pfnDeinit) 704 sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0); 705 706 sc->linkup = 0; 707 sc->cur_rx = 0; 708 sc->pending_rxs = 0; 709 sc->pending_txs = 0; 710 711 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 712 713 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n"); 714 715 return; 716} 717 718/* Shutdown interface for unload/reboot */ 719static void 720nve_shutdown(device_t dev) 721{ 722 struct nve_softc *sc; 723 724 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n"); 725 726 sc = device_get_softc(dev); 727 728 /* Stop hardware activity */ 729 NVE_LOCK(sc); 730 nve_stop(sc); 731 NVE_UNLOCK(sc); 732} 733 734/* Allocate TX ring buffers */ 735static int 736nve_init_rings(struct nve_softc *sc) 737{ 738 int error, i; 739 740 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n"); 741 742 sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0; 743 /* Initialise RX ring */ 744 for (i = 0; i < RX_RING_SIZE; i++) { 745 struct nve_rx_desc *desc = sc->rx_desc + i; 746 struct nve_map_buffer *buf = &desc->buf; 747 748 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 749 if (buf->mbuf == NULL) { 750 device_printf(sc->dev, "couldn't allocate mbuf\n"); 751 nve_free_rings(sc); 752 return (ENOBUFS); 753 } 754 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; 755 m_adj(buf->mbuf, ETHER_ALIGN); 756 757 error = bus_dmamap_create(sc->mtag, 0, &buf->map); 758 if (error) { 759 device_printf(sc->dev, "couldn't create dma map\n"); 760 nve_free_rings(sc); 761 return (error); 762 } 763 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, 764 nve_dmamap_rx_cb, &desc->paddr, 0); 765 if (error) { 766 device_printf(sc->dev, "couldn't dma map mbuf\n"); 767 nve_free_rings(sc); 768 return (error); 769 } 770 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); 771 772 desc->buflength = buf->mbuf->m_len; 773 desc->vaddr = mtod(buf->mbuf, caddr_t); 774 } 775 bus_dmamap_sync(sc->rtag, sc->rmap, 776 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 777 778 /* Initialize TX ring */ 779 for (i = 0; i < TX_RING_SIZE; i++) { 780 struct nve_tx_desc *desc = sc->tx_desc + i; 781 struct nve_map_buffer *buf = &desc->buf; 782 783 buf->mbuf = NULL; 784 785 error = bus_dmamap_create(sc->mtag, 0, &buf->map); 786 if (error) { 787 device_printf(sc->dev, "couldn't create dma map\n"); 788 nve_free_rings(sc); 789 return (error); 790 } 791 } 792 bus_dmamap_sync(sc->ttag, sc->tmap, 793 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 794 795 DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n"); 796 797 return (error); 798} 799 800/* Free the TX ring buffers */ 801static void 802nve_free_rings(struct nve_softc *sc) 803{ 804 int i; 805 806 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n"); 807 808 for (i = 0; i < RX_RING_SIZE; i++) { 809 struct nve_rx_desc *desc = sc->rx_desc + i; 810 struct nve_map_buffer *buf = &desc->buf; 811 812 if (buf->mbuf) { 813 bus_dmamap_unload(sc->mtag, buf->map); 814 bus_dmamap_destroy(sc->mtag, buf->map); 815 m_freem(buf->mbuf); 816 } 817 buf->mbuf = NULL; 818 } 819 820 for (i = 0; i < TX_RING_SIZE; i++) { 821 struct nve_tx_desc *desc = sc->tx_desc + i; 822 struct nve_map_buffer *buf = &desc->buf; 823 824 if (buf->mbuf) { 825 bus_dmamap_unload(sc->mtag, buf->map); 826 bus_dmamap_destroy(sc->mtag, buf->map); 827 m_freem(buf->mbuf); 828 } 829 buf->mbuf = NULL; 830 } 831 832 DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n"); 833} 834 835/* Main loop for sending packets from OS to interface */ 836static void 837nve_ifstart(struct ifnet *ifp) 838{ 839 struct nve_softc *sc = ifp->if_softc; 840 841 NVE_LOCK(sc); 842 nve_ifstart_locked(ifp); 843 NVE_UNLOCK(sc); 844} 845 846static void 847nve_ifstart_locked(struct ifnet *ifp) 848{ 849 struct nve_softc *sc = ifp->if_softc; 850 struct nve_map_buffer *buf; 851 struct mbuf *m0, *m; 852 struct nve_tx_desc *desc; 853 ADAPTER_WRITE_DATA txdata; 854 int error, i; 855 856 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n"); 857 858 NVE_LOCK_ASSERT(sc); 859 860 /* If link is down/busy or queue is empty do nothing */ 861 if (ifp->if_drv_flags & IFF_DRV_OACTIVE || 862 IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 863 return; 864 865 /* Transmit queued packets until sent or TX ring is full */ 866 while (sc->pending_txs < TX_RING_SIZE) { 867 desc = sc->tx_desc + sc->cur_tx; 868 buf = &desc->buf; 869 870 /* Get next packet to send. */ 871 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); 872 873 /* If nothing to send, return. */ 874 if (m0 == NULL) 875 return; 876 877 /* 878 * On nForce4, the chip doesn't interrupt on transmit, 879 * so try to flush transmitted packets from the queue 880 * if it's getting large (see note in nve_watchdog). 881 */ 882 if (sc->pending_txs > TX_RING_SIZE/2) { 883 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); 884 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX); 885 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 886 } 887 888 /* Map MBUF for DMA access */ 889 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0, 890 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); 891 892 if (error && error != EFBIG) { 893 m_freem(m0); 894 sc->tx_errors++; 895 continue; 896 } 897 /* 898 * Packet has too many fragments - defrag into new mbuf 899 * cluster 900 */ 901 if (error) { 902 m = m_defrag(m0, M_DONTWAIT); 903 if (m == NULL) { 904 m_freem(m0); 905 sc->tx_errors++; 906 continue; 907 } 908 m0 = m; 909 910 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m, 911 nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); 912 if (error) { 913 m_freem(m); 914 sc->tx_errors++; 915 continue; 916 } 917 } 918 /* Do sync on DMA bounce buffer */ 919 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE); 920 921 buf->mbuf = m0; 922 txdata.ulNumberOfElements = desc->numfrags; 923 txdata.pvID = (PVOID)desc; 924 925 /* Put fragments into API element list */ 926 txdata.ulTotalLength = buf->mbuf->m_len; 927 for (i = 0; i < desc->numfrags; i++) { 928 txdata.sElement[i].ulLength = 929 (ulong)desc->frags[i].ds_len; 930 txdata.sElement[i].pPhysical = 931 (PVOID)desc->frags[i].ds_addr; 932 } 933 934 /* Send packet to Nvidia API for transmission */ 935 error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata); 936 937 switch (error) { 938 case ADAPTERERR_NONE: 939 /* Packet was queued in API TX queue successfully */ 940 sc->pending_txs++; 941 sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE; 942 break; 943 944 case ADAPTERERR_TRANSMIT_QUEUE_FULL: 945 /* The API TX queue is full - requeue the packet */ 946 device_printf(sc->dev, 947 "nve_ifstart: transmit queue is full\n"); 948 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 949 bus_dmamap_unload(sc->mtag, buf->map); 950 IFQ_DRV_PREPEND(&ifp->if_snd, buf->mbuf); 951 buf->mbuf = NULL; 952 return; 953 954 default: 955 /* The API failed to queue/send the packet so dump it */ 956 device_printf(sc->dev, "nve_ifstart: transmit error\n"); 957 bus_dmamap_unload(sc->mtag, buf->map); 958 m_freem(buf->mbuf); 959 buf->mbuf = NULL; 960 sc->tx_errors++; 961 return; 962 } 963 /* Set watchdog timer. */ 964 ifp->if_timer = 8; 965 966 /* Copy packet to BPF tap */ 967 BPF_MTAP(ifp, m0); 968 } 969 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 970 971 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n"); 972} 973 974/* Handle IOCTL events */ 975static int 976nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 977{ 978 struct nve_softc *sc = ifp->if_softc; 979 struct ifreq *ifr = (struct ifreq *) data; 980 struct mii_data *mii; 981 int error = 0; 982 983 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n"); 984 985 switch (command) { 986 case SIOCSIFMTU: 987 /* Set MTU size */ 988 NVE_LOCK(sc); 989 if (ifp->if_mtu == ifr->ifr_mtu) { 990 NVE_UNLOCK(sc); 991 break; 992 } 993 if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) { 994 ifp->if_mtu = ifr->ifr_mtu; 995 nve_stop(sc); 996 nve_init_locked(sc); 997 } else 998 error = EINVAL; 999 NVE_UNLOCK(sc); 1000 break; 1001 1002 case SIOCSIFFLAGS: 1003 /* Setup interface flags */ 1004 NVE_LOCK(sc); 1005 if (ifp->if_flags & IFF_UP) { 1006 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1007 nve_init_locked(sc); 1008 NVE_UNLOCK(sc); 1009 break; 1010 } 1011 } else { 1012 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1013 nve_stop(sc); 1014 NVE_UNLOCK(sc); 1015 break; 1016 } 1017 } 1018 /* Handle IFF_PROMISC and IFF_ALLMULTI flags. */ 1019 nve_setmulti(sc); 1020 NVE_UNLOCK(sc); 1021 break; 1022 1023 case SIOCADDMULTI: 1024 case SIOCDELMULTI: 1025 /* Setup multicast filter */ 1026 NVE_LOCK(sc); 1027 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1028 nve_setmulti(sc); 1029 } 1030 NVE_UNLOCK(sc); 1031 break; 1032 1033 case SIOCGIFMEDIA: 1034 case SIOCSIFMEDIA: 1035 /* Get/Set interface media parameters */ 1036 mii = device_get_softc(sc->miibus); 1037 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1038 break; 1039 1040 default: 1041 /* Everything else we forward to generic ether ioctl */ 1042 error = ether_ioctl(ifp, (int)command, data); 1043 break; 1044 } 1045 1046 DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n"); 1047 1048 return (error); 1049} 1050 1051/* Interrupt service routine */ 1052static void 1053nve_intr(void *arg) 1054{ 1055 struct nve_softc *sc = arg; 1056 struct ifnet *ifp = sc->ifp; 1057 1058 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n"); 1059 1060 NVE_LOCK(sc); 1061 if (!ifp->if_flags & IFF_UP) { 1062 nve_stop(sc); 1063 NVE_UNLOCK(sc); 1064 return; 1065 } 1066 /* Handle interrupt event */ 1067 if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) { 1068 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX); 1069 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 1070 } 1071 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1072 nve_ifstart_locked(ifp); 1073 1074 /* If no pending packets we don't need a timeout */ 1075 if (sc->pending_txs == 0) 1076 sc->ifp->if_timer = 0; 1077 NVE_UNLOCK(sc); 1078 1079 DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n"); 1080 1081 return; 1082} 1083 1084/* Setup multicast filters */ 1085static void 1086nve_setmulti(struct nve_softc *sc) 1087{ 1088 struct ifnet *ifp; 1089 struct ifmultiaddr *ifma; 1090 PACKET_FILTER hwfilter; 1091 int i; 1092 u_int8_t andaddr[6], oraddr[6]; 1093 1094 NVE_LOCK_ASSERT(sc); 1095 1096 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n"); 1097 1098 ifp = sc->ifp; 1099 1100 /* Initialize filter */ 1101 hwfilter.ulFilterFlags = 0; 1102 for (i = 0; i < 6; i++) { 1103 hwfilter.acMulticastAddress[i] = 0; 1104 hwfilter.acMulticastMask[i] = 0; 1105 } 1106 1107 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1108 /* Accept all packets */ 1109 hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS; 1110 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); 1111 return; 1112 } 1113 /* Setup multicast filter */ 1114 IF_ADDR_LOCK(ifp); 1115 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1116 u_char *addrp; 1117 1118 if (ifma->ifma_addr->sa_family != AF_LINK) 1119 continue; 1120 1121 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); 1122 for (i = 0; i < 6; i++) { 1123 u_int8_t mcaddr = addrp[i]; 1124 andaddr[i] &= mcaddr; 1125 oraddr[i] |= mcaddr; 1126 } 1127 } 1128 IF_ADDR_UNLOCK(ifp); 1129 for (i = 0; i < 6; i++) { 1130 hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i]; 1131 hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]); 1132 } 1133 1134 /* Send filter to NVIDIA API */ 1135 sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); 1136 1137 DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n"); 1138 1139 return; 1140} 1141 1142/* Change the current media/mediaopts */ 1143static int 1144nve_ifmedia_upd(struct ifnet *ifp) 1145{ 1146 struct nve_softc *sc = ifp->if_softc; 1147 1148 NVE_LOCK(sc); 1149 nve_ifmedia_upd_locked(ifp); 1150 NVE_UNLOCK(sc); 1151 return (0); 1152} 1153 1154static void 1155nve_ifmedia_upd_locked(struct ifnet *ifp) 1156{ 1157 struct nve_softc *sc = ifp->if_softc; 1158 struct mii_data *mii; 1159 1160 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n"); 1161 1162 NVE_LOCK_ASSERT(sc); 1163 mii = device_get_softc(sc->miibus); 1164 1165 if (mii->mii_instance) { 1166 struct mii_softc *miisc; 1167 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 1168 miisc = LIST_NEXT(miisc, mii_list)) { 1169 mii_phy_reset(miisc); 1170 } 1171 } 1172 mii_mediachg(mii); 1173} 1174 1175/* Update current miibus PHY status of media */ 1176static void 1177nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1178{ 1179 struct nve_softc *sc; 1180 struct mii_data *mii; 1181 1182 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n"); 1183 1184 sc = ifp->if_softc; 1185 NVE_LOCK(sc); 1186 mii = device_get_softc(sc->miibus); 1187 mii_pollstat(mii); 1188 NVE_UNLOCK(sc); 1189 1190 ifmr->ifm_active = mii->mii_media_active; 1191 ifmr->ifm_status = mii->mii_media_status; 1192 1193 return; 1194} 1195 1196/* miibus tick timer - maintain link status */ 1197static void 1198nve_tick(void *xsc) 1199{ 1200 struct nve_softc *sc = xsc; 1201 struct mii_data *mii; 1202 struct ifnet *ifp; 1203 1204 NVE_LOCK_ASSERT(sc); 1205 1206 ifp = sc->ifp; 1207 nve_update_stats(sc); 1208 1209 mii = device_get_softc(sc->miibus); 1210 mii_tick(mii); 1211 1212 if (mii->mii_media_status & IFM_ACTIVE && 1213 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1214 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1215 nve_ifstart_locked(ifp); 1216 } 1217 callout_reset(&sc->stat_callout, hz, nve_tick, sc); 1218 1219 return; 1220} 1221 1222/* Update ifnet data structure with collected interface stats from API */ 1223static void 1224nve_update_stats(struct nve_softc *sc) 1225{ 1226 struct ifnet *ifp = sc->ifp; 1227 ADAPTER_STATS stats; 1228 1229 NVE_LOCK_ASSERT(sc); 1230 1231 if (sc->hwapi) { 1232 sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats); 1233 1234 ifp->if_ipackets = stats.ulSuccessfulReceptions; 1235 ifp->if_ierrors = stats.ulMissedFrames + 1236 stats.ulFailedReceptions + 1237 stats.ulCRCErrors + 1238 stats.ulFramingErrors + 1239 stats.ulOverFlowErrors; 1240 1241 ifp->if_opackets = stats.ulSuccessfulTransmissions; 1242 ifp->if_oerrors = sc->tx_errors + 1243 stats.ulFailedTransmissions + 1244 stats.ulRetryErrors + 1245 stats.ulUnderflowErrors + 1246 stats.ulLossOfCarrierErrors + 1247 stats.ulLateCollisionErrors; 1248 1249 ifp->if_collisions = stats.ulLateCollisionErrors; 1250 } 1251 1252 return; 1253} 1254 1255/* miibus Read PHY register wrapper - calls Nvidia API entry point */ 1256static int 1257nve_miibus_readreg(device_t dev, int phy, int reg) 1258{ 1259 struct nve_softc *sc = device_get_softc(dev); 1260 ULONG data; 1261 1262 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n"); 1263 1264 ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data); 1265 1266 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n"); 1267 1268 return (data); 1269} 1270 1271/* miibus Write PHY register wrapper - calls Nvidia API entry point */ 1272static void 1273nve_miibus_writereg(device_t dev, int phy, int reg, int data) 1274{ 1275 struct nve_softc *sc = device_get_softc(dev); 1276 1277 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n"); 1278 1279 ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data); 1280 1281 DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n"); 1282 1283 return; 1284} 1285 1286/* Watchdog timer to prevent PHY lockups */ 1287static void 1288nve_watchdog(struct ifnet *ifp) 1289{ 1290 struct nve_softc *sc = ifp->if_softc; 1291 int pending_txs_start; 1292 1293 NVE_LOCK(sc); 1294 1295 /* 1296 * The nvidia driver blob defers tx completion notifications. 1297 * Thus, sometimes the watchdog timer will go off when the 1298 * tx engine is fine, but the tx completions are just deferred. 1299 * Try kicking the driver blob to clear out any pending tx 1300 * completions. If that clears up any of the pending tx 1301 * operations, then just return without printing the warning 1302 * message or resetting the adapter, as we can then conclude 1303 * the chip hasn't actually crashed (it's still sending packets). 1304 */ 1305 pending_txs_start = sc->pending_txs; 1306 sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); 1307 sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX); 1308 sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); 1309 if (sc->pending_txs < pending_txs_start) { 1310 NVE_UNLOCK(sc); 1311 return; 1312 } 1313 1314 device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs); 1315 1316 sc->tx_errors++; 1317 1318 nve_stop(sc); 1319 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1320 nve_init_locked(sc); 1321 1322 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1323 nve_ifstart_locked(ifp); 1324 NVE_UNLOCK(sc); 1325 1326 return; 1327} 1328 1329/* --- Start of NVOSAPI interface --- */ 1330 1331/* Allocate DMA enabled general use memory for API */ 1332static NV_SINT32 1333nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem) 1334{ 1335 struct nve_softc *sc; 1336 bus_addr_t mem_physical; 1337 1338 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength); 1339 1340 sc = (struct nve_softc *)ctx; 1341 1342 mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF, 1343 M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0); 1344 1345 if (!mem->pLogical) { 1346 device_printf(sc->dev, "memory allocation failed\n"); 1347 return (0); 1348 } 1349 memset(mem->pLogical, 0, (ulong)mem->uiLength); 1350 mem_physical = vtophys(mem->pLogical); 1351 mem->pPhysical = (PVOID)mem_physical; 1352 1353 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n", 1354 (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength); 1355 1356 return (1); 1357} 1358 1359/* Free allocated memory */ 1360static NV_SINT32 1361nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem) 1362{ 1363 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n", 1364 (uint)mem->pLogical, (uint) mem->uiLength); 1365 1366 contigfree(mem->pLogical, PAGE_SIZE, M_DEVBUF); 1367 return (1); 1368} 1369 1370/* Copied directly from nvnet.c */ 1371static NV_SINT32 1372nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) 1373{ 1374 MEMORY_BLOCK mem_block; 1375 1376 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n"); 1377 1378 mem_block_ex->pLogical = NULL; 1379 mem_block_ex->uiLengthOrig = mem_block_ex->uiLength; 1380 1381 if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) && 1382 (mem_block_ex->AlignmentSize > 1)) { 1383 DEBUGOUT(NVE_DEBUG_API, " aligning on %d\n", 1384 mem_block_ex->AlignmentSize); 1385 mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize; 1386 } 1387 mem_block.uiLength = mem_block_ex->uiLengthOrig; 1388 1389 if (nve_osalloc(ctx, &mem_block) == 0) { 1390 return (0); 1391 } 1392 mem_block_ex->pLogicalOrig = mem_block.pLogical; 1393 mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical; 1394 mem_block_ex->pPhysicalOrigHigh = 0; 1395 1396 mem_block_ex->pPhysical = mem_block.pPhysical; 1397 mem_block_ex->pLogical = mem_block.pLogical; 1398 1399 if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) { 1400 unsigned int offset; 1401 offset = mem_block_ex->pPhysicalOrigLow & 1402 (mem_block_ex->AlignmentSize - 1); 1403 1404 if (offset) { 1405 mem_block_ex->pPhysical = 1406 (PVOID)((ulong)mem_block_ex->pPhysical + 1407 mem_block_ex->AlignmentSize - offset); 1408 mem_block_ex->pLogical = 1409 (PVOID)((ulong)mem_block_ex->pLogical + 1410 mem_block_ex->AlignmentSize - offset); 1411 } /* if (offset) */ 1412 } /* if (mem_block_ex->uiLength != *mem_block_ex->uiLengthOrig) */ 1413 return (1); 1414} 1415 1416/* Copied directly from nvnet.c */ 1417static NV_SINT32 1418nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) 1419{ 1420 MEMORY_BLOCK mem_block; 1421 1422 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n"); 1423 1424 mem_block.pLogical = mem_block_ex->pLogicalOrig; 1425 mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow); 1426 mem_block.uiLength = mem_block_ex->uiLengthOrig; 1427 1428 return (nve_osfree(ctx, &mem_block)); 1429} 1430 1431/* Clear memory region */ 1432static NV_SINT32 1433nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length) 1434{ 1435 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n"); 1436 memset(mem, 0, length); 1437 return (1); 1438} 1439 1440/* Sleep for a tick */ 1441static NV_SINT32 1442nve_osdelay(PNV_VOID ctx, NV_UINT32 usec) 1443{ 1444 DELAY(usec); 1445 return (1); 1446} 1447 1448/* Allocate memory for rx buffer */ 1449static NV_SINT32 1450nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id) 1451{ 1452 struct nve_softc *sc = ctx; 1453 struct nve_rx_desc *desc; 1454 struct nve_map_buffer *buf; 1455 int error; 1456 1457 if (device_is_attached(sc->dev)) 1458 NVE_LOCK_ASSERT(sc); 1459 1460 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n"); 1461 1462 if (sc->pending_rxs == RX_RING_SIZE) { 1463 device_printf(sc->dev, "rx ring buffer is full\n"); 1464 goto fail; 1465 } 1466 desc = sc->rx_desc + sc->cur_rx; 1467 buf = &desc->buf; 1468 1469 if (buf->mbuf == NULL) { 1470 buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1471 if (buf->mbuf == NULL) { 1472 device_printf(sc->dev, "failed to allocate memory\n"); 1473 goto fail; 1474 } 1475 buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; 1476 m_adj(buf->mbuf, ETHER_ALIGN); 1477 1478 error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, 1479 nve_dmamap_rx_cb, &desc->paddr, 0); 1480 if (error) { 1481 device_printf(sc->dev, "failed to dmamap mbuf\n"); 1482 m_freem(buf->mbuf); 1483 buf->mbuf = NULL; 1484 goto fail; 1485 } 1486 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); 1487 desc->buflength = buf->mbuf->m_len; 1488 desc->vaddr = mtod(buf->mbuf, caddr_t); 1489 } 1490 sc->pending_rxs++; 1491 sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE; 1492 1493 mem->pLogical = (void *)desc->vaddr; 1494 mem->pPhysical = (void *)desc->paddr; 1495 mem->uiLength = desc->buflength; 1496 *id = (void *)desc; 1497 1498 return (1); 1499 1500fail: 1501 return (0); 1502} 1503 1504/* Free the rx buffer */ 1505static NV_SINT32 1506nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id) 1507{ 1508 struct nve_softc *sc = ctx; 1509 struct nve_rx_desc *desc; 1510 struct nve_map_buffer *buf; 1511 1512 DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n"); 1513 1514 desc = (struct nve_rx_desc *) id; 1515 buf = &desc->buf; 1516 1517 if (buf->mbuf) { 1518 bus_dmamap_unload(sc->mtag, buf->map); 1519 bus_dmamap_destroy(sc->mtag, buf->map); 1520 m_freem(buf->mbuf); 1521 } 1522 sc->pending_rxs--; 1523 buf->mbuf = NULL; 1524 1525 return (1); 1526} 1527 1528/* This gets called by the Nvidia API after our TX packet has been sent */ 1529static NV_SINT32 1530nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success) 1531{ 1532 struct nve_softc *sc = ctx; 1533 struct nve_map_buffer *buf; 1534 struct nve_tx_desc *desc = (struct nve_tx_desc *) id; 1535 struct ifnet *ifp; 1536 1537 NVE_LOCK_ASSERT(sc); 1538 1539 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n"); 1540 1541 ifp = sc->ifp; 1542 buf = &desc->buf; 1543 sc->pending_txs--; 1544 1545 /* Unload and free mbuf cluster */ 1546 if (buf->mbuf == NULL) 1547 goto fail; 1548 1549 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE); 1550 bus_dmamap_unload(sc->mtag, buf->map); 1551 m_freem(buf->mbuf); 1552 buf->mbuf = NULL; 1553 1554 /* Send more packets if we have them */ 1555 if (sc->pending_txs < TX_RING_SIZE) 1556 sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1557 1558 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->pending_txs < TX_RING_SIZE) 1559 nve_ifstart_locked(ifp); 1560 1561fail: 1562 1563 return (1); 1564} 1565 1566/* This gets called by the Nvidia API when a new packet has been received */ 1567/* XXX What is newbuf used for? XXX */ 1568static NV_SINT32 1569nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf, 1570 NV_UINT8 priority) 1571{ 1572 struct nve_softc *sc = ctx; 1573 struct ifnet *ifp; 1574 struct nve_rx_desc *desc; 1575 struct nve_map_buffer *buf; 1576 ADAPTER_READ_DATA *readdata; 1577 struct mbuf *m; 1578 1579 NVE_LOCK_ASSERT(sc); 1580 1581 DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n"); 1582 1583 ifp = sc->ifp; 1584 1585 readdata = (ADAPTER_READ_DATA *) data; 1586 desc = readdata->pvID; 1587 buf = &desc->buf; 1588 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1589 1590 if (success) { 1591 /* Sync DMA bounce buffer. */ 1592 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1593 1594 /* First mbuf in packet holds the ethernet and packet headers */ 1595 buf->mbuf->m_pkthdr.rcvif = ifp; 1596 buf->mbuf->m_pkthdr.len = buf->mbuf->m_len = 1597 readdata->ulTotalLength; 1598 1599 bus_dmamap_unload(sc->mtag, buf->map); 1600 1601 /* Blat the mbuf pointer, kernel will free the mbuf cluster */ 1602 m = buf->mbuf; 1603 buf->mbuf = NULL; 1604 1605 /* Give mbuf to OS. */ 1606 NVE_UNLOCK(sc); 1607 (*ifp->if_input)(ifp, m); 1608 NVE_LOCK(sc); 1609 if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH) 1610 ifp->if_imcasts++; 1611 1612 } else { 1613 bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); 1614 bus_dmamap_unload(sc->mtag, buf->map); 1615 m_freem(buf->mbuf); 1616 buf->mbuf = NULL; 1617 } 1618 1619 sc->cur_rx = desc - sc->rx_desc; 1620 sc->pending_rxs--; 1621 1622 return (1); 1623} 1624 1625/* This gets called by NVIDIA API when the PHY link state changes */ 1626static NV_SINT32 1627nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled) 1628{ 1629 1630 DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n"); 1631 1632 return (1); 1633} 1634 1635/* Setup a watchdog timer */ 1636static NV_SINT32 1637nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer) 1638{ 1639 struct nve_softc *sc = (struct nve_softc *)ctx; 1640 1641 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n"); 1642 1643 callout_init(&sc->ostimer, CALLOUT_MPSAFE); 1644 *timer = &sc->ostimer; 1645 1646 return (1); 1647} 1648 1649/* Free the timer */ 1650static NV_SINT32 1651nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer) 1652{ 1653 1654 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n"); 1655 1656 callout_drain((struct callout *)timer); 1657 1658 return (1); 1659} 1660 1661/* Setup timer parameters */ 1662static NV_SINT32 1663nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters) 1664{ 1665 struct nve_softc *sc = (struct nve_softc *)ctx; 1666 1667 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n"); 1668 1669 sc->ostimer_func = func; 1670 sc->ostimer_params = parameters; 1671 1672 return (1); 1673} 1674 1675/* Set the timer to go off */ 1676static NV_SINT32 1677nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay) 1678{ 1679 struct nve_softc *sc = ctx; 1680 1681 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n"); 1682 1683 callout_reset((struct callout *)timer, delay, sc->ostimer_func, 1684 sc->ostimer_params); 1685 1686 return (1); 1687} 1688 1689/* Cancel the timer */ 1690static NV_SINT32 1691nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer) 1692{ 1693 1694 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n"); 1695 1696 callout_stop((struct callout *)timer); 1697 1698 return (1); 1699} 1700 1701static NV_SINT32 1702nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id, 1703 NV_UINT8 *newbuffer, NV_UINT8 priority) 1704{ 1705 1706 /* Not implemented */ 1707 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n"); 1708 1709 return (1); 1710} 1711 1712static PNV_VOID 1713nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata) 1714{ 1715 1716 /* Not implemented */ 1717 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n"); 1718 1719 return (NULL); 1720} 1721 1722static NV_SINT32 1723nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno) 1724{ 1725 1726 /* Not implemented */ 1727 DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n"); 1728 1729 return (1); 1730} 1731 1732/* Allocate mutex context (already done in nve_attach) */ 1733static NV_SINT32 1734nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock) 1735{ 1736 struct nve_softc *sc = (struct nve_softc *)ctx; 1737 1738 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n"); 1739 1740 *pLock = (void **)sc; 1741 1742 return (1); 1743} 1744 1745/* Obtain a spin lock */ 1746static NV_SINT32 1747nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock) 1748{ 1749 1750 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n"); 1751 1752 return (1); 1753} 1754 1755/* Release lock */ 1756static NV_SINT32 1757nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock) 1758{ 1759 1760 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n"); 1761 1762 return (1); 1763} 1764 1765/* I have no idea what this is for */ 1766static PNV_VOID 1767nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata) 1768{ 1769 1770 /* Not implemented */ 1771 DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n"); 1772 panic("nve: nve_osreturnbufvirtual not implemented\n"); 1773 1774 return (NULL); 1775} 1776 1777/* --- End on NVOSAPI interface --- */ 1778