/*
 * Copyright (c) 2003,2004 by Quinton Dolan <q@onthenet.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $
 */

/*
 * NVIDIA nForce MCP Networking Adapter driver
 *
 * This is a port of the NVIDIA MCP Linux ethernet driver distributed by NVIDIA
 * through their web site.
 *
 * All mainstream nForce and nForce2 motherboards are supported. This module
 * is as stable as, and sometimes more stable than, the Linux version.
 * (Recent Linux stability issues seem to be related to newer distributions
 * using GCC 3.x; however, these do not appear to affect FreeBSD 5.x.)
 *
 * In accordance with the NVIDIA distribution license it is necessary to
 * link this module against the nvlibnet.o binary object included in the
 * Linux driver source distribution. The binary component is not modified in
 * any way and is simply linked against a FreeBSD equivalent of the nvnet.c
 * Linux kernel module "wrapper".
 *
 * The Linux driver uses a common code API that is shared between Win32 and
 * i386 Linux. This abstracts the low level driver functions and uses
 * callbacks and hooks to access the underlying hardware device. By using
 * this same API in a FreeBSD kernel module it is possible to support the
 * hardware without breaching the Linux source distribution's licensing
 * requirements, or obtaining the hardware programming specifications.
 *
 * Although not conventional, it works, and given the relatively small
 * amount of hardware-centric code, it's hopefully no more buggy than its
 * Linux counterpart.
 *
 * NVIDIA now supports the nForce3 AMD64 platform, although I have been
 * unable to access such a system to verify support. The code is reported
 * to work with little modification when compiled with the AMD64 version
 * of the NVIDIA Linux library. All that should be necessary to make the
 * driver work is to link it directly into the kernel, instead of as a
 * module, and apply the docs/amd64.diff patch in this source distribution
 * to the NVIDIA Linux driver source.
 *
 * This driver should work on all versions of FreeBSD since 4.9/5.1 as well
 * as recent versions of DragonFly.
 *
 * Written by Quinton Dolan <q@onthenet.com.au>
 * Portions based on existing FreeBSD network drivers.
 * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/nve/if_nve.c 143442 2005-03-12 00:29:30Z obrien $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/module.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_vlan_var.h>

#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include "miibus_if.h"

/* Include NVIDIA Linux driver header files */
#define	linux
#include <contrib/dev/nve/basetype.h>
#include <contrib/dev/nve/phy.h>
#include "os+%DIKED-nve.h"
#include <contrib/dev/nve/drvinfo.h>
#include <contrib/dev/nve/adapter.h>
#undef linux

#include <dev/nve/if_nvereg.h>

MODULE_DEPEND(nve, pci, 1, 1, 1);
MODULE_DEPEND(nve, ether, 1, 1, 1);
MODULE_DEPEND(nve, miibus, 1, 1, 1);
static int      nve_probe(device_t);
static int      nve_attach(device_t);
static int      nve_detach(device_t);
static void     nve_init(void *);
static void     nve_stop(struct nve_softc *);
static void     nve_shutdown(device_t);
static int      nve_init_rings(struct nve_softc *);
static void     nve_free_rings(struct nve_softc *);

static void     nve_ifstart(struct ifnet *);
static int      nve_ioctl(struct ifnet *, u_long, caddr_t);
static void     nve_intr(void *);
static void     nve_tick(void *);
static void     nve_setmulti(struct nve_softc *);
static void     nve_watchdog(struct ifnet *);
static void     nve_update_stats(struct nve_softc *);

static int      nve_ifmedia_upd(struct ifnet *);
static void     nve_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int      nve_miibus_readreg(device_t, int, int);
static void     nve_miibus_writereg(device_t, int, int, int);

static void     nve_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void     nve_dmamap_rx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void     nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);

static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8);
static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32);
static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *);
static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID);
static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32);
static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID);

static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8);
static PNV_VOID  nve_ospreprocpktnopq(PNV_VOID, PNV_VOID);
static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32);
static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *);
static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID);
static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID);
static PNV_VOID  nve_osreturnbufvirt(PNV_VOID, PNV_VOID);

static device_method_t nve_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, nve_probe),
	DEVMETHOD(device_attach, nve_attach),
	DEVMETHOD(device_detach, nve_detach),
	DEVMETHOD(device_shutdown, nve_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, nve_miibus_readreg),
	DEVMETHOD(miibus_writereg, nve_miibus_writereg),

	{0, 0}
};

static driver_t nve_driver = {
	"nv",
	nve_methods,
	sizeof(struct nve_softc)
};

static devclass_t nve_devclass;

static int      nve_pollinterval = 0;
SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW,
	   &nve_pollinterval, 0, "delay between interface polls");

DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0);
DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0);

static struct nve_type nve_devs[] = {
	{NVIDIA_VENDORID, NFORCE_MCPNET1_DEVICEID,
	"NVIDIA nForce MCP Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET2_DEVICEID,
	"NVIDIA nForce MCP2 Networking Adapter"},
	{NVIDIA_VENDORID, NFORCE_MCPNET3_DEVICEID,
	"NVIDIA nForce MCP3 Networking Adapter"},
	{0, 0, NULL}
};

/* DMA MEM map callback function to get data segment physical address */
static void
nve_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nsegs, int error)
{
	if (error)
		return;

	KASSERT(nsegs == 1,
	    ("Too many DMA segments returned when mapping DMA memory"));
	*(bus_addr_t *)arg = segs->ds_addr;
}

/* DMA RX map callback function to get data segment physical address */
static void
nve_dmamap_rx_cb(void *arg, bus_dma_segment_t * segs, int nsegs,
    bus_size_t mapsize, int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

/*
 * DMA TX buffer callback function to allocate fragment data segment
 * addresses
 */
static void
nve_dmamap_tx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, bus_size_t mapsize, int error)
{
	struct nve_tx_desc *info;

	info = arg;
	if (error)
		return;
	KASSERT(nsegs < NV_MAX_FRAGS,
	    ("Too many DMA segments returned when mapping mbuf"));
	info->numfrags = nsegs;
	bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t));
}

/* Probe for supported hardware IDs */
static int
nve_probe(device_t dev)
{
	struct nve_type *t;

	t = nve_devs;
	/* Check for matching PCI device IDs */
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->vid_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			device_set_desc(dev, t->name);
			return (0);
		}
		t++;
	}

	return (ENXIO);
}

/* Attach driver and initialise hardware for use */
static int
nve_attach(device_t dev)
{
	u_char			eaddr[ETHER_ADDR_LEN];
	struct nve_softc	*sc;
	struct ifnet		*ifp;
	OS_API			*osapi;
	ADAPTER_OPEN_PARAMS	OpenParams;
	int			error = 0, i, rid, unit;

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n");

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	/* Allocate mutex */
	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
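	/*
	 * The main mutex must be recursive: the NVIDIA API entry points
	 * invoke our OS callbacks, which may take this lock while it is
	 * already held (e.g. nve_init holds it across pfnInit, which calls
	 * back into nve_osallocrxbuf).
	 */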
	mtx_init(&sc->osmtx, device_get_nameunit(dev), NULL, MTX_SPIN);

	sc->dev = dev;
	sc->unit = unit;

	/* Preinitialize data structures */
	bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS));

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate memory mapped address space */
	rid = NV_RID;
	sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1,
	    RF_ACTIVE);

	if (sc->res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->sc_st = rman_get_bustag(sc->res);
	sc->sc_sh = rman_get_bushandle(sc->res);

	/* Allocate interrupt */
	rid = 0;
	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}
	/* Allocate DMA tags */
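	/*
	 * Three tags are created: mtag for mbuf data buffers (up to
	 * NV_MAX_FRAGS segments of at most MCLBYTES each), and rtag/ttag
	 * for the RX and TX descriptor rings, each of which must fit in a
	 * single contiguous segment.
	 */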
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
		     BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS,
				   NV_MAX_FRAGS, MCLBYTES, 0,
				   busdma_lock_mutex, &Giant,
				   &sc->mtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0,
	    busdma_lock_mutex, &Giant,
	    &sc->rtag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1,
	    sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0,
	    busdma_lock_mutex, &Giant,
	    &sc->ttag);
	if (error) {
		device_printf(dev, "couldn't allocate dma tag\n");
		goto fail;
	}
	/* Allocate DMA safe memory and get the DMA addresses. */
	error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc,
	    BUS_DMA_WAITOK, &sc->tmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE);
	error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc,
		    sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb,
		    &sc->tx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc,
	    BUS_DMA_WAITOK, &sc->rmap);
	if (error) {
		device_printf(dev, "couldn't allocate dma memory\n");
		goto fail;
	}
	bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE);
	error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc,
	    sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb,
	    &sc->rx_addr, 0);
	if (error) {
		device_printf(dev, "couldn't map dma memory\n");
		goto fail;
	}
	/* Initialize rings. */
	if (nve_init_rings(sc)) {
		device_printf(dev, "failed to init rings\n");
		error = ENXIO;
		goto fail;
	}
	/* Setup NVIDIA API callback routines */
	osapi				= &sc->osapi;
	osapi->pOSCX			= sc;
	osapi->pfnAllocMemory		= nve_osalloc;
	osapi->pfnFreeMemory		= nve_osfree;
	osapi->pfnAllocMemoryEx		= nve_osallocex;
	osapi->pfnFreeMemoryEx		= nve_osfreeex;
	osapi->pfnClearMemory		= nve_osclear;
	osapi->pfnStallExecution	= nve_osdelay;
	osapi->pfnAllocReceiveBuffer	= nve_osallocrxbuf;
	osapi->pfnFreeReceiveBuffer	= nve_osfreerxbuf;
	osapi->pfnPacketWasSent		= nve_ospackettx;
	osapi->pfnPacketWasReceived	= nve_ospacketrx;
	osapi->pfnLinkStateHasChanged	= nve_oslinkchg;
	osapi->pfnAllocTimer		= nve_osalloctimer;
	osapi->pfnFreeTimer		= nve_osfreetimer;
	osapi->pfnInitializeTimer	= nve_osinittimer;
	osapi->pfnSetTimer		= nve_ossettimer;
	osapi->pfnCancelTimer		= nve_oscanceltimer;
	osapi->pfnPreprocessPacket	= nve_ospreprocpkt;
	osapi->pfnPreprocessPacketNopq	= nve_ospreprocpktnopq;
	osapi->pfnIndicatePackets	= nve_osindicatepkt;
	osapi->pfnLockAlloc		= nve_oslockalloc;
	osapi->pfnLockAcquire		= nve_oslockacquire;
	osapi->pfnLockRelease		= nve_oslockrelease;
	osapi->pfnReturnBufferVirtual	= nve_osreturnbufvirt;

	sc->linkup = FALSE;
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN;
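	/* 1500-byte MTU + 14-byte Ethernet header + 4-byte FCS = 1518 */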

	/* TODO - We don't support hardware offload yet */
	sc->hwmode = 1;
	sc->media = 0;

	/* Set NVIDIA API startup parameters */
	OpenParams.MaxDpcLoop = 2;
	OpenParams.MaxRxPkt = RX_RING_SIZE;
	OpenParams.MaxTxPkt = TX_RING_SIZE;
	OpenParams.SentPacketStatusSuccess = 1;
	OpenParams.SentPacketStatusFailure = 0;
	OpenParams.MaxRxPktToAccumulate = 6;
	OpenParams.ulPollInterval = nve_pollinterval;
	OpenParams.SetForcedModeEveryNthRxPacket = 0;
	OpenParams.SetForcedModeEveryNthTxPacket = 0;
	OpenParams.RxForcedInterrupt = 0;
	OpenParams.TxForcedInterrupt = 0;
	OpenParams.pOSApi = osapi;
	OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res);
	OpenParams.bASFEnabled = 0;
	OpenParams.ulDescriptorVersion = sc->hwmode;
	OpenParams.ulMaxPacketSize = sc->max_frame_size;
	OpenParams.DeviceId = pci_get_device(dev);

	/* Open NVIDIA Hardware API */
	error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr);
	if (error) {
		device_printf(dev,
		    "failed to open NVIDIA Hardware API: 0x%x\n", error);
		goto fail;
	}

	/* TODO - Add support for MODE2 hardware offload */

	bzero(&sc->adapterdata, sizeof(sc->adapterdata));

	sc->adapterdata.ulMediaIF = sc->media;
	sc->adapterdata.ulModeRegTxReadCompleteEnable = 1;
	sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata);

	/* MAC is loaded backwards into h/w reg */
	sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr);
	for (i = 0; i < 6; i++) {
		eaddr[i] = sc->original_mac_addr[5 - i];
	}
	sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr);
	bcopy(eaddr, (char *)&sc->sc_macaddr, ETHER_ADDR_LEN);

	/* Display ethernet address */
	device_printf(dev, "Ethernet address %6D\n", sc->sc_macaddr, ":");

	DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_phy_probe\n");

	/* Probe device for MII interface to PHY */
	if (mii_phy_probe(dev, &sc->miibus, nve_ifmedia_upd, nve_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}
	/* Setup interface parameters */
	ifp = &sc->sc_if;
	ifp->if_softc = sc;
	if_initname(ifp, "nve", unit);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nve_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = nve_ifstart;
	ifp->if_watchdog = nve_watchdog;
	ifp->if_timer = 0;
	ifp->if_init = nve_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Mbps(100);
	ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;

	/* Attach to OS's managers. */
	ether_ifattach(ifp, sc->sc_macaddr);
	callout_handle_init(&sc->stat_ch);

	/* Activate our interrupt handler. - attach last to avoid lock */
	error = bus_setup_intr(sc->dev, sc->irq, INTR_TYPE_NET, nve_intr,
	    sc, &sc->sc_ih);
	if (error) {
		device_printf(sc->dev, "couldn't set up interrupt handler\n");
		goto fail;
	}
	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n");

fail:
	if (error)
		nve_detach(dev);

	return (error);
}

/* Detach interface for module unload */
static int
nve_detach(device_t dev)
{
	struct nve_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;

	KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized"));
	NVE_LOCK(sc);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n");

	ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		nve_stop(sc);
		ether_ifdetach(ifp);
	}

	if (sc->miibus)
		device_delete_child(dev, sc->miibus);
	bus_generic_detach(dev);

	/* Reload unreversed address back into MAC in original state */
	if (sc->original_mac_addr)
		sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX,
		    sc->original_mac_addr);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n");
	/* Detach from NVIDIA hardware API */
	if (sc->hwapi->pfnClose)
		sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE);
	/* Release resources */
	if (sc->sc_ih)
		bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih);
	if (sc->irq)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->res)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res);

	nve_free_rings(sc);

	if (sc->tx_desc) {
		bus_dmamap_unload(sc->ttag, sc->tmap);
		bus_dmamem_free(sc->ttag, sc->tx_desc, sc->tmap);
		bus_dmamap_destroy(sc->ttag, sc->tmap);
	}
	if (sc->rx_desc) {
		bus_dmamap_unload(sc->rtag, sc->rmap);
		bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
		bus_dmamap_destroy(sc->rtag, sc->rmap);
	}
	if (sc->mtag)
		bus_dma_tag_destroy(sc->mtag);
	if (sc->ttag)
		bus_dma_tag_destroy(sc->ttag);
	if (sc->rtag)
		bus_dma_tag_destroy(sc->rtag);

	NVE_UNLOCK(sc);
	mtx_destroy(&sc->mtx);
	mtx_destroy(&sc->osmtx);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n");

	return (0);
}

/* Initialise interface and start it "RUNNING" */
static void
nve_init(void *xsc)
{
	struct nve_softc *sc = xsc;
	struct ifnet *ifp;
	int error;

	NVE_LOCK(sc);
	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup);

	ifp = &sc->sc_if;

	/* Do nothing if already running */
	if (ifp->if_flags & IFF_RUNNING)
		goto fail;

	nve_stop(sc);
	DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n");

	/* Setup Hardware interface and allocate memory structures */
	error = sc->hwapi->pfnInit(sc->hwapi->pADCX,
	    0, /* force speed */
	    0, /* force full duplex */
	    0, /* force mode */
	    0, /* force async mode */
	    &sc->linkup);

	if (error) {
		device_printf(sc->dev,
		    "failed to start NVIDIA Hardware interface\n");
		goto fail;
	}
	/* Set the MAC address */
	sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, sc->sc_macaddr);
	sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnStart(sc->hwapi->pADCX);

	/* Setup multicast filter */
	nve_setmulti(sc);
	nve_ifmedia_upd(ifp);

	/* Update interface parameters */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->stat_ch = timeout(nve_tick, sc, hz);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n");

fail:
	NVE_UNLOCK(sc);

	return;
}

/* Stop interface activity, i.e. not "RUNNING" */
static void
nve_stop(struct nve_softc *sc)
{
	struct ifnet *ifp;

	NVE_LOCK(sc);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n");

	ifp = &sc->sc_if;
	ifp->if_timer = 0;

	/* Cancel tick timer */
	untimeout(nve_tick, sc, sc->stat_ch);

	/* Stop hardware activity */
	sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX);
	sc->hwapi->pfnStop(sc->hwapi->pADCX, 0);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n");
	/* Shutdown interface and deallocate memory buffers */
	if (sc->hwapi->pfnDeinit)
		sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0);

	sc->linkup = 0;
	sc->cur_rx = 0;
	sc->pending_rxs = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n");

	NVE_UNLOCK(sc);

	return;
}

/* Shutdown interface for unload/reboot */
static void
nve_shutdown(device_t dev)
{
	struct nve_softc *sc;

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n");

	sc = device_get_softc(dev);

	/* Stop hardware activity */
	nve_stop(sc);
}

/* Allocate RX and TX ring buffers */
static int
nve_init_rings(struct nve_softc *sc)
{
	int error, i;

	NVE_LOCK(sc);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n");

	sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0;
	/* Initialise RX ring */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct nve_rx_desc *desc = sc->rx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			device_printf(sc->dev, "couldn't allocate mbuf\n");
			nve_free_rings(sc);
			error = ENOBUFS;
			goto fail;
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);
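		/*
		 * Trimming ETHER_ALIGN (2) bytes off the front leaves the
		 * IP header 32-bit aligned once the 14-byte Ethernet
		 * header has been consumed.
		 */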

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			device_printf(sc->dev, "couldn't create dma map\n");
			nve_free_rings(sc);
			goto fail;
		}
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
					  nve_dmamap_rx_cb, &desc->paddr, 0);
		if (error) {
			device_printf(sc->dev, "couldn't dma map mbuf\n");
			nve_free_rings(sc);
			goto fail;
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

		desc->buflength = buf->mbuf->m_len;
		desc->vaddr = mtod(buf->mbuf, caddr_t);
	}
	bus_dmamap_sync(sc->rtag, sc->rmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Initialize TX ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct nve_tx_desc *desc = sc->tx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		buf->mbuf = NULL;

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			device_printf(sc->dev, "couldn't create dma map\n");
			nve_free_rings(sc);
			goto fail;
		}
	}
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n");

fail:
	NVE_UNLOCK(sc);

	return (error);
}

/* Free the RX and TX ring buffers */
static void
nve_free_rings(struct nve_softc *sc)
{
	int i;

	NVE_LOCK(sc);

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n");

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct nve_rx_desc *desc = sc->rx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct nve_tx_desc *desc = sc->tx_desc + i;
		struct nve_map_buffer *buf = &desc->buf;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n");

	NVE_UNLOCK(sc);
}

/* Main loop for sending packets from OS to interface */
static void
nve_ifstart(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;
	struct nve_map_buffer *buf;
	struct mbuf    *m0, *m;
	struct nve_tx_desc *desc;
	ADAPTER_WRITE_DATA txdata;
	int error, i;

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n");

	/* If link is down/busy or queue is empty do nothing */
	if (ifp->if_flags & IFF_OACTIVE || ifp->if_snd.ifq_head == NULL)
		return;

	/* Transmit queued packets until sent or TX ring is full */
	while (sc->pending_txs < TX_RING_SIZE) {
		desc = sc->tx_desc + sc->cur_tx;
		buf = &desc->buf;

		/* Get next packet to send. */
		IF_DEQUEUE(&ifp->if_snd, m0);

		/* If nothing to send, return. */
		if (m0 == NULL)
			return;

		/* Map MBUF for DMA access */
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
		    nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);

		if (error && error != EFBIG) {
			m_freem(m0);
			sc->tx_errors++;
			continue;
		}
		/*
		 * Packet has too many fragments - defrag into new mbuf
		 * cluster
		 */
		if (error) {
			m = m_defrag(m0, M_DONTWAIT);
			if (m == NULL) {
				m_freem(m0);
				sc->tx_errors++;
				continue;
			}
			m_freem(m0);
			m0 = m;

			error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m,
			    nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT);
			if (error) {
				m_freem(m);
				sc->tx_errors++;
				continue;
			}
		}
		/* Do sync on DMA bounce buffer */
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

		buf->mbuf = m0;
		txdata.ulNumberOfElements = desc->numfrags;
		txdata.pvID = (PVOID)desc;

		/* Put fragments into API element list */
		txdata.ulTotalLength = buf->mbuf->m_pkthdr.len;
		for (i = 0; i < desc->numfrags; i++) {
			txdata.sElement[i].ulLength =
			    (ulong)desc->frags[i].ds_len;
			txdata.sElement[i].pPhysical =
			    (PVOID)desc->frags[i].ds_addr;
		}

		/* Send packet to Nvidia API for transmission */
		error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata);

		switch (error) {
		case ADAPTERERR_NONE:
			/* Packet was queued in API TX queue successfully */
			sc->pending_txs++;
			sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE;
			break;

		case ADAPTERERR_TRANSMIT_QUEUE_FULL:
			/* The API TX queue is full - requeue the packet */
			device_printf(sc->dev,
			    "nve_ifstart: transmit queue is full\n");
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->mtag, buf->map);
			IF_PREPEND(&ifp->if_snd, buf->mbuf);
			buf->mbuf = NULL;
			return;

		default:
			/* The API failed to queue/send the packet so dump it */
			device_printf(sc->dev, "nve_ifstart: transmit error\n");
			bus_dmamap_unload(sc->mtag, buf->map);
			m_freem(buf->mbuf);
			buf->mbuf = NULL;
			sc->tx_errors++;
			return;
		}
		/* Set watchdog timer (if_timer counts down in seconds). */
		ifp->if_timer = 8;

		/* Copy packet to BPF tap */
		BPF_MTAP(ifp, m0);
	}
	ifp->if_flags |= IFF_OACTIVE;

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n");
}

/* Handle IOCTL events */
static int
nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct nve_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	NVE_LOCK(sc);

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n");

	switch (command) {
	case SIOCSIFMTU:
		/* Set MTU size */
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;
		if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) {
			ifp->if_mtu = ifr->ifr_mtu;
			nve_stop(sc);
			nve_init(sc);
		} else
			error = EINVAL;
		break;

	case SIOCSIFFLAGS:
		/* Setup interface flags */
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0) {
				nve_init(sc);
				break;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				nve_stop(sc);
				break;
			}
		}
		/* Handle IFF_PROMISC and IFF_ALLMULTI flags. */
		nve_setmulti(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Setup multicast filter */
		if (ifp->if_flags & IFF_RUNNING) {
			nve_setmulti(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Get/Set interface media parameters */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		/* Everything else we forward to generic ether ioctl */
		error = ether_ioctl(ifp, (int)command, data);
		break;
	}

	DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n");

	NVE_UNLOCK(sc);

	return (error);
}

/* Interrupt service routine */
static void
nve_intr(void *arg)
{
	struct nve_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n");

	if ((ifp->if_flags & IFF_UP) == 0) {
		nve_stop(sc);
		return;
	}
	/* Handle interrupt event */
	if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) {
		sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX);
		sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX);
	}
	if (ifp->if_snd.ifq_head != NULL)
		nve_ifstart(ifp);

	/* If no pending packets we don't need a timeout */
	if (sc->pending_txs == 0)
		sc->sc_if.if_timer = 0;

	DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n");

	return;
}

/* Setup multicast filters */
static void
nve_setmulti(struct nve_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	PACKET_FILTER hwfilter;
	int i;
	u_int8_t andaddr[6], oraddr[6];

	NVE_LOCK(sc);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n");

	ifp = &sc->sc_if;

	/* Initialize filter */
	hwfilter.ulFilterFlags = 0;
	for (i = 0; i < 6; i++) {
		hwfilter.acMulticastAddress[i] = 0;
		hwfilter.acMulticastMask[i] = 0;
	}

	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Accept all packets */
		hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS;
		sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);
		NVE_UNLOCK(sc);
		return;
	}
	/*
	 * Setup multicast filter. Start with all bits set in the AND
	 * accumulator and none in the OR accumulator; without this the
	 * accumulation loop below would operate on uninitialised stack
	 * memory.
	 */
	for (i = 0; i < 6; i++) {
		andaddr[i] = 0xff;
		oraddr[i] = 0;
	}
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		u_char *addrp;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
		for (i = 0; i < 6; i++) {
			u_int8_t mcaddr = addrp[i];
			andaddr[i] &= mcaddr;
			oraddr[i] |= mcaddr;
		}
	}
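	/*
	 * andaddr now holds the bits set in every subscribed address and
	 * oraddr the bits set in any of them. The address/mask pair below
	 * therefore matches exactly on the bit positions where all
	 * addresses agree and wildcards the rest.
	 */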
	for (i = 0; i < 6; i++) {
		hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i];
		hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]);
	}

	/* Send filter to NVIDIA API */
	sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter);

	NVE_UNLOCK(sc);

	DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n");

	return;
}

/* Change the current media/mediaopts */
static int
nve_ifmedia_upd(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n");

	mii = device_get_softc(sc->miibus);

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list)) {
			mii_phy_reset(miisc);
		}
	}
	mii_mediachg(mii);

	return (0);
}

/* Update current miibus PHY status of media */
static void
nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nve_softc *sc;
	struct mii_data *mii;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n");

	sc = ifp->if_softc;
	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/* miibus tick timer - maintain link status */
static void
nve_tick(void *xsc)
{
	struct nve_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp;

	NVE_LOCK(sc);

	ifp = &sc->sc_if;
	nve_update_stats(sc);

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);

	if (mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		if (ifp->if_snd.ifq_head != NULL)
			nve_ifstart(ifp);
	}
	sc->stat_ch = timeout(nve_tick, sc, hz);

	NVE_UNLOCK(sc);

	return;
}

/* Update ifnet data structure with collected interface stats from API */
static void
nve_update_stats(struct nve_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	ADAPTER_STATS stats;

	NVE_LOCK(sc);

	if (sc->hwapi) {
		sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats);

		ifp->if_ipackets = stats.ulSuccessfulReceptions;
		ifp->if_ierrors = stats.ulMissedFrames +
			stats.ulFailedReceptions +
			stats.ulCRCErrors +
			stats.ulFramingErrors +
			stats.ulOverFlowErrors;

		ifp->if_opackets = stats.ulSuccessfulTransmissions;
		ifp->if_oerrors = sc->tx_errors +
			stats.ulFailedTransmissions +
			stats.ulRetryErrors +
			stats.ulUnderflowErrors +
			stats.ulLossOfCarrierErrors +
			stats.ulLateCollisionErrors;

		ifp->if_collisions = stats.ulLateCollisionErrors;
	}
	NVE_UNLOCK(sc);

	return;
}

/* miibus Read PHY register wrapper - calls Nvidia API entry point */
static int
nve_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nve_softc *sc = device_get_softc(dev);
	ULONG data;

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n");

	ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n");

	return (data);
}

/* miibus Write PHY register wrapper - calls Nvidia API entry point */
static void
nve_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct nve_softc *sc = device_get_softc(dev);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n");

	ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data);

	DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n");

	return;
}

/* Watchdog timer to prevent PHY lockups */
static void
nve_watchdog(struct ifnet *ifp)
{
	struct nve_softc *sc = ifp->if_softc;

	device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs);

	sc->tx_errors++;

	nve_stop(sc);
	ifp->if_flags &= ~IFF_RUNNING;
	nve_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		nve_ifstart(ifp);

	return;
}

/* --- Start of NVOSAPI interface --- */

/* Allocate DMA enabled general use memory for API */
static NV_SINT32
nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem)
{
	struct nve_softc *sc;
	bus_addr_t mem_physical;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength);

	sc = (struct nve_softc *)ctx;

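	/*
	 * The API hands this memory straight to the hardware, so it must
	 * be physically contiguous and page aligned - hence contigmalloc()
	 * and vtophys() rather than plain malloc().
	 */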
	mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0);

	if (!mem->pLogical) {
		device_printf(sc->dev, "memory allocation failed\n");
		return (0);
	}
	memset(mem->pLogical, 0, (ulong)mem->uiLength);
	mem_physical = vtophys(mem->pLogical);
	mem->pPhysical = (PVOID)mem_physical;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n",
	    (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength);

	return (1);
}

/* Free allocated memory */
static NV_SINT32
nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem)
{
	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n",
	    (uint)mem->pLogical, (uint) mem->uiLength);

	contigfree(mem->pLogical, PAGE_SIZE, M_DEVBUF);
	return (1);
}

/* Copied directly from nvnet.c */
static NV_SINT32
nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
{
	MEMORY_BLOCK mem_block;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n");

	mem_block_ex->pLogical = NULL;
	mem_block_ex->uiLengthOrig = mem_block_ex->uiLength;

	if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) &&
	    (mem_block_ex->AlignmentSize > 1)) {
		DEBUGOUT(NVE_DEBUG_API, "     aligning on %d\n",
		    mem_block_ex->AlignmentSize);
		mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize;
	}
	mem_block.uiLength = mem_block_ex->uiLengthOrig;

	if (nve_osalloc(ctx, &mem_block) == 0) {
		return (0);
	}
	mem_block_ex->pLogicalOrig = mem_block.pLogical;
	mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical;
	mem_block_ex->pPhysicalOrigHigh = 0;

	mem_block_ex->pPhysical = mem_block.pPhysical;
	mem_block_ex->pLogical = mem_block.pLogical;

	if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) {
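		/*
		 * AlignmentSize extra bytes were allocated above, so both
		 * pointers can be advanced to the next aligned boundary
		 * without running past the end of the block.
		 */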
		unsigned int offset;
		offset = mem_block_ex->pPhysicalOrigLow &
		    (mem_block_ex->AlignmentSize - 1);

		if (offset) {
			mem_block_ex->pPhysical =
			    (PVOID)((ulong)mem_block_ex->pPhysical +
			    mem_block_ex->AlignmentSize - offset);
			mem_block_ex->pLogical =
			    (PVOID)((ulong)mem_block_ex->pLogical +
			    mem_block_ex->AlignmentSize - offset);
		} /* if (offset) */
	} /* if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) */
	return (1);
}

/* Copied directly from nvnet.c */
static NV_SINT32
nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
{
	MEMORY_BLOCK mem_block;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n");

	mem_block.pLogical = mem_block_ex->pLogicalOrig;
	mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow);
	mem_block.uiLength = mem_block_ex->uiLengthOrig;

	return (nve_osfree(ctx, &mem_block));
}

/* Clear memory region */
static NV_SINT32
nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length)
{
	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n");
	memset(mem, 0, length);
	return (1);
}

/* Busy-wait for the requested number of microseconds */
static NV_SINT32
nve_osdelay(PNV_VOID ctx, NV_UINT32 usec)
{
	DELAY(usec);
	return (1);
}

/* Allocate memory for rx buffer */
static NV_SINT32
nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id)
{
	struct nve_softc *sc = ctx;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;
	int error;

	NVE_LOCK(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n");

	if (sc->pending_rxs == RX_RING_SIZE) {
		device_printf(sc->dev, "rx ring buffer is full\n");
		goto fail;
	}
	desc = sc->rx_desc + sc->cur_rx;
	buf = &desc->buf;

	if (buf->mbuf == NULL) {
		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			device_printf(sc->dev, "failed to allocate memory\n");
			goto fail;
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    nve_dmamap_rx_cb, &desc->paddr, 0);
		if (error) {
			device_printf(sc->dev, "failed to dmamap mbuf\n");
			m_freem(buf->mbuf);
			buf->mbuf = NULL;
			goto fail;
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);
		desc->buflength = buf->mbuf->m_len;
		desc->vaddr = mtod(buf->mbuf, caddr_t);
	}
	sc->pending_rxs++;
	sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE;

	mem->pLogical = (void *)desc->vaddr;
	mem->pPhysical = (void *)desc->paddr;
	mem->uiLength = desc->buflength;
	*id = (void *)desc;

	NVE_UNLOCK(sc);
	return (1);

fail:
	NVE_UNLOCK(sc);
	return (0);
}

/* Free the rx buffer */
static NV_SINT32
nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id)
{
	struct nve_softc *sc = ctx;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;

	NVE_LOCK(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n");

	desc = (struct nve_rx_desc *) id;
	buf = &desc->buf;

	if (buf->mbuf) {
		bus_dmamap_unload(sc->mtag, buf->map);
		bus_dmamap_destroy(sc->mtag, buf->map);
		m_freem(buf->mbuf);
	}
	sc->pending_rxs--;
	buf->mbuf = NULL;

	NVE_UNLOCK(sc);

	return (1);
}

/* This gets called by the Nvidia API after our TX packet has been sent */
static NV_SINT32
nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success)
{
	struct nve_softc *sc = ctx;
	struct nve_map_buffer *buf;
	struct nve_tx_desc *desc = (struct nve_tx_desc *) id;
	struct ifnet *ifp;

	NVE_LOCK(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n");

	ifp = &sc->sc_if;
	buf = &desc->buf;
	sc->pending_txs--;

	/* Unload and free mbuf cluster */
	if (buf->mbuf == NULL)
		goto fail;

	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->mtag, buf->map);
	m_freem(buf->mbuf);
	buf->mbuf = NULL;

	/* Send more packets if we have them */
	if (sc->pending_txs < TX_RING_SIZE)
		sc->sc_if.if_flags &= ~IFF_OACTIVE;

	if (ifp->if_snd.ifq_head != NULL && sc->pending_txs < TX_RING_SIZE)
		nve_ifstart(ifp);

fail:
	NVE_UNLOCK(sc);

	return (1);
}

/* This gets called by the Nvidia API when a new packet has been received */
/* XXX What is newbuf used for? XXX */
static NV_SINT32
nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf,
    NV_UINT8 priority)
{
	struct nve_softc *sc = ctx;
	struct ifnet *ifp;
	struct nve_rx_desc *desc;
	struct nve_map_buffer *buf;
	ADAPTER_READ_DATA *readdata;

	NVE_LOCK(sc);

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n");

	ifp = &sc->sc_if;

	readdata = (ADAPTER_READ_DATA *) data;
	desc = readdata->pvID;
	buf = &desc->buf;

	/* Sync DMA bounce buffer. */
	bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);

	if (success) {
		/* First mbuf in packet holds the ethernet and packet headers */
		buf->mbuf->m_pkthdr.rcvif = ifp;
		buf->mbuf->m_pkthdr.len = buf->mbuf->m_len =
		    readdata->ulTotalLength;

		bus_dmamap_unload(sc->mtag, buf->map);

		/* Give mbuf to OS. */
		(*ifp->if_input) (ifp, buf->mbuf);
		if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH)
			ifp->if_imcasts++;

		/* Blat the mbuf pointer, kernel will free the mbuf cluster */
		buf->mbuf = NULL;
	} else {
		bus_dmamap_unload(sc->mtag, buf->map);
		m_freem(buf->mbuf);
		buf->mbuf = NULL;
	}

	sc->cur_rx = desc - sc->rx_desc;
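	/*
	 * Pointer arithmetic on the descriptor array recovers the ring
	 * index, so the next nve_osallocrxbuf() refills from the slot
	 * just vacated.
	 */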
	sc->pending_rxs--;

	NVE_UNLOCK(sc);

	return (1);
}

/* This gets called by NVIDIA API when the PHY link state changes */
static NV_SINT32
nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;
	struct ifnet *ifp;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n");

	ifp = &sc->sc_if;

	if (enabled)
		ifp->if_flags |= IFF_UP;
	else
		ifp->if_flags &= ~IFF_UP;

	return (1);
}

/* Setup a watchdog timer */
static NV_SINT32
nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n");

	callout_handle_init(&sc->ostimer);
	*timer = &sc->ostimer;

	return (1);
}

/* Free the timer */
static NV_SINT32
nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer)
{

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n");

	return (1);
}

/* Setup timer parameters */
static NV_SINT32
nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n");

	sc->ostimer_func = func;
	sc->ostimer_params = parameters;

	return (1);
}

/* Set the timer to go off */
static NV_SINT32
nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay)
{
	struct nve_softc *sc = ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n");

	*(struct callout_handle *)timer = timeout(sc->ostimer_func,
	    sc->ostimer_params, delay);

	return (1);
}

/* Cancel the timer */
static NV_SINT32
nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer)
{
	struct nve_softc *sc = ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n");

	untimeout(sc->ostimer_func, sc->ostimer_params,
	    *(struct callout_handle *)timer);

	return (1);
}

static NV_SINT32
nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id,
    NV_UINT8 *newbuffer, NV_UINT8 priority)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n");

	return (1);
}

static PNV_VOID
nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpktnopq\n");

	return (NULL);
}

static NV_SINT32
nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n");

	return (1);
}

/* Allocate mutex context (already done in nve_attach) */
static NV_SINT32
nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n");

	*pLock = (void **)sc;

	return (1);
}

/* Obtain a spin lock */
static NV_SINT32
nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n");

	NVE_OSLOCK((struct nve_softc *)lock);

	return (1);
}

/* Release lock */
static NV_SINT32
nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{

	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n");

	NVE_OSUNLOCK((struct nve_softc *)lock);

	return (1);
}

/* I have no idea what this is for */
static PNV_VOID
nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata)
{

	/* Not implemented */
	DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n");
	panic("nve: nve_osreturnbufvirt not implemented\n");

	return (NULL);
}

/* --- End of NVOSAPI interface --- */