if_nfe.c revision 164649
1/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/
2
3/*-
4 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
22
23#include <sys/cdefs.h>
24__FBSDID("$FreeBSD: head/sys/dev/nfe/if_nfe.c 164649 2006-11-27 00:10:00Z obrien $");
25
26/* Uncomment the following line to enable polling. */
27/* #define	DEVICE_POLLING */
28
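/*
 * Compile-time knobs: NFE_NO_JUMBO leaves jumbo frame support disabled even
 * on chips that advertise it, NFE_CSUM enables the hardware checksum paths
 * guarded below, and NVLAN (0 here) keeps the hardware VLAN tagging paths
 * compiled out.
 */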
29#define	NFE_NO_JUMBO
30#define	NFE_CSUM
31#define	NFE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
32#define	NVLAN 0
33
34#ifdef HAVE_KERNEL_OPTION_HEADERS
35#include "opt_device_polling.h"
36#endif
37
38#include <sys/param.h>
39#include <sys/endian.h>
40#include <sys/systm.h>
41#include <sys/sockio.h>
42#include <sys/mbuf.h>
43#include <sys/malloc.h>
44#include <sys/module.h>
45#include <sys/kernel.h>
46#include <sys/socket.h>
47#include <sys/taskqueue.h>
48
49#include <net/if.h>
50#include <net/if_arp.h>
51#include <net/ethernet.h>
52#include <net/if_dl.h>
53#include <net/if_media.h>
54#include <net/if_types.h>
55#include <net/if_vlan_var.h>
56
57#include <net/bpf.h>
58
59#include <machine/bus.h>
60#include <machine/resource.h>
61#include <sys/bus.h>
62#include <sys/rman.h>
63
64#include <dev/mii/mii.h>
65#include <dev/mii/miivar.h>
66
67#include <dev/pci/pcireg.h>
68#include <dev/pci/pcivar.h>
69
70#include <dev/nfe/if_nfereg.h>
71#include <dev/nfe/if_nfevar.h>
72
73MODULE_DEPEND(nfe, pci, 1, 1, 1);
74MODULE_DEPEND(nfe, ether, 1, 1, 1);
75MODULE_DEPEND(nfe, miibus, 1, 1, 1);
76#include "miibus_if.h"
77
78static int  nfe_probe(device_t);
79static int  nfe_attach(device_t);
80static int  nfe_detach(device_t);
81static void nfe_shutdown(device_t);
82static int  nfe_miibus_readreg(device_t, int, int);
83static int  nfe_miibus_writereg(device_t, int, int, int);
84static void nfe_miibus_statchg(device_t);
85static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
86static void nfe_intr(void *);
87static void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
88static void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
89static void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
90static void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
91static void nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
92static void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
93static void nfe_rxeof(struct nfe_softc *);
94static void nfe_txeof(struct nfe_softc *);
95static int  nfe_encap(struct nfe_softc *, struct mbuf *);
96static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
97static void nfe_jfree(void *, void *);
98static int  nfe_jpool_alloc(struct nfe_softc *);
99static void nfe_jpool_free(struct nfe_softc *);
100static void nfe_setmulti(struct nfe_softc *);
101static void nfe_start(struct ifnet *);
102static void nfe_start_locked(struct ifnet *);
103static void nfe_watchdog(struct ifnet *);
104static void nfe_init(void *);
105static void nfe_init_locked(void *);
106static void nfe_stop(struct ifnet *, int);
107static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
108static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
109static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
110static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
111static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
112static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
113static int  nfe_ifmedia_upd(struct ifnet *);
114static int  nfe_ifmedia_upd_locked(struct ifnet *);
115static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
116static void nfe_tick(void *);
117static void nfe_tick_locked(struct nfe_softc *);
118static void nfe_get_macaddr(struct nfe_softc *, u_char *);
119static void nfe_set_macaddr(struct nfe_softc *, u_char *);
120static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
121#ifdef DEVICE_POLLING
122static void nfe_poll_locked(struct ifnet *, enum poll_cmd, int);
123#endif
124
125#ifdef NFE_DEBUG
126int nfedebug = 0;
127#define	DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
128#define	DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
129#else
130#define	DPRINTF(x)
131#define	DPRINTFN(n,x)
132#endif
133
134#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
135#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
136#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
137
138#define	letoh16(x) le16toh(x)
139
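/* First PCI BAR (config offset 0x10): the memory-mapped register window. */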
140#define	NV_RID		0x10
141
142static device_method_t nfe_methods[] = {
143	/* Device interface */
144	DEVMETHOD(device_probe,		nfe_probe),
145	DEVMETHOD(device_attach,	nfe_attach),
146	DEVMETHOD(device_detach,	nfe_detach),
147	DEVMETHOD(device_shutdown,	nfe_shutdown),
148
149	/* bus interface */
150	DEVMETHOD(bus_print_child,	bus_generic_print_child),
151	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
152
153	/* MII interface */
154	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
155	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
156	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),
157
158	{ 0, 0 }
159};
160
161static driver_t nfe_driver = {
162	"nfe",
163	nfe_methods,
164	sizeof(struct nfe_softc)
165};
166
167static devclass_t nfe_devclass;
168
169DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
170DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
171
172static struct nfe_type nfe_devs[] = {
173	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
174	    "NVIDIA nForce MCP Networking Adapter"},
175	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
176	    "NVIDIA nForce2 MCP2 Networking Adapter"},
177	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
178	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
179	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
180	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
181	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
182	    "NVIDIA nForce3 MCP3 Networking Adapter"},
183	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
184	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
185	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
186	    "NVIDIA nForce3 MCP7 Networking Adapter"},
187	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
188	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
189	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
190	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
191	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
192	    "NVIDIA nForce MCP04 Networking Adapter"},		// MCP10
193	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
194	    "NVIDIA nForce MCP04 Networking Adapter"},		// MCP11
195	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
196	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
197	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
198	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
199	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
200	    "NVIDIA nForce MCP55 Networking Adapter"},
201	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
202	    "NVIDIA nForce MCP55 Networking Adapter"},
203	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
204	    "NVIDIA nForce MCP61 Networking Adapter"},
205	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
206	    "NVIDIA nForce MCP61 Networking Adapter"},
207	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
208	    "NVIDIA nForce MCP61 Networking Adapter"},
209	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
210	    "NVIDIA nForce MCP61 Networking Adapter"},
211	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
212	    "NVIDIA nForce MCP65 Networking Adapter"},
213	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
214	    "NVIDIA nForce MCP65 Networking Adapter"},
215	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
216	    "NVIDIA nForce MCP65 Networking Adapter"},
217	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
218	    "NVIDIA nForce MCP65 Networking Adapter"},
219	{0, 0, NULL}
220};
221
222
223/* Probe for supported hardware ID's */
224static int
225nfe_probe(device_t dev)
226{
227	struct nfe_type *t;
228
229	t = nfe_devs;
230	/* Check for matching PCI DEVICE ID's */
231	while (t->name != NULL) {
232		if ((pci_get_vendor(dev) == t->vid_id) &&
233		    (pci_get_device(dev) == t->dev_id)) {
234			device_set_desc(dev, t->name);
235			return (0);
236		}
237		t++;
238	}
239
240	return (ENXIO);
241}
242
243
244static int
245nfe_attach(device_t dev)
246{
247	struct nfe_softc *sc;
248	struct ifnet *ifp;
249	int unit, error = 0, rid;
250
251	sc = device_get_softc(dev);
252	unit = device_get_unit(dev);
253	sc->nfe_dev = dev;
254	sc->nfe_unit = unit;
255
256	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
257	    MTX_DEF | MTX_RECURSE);
258	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
259
260	pci_enable_busmaster(dev);
261
262	rid = NV_RID;
263	sc->nfe_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
264	    0, ~0, 1, RF_ACTIVE);
265
266	if (sc->nfe_res == NULL) {
267		printf ("nfe%d: couldn't map ports/memory\n", unit);
268		error = ENXIO;
269		goto fail;
270	}
271
272	sc->nfe_memt = rman_get_bustag(sc->nfe_res);
273	sc->nfe_memh = rman_get_bushandle(sc->nfe_res);
274
275	/* Allocate interrupt */
276	rid = 0;
277	sc->nfe_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
278	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
279
280	if (sc->nfe_irq == NULL) {
281		printf("nfe%d: couldn't map interrupt\n", unit);
282		error = ENXIO;
283		goto fail;
284	}
285
286	nfe_get_macaddr(sc, sc->eaddr);
287
288	sc->nfe_flags = 0;
289
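	/*
	 * Derive per-chip feature flags (jumbo frames, 40-bit DMA addressing,
	 * hardware checksum and VLAN tag support) from the PCI device ID.
	 */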
290	switch (pci_get_device(dev)) {
291	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
292	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
293	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
294	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
295		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
296		break;
297	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
298	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
299		sc->nfe_flags |= NFE_40BIT_ADDR;
300		break;
301	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
302	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
303	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
304	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
305		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
306		break;
307	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
308	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
309		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
310		    NFE_HW_VLAN;
311		break;
312	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
313	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
314	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
315	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
316		sc->nfe_flags |= NFE_40BIT_ADDR;
317		break;
318	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
319	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
320	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
321	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
322		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
323		break;
324	}
325
326#ifndef NFE_NO_JUMBO
327	/* enable jumbo frames for adapters that support it */
328	if (sc->nfe_flags & NFE_JUMBO_SUP)
329		sc->nfe_flags |= NFE_USE_JUMBO;
330#endif
331
332	/*
333	 * Allocate the parent bus DMA tag appropriate for PCI.
334	 */
335#define	NFE_NSEG_NEW 32
336	error = bus_dma_tag_create(NULL,	/* parent */
337	    1, 0,				/* alignment, boundary */
338	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
339	    BUS_SPACE_MAXADDR,			/* highaddr */
340	    NULL, NULL,				/* filter, filterarg */
341	    MAXBSIZE, NFE_NSEG_NEW,		/* maxsize, nsegments */
342	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
343	    BUS_DMA_ALLOCNOW,			/* flags */
344	    NULL, NULL,				/* lockfunc, lockarg */
345	    &sc->nfe_parent_tag);
346	if (error)
347		goto fail;
348
349	/*
350	 * Allocate Tx and Rx rings.
351	 */
352	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
353		printf("nfe%d: could not allocate Tx ring\n", unit);
354		error = ENXIO;
355		goto fail;
356	}
357
358	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
359		printf("nfe%d: could not allocate Rx ring\n", unit);
360		nfe_free_tx_ring(sc, &sc->txq);
361		error = ENXIO;
362		goto fail;
363	}
364
365	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
366	if (ifp == NULL) {
367		printf("nfe%d: can not if_alloc()\n", unit);
368		error = ENOSPC;
369		goto fail;
370	}
371
372	ifp->if_softc = sc;
373	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
374	ifp->if_mtu = ETHERMTU;
375	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
376	ifp->if_ioctl = nfe_ioctl;
377	ifp->if_start = nfe_start;
378	/* ifp->if_hwassist = NFE_CSUM_FEATURES; */
379	ifp->if_watchdog = nfe_watchdog;
380	ifp->if_init = nfe_init;
381	ifp->if_baudrate = IF_Gbps(1);
382	ifp->if_snd.ifq_maxlen = NFE_IFQ_MAXLEN;
383
384	ifp->if_capabilities = IFCAP_VLAN_MTU;
385#if NVLAN > 0
386	if (sc->nfe_flags & NFE_HW_VLAN)
387		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
388#endif
389#ifdef NFE_CSUM
390	if (sc->nfe_flags & NFE_HW_CSUM) {
391		ifp->if_capabilities |= IFCAP_HWCSUM;
392	}
393#endif
394	ifp->if_capenable = ifp->if_capabilities;
395
396#ifdef DEVICE_POLLING
397	ifp->if_capabilities |= IFCAP_POLLING;
398#endif
399
400	/* Do MII setup */
401	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
402	    nfe_ifmedia_sts)) {
403		printf("nfe%d: MII without any phy!\n", unit);
404		error = ENXIO;
405		goto fail;
406	}
407
408	ether_ifattach(ifp, sc->eaddr);
409
410	error = bus_setup_intr(dev, sc->nfe_irq, INTR_TYPE_NET | INTR_MPSAFE,
411	    nfe_intr, sc, &sc->nfe_intrhand);
412
413	if (error) {
414		printf("nfe%d: couldn't set up irq\n", unit);
415		ether_ifdetach(ifp);
416		goto fail;
417	}
418
419fail:
420	if (error)
421		nfe_detach(dev);
422
423	return (error);
424}
425
426
427static int
428nfe_detach(device_t dev)
429{
430	struct nfe_softc *sc;
431	struct ifnet *ifp;
432	u_char eaddr[ETHER_ADDR_LEN];
433	int i;
434
435	sc = device_get_softc(dev);
436	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
437	ifp = sc->nfe_ifp;
438
439#ifdef DEVICE_POLLING
440	if (ifp->if_capenable & IFCAP_POLLING)
441		ether_poll_deregister(ifp);
442#endif
443
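	/*
	 * Restore the MAC address registers to the values read at attach
	 * time; the chip presents the address in reversed byte order
	 * (see nfe_get_macaddr()).
	 */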
444	for (i = 0; i < ETHER_ADDR_LEN; i++) {
445		eaddr[i] = sc->eaddr[5 - i];
446	}
447	nfe_set_macaddr(sc, eaddr);
448
449	if (device_is_attached(dev)) {
450		NFE_LOCK(sc);
451		nfe_stop(ifp, 1);
452		ifp->if_flags &= ~IFF_UP;
453		NFE_UNLOCK(sc);
454		callout_drain(&sc->nfe_stat_ch);
455		ether_ifdetach(ifp);
456	}
457
458	if (ifp)
459		if_free(ifp);
460	if (sc->nfe_miibus)
461		device_delete_child(dev, sc->nfe_miibus);
462	bus_generic_detach(dev);
463
464	if (sc->nfe_intrhand)
465		bus_teardown_intr(dev, sc->nfe_irq, sc->nfe_intrhand);
466	if (sc->nfe_irq)
467		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nfe_irq);
468	if (sc->nfe_res)
469		bus_release_resource(dev, SYS_RES_MEMORY, NV_RID, sc->nfe_res);
470
471	nfe_free_tx_ring(sc, &sc->txq);
472	nfe_free_rx_ring(sc, &sc->rxq);
473
474	if (sc->nfe_parent_tag)
475		bus_dma_tag_destroy(sc->nfe_parent_tag);
476
477	mtx_destroy(&sc->nfe_mtx);
478
479	return (0);
480}
481
482
483static void
484nfe_miibus_statchg(device_t dev)
485{
486	struct nfe_softc *sc;
487	struct mii_data *mii;
488	u_int32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
489
490	sc = device_get_softc(dev);
491	mii = device_get_softc(sc->nfe_miibus);
492
493	phy = NFE_READ(sc, NFE_PHY_IFACE);
494	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
495
496	seed = NFE_READ(sc, NFE_RNDSEED);
497	seed &= ~NFE_SEED_MASK;
498
499	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
500		phy  |= NFE_PHY_HDX;	/* half-duplex */
501		misc |= NFE_MISC1_HDX;
502	}
503
504	switch (IFM_SUBTYPE(mii->mii_media_active)) {
505	case IFM_1000_T:	/* full-duplex only */
506		link |= NFE_MEDIA_1000T;
507		seed |= NFE_SEED_1000T;
508		phy  |= NFE_PHY_1000T;
509		break;
510	case IFM_100_TX:
511		link |= NFE_MEDIA_100TX;
512		seed |= NFE_SEED_100TX;
513		phy  |= NFE_PHY_100TX;
514		break;
515	case IFM_10_T:
516		link |= NFE_MEDIA_10T;
517		seed |= NFE_SEED_10T;
518		break;
519	}
520
521	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
522
523	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
524	NFE_WRITE(sc, NFE_MISC1, misc);
525	NFE_WRITE(sc, NFE_LINKSPEED, link);
526}
527
528
529static int
530nfe_miibus_readreg(device_t dev, int phy, int reg)
531{
532	struct nfe_softc *sc = device_get_softc(dev);
533	u_int32_t val;
534	int ntries;
535
536	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
537
538	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
539		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
540		DELAY(100);
541	}
542
543	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
544
545	for (ntries = 0; ntries < 1000; ntries++) {
546		DELAY(100);
547		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
548			break;
549	}
550	if (ntries == 1000) {
551		DPRINTFN(2, ("nfe%d: timeout waiting for PHY\n", sc->nfe_unit));
552		return 0;
553	}
554
555	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
556		DPRINTFN(2, ("nfe%d: could not read PHY\n", sc->nfe_unit));
557		return 0;
558	}
559
560	val = NFE_READ(sc, NFE_PHY_DATA);
561	if (val != 0xffffffff && val != 0)
562		sc->mii_phyaddr = phy;
563
564	DPRINTFN(2, ("nfe%d: mii read phy %d reg 0x%x ret 0x%x\n",
565	    sc->nfe_unit, phy, reg, val));
566
567	return val;
568}
569
570
571static int
572nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
573{
574	struct nfe_softc *sc = device_get_softc(dev);
575	u_int32_t ctl;
576	int ntries;
577
578	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
579
580	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
581		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
582		DELAY(100);
583	}
584
585	NFE_WRITE(sc, NFE_PHY_DATA, val);
586	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
587	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
588
589	for (ntries = 0; ntries < 1000; ntries++) {
590		DELAY(100);
591		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
592			break;
593	}
594#ifdef NFE_DEBUG
595	if (nfedebug >= 2 && ntries == 1000)
596		printf("nfe%d: could not write to PHY\n", sc->nfe_unit);
597#endif
598	return 0;
599}
600
601
602static int
603nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
604{
605	struct nfe_desc32 *desc32;
606	struct nfe_desc64 *desc64;
607	struct nfe_rx_data *data;
608	struct nfe_jbuf *jbuf;
609	void **desc;
610	bus_addr_t physaddr;
611	int i, error, descsize;
612
613	if (sc->nfe_flags & NFE_40BIT_ADDR) {
614		desc = (void **)&ring->desc64;
615		descsize = sizeof (struct nfe_desc64);
616	} else {
617		desc = (void **)&ring->desc32;
618		descsize = sizeof (struct nfe_desc32);
619	}
620
621	ring->cur = ring->next = 0;
622	ring->bufsz = MCLBYTES;
623
624	error = bus_dma_tag_create(sc->nfe_parent_tag,
625	   PAGE_SIZE, 0,			/* alignment, boundary */
626	   BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
627	   BUS_SPACE_MAXADDR,			/* highaddr */
628	   NULL, NULL,				/* filter, filterarg */
629	   NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
630	   NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
631	   BUS_DMA_ALLOCNOW,			/* flags */
632	   NULL, NULL,				/* lockfunc, lockarg */
633	   &ring->rx_desc_tag);
634	if (error != 0) {
635		printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
636		goto fail;
637	}
638
639	/* allocate memory for the descriptors */
640	error = bus_dmamem_alloc(ring->rx_desc_tag, (void **)desc,
641	    BUS_DMA_NOWAIT, &ring->rx_desc_map);
642	if (error != 0) {
643		printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit);
644		goto fail;
645	}
646
647	/* map desc to device visible address space */
648	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, *desc,
649	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs,
650	    &ring->rx_desc_segs, BUS_DMA_NOWAIT);
651	if (error != 0) {
652		printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
653		goto fail;
654	}
655
656	bzero(*desc, NFE_RX_RING_COUNT * descsize);
657	ring->rx_desc_addr = ring->rx_desc_segs.ds_addr;
658	ring->physaddr = ring->rx_desc_addr;
659
660	if (sc->nfe_flags & NFE_USE_JUMBO) {
661		ring->bufsz = NFE_JBYTES;
662		if ((error = nfe_jpool_alloc(sc)) != 0) {
663			printf("nfe%d: could not allocate jumbo frames\n",
664			    sc->nfe_unit);
665			goto fail;
666		}
667	}
668
669	/*
670	 * Pre-allocate Rx buffers and populate Rx ring.
671	 */
672	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
673		data = &sc->rxq.data[i];
674
675		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
676		if (data->m == NULL) {
677			printf("nfe%d: could not allocate rx mbuf\n",
678			    sc->nfe_unit);
679			error = ENOMEM;
680			goto fail;
681		}
682
683		if (sc->nfe_flags & NFE_USE_JUMBO) {
684			if ((jbuf = nfe_jalloc(sc)) == NULL) {
685				printf("nfe%d: could not allocate jumbo buffer\n",
686				    sc->nfe_unit);
687				goto fail;
688			}
689			data->m->m_data = (void *)jbuf->buf;
690			data->m->m_len = data->m->m_pkthdr.len = NFE_JBYTES;
691			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, nfe_jfree,
692			    (struct nfe_softc *)sc, 0, EXT_NET_DRV);
693			/* m_adj(data->m, ETHER_ALIGN); */
694			physaddr = jbuf->physaddr;
695		} else {
696			error = bus_dma_tag_create(sc->nfe_parent_tag,
697			    ETHER_ALIGN, 0,	       /* alignment, boundary */
698			    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
699			    BUS_SPACE_MAXADDR,		/* highaddr */
700			    NULL, NULL,		/* filter, filterarg */
701			    MCLBYTES, 1,		/* maxsize, nsegments */
702			    MCLBYTES,			/* maxsegsize */
703			    BUS_DMA_ALLOCNOW,		/* flags */
704			    NULL, NULL,		/* lockfunc, lockarg */
705			    &data->rx_data_tag);
706			if (error != 0) {
707				printf("nfe%d: could not create DMA map\n",
708				    sc->nfe_unit);
709				goto fail;
710			}
711
712			error = bus_dmamap_create(data->rx_data_tag, 0,
713			    &data->rx_data_map);
714			if (error != 0) {
715				printf("nfe%d: could not allocate mbuf cluster\n",
716				    sc->nfe_unit);
717				goto fail;
718			}
719
720			MCLGET(data->m, M_DONTWAIT);
721			if (!(data->m->m_flags & M_EXT)) {
722				error = ENOMEM;
723				goto fail;
724			}
725
726			error = bus_dmamap_load(data->rx_data_tag,
727			    data->rx_data_map, mtod(data->m, void *), MCLBYTES,
728			    nfe_dma_map_segs, &data->rx_data_segs,
729			    BUS_DMA_NOWAIT);
730			if (error != 0) {
731				printf("nfe%d: could not load rx buf DMA map\n",
732				    sc->nfe_unit);
733				goto fail;
734			}
735
736			data->rx_data_addr = data->rx_data_segs.ds_addr;
737			physaddr = data->rx_data_addr;
738
739		}
740
741		if (sc->nfe_flags & NFE_40BIT_ADDR) {
742			desc64 = &sc->rxq.desc64[i];
743#if defined(__LP64__)
744			desc64->physaddr[0] = htole32(physaddr >> 32);
745#endif
746			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
747			desc64->length = htole16(sc->rxq.bufsz);
748			desc64->flags = htole16(NFE_RX_READY);
749		} else {
750			desc32 = &sc->rxq.desc32[i];
751			desc32->physaddr = htole32(physaddr);
752			desc32->length = htole16(sc->rxq.bufsz);
753			desc32->flags = htole16(NFE_RX_READY);
754		}
755
756	}
757
758	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
759	    BUS_DMASYNC_PREWRITE);
760
761	return 0;
762
763fail:	nfe_free_rx_ring(sc, ring);
764
765	return error;
766}
767
768
769static int
770nfe_jpool_alloc(struct nfe_softc *sc)
771{
772	struct nfe_rx_ring *ring = &sc->rxq;
773	struct nfe_jbuf *jbuf;
774	bus_addr_t physaddr;
775	caddr_t buf;
776	int i, error;
777
778	/*
779	 * Allocate a big chunk of DMA'able memory.
780	 */
781	error = bus_dma_tag_create(sc->nfe_parent_tag,
782	   PAGE_SIZE, 0,		/* alignment, boundary */
783	   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
784	   BUS_SPACE_MAXADDR,		/* highaddr */
785	   NULL, NULL,			/* filter, filterarg */
786	   NFE_JPOOL_SIZE, 1,		/* maxsize, nsegments */
787	   NFE_JPOOL_SIZE,		/* maxsegsize */
788	   BUS_DMA_ALLOCNOW,		/* flags */
789	   NULL, NULL,			/* lockfunc, lockarg */
790	   &ring->rx_jumbo_tag);
791	if (error != 0) {
792		printf("nfe%d: could not create jumbo DMA tag\n", sc->nfe_unit);
793		goto fail;
794	}
795
796	error = bus_dmamem_alloc(ring->rx_jumbo_tag, (void **)&ring->jpool,
797	    BUS_DMA_NOWAIT, &ring->rx_jumbo_map);
798	if (error != 0) {
799		printf("nfe%d: could not create jumbo DMA memory\n",
800		    sc->nfe_unit);
801		goto fail;
802	}
803
804	error = bus_dmamap_load(ring->rx_jumbo_tag, ring->rx_jumbo_map,
805	    ring->jpool, NFE_JPOOL_SIZE, nfe_dma_map_segs, &ring->rx_jumbo_segs,
806	    BUS_DMA_NOWAIT);
807	if (error != 0) {
808		printf("nfe%d: could not load jumbo DMA map\n", sc->nfe_unit);
809		goto fail;
810	}
811
812	/* ..and split it into 9KB chunks */
813	SLIST_INIT(&ring->jfreelist);
814
815	buf = ring->jpool;
816	ring->rx_jumbo_addr = ring->rx_jumbo_segs.ds_addr;
817	physaddr = ring->rx_jumbo_addr;
818
819	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
820		jbuf = &ring->jbuf[i];
821
822		jbuf->buf = buf;
823		jbuf->physaddr = physaddr;
824
825		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);
826
827		buf += NFE_JBYTES;
828		physaddr += NFE_JBYTES;
829	}
830
831	return 0;
832
833fail:	nfe_jpool_free(sc);
834	return error;
835}
836
837
838static void
839nfe_jpool_free(struct nfe_softc *sc)
840{
841	struct nfe_rx_ring *ring = &sc->rxq;
842
843	if (ring->jpool != NULL) {
844#if 0
845		bus_dmamem_unmap(ring->rx_jumbo_tag, ring->jpool,
846		    NFE_JPOOL_SIZE);
847#endif
848		bus_dmamem_free(ring->rx_jumbo_tag, &ring->rx_jumbo_segs,
849		    ring->rx_jumbo_map);
850	}
851	if (ring->rx_jumbo_map != NULL) {
852		bus_dmamap_sync(ring->rx_jumbo_tag, ring->rx_jumbo_map,
853		    BUS_DMASYNC_POSTWRITE);
854		bus_dmamap_unload(ring->rx_jumbo_tag, ring->rx_jumbo_map);
855		bus_dmamap_destroy(ring->rx_jumbo_tag, ring->rx_jumbo_map);
856	}
857}
858
859
860static struct nfe_jbuf *
861nfe_jalloc(struct nfe_softc *sc)
862{
863	struct nfe_jbuf *jbuf;
864
865	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
866	if (jbuf == NULL)
867		return NULL;
868	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
869	return jbuf;
870}
871
872
873/*
874 * This is called automatically by the network stack when the mbuf is freed.
875 * Caution must be taken that the NIC might be reset by the time the mbuf is
876 * freed.
877 */
878static void
879nfe_jfree(void *buf, void *arg)
880{
881	struct nfe_softc *sc = arg;
882	struct nfe_jbuf *jbuf;
883	int i;
884
885	/* find the jbuf from the base pointer */
886	i = ((vm_offset_t)buf - (vm_offset_t)sc->rxq.jpool) / NFE_JBYTES;
887	if (i < 0 || i >= NFE_JPOOL_COUNT) {
888		printf("nfe%d: request to free a buffer (%p) not managed by us\n",
889		    sc->nfe_unit, buf);
890		return;
891	}
892	jbuf = &sc->rxq.jbuf[i];
893
894	/* ..and put it back in the free list */
895	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
896}
897
898
899static void
900nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
901{
902	int i;
903
904	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
905		if (sc->nfe_flags & NFE_40BIT_ADDR) {
906			ring->desc64[i].length = htole16(ring->bufsz);
907			ring->desc64[i].flags = htole16(NFE_RX_READY);
908		} else {
909			ring->desc32[i].length = htole16(ring->bufsz);
910			ring->desc32[i].flags = htole16(NFE_RX_READY);
911		}
912	}
913
914	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
915	    BUS_DMASYNC_PREWRITE);
916
917	ring->cur = ring->next = 0;
918}
919
920
921static void
922nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
923{
924	struct nfe_rx_data *data;
925	void *desc;
926	int i, descsize;
927
928	if (sc->nfe_flags & NFE_40BIT_ADDR) {
929		desc = ring->desc64;
930		descsize = sizeof (struct nfe_desc64);
931	} else {
932		desc = ring->desc32;
933		descsize = sizeof (struct nfe_desc32);
934	}
935
936	if (desc != NULL) {
937		bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
938		    BUS_DMASYNC_POSTWRITE);
939		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
940		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
941		bus_dma_tag_destroy(ring->rx_desc_tag);
942	}
943
944	if (sc->nfe_flags & NFE_USE_JUMBO) {
945		nfe_jpool_free(sc);
946	} else {
947		for (i = 0; i < NFE_RX_RING_COUNT; i++) {
948			data = &ring->data[i];
949
950			if (data->rx_data_map != NULL) {
951				bus_dmamap_sync(data->rx_data_tag,
952				    data->rx_data_map, BUS_DMASYNC_POSTREAD);
953				bus_dmamap_unload(data->rx_data_tag,
954				    data->rx_data_map);
955				bus_dmamap_destroy(data->rx_data_tag,
956				    data->rx_data_map);
957				bus_dma_tag_destroy(data->rx_data_tag);
958			}
959
960			if (data->m != NULL)
961				m_freem(data->m);
962		}
963	}
964}
965
966
967static int
968nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
969{
970	int i, error;
971	void **desc;
972	int descsize;
973
974	if (sc->nfe_flags & NFE_40BIT_ADDR) {
975		desc = (void **)&ring->desc64;
976		descsize = sizeof (struct nfe_desc64);
977	} else {
978		desc = (void **)&ring->desc32;
979		descsize = sizeof (struct nfe_desc32);
980	}
981
982	ring->queued = 0;
983	ring->cur = ring->next = 0;
984
985	error = bus_dma_tag_create(sc->nfe_parent_tag,
986	   PAGE_SIZE, 0,			/* alignment, boundary */
987	   BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
988	   BUS_SPACE_MAXADDR,			/* highaddr */
989	   NULL, NULL,				/* filter, filterarg */
990	   NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
991	   NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
992	   BUS_DMA_ALLOCNOW,			/* flags */
993	   NULL, NULL,				/* lockfunc, lockarg */
994	   &ring->tx_desc_tag);
995	if (error != 0) {
996		printf("nfe%d: could not create desc DMA tag\n", sc->nfe_unit);
997		goto fail;
998	}
999
1000	error = bus_dmamem_alloc(ring->tx_desc_tag, (void **)desc,
1001	    BUS_DMA_NOWAIT, &ring->tx_desc_map);
1002	if (error != 0) {
1003		printf("nfe%d: could not create desc DMA map\n", sc->nfe_unit);
1004		goto fail;
1005	}
1006
1007	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, *desc,
1008	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ring->tx_desc_segs,
1009	    BUS_DMA_NOWAIT);
1010	if (error != 0) {
1011		printf("nfe%d: could not load desc DMA map\n", sc->nfe_unit);
1012		goto fail;
1013	}
1014
1015	bzero(*desc, NFE_TX_RING_COUNT * descsize);
1016
1017	ring->tx_desc_addr = ring->tx_desc_segs.ds_addr;
1018	ring->physaddr = ring->tx_desc_addr;
1019
1020	error = bus_dma_tag_create(sc->nfe_parent_tag,
1021	   ETHER_ALIGN, 0,			/* alignment, boundary */
1022	   BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
1023	   BUS_SPACE_MAXADDR,			/* highaddr */
1024	   NULL, NULL,				/* filter, filterarg */
1025	   NFE_JBYTES, NFE_MAX_SCATTER,		/* maxsize, nsegments */
1026	   NFE_JBYTES,				/* maxsegsize */
1027	   BUS_DMA_ALLOCNOW,			/* flags */
1028	   NULL, NULL,				/* lockfunc, lockarg */
1029	   &ring->tx_data_tag);
1030	if (error != 0) {
1031		printf("nfe%d: could not create DMA tag\n", sc->nfe_unit);
1032		goto fail;
1033	}
1034
1035	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1036		error = bus_dmamap_create(ring->tx_data_tag, 0,
1037		    &ring->data[i].tx_data_map);
1038		if (error != 0) {
1039			printf("nfe%d: could not create DMA map\n",
1040			    sc->nfe_unit);
1041			goto fail;
1042		}
1043	}
1044
1045	return 0;
1046
1047fail:	nfe_free_tx_ring(sc, ring);
1048	return error;
1049}
1050
1051
1052static void
1053nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1054{
1055	struct nfe_tx_data *data;
1056	int i;
1057
1058	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1059		if (sc->nfe_flags & NFE_40BIT_ADDR)
1060			ring->desc64[i].flags = 0;
1061		else
1062			ring->desc32[i].flags = 0;
1063
1064		data = &ring->data[i];
1065
1066		if (data->m != NULL) {
1067			bus_dmamap_sync(ring->tx_data_tag, data->active,
1068			    BUS_DMASYNC_POSTWRITE);
1069			bus_dmamap_unload(ring->tx_data_tag, data->active);
1070			m_freem(data->m);
1071			data->m = NULL;
1072		}
1073	}
1074
1075	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1076	    BUS_DMASYNC_PREWRITE);
1077
1078	ring->queued = 0;
1079	ring->cur = ring->next = 0;
1080}
1081
1082
1083static void
1084nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1085{
1086	struct nfe_tx_data *data;
1087	void *desc;
1088	int i, descsize;
1089
1090	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1091		desc = ring->desc64;
1092		descsize = sizeof (struct nfe_desc64);
1093	} else {
1094		desc = ring->desc32;
1095		descsize = sizeof (struct nfe_desc32);
1096	}
1097
1098	if (desc != NULL) {
1099		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1100		    BUS_DMASYNC_POSTWRITE);
1101		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1102		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1103		bus_dma_tag_destroy(ring->tx_desc_tag);
1104	}
1105
1106	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1107		data = &ring->data[i];
1108
1109		if (data->m != NULL) {
1110			bus_dmamap_sync(ring->tx_data_tag, data->active,
1111			    BUS_DMASYNC_POSTWRITE);
1112			bus_dmamap_unload(ring->tx_data_tag, data->active);
1113			m_freem(data->m);
1114		}
1115	}
1116
1117	/* ..and now actually destroy the DMA mappings */
1118	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1119		data = &ring->data[i];
1120		if (data->tx_data_map == NULL)
1121			continue;
1122		bus_dmamap_destroy(ring->tx_data_tag, data->tx_data_map);
1123	}
1124
1125	bus_dma_tag_destroy(ring->tx_data_tag);
1126}
1127
1128#ifdef DEVICE_POLLING
1129static poll_handler_t nfe_poll;
1130
1131
1132static void
1133nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1134{
1135	struct nfe_softc *sc = ifp->if_softc;
1136
1137	NFE_LOCK(sc);
1138	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1139		nfe_poll_locked(ifp, cmd, count);
1140	NFE_UNLOCK(sc);
1141}
1142
1143
1144static void
1145nfe_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
1146{
1147	struct nfe_softc *sc = ifp->if_softc;
1148	u_int32_t r;
1149
1150	NFE_LOCK_ASSERT(sc);
1151
1152	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1153		return;
1154	}
1155
1156	sc->rxcycles = count;
1157	nfe_rxeof(sc);
1158	nfe_txeof(sc);
1159	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1160		nfe_start_locked(ifp);
1161
1162	if (cmd == POLL_AND_CHECK_STATUS) {
1163		if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
1164			return;
1165		}
1166		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
1167
1168		if (r & NFE_IRQ_LINK) {
1169			NFE_READ(sc, NFE_PHY_STATUS);
1170			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1171			DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
1172		}
1173	}
1174}
1175#endif /* DEVICE_POLLING */
1176
1177
1178static int
1179nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1180{
1181	struct nfe_softc *sc = ifp->if_softc;
1182	struct ifreq *ifr = (struct ifreq *) data;
1183	struct mii_data *mii;
1184	int error = 0;
1185
1186	switch (cmd) {
1187	case SIOCSIFMTU:
1188		if (ifr->ifr_mtu < ETHERMIN ||
1189		    ((sc->nfe_flags & NFE_USE_JUMBO) &&
1190		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
1191		    (!(sc->nfe_flags & NFE_USE_JUMBO) &&
1192		    ifr->ifr_mtu > ETHERMTU)) {
1193			error = EINVAL;
1194		} else if (ifp->if_mtu != ifr->ifr_mtu) {
1195			ifp->if_mtu = ifr->ifr_mtu;
1196			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1197			nfe_init(sc);
1198		}
1199		break;
1200	case SIOCSIFFLAGS:
1201		NFE_LOCK(sc);
1202		if (ifp->if_flags & IFF_UP) {
1203			/*
1204			 * If only the PROMISC or ALLMULTI flag changes, then
1205			 * don't do a full re-init of the chip, just update
1206			 * the Rx filter.
1207			 */
1208			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1209			    ((ifp->if_flags ^ sc->nfe_if_flags) &
1210			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1211				nfe_setmulti(sc);
1212			else
1213				nfe_init_locked(sc);
1214		} else {
1215			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1216				nfe_stop(ifp, 1);
1217		}
1218		sc->nfe_if_flags = ifp->if_flags;
1219		NFE_UNLOCK(sc);
1220		error = 0;
1221		break;
1222	case SIOCADDMULTI:
1223	case SIOCDELMULTI:
1224		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1225			NFE_LOCK(sc);
1226			nfe_setmulti(sc);
1227			NFE_UNLOCK(sc);
1228			error = 0;
1229		}
1230		break;
1231	case SIOCSIFMEDIA:
1232	case SIOCGIFMEDIA:
1233		mii = device_get_softc(sc->nfe_miibus);
1234		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1235		break;
1236	case SIOCSIFCAP:
1237	{
1238		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1239#ifdef DEVICE_POLLING
1240		if (mask & IFCAP_POLLING) {
1241			if (ifr->ifr_reqcap & IFCAP_POLLING) {
1242				error = ether_poll_register(nfe_poll, ifp);
1243				if (error)
1244					return(error);
1245				NFE_LOCK(sc);
1246				NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1247				ifp->if_capenable |= IFCAP_POLLING;
1248				NFE_UNLOCK(sc);
1249			} else {
1250				error = ether_poll_deregister(ifp);
1251				/* Enable interrupt even in error case */
1252				NFE_LOCK(sc);
1253				NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1254				ifp->if_capenable &= ~IFCAP_POLLING;
1255				NFE_UNLOCK(sc);
1256			}
1257		}
1258#endif /* DEVICE_POLLING */
1259		if (mask & IFCAP_HWCSUM) {
1260			ifp->if_capenable ^= IFCAP_HWCSUM;
1261			if (IFCAP_HWCSUM & ifp->if_capenable &&
1262			    IFCAP_HWCSUM & ifp->if_capabilities)
1263				ifp->if_hwassist = NFE_CSUM_FEATURES;
1264			else
1265				ifp->if_hwassist = 0;
1266		}
1267	}
1268		break;
1269
1270	default:
1271		error = ether_ioctl(ifp, cmd, data);
1272		break;
1273	}
1274
1275	return error;
1276}
1277
1278
1279static void
1280nfe_intr(void *arg)
1281{
1282	struct nfe_softc *sc = arg;
1283	struct ifnet *ifp = sc->nfe_ifp;
1284	u_int32_t r;
1285
1286	NFE_LOCK(sc);
1287
1288#ifdef DEVICE_POLLING
1289	if (ifp->if_capenable & IFCAP_POLLING) {
1290		NFE_UNLOCK(sc);
1291		return;
1292	}
1293#endif
1294
1295	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0) {
1296		NFE_UNLOCK(sc);
1297		return;	/* not for us */
1298	}
1299	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
1300
1301	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
1302
1303	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1304
1305	if (r & NFE_IRQ_LINK) {
1306		NFE_READ(sc, NFE_PHY_STATUS);
1307		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1308		DPRINTF(("nfe%d: link state changed\n", sc->nfe_unit));
1309	}
1310
1311	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1312		/* check Rx ring */
1313		nfe_rxeof(sc);
1314		/* check Tx ring */
1315		nfe_txeof(sc);
1316	}
1317
1318	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1319
1320	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
1321	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1322		nfe_start_locked(ifp);
1323
1324	NFE_UNLOCK(sc);
1325
1326	return;
1327}
1328
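/*
 * Descriptor sync helpers.  They currently sync the whole descriptor map;
 * the individual descriptor/range arguments are unused.
 */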
1329
1330static void
1331nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1332{
1333
1334	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1335}
1336
1337
1338static void
1339nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1340{
1341
1342	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1343}
1344
1345
1346static void
1347nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
1348{
1349
1350	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1351}
1352
1353
1354static void
1355nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
1356{
1357
1358	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, ops);
1359}
1360
1361
1362static void
1363nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
1364{
1365
1366	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1367}
1368
1369
1370static void
1371nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
1372{
1373
1374	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, ops);
1375}
1376
1377
1378static void
1379nfe_rxeof(struct nfe_softc *sc)
1380{
1381	struct ifnet *ifp = sc->nfe_ifp;
1382	struct nfe_desc32 *desc32=NULL;
1383	struct nfe_desc64 *desc64=NULL;
1384	struct nfe_rx_data *data;
1385	struct nfe_jbuf *jbuf;
1386	struct mbuf *m, *mnew;
1387	bus_addr_t physaddr;
1388	u_int16_t flags;
1389	int error, len;
1390#if NVLAN > 1
1391	u_int16_t vlan_tag = 0;
1392	int have_tag = 0;
1393#endif
1394
1395	NFE_LOCK_ASSERT(sc);
1396
1397	for (;;) {
1398
1399#ifdef DEVICE_POLLING
1400		if (ifp->if_capenable & IFCAP_POLLING) {
1401			if (sc->rxcycles <= 0)
1402				break;
1403			sc->rxcycles--;
1404		}
1405#endif
1406
1407		data = &sc->rxq.data[sc->rxq.cur];
1408
1409		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1410			desc64 = &sc->rxq.desc64[sc->rxq.cur];
1411			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
1412
1413			flags = letoh16(desc64->flags);
1414			len = letoh16(desc64->length) & 0x3fff;
1415
1416#if NVLAN > 1
1417			if (flags & NFE_TX_VLAN_TAG) {
1418				have_tag = 1;
1419				vlan_tag = desc64->vtag;
1420			}
1421#endif
1422
1423		} else {
1424			desc32 = &sc->rxq.desc32[sc->rxq.cur];
1425			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
1426
1427			flags = letoh16(desc32->flags);
1428			len = letoh16(desc32->length) & 0x3fff;
1429		}
1430
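		/* NFE_RX_READY still set: the chip has not filled this slot yet. */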
1431		if (flags & NFE_RX_READY)
1432			break;
1433
1434		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1435			if (!(flags & NFE_RX_VALID_V1))
1436				goto skip;
1437			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
1438				flags &= ~NFE_RX_ERROR;
1439				len--;	/* fix buffer length */
1440			}
1441		} else {
1442			if (!(flags & NFE_RX_VALID_V2))
1443				goto skip;
1444
1445			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
1446				flags &= ~NFE_RX_ERROR;
1447				len--;	/* fix buffer length */
1448			}
1449		}
1450
1451		if (flags & NFE_RX_ERROR) {
1452			ifp->if_ierrors++;
1453			goto skip;
1454		}
1455
1456		/*
1457		 * Try to allocate a new mbuf for this ring element and load
1458		 * it before processing the current mbuf. If the ring element
1459		 * cannot be loaded, drop the received packet and reuse the
1460		 * old mbuf. In the unlikely case that the old mbuf can't be
1461		 * reloaded either, explicitly panic.
1462		 */
1463		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
1464		if (mnew == NULL) {
1465			ifp->if_ierrors++;
1466			goto skip;
1467		}
1468
1469		if (sc->nfe_flags & NFE_USE_JUMBO) {
1470			if ((jbuf = nfe_jalloc(sc)) == NULL) {
1471				m_freem(mnew);
1472				ifp->if_ierrors++;
1473				goto skip;
1474			}
1475			mnew->m_data = (void *)jbuf->buf;
1476			mnew->m_len = mnew->m_pkthdr.len = NFE_JBYTES;
1477			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, nfe_jfree,
1478			    (struct nfe_softc *)sc, 0, EXT_NET_DRV);
1479
1480			bus_dmamap_sync(sc->rxq.rx_jumbo_tag,
1481			    sc->rxq.rx_jumbo_map, BUS_DMASYNC_POSTREAD);
1482			physaddr = jbuf->physaddr;
1483		} else {
1484			MCLGET(mnew, M_DONTWAIT);
1485			if (!(mnew->m_flags & M_EXT)) {
1486				m_freem(mnew);
1487				ifp->if_ierrors++;
1488				goto skip;
1489			}
1490
1491			bus_dmamap_sync(data->rx_data_tag, data->rx_data_map,
1492			    BUS_DMASYNC_POSTREAD);
1493			bus_dmamap_unload(data->rx_data_tag, data->rx_data_map);
1494			error = bus_dmamap_load(data->rx_data_tag,
1495			    data->rx_data_map, mtod(mnew, void *), MCLBYTES,
1496			    nfe_dma_map_segs, &data->rx_data_segs,
1497			    BUS_DMA_NOWAIT);
1498			if (error != 0) {
1499				m_freem(mnew);
1500
1501				/* try to reload the old mbuf */
1502				error = bus_dmamap_load(data->rx_data_tag,
1503				    data->rx_data_map, mtod(data->m, void *),
1504				    MCLBYTES, nfe_dma_map_segs,
1505				    &data->rx_data_segs, BUS_DMA_NOWAIT);
1506				if (error != 0) {
1507					/* very unlikely that it will fail.. */
1508					panic("nfe%d: could not load old rx mbuf",
1509					    sc->nfe_unit);
1510				}
1511				ifp->if_ierrors++;
1512				goto skip;
1513			}
1514			data->rx_data_addr = data->rx_data_segs.ds_addr;
1515			physaddr = data->rx_data_addr;
1516		}
1517
1518		/*
1519		 * New mbuf successfully loaded, update Rx ring and continue
1520		 * processing.
1521		 */
1522		m = data->m;
1523		data->m = mnew;
1524
1525		/* finalize mbuf */
1526		m->m_pkthdr.len = m->m_len = len;
1527		m->m_pkthdr.rcvif = ifp;
1528
1529
1530#if defined(NFE_CSUM)
1531		if ((sc->nfe_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) {
1532			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1533			if (flags & NFE_RX_IP_CSUMOK_V2) {
1534				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1535			}
1536			if (flags & NFE_RX_UDP_CSUMOK_V2 ||
1537			    flags & NFE_RX_TCP_CSUMOK_V2) {
1538				m->m_pkthdr.csum_flags |=
1539				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1540				m->m_pkthdr.csum_data = 0xffff;
1541			}
1542		}
1543#endif
1544
1545#if NVLAN > 1
1546		if (have_tag) {
1547			m->m_pkthdr.ether_vtag = vlan_tag;
1548			m->m_flags |= M_VLANTAG;
1549		}
1550#endif
1551
1552		ifp->if_ipackets++;
1553
1554		NFE_UNLOCK(sc);
1555		(*ifp->if_input)(ifp, m);
1556		NFE_LOCK(sc);
1557
1558		/* update mapping address in h/w descriptor */
1559		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1560#if defined(__LP64__)
1561			desc64->physaddr[0] = htole32(physaddr >> 32);
1562#endif
1563			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
1564		} else {
1565			desc32->physaddr = htole32(physaddr);
1566		}
1567
1568skip:		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1569			desc64->length = htole16(sc->rxq.bufsz);
1570			desc64->flags = htole16(NFE_RX_READY);
1571
1572			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
1573		} else {
1574			desc32->length = htole16(sc->rxq.bufsz);
1575			desc32->flags = htole16(NFE_RX_READY);
1576
1577			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
1578		}
1579
1580		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
1581	}
1582}
1583
1584
1585static void
1586nfe_txeof(struct nfe_softc *sc)
1587{
1588	struct ifnet *ifp = sc->nfe_ifp;
1589	struct nfe_desc32 *desc32;
1590	struct nfe_desc64 *desc64;
1591	struct nfe_tx_data *data = NULL;
1592	u_int16_t flags;
1593
1594	NFE_LOCK_ASSERT(sc);
1595
1596	while (sc->txq.next != sc->txq.cur) {
1597		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1598			desc64 = &sc->txq.desc64[sc->txq.next];
1599			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
1600
1601			flags = letoh16(desc64->flags);
1602		} else {
1603			desc32 = &sc->txq.desc32[sc->txq.next];
1604			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
1605
1606			flags = letoh16(desc32->flags);
1607		}
1608
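		/* NFE_TX_VALID still set: the chip still owns this descriptor. */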
1609		if (flags & NFE_TX_VALID)
1610			break;
1611
1612		data = &sc->txq.data[sc->txq.next];
1613
1614		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
1615			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
1616				goto skip;
1617
1618			if ((flags & NFE_TX_ERROR_V1) != 0) {
1619				printf("nfe%d: tx v1 error 0x%4b\n",
1620				    sc->nfe_unit, flags, NFE_V1_TXERR);
1621
1622				ifp->if_oerrors++;
1623			} else
1624				ifp->if_opackets++;
1625		} else {
1626			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
1627				goto skip;
1628
1629			if ((flags & NFE_TX_ERROR_V2) != 0) {
1630				printf("nfe%d: tx v2 error 0x%4b\n",
1631				    sc->nfe_unit, flags, NFE_V2_TXERR);
1632
1633				ifp->if_oerrors++;
1634			} else
1635				ifp->if_opackets++;
1636		}
1637
1638		if (data->m == NULL) {	/* should not get there */
1639			printf("nfe%d: last fragment bit w/o associated mbuf!\n",
1640			    sc->nfe_unit);
1641			goto skip;
1642		}
1643
1644		/* last fragment of the mbuf chain transmitted */
1645		bus_dmamap_sync(sc->txq.tx_data_tag, data->active,
1646		    BUS_DMASYNC_POSTWRITE);
1647		bus_dmamap_unload(sc->txq.tx_data_tag, data->active);
1648		m_freem(data->m);
1649		data->m = NULL;
1650
1651		ifp->if_timer = 0;
1652
1653skip:		sc->txq.queued--;
1654		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
1655	}
1656
1657	if (data != NULL) {	/* at least one slot freed */
1658		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1659		nfe_start_locked(ifp);
1660	}
1661}
1662
1663
1664static int
1665nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
1666{
1667	struct nfe_desc32 *desc32=NULL;
1668	struct nfe_desc64 *desc64=NULL;
1669	struct nfe_tx_data *data=NULL;
1670	bus_dmamap_t map;
1671	bus_dma_segment_t segs[NFE_MAX_SCATTER];
1672	int error, i, nsegs;
1673	u_int16_t flags = NFE_TX_VALID;
1674
1675	map = sc->txq.data[sc->txq.cur].tx_data_map;
1676
1677	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, m0, segs,
1678	    &nsegs, BUS_DMA_NOWAIT);
1679
1680	if (error != 0) {
1681		printf("nfe%d: could not map mbuf (error %d)\n", sc->nfe_unit,
1682		    error);
1683		return error;
1684	}
1685
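	/* Make sure the mbuf chain fits into the remaining Tx descriptors. */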
1686	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 1) {
1687		bus_dmamap_unload(sc->txq.tx_data_tag, map);
1688		return ENOBUFS;
1689	}
1690
1691
1692#ifdef NFE_CSUM
1693	if (m0->m_pkthdr.csum_flags & CSUM_IP)
1694		flags |= NFE_TX_IP_CSUM;
1695	if (m0->m_pkthdr.csum_flags & CSUM_TCP)
1696		flags |= NFE_TX_TCP_CSUM;
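	/* The same descriptor bit appears to cover both TCP and UDP checksums. */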
1697	if (m0->m_pkthdr.csum_flags & CSUM_UDP)
1698		flags |= NFE_TX_TCP_CSUM;
1699#endif
1700
1701	for (i = 0; i < nsegs; i++) {
1702		data = &sc->txq.data[sc->txq.cur];
1703
1704		if (sc->nfe_flags & NFE_40BIT_ADDR) {
1705			desc64 = &sc->txq.desc64[sc->txq.cur];
1706#if defined(__LP64__)
1707			desc64->physaddr[0] = htole32(segs[i].ds_addr >> 32);
1708#endif
1709			desc64->physaddr[1] = htole32(segs[i].ds_addr &
1710			    0xffffffff);
1711			desc64->length = htole16(segs[i].ds_len - 1);
1712			desc64->flags = htole16(flags);
1713#if NVLAN > 0
1714			if (m0->m_flags & M_VLANTAG)
1715				desc64->vtag = htole32(NFE_TX_VTAG |
1716				    m0->m_pkthdr.ether_vtag);
1717#endif
1718		} else {
1719			desc32 = &sc->txq.desc32[sc->txq.cur];
1720
1721			desc32->physaddr = htole32(segs[i].ds_addr);
1722			desc32->length = htole16(segs[i].ds_len - 1);
1723			desc32->flags = htole16(flags);
1724		}
1725
1726		/* csum flags and vtag belong to the first fragment only */
1727		if (nsegs > 1) {
1728			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
1729		}
1730
1731		sc->txq.queued++;
1732		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
1733	}
1734
1735	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
1736	if (sc->nfe_flags & NFE_40BIT_ADDR) {
1737		flags |= NFE_TX_LASTFRAG_V2;
1738		desc64->flags = htole16(flags);
1739	} else {
1740		if (sc->nfe_flags & NFE_JUMBO_SUP)
1741			flags |= NFE_TX_LASTFRAG_V2;
1742		else
1743			flags |= NFE_TX_LASTFRAG_V1;
1744		desc32->flags = htole16(flags);
1745	}
1746
1747	data->m = m0;
1748	data->active = map;
1749	data->nsegs = nsegs;
1750
1751	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
1752
1753	return 0;
1754}
1755
1756
1757static void
1758nfe_setmulti(struct nfe_softc *sc)
1759{
1760	struct ifnet *ifp = sc->nfe_ifp;
1761	struct ifmultiaddr *ifma;
1762	int i;
1763	u_int32_t filter = NFE_RXFILTER_MAGIC;
1764	u_int8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1765	u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
1766		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1767	};
1768
1769	NFE_LOCK_ASSERT(sc);
1770
1771	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
1772		bzero(addr, ETHER_ADDR_LEN);
1773		bzero(mask, ETHER_ADDR_LEN);
1774		goto done;
1775	}
1776
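	/*
	 * Build a single address/mask filter: addr accumulates the bits that
	 * are set in every multicast address, mask the bits whose value is
	 * identical across all of them.
	 */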
1777	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1778	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1779
1780	IF_ADDR_LOCK(ifp);
1781	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1782		u_char *addrp;
1783
1784		if (ifma->ifma_addr->sa_family != AF_LINK)
1785			continue;
1786
1787		addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
1788		for (i = 0; i < ETHER_ADDR_LEN; i++) {
1789			u_int8_t mcaddr = addrp[i];
1790			addr[i] &= mcaddr;
1791			mask[i] &= ~mcaddr;
1792		}
1793	}
1794	IF_ADDR_UNLOCK(ifp);
1795
1796	for (i = 0; i < ETHER_ADDR_LEN; i++) {
1797		mask[i] |= addr[i];
1798	}
1799
1800done:
1801	addr[0] |= 0x01;	/* make sure multicast bit is set */
1802
1803	NFE_WRITE(sc, NFE_MULTIADDR_HI,
1804	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1805	NFE_WRITE(sc, NFE_MULTIADDR_LO,
1806	    addr[5] <<  8 | addr[4]);
1807	NFE_WRITE(sc, NFE_MULTIMASK_HI,
1808	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1809	NFE_WRITE(sc, NFE_MULTIMASK_LO,
1810	    mask[5] <<  8 | mask[4]);
1811
1812	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
1813	NFE_WRITE(sc, NFE_RXFILTER, filter);
1814}
1815
1816
1817static void
1818nfe_start(struct ifnet *ifp)
1819{
1820	struct nfe_softc *sc;
1821
1822	sc = ifp->if_softc;
1823	NFE_LOCK(sc);
1824	nfe_start_locked(ifp);
1825	NFE_UNLOCK(sc);
1826}
1827
1828
1829static void
1830nfe_start_locked(struct ifnet *ifp)
1831{
1832	struct nfe_softc *sc = ifp->if_softc;
1833	struct mbuf *m0;
1834	int old = sc->txq.cur;
1835
1836	if (!sc->nfe_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
1837		return;
1838	}
1839
1840	for (;;) {
1841		IFQ_POLL(&ifp->if_snd, m0);
1842		if (m0 == NULL)
1843			break;
1844
1845		if (nfe_encap(sc, m0) != 0) {
1846			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1847			break;
1848		}
1849
1850		/* packet put in h/w queue, remove from s/w queue */
1851		IFQ_DEQUEUE(&ifp->if_snd, m0);
1852
1853		BPF_MTAP(ifp, m0);
1854	}
1855	if (sc->txq.cur == old)	{ /* nothing sent */
1856		return;
1857	}
1858
1859	if (sc->nfe_flags & NFE_40BIT_ADDR)
1860		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1861	else
1862		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1863
1864	/* kick Tx */
1865	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1866
1867	/*
1868	 * Set a timeout in case the chip goes out to lunch.
1869	 */
1870	ifp->if_timer = 5;
1871
1872	return;
1873}
1874
1875
1876static void
1877nfe_watchdog(struct ifnet *ifp)
1878{
1879	struct nfe_softc *sc = ifp->if_softc;
1880
1881	printf("nfe%d: watchdog timeout\n", sc->nfe_unit);
1882
1883	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1884	nfe_init(sc);
1885	ifp->if_oerrors++;
1886
1887	return;
1888}
1889
1890
1891static void
1892nfe_init(void *xsc)
1893{
1894	struct nfe_softc *sc = xsc;
1895
1896	NFE_LOCK(sc);
1897	nfe_init_locked(sc);
1898	NFE_UNLOCK(sc);
1899
1900	return;
1901}
1902
1903
1904static void
1905nfe_init_locked(void *xsc)
1906{
1907	struct nfe_softc *sc = xsc;
1908	struct ifnet *ifp = sc->nfe_ifp;
1909	struct mii_data *mii;
1910	u_int32_t tmp;
1911
1912	NFE_LOCK_ASSERT(sc);
1913
1914	mii = device_get_softc(sc->nfe_miibus);
1915
1916	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1917		return;
1918	}
1919
1920	nfe_stop(ifp, 0);
1921
1922	NFE_WRITE(sc, NFE_TX_UNK, 0);
1923	NFE_WRITE(sc, NFE_STATUS, 0);
1924
1925	sc->rxtxctl = NFE_RXTX_BIT2;
1926	if (sc->nfe_flags & NFE_40BIT_ADDR)
1927		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1928	else if (sc->nfe_flags & NFE_JUMBO_SUP)
1929		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1930#ifdef NFE_CSUM
1931	if (sc->nfe_flags & NFE_HW_CSUM)
1932		sc->rxtxctl |= NFE_RXTX_RXCSUM;
1933#endif
1934
1935#if NVLAN > 0
1936	/*
1937	 * Although the adapter is capable of stripping VLAN tags from received
1938	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
1939	 * purpose.  This will be done in software by our network stack.
1940	 */
1941	if (sc->nfe_flags & NFE_HW_VLAN)
1942		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
1943#endif
1944
1945	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1946	DELAY(10);
1947	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1948
1949#if NVLAN
1950	if (sc->nfe_flags & NFE_HW_VLAN)
1951		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1952#endif
1953
1954	NFE_WRITE(sc, NFE_SETUP_R6, 0);
1955
1956	/* set MAC address */
1957	nfe_set_macaddr(sc, sc->eaddr);
1958
1959	/* tell MAC where rings are in memory */
1960#ifdef __LP64__
1961	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1962#endif
1963	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1964#ifdef __LP64__
1965	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1966#endif
1967	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1968
1969	NFE_WRITE(sc, NFE_RING_SIZE,
1970	    (NFE_RX_RING_COUNT - 1) << 16 |
1971	    (NFE_TX_RING_COUNT - 1));
1972
1973	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1974
1975	/* force MAC to wakeup */
1976	tmp = NFE_READ(sc, NFE_PWR_STATE);
1977	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1978	DELAY(10);
1979	tmp = NFE_READ(sc, NFE_PWR_STATE);
1980	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1981
1982#if 1
1983	/* configure interrupts coalescing/mitigation */
1984	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1985#else
1986	/* no interrupt mitigation: one interrupt per packet */
1987	NFE_WRITE(sc, NFE_IMTIMER, 970);
1988#endif
1989
1990	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1991	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1992	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1993
1994	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1995	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1996
1997	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1998	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
1999
2000	sc->rxtxctl &= ~NFE_RXTX_BIT2;
2001	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2002	DELAY(10);
2003	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2004
2005	/* set Rx filter */
2006	nfe_setmulti(sc);
2007
2008	nfe_ifmedia_upd(ifp);
2009
2010	nfe_tick_locked(sc);
2011
2012	/* enable Rx */
2013	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2014
2015	/* enable Tx */
2016	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2017
2018	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2019
2020#ifdef DEVICE_POLLING
2021	if (ifp->if_capenable & IFCAP_POLLING)
2022		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
2023	else
2024#endif
2025	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); /* enable interrupts */
2026
2027	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2028	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2029
2030	sc->nfe_link = 0;
2031
2032	return;
2033}
2034
2035
2036static void
2037nfe_stop(struct ifnet *ifp, int disable)
2038{
2039	struct nfe_softc *sc = ifp->if_softc;
2040	struct mii_data  *mii;
2041
2042	NFE_LOCK_ASSERT(sc);
2043
2044	ifp->if_timer = 0;
2045	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2046
2047	mii = device_get_softc(sc->nfe_miibus);
2048
2049	callout_stop(&sc->nfe_stat_ch);
2050
2051	/* abort Tx */
2052	NFE_WRITE(sc, NFE_TX_CTL, 0);
2053
2054	/* disable Rx */
2055	NFE_WRITE(sc, NFE_RX_CTL, 0);
2056
2057	/* disable interrupts */
2058	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
2059
2060	sc->nfe_link = 0;
2061
2062	/* reset Tx and Rx rings */
2063	nfe_reset_tx_ring(sc, &sc->txq);
2064	nfe_reset_rx_ring(sc, &sc->rxq);
2065
2066	return;
2067}
2068
2069
2070static int
2071nfe_ifmedia_upd(struct ifnet *ifp)
2072{
2073	struct nfe_softc *sc = ifp->if_softc;
2074
2075	NFE_LOCK(sc);
2076	nfe_ifmedia_upd_locked(ifp);
2077	NFE_UNLOCK(sc);
2078	return (0);
2079}
2080
2081
2082static int
2083nfe_ifmedia_upd_locked(struct ifnet *ifp)
2084{
2085	struct nfe_softc *sc = ifp->if_softc;
2086	struct mii_data *mii;
2087
2088	NFE_LOCK_ASSERT(sc);
2089
2090	mii = device_get_softc(sc->nfe_miibus);
2091
2092	if (mii->mii_instance) {
2093		struct mii_softc *miisc;
2094		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2095		    miisc = LIST_NEXT(miisc, mii_list)) {
2096			mii_phy_reset(miisc);
2097		}
2098	}
2099	mii_mediachg(mii);
2100
2101	return (0);
2102}
2103
2104
2105static void
2106nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2107{
2108	struct nfe_softc *sc;
2109	struct mii_data *mii;
2110
2111	sc = ifp->if_softc;
2112
2113	NFE_LOCK(sc);
2114	mii = device_get_softc(sc->nfe_miibus);
2115	mii_pollstat(mii);
2116	NFE_UNLOCK(sc);
2117
2118	ifmr->ifm_active = mii->mii_media_active;
2119	ifmr->ifm_status = mii->mii_media_status;
2120
2121	return;
2122}
2123
2124
2125static void
2126nfe_tick(void *xsc)
2127{
2128	struct nfe_softc *sc;
2129
2130	sc = xsc;
2131
2132	NFE_LOCK(sc);
2133	nfe_tick_locked(sc);
2134	NFE_UNLOCK(sc);
2135}
2136
2137
2138static void
2139nfe_tick_locked(struct nfe_softc *arg)
2140{
2141	struct nfe_softc *sc;
2142	struct mii_data *mii;
2143	struct ifnet *ifp;
2144
2145	sc = arg;
2146
2147	NFE_LOCK_ASSERT(sc);
2148
2149	ifp = sc->nfe_ifp;
2150
2151	mii = device_get_softc(sc->nfe_miibus);
2152	mii_tick(mii);
2153
2154	if (!sc->nfe_link) {
2155		if (mii->mii_media_status & IFM_ACTIVE &&
2156		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2157			sc->nfe_link++;
2158			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T
2159			    && bootverbose)
2160				if_printf(sc->nfe_ifp, "gigabit link up\n");
2161			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2162				nfe_start_locked(ifp);
2163		}
2164	}
2165	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2166
2167	return;
2168}
2169
2170
2171static void
2172nfe_shutdown(device_t dev)
2173{
2174	struct nfe_softc *sc;
2175	struct ifnet *ifp;
2176
2177	sc = device_get_softc(dev);
2178
2179	NFE_LOCK(sc);
2180	ifp = sc->nfe_ifp;
2181	nfe_stop(ifp,0);
2182	/* nfe_reset(sc); */
2183	NFE_UNLOCK(sc);
2184
2185	return;
2186}
2187
2188
2189static void
2190nfe_get_macaddr(struct nfe_softc *sc, u_char *addr)
2191{
2192	uint32_t tmp;
2193
2194	tmp = NFE_READ(sc, NFE_MACADDR_LO);
2195	addr[0] = (tmp >> 8) & 0xff;
2196	addr[1] = (tmp & 0xff);
2197
2198	tmp = NFE_READ(sc, NFE_MACADDR_HI);
2199	addr[2] = (tmp >> 24) & 0xff;
2200	addr[3] = (tmp >> 16) & 0xff;
2201	addr[4] = (tmp >>  8) & 0xff;
2202	addr[5] = (tmp & 0xff);
2203}
2204
2205
2206static void
2207nfe_set_macaddr(struct nfe_softc *sc, u_char *addr)
2208{
2209
2210	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] <<  8 | addr[4]);
2211	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2212	    addr[1] << 8 | addr[0]);
2213}
2214
2215
2216/*
2217 * Map a single buffer address.
2218 */
2219
2220static void
2221nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2222{
2226
2227	if (error)
2228		return;
2229
2230	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2231
2232	*(bus_dma_segment_t *)arg = *segs;
2233
2234	return;
2235}
2236