/*	$NetBSD: if_nfe.c,v 1.55 2012/01/30 19:41:20 drochner Exp $	*/
/*	$OpenBSD: if_nfe.c,v 1.77 2008/02/05 16:52:50 brad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.55 2012/01/30 19:41:20 drochner Exp $");

#include "opt_inet.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#endif

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

static int nfe_ifflags_cb(struct ethercom *);

int	nfe_match(device_t, cfdata_t, void *);
void	nfe_attach(device_t, device_t, void *);
int	nfe_detach(device_t, int);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(device_t);
int	nfe_miibus_readreg(device_t, int, int);
void	nfe_miibus_writereg(device_t, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, void *);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct	nfe_jbuf *nfe_jalloc(struct nfe_softc *, int);
void	nfe_jfree(struct mbuf *, void *, size_t, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
void	nfe_poweron(device_t);
bool	nfe_resume(device_t, const pmf_qual_t *);

CFATTACH_DECL_NEW(nfe, sizeof(struct nfe_softc),
    nfe_match, nfe_attach, nfe_detach, NULL);

/* #define NFE_NO_JUMBO */

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* deal with naming differences */

#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \
	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN

#define	PCI_PRODUCT_NVIDIA_CK804_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
#define	PCI_PRODUCT_NVIDIA_CK804_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2

#define	PCI_PRODUCT_NVIDIA_MCP51_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
#define	PCI_PRODUCT_NVIDIA_MCP51_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2

#ifdef	_LP64
#define	__LP64__ 1
#endif
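
/*
 * __LP64__ is made available here so the 64-bit descriptor code below can
 * test a single macro: on LP64 platforms the high 32 bits of a 40-bit DMA
 * address are written into physaddr[0], otherwise only the low 32 bits
 * are used.
 */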

const struct nfe_product {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};

int
nfe_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct nfe_product *np;
	int i;

	for (i = 0; i < __arraycount(nfe_devices); i++) {
		np = &nfe_devices[i];
		if (PCI_VENDOR(pa->pa_id) == np->vendor &&
		    PCI_PRODUCT(pa->pa_id) == np->product)
			return 1;
	}
	return 0;
}

void
nfe_attach(device_t parent, device_t self, void *aux)
{
	struct nfe_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	pcireg_t memtype, csr;
	int mii_flags = 0;

	sc->sc_dev = self;
	sc->sc_pc = pa->pa_pc;
	pci_aprint_devinfo(pa, NULL);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &sc->sc_mems) == 0)
			break;
		/* FALLTHROUGH */
	default:
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		mii_flags = MIIF_DOPAUSE;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

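	/*
	 * Chips flagged NFE_40BIT_ADDR use the 64-bit descriptor format and
	 * can DMA to/from buffers above 4GB, so prefer the 64-bit DMA tag
	 * when the bus supports it.
	 */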
	if (pci_dma64_available(pa) && (sc->sc_flags & NFE_40BIT_ADDR) != 0)
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	nfe_poweron(self);

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames on adapters that support them */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->sc_flags |= NFE_CORRECT_MACADDR;

	nfe_get_macaddr(sc, sc->sc_enaddr);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		aprint_error_dev(self, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->rxq.mtx, MUTEX_DEFAULT, IPL_NET);

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		aprint_error_dev(self, "could not allocate Rx ring\n");
		nfe_free_tx_ring(sc, &sc->txq);
		goto fail;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_stop = nfe_stop;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	if (sc->sc_flags & NFE_USE_JUMBO)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->sc_ethercom.ec_capabilities |=
			ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, nfe_ifflags_cb);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc);

	if (pmf_device_register(self, NULL, nfe_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_mems != 0) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}
}

int
nfe_detach(device_t self, int flags)
{
	struct nfe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	s = splnet();

	nfe_stop(ifp, 1);

	pmf_device_deregister(self);
	callout_destroy(&sc->sc_tick_ch);
	ether_ifdetach(ifp);
	if_detach(ifp);
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	nfe_free_rx_ring(sc, &sc->rxq);
	mutex_destroy(&sc->rxq.mtx);
	nfe_free_tx_ring(sc, &sc->txq);

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		nfe_set_macaddr(sc, sc->sc_enaddr);
	} else {
		NFE_WRITE(sc, NFE_MACADDR_LO,
		    sc->sc_enaddr[0] <<  8 | sc->sc_enaddr[1]);
		NFE_WRITE(sc, NFE_MACADDR_HI,
		    sc->sc_enaddr[2] << 24 | sc->sc_enaddr[3] << 16 |
		    sc->sc_enaddr[4] <<  8 | sc->sc_enaddr[5]);
	}

	if (sc->sc_mems != 0) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}

	splx(s);

	return 0;
}

void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

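	/*
	 * If a previous MDIO transaction is still flagged busy, writing the
	 * busy bit back appears to abort it before a new one is started.
	 */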
	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    device_xname(sc->sc_dev), phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t r;
	int handled;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	handled = 0;

	for (;;) {
		r = NFE_READ(sc, NFE_IRQ_STATUS);
		if ((r & NFE_IRQ_WANTED) == 0)
			break;

		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
		handled = 1;
		DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

		if ((r & (NFE_IRQ_RXERR|NFE_IRQ_RX_NOBUF|NFE_IRQ_RX)) != 0) {
			/* check Rx ring */
			nfe_rxeof(sc);
		}
		if ((r & (NFE_IRQ_TXERR|NFE_IRQ_TXERR2|NFE_IRQ_TX_DONE)) != 0) {
			/* check Tx ring */
			nfe_txeof(sc);
		}
		if ((r & NFE_IRQ_LINK) != 0) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(("%s: link state changed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	if (handled && !IF_IS_EMPTY(&ifp->if_snd))
		nfe_start(ifp);

	return handled;
}

static int
nfe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct nfe_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	/*
	 * If only the PROMISC flag changes, then
	 * don't do a full re-init of the chip, just update
	 * the Rx filter.
	 */
	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		nfe_setmulti(sc);

	return 0;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	}
	sc->sc_if_flags = ifp->if_flags;

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc32 - (char *)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc64 - (char *)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
		    (char *)&sc->txq.desc32[end] -
		    (char *)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
	    (char *)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc32[end] - (char *)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
		    (char *)&sc->txq.desc64[end] -
		    (char *)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
	    (char *)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc64[end] - (char *)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc32 - (char *)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc64 - (char *)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len, i;

	desc32 = NULL;
	desc64 = NULL;
	for (i = sc->rxq.cur;; i = NFE_RX_NEXTDESC(i)) {
		data = &sc->rxq.data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[i];
			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

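		/*
		 * The chip clears NFE_RX_READY once it has filled a
		 * descriptor; if the bit is still set, this descriptor
		 * still belongs to the hardware and we are done.
		 */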
		if ((flags & NFE_RX_READY) != 0)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_RX_VALID_V1) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if ((flags & NFE_RX_VALID_V2) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			physaddr =
			    sc->rxq.jbuf[sc->rxq.jbufmap[i]].physaddr;
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				if (len > MCLBYTES) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}

				(void)memcpy(mtod(mnew, void *),
				    mtod(data->m, const void *), len);
				m = mnew;
				goto mbufcopied;
			} else {
				MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);
				bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
				    mtod(data->m, char *) - (char *)sc->rxq.jpool,
				    NFE_JBYTES, BUS_DMASYNC_POSTREAD);

				physaddr = jbuf->physaddr;
			}
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if ((mnew->m_flags & M_EXT) == 0) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    device_xname(sc->sc_dev));
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

mbufcopied:
		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
			/*
			 * XXX
			 * no way to check M_CSUM_IPv4_BAD or non-IPv4 packets?
			 */
			if (flags & NFE_RX_IP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				DPRINTFN(3, ("%s: ip4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
			/*
			 * XXX
			 * no way to check M_CSUM_TCP_UDP_BAD or
			 * other protocols?
			 */
			if (flags & NFE_RX_UDP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				DPRINTFN(3, ("%s: udp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			} else if (flags & NFE_RX_TCP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				DPRINTFN(3, ("%s: tcp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
		}
		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip1:
		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	/* update current RX pointer */
	sc->rxq.cur = i;
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	int i;
	uint16_t flags;
	char buf[128];

	for (i = sc->txq.next;
	    sc->txq.queued > 0;
	    i = NFE_TX_NEXTDESC(i), sc->txq.queued--) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[i];
			nfe_txdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[i];
			nfe_txdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
		}

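		/*
		 * NFE_TX_VALID is cleared by the chip once it has finished
		 * transmitting a descriptor; if the bit is still set, the
		 * remaining descriptors have not been processed yet.
		 */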
		if ((flags & NFE_TX_VALID) != 0)
			break;

		data = &sc->txq.data[i];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				snprintb(buf, sizeof(buf), NFE_V1_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v1 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				snprintb(buf, sizeof(buf), NFE_V2_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v2 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get here */
			aprint_error_dev(sc->sc_dev,
			    "last fragment bit w/o associated mbuf!\n");
			continue;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;
	}

	sc->txq.next = i;

	if (sc->txq.queued < NFE_TX_RING_COUNT) {
		/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	if (sc->txq.queued == 0) {
		/* all queued packets are sent */
		ifp->if_timer = 0;
	}
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags, csumflags;
#if NVLAN > 0
	struct m_tag *mtag;
	uint32_t vtag = 0;
#endif
	int error, i, first;

	desc32 = NULL;
	desc64 = NULL;
	data = NULL;

	flags = 0;
	csumflags = 0;
	first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf (error %d)\n",
		    error);
		return error;
	}

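	/*
	 * Make sure the whole fragment chain fits in the ring; one
	 * descriptor is always left unused as slack.
	 */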
	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL)
		vtag = NFE_TX_VTAG | VLAN_TAG_VALUE(mtag);
#endif
	if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
		if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csumflags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			csumflags |= NFE_TX_TCP_UDP_CSUM;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = 0;
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		sc->txq.cur = NFE_TX_NEXTDESC(sc->txq.cur);
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* Checksum flags and vtag belong to the first fragment only. */
#if NVLAN > 0
		sc->txq.desc64[first].vtag = htole32(vtag);
#endif
		sc->txq.desc64[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* Checksum flags belong to the first fragment only. */
		sc->txq.desc32[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		bpf_mtap(ifp, m0);
	}

	if (sc->txq.queued != old) {
		/* packets are queued */
		if (sc->sc_flags & NFE_40BIT_ADDR)
			nfe_txdesc64_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else
			nfe_txdesc32_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->sc_dev, "watchdog timeout\n");

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;
	int rc = 0, s;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose.  This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

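	/*
	 * NFE_RING_SIZE packs both ring sizes into one register: the Rx
	 * descriptor count goes in the upper 16 bits and the Tx count in
	 * the lower 16 bits, each minus one.
	 */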
	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	s = splnet();
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	nfe_intr(sc); /* XXX clear IRQ status registers */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
	splx(s);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

	nfe_tick(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

out:
	return rc;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate jumbo frames\n");
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate jumbo buffer\n");
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not create DMA map\n");
				data->map = NULL;
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate mbuf cluster\n");
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not load rx buf DMA map");
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}

	nfe_jpool_free(sc);
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc, int i)
{
	struct nfe_jbuf *jbuf;

	mutex_enter(&sc->rxq.mtx);
	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL)
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	mutex_exit(&sc->rxq.mtx);
	if (jbuf == NULL)
		return NULL;
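	/*
	 * Record which jumbo buffer now backs ring slot i so that
	 * nfe_rxeof() can later recover its physical address.
	 */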
	sc->rxq.jbufmap[i] =
	    ((char *)jbuf->buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Note that the NIC might already have been reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = ((char *)buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		aprint_error_dev(sc->sc_dev,
		    "request to free a buffer (%p) not managed by us\n", buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	mutex_enter(&sc->rxq.mtx);
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
	mutex_exit(&sc->rxq.mtx);

	if (m != NULL)
		pool_cache_put(mb_cache, m);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	char *buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create jumbo DMA map\n");
		ring->jmap = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load jumbo DMA map\n");
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
		ring->jmap = NULL;
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
		ring->jpool = NULL;
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			ring->data[i].map = NULL;
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		memset(addr, 0, ETHER_ADDR_LEN);
		memset(mask, 0, ETHER_ADDR_LEN);
		goto done;
	}

	memcpy(addr, etherbroadcastaddr, ETHER_ADDR_LEN);
	memcpy(mask, etherbroadcastaddr, ETHER_ADDR_LEN);

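	/*
	 * Walk the multicast list and fold it into a single address/mask
	 * pair: addr accumulates the bits common to every address, mask
	 * accumulates the bits common to their complements.  Address
	 * ranges cannot be represented this way, so fall back to ALLMULTI
	 * when one is found.
	 */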
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			memset(addr, 0, ETHER_ADDR_LEN);
			memset(mask, 0, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &=  enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

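	/*
	 * Depending on the chip, the station address is stored in the
	 * MACADDR registers in one of two byte orders; NFE_CORRECT_MACADDR
	 * selects which layout to decode.
	 */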
	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >>  8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >>  8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

void
nfe_poweron(device_t self)
{
	struct nfe_softc *sc = device_private(self);

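	/*
	 * Chips with the NFE_PWR_MGMT flag apparently need an explicit MAC
	 * reset and must have their wake-up bits cleared before the MAC
	 * becomes usable.
	 */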
	if ((sc->sc_flags & NFE_PWR_MGMT) != 0) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}
}

bool
nfe_resume(device_t dv, const pmf_qual_t *qual)
{
	nfe_poweron(dv);

	return true;
}
