if_nfe.c revision 1.109
1/*	$OpenBSD: if_nfe.c,v 1.109 2015/03/14 03:38:48 jsg Exp $	*/
2
3/*-
4 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
5 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
21
22#include "bpfilter.h"
23#include "vlan.h"
24
25#include <sys/param.h>
26#include <sys/endian.h>
27#include <sys/systm.h>
28#include <sys/types.h>
29#include <sys/sockio.h>
30#include <sys/mbuf.h>
31#include <sys/queue.h>
32#include <sys/kernel.h>
33#include <sys/device.h>
34#include <sys/timeout.h>
35#include <sys/socket.h>
36
37#include <machine/bus.h>
38
39#include <net/if.h>
40#include <net/if_dl.h>
41#include <net/if_media.h>
42
43#include <netinet/in.h>
44#include <netinet/if_ether.h>
45
46#if NVLAN > 0
47#include <net/if_types.h>
48#include <net/if_vlan_var.h>
49#endif
50
51#if NBPFILTER > 0
52#include <net/bpf.h>
53#endif
54
55#include <dev/mii/miivar.h>
56
57#include <dev/pci/pcireg.h>
58#include <dev/pci/pcivar.h>
59#include <dev/pci/pcidevs.h>
60
61#include <dev/pci/if_nfereg.h>
62#include <dev/pci/if_nfevar.h>
63
64int	nfe_match(struct device *, void *, void *);
65void	nfe_attach(struct device *, struct device *, void *);
66int	nfe_activate(struct device *, int);
67void	nfe_miibus_statchg(struct device *);
68int	nfe_miibus_readreg(struct device *, int, int);
69void	nfe_miibus_writereg(struct device *, int, int, int);
70int	nfe_intr(void *);
71int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
72void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
73void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
74void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
75void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
76void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
77void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
78void	nfe_rxeof(struct nfe_softc *);
79void	nfe_txeof(struct nfe_softc *);
80int	nfe_encap(struct nfe_softc *, struct mbuf *);
81void	nfe_start(struct ifnet *);
82void	nfe_watchdog(struct ifnet *);
83int	nfe_init(struct ifnet *);
84void	nfe_stop(struct ifnet *, int);
85int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
86void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
87void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
88int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
89void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
90void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
91int	nfe_ifmedia_upd(struct ifnet *);
92void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
93void	nfe_iff(struct nfe_softc *);
94void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
95void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
96void	nfe_tick(void *);
97#ifndef SMALL_KERNEL
98int	nfe_wol(struct ifnet*, int);
99#endif
100
101struct cfattach nfe_ca = {
102	sizeof (struct nfe_softc), nfe_match, nfe_attach, NULL,
103	nfe_activate
104};
105
106struct cfdriver nfe_cd = {
107	NULL, "nfe", DV_IFNET
108};
109
110#ifdef NFE_DEBUG
111int nfedebug = 0;
112#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
113#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
114#else
115#define DPRINTF(x)
116#define DPRINTFN(n,x)
117#endif
118
119const struct pci_matchid nfe_devices[] = {
120	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
121	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
122	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
123	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
124	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
125	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
126	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
127	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
128	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
129	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
130	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
131	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
132	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
133	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
134	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
135	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
136	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
137	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
138	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
139	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
140	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
141	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
142	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
143	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
144	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
145	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
146	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
147	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
148	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
149	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
150	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
151	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
152	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
153	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
154	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
155	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
156	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
157	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
158	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 },
159	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN }
160};
161
162int
163nfe_match(struct device *dev, void *match, void *aux)
164{
165	return pci_matchbyid((struct pci_attach_args *)aux, nfe_devices,
166	    sizeof (nfe_devices) / sizeof (nfe_devices[0]));
167}
168
169int
170nfe_activate(struct device *self, int act)
171{
172	struct nfe_softc *sc = (struct nfe_softc *)self;
173	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
174	int rv = 0;
175
176	switch (act) {
177	case DVACT_SUSPEND:
178		if (ifp->if_flags & IFF_RUNNING)
179			nfe_stop(ifp, 0);
180		rv = config_activate_children(self, act);
181		break;
182	case DVACT_RESUME:
183		if (ifp->if_flags & IFF_UP)
184			nfe_init(ifp);
185		break;
186	default:
187		rv = config_activate_children(self, act);
188		break;
189	}
190	return (rv);
191}
192
193
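/*
 * Autoconf attach: map the device registers, establish the interrupt,
 * derive chip capabilities (jumbo frames, 40-bit DMA, checksum offload,
 * VLAN tagging, power management) from the PCI product ID, read the MAC
 * address, allocate the Tx and Rx rings, then attach the ethernet
 * interface and the PHY.
 */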
194void
195nfe_attach(struct device *parent, struct device *self, void *aux)
196{
197	struct nfe_softc *sc = (struct nfe_softc *)self;
198	struct pci_attach_args *pa = aux;
199	pci_chipset_tag_t pc = pa->pa_pc;
200	pci_intr_handle_t ih;
201	const char *intrstr;
202	struct ifnet *ifp;
203	bus_size_t memsize;
204	pcireg_t memtype;
205
206	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
207	if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
208	    &sc->sc_memh, NULL, &memsize, 0)) {
209		printf(": can't map mem space\n");
210		return;
211	}
212
213	if (pci_intr_map(pa, &ih) != 0) {
214		printf(": can't map interrupt\n");
215		return;
216	}
217
218	intrstr = pci_intr_string(pc, ih);
219	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc,
220	    sc->sc_dev.dv_xname);
221	if (sc->sc_ih == NULL) {
222		printf(": could not establish interrupt");
223		if (intrstr != NULL)
224			printf(" at %s", intrstr);
225		printf("\n");
226		return;
227	}
228	printf(": %s", intrstr);
229
230	sc->sc_dmat = pa->pa_dmat;
231	sc->sc_flags = 0;
232
233	switch (PCI_PRODUCT(pa->pa_id)) {
234	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
235	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
236	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
237	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
238		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
239		break;
240	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
241	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
242		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
243		break;
244	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
245	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
246	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
247	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
248	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
249	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
250	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
251	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
252	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
253	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
254	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
255	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
256		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
257		    NFE_PWR_MGMT;
258		break;
259	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
260	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
261	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
262	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
263		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
264		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
265		break;
266	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
267	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
268	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
269	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
270	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
271		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
272		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
273		break;
274	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
275	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
276	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
277	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
278		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
279		break;
280	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
281	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
282	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
283	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
284		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
285		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
286		break;
287	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
288	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
289		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
290		    NFE_HW_VLAN | NFE_PWR_MGMT;
291		break;
292	}
293
294	if (sc->sc_flags & NFE_PWR_MGMT) {
295		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
296		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
297		DELAY(100);
298		NFE_WRITE(sc, NFE_MAC_RESET, 0);
299		DELAY(100);
300		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
301		NFE_WRITE(sc, NFE_PWR2_CTL,
302		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
303	}
304
305	nfe_get_macaddr(sc, sc->sc_arpcom.ac_enaddr);
306	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
307
308	/*
309	 * Allocate Tx and Rx rings.
310	 */
311	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
312		printf("%s: could not allocate Tx ring\n",
313		    sc->sc_dev.dv_xname);
314		return;
315	}
316
317	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
318		printf("%s: could not allocate Rx ring\n",
319		    sc->sc_dev.dv_xname);
320		nfe_free_tx_ring(sc, &sc->txq);
321		return;
322	}
323
324	ifp = &sc->sc_arpcom.ac_if;
325	ifp->if_softc = sc;
326	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
327	ifp->if_ioctl = nfe_ioctl;
328	ifp->if_start = nfe_start;
329	ifp->if_watchdog = nfe_watchdog;
330	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
331	IFQ_SET_READY(&ifp->if_snd);
332	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
333
334	ifp->if_capabilities = IFCAP_VLAN_MTU;
335
336#ifndef SMALL_KERNEL
337	ifp->if_capabilities |= IFCAP_WOL;
338	ifp->if_wol = nfe_wol;
339	nfe_wol(ifp, 0);
340#endif
341
342#if NVLAN > 0
343	if (sc->sc_flags & NFE_HW_VLAN)
344		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
345#endif
346
347	if (sc->sc_flags & NFE_HW_CSUM) {
348		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
349		    IFCAP_CSUM_UDPv4;
350	}
351
352	sc->sc_mii.mii_ifp = ifp;
353	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
354	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
355	sc->sc_mii.mii_statchg = nfe_miibus_statchg;
356
357	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
358	    nfe_ifmedia_sts);
359	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 0, 0);
360	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
361		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
362		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
363		    0, NULL);
364		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
365	} else
366		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
367
368	if_attach(ifp);
369	ether_ifattach(ifp);
370
371	timeout_set(&sc->sc_tick_ch, nfe_tick, sc);
372}
373
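/*
 * MII status change callback: program the MAC's PHY interface, random
 * seed, duplex and link speed registers to match the media the PHY
 * negotiated.
 */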
374void
375nfe_miibus_statchg(struct device *dev)
376{
377	struct nfe_softc *sc = (struct nfe_softc *)dev;
378	struct mii_data *mii = &sc->sc_mii;
379	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
380
381	phy = NFE_READ(sc, NFE_PHY_IFACE);
382	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
383
384	seed = NFE_READ(sc, NFE_RNDSEED);
385	seed &= ~NFE_SEED_MASK;
386
387	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
388		phy  |= NFE_PHY_HDX;	/* half-duplex */
389		misc |= NFE_MISC1_HDX;
390	}
391
392	switch (IFM_SUBTYPE(mii->mii_media_active)) {
393	case IFM_1000_T:	/* full-duplex only */
394		link |= NFE_MEDIA_1000T;
395		seed |= NFE_SEED_1000T;
396		phy  |= NFE_PHY_1000T;
397		break;
398	case IFM_100_TX:
399		link |= NFE_MEDIA_100TX;
400		seed |= NFE_SEED_100TX;
401		phy  |= NFE_PHY_100TX;
402		break;
403	case IFM_10_T:
404		link |= NFE_MEDIA_10T;
405		seed |= NFE_SEED_10T;
406		break;
407	}
408
409	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */
410
411	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
412	NFE_WRITE(sc, NFE_MISC1, misc);
413	NFE_WRITE(sc, NFE_LINKSPEED, link);
414}
415
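/*
 * Read a PHY register through the MAC's MII interface, polling
 * NFE_PHY_CTL until the busy bit clears.  The address of the last PHY
 * that returned a valid value is remembered in sc->mii_phyaddr.
 */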
416int
417nfe_miibus_readreg(struct device *dev, int phy, int reg)
418{
419	struct nfe_softc *sc = (struct nfe_softc *)dev;
420	uint32_t val;
421	int ntries;
422
423	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
424
425	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
426		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
427		DELAY(100);
428	}
429
430	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
431
432	for (ntries = 0; ntries < 1000; ntries++) {
433		DELAY(100);
434		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
435			break;
436	}
437	if (ntries == 1000) {
438		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
439		    sc->sc_dev.dv_xname));
440		return 0;
441	}
442
443	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
444		DPRINTFN(2, ("%s: could not read PHY\n",
445		    sc->sc_dev.dv_xname));
446		return 0;
447	}
448
449	val = NFE_READ(sc, NFE_PHY_DATA);
450	if (val != 0xffffffff && val != 0)
451		sc->mii_phyaddr = phy;
452
453	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
454	    sc->sc_dev.dv_xname, phy, reg, val));
455
456	return val;
457}
458
459void
460nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
461{
462	struct nfe_softc *sc = (struct nfe_softc *)dev;
463	uint32_t ctl;
464	int ntries;
465
466	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
467
468	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
469		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
470		DELAY(100);
471	}
472
473	NFE_WRITE(sc, NFE_PHY_DATA, val);
474	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
475	NFE_WRITE(sc, NFE_PHY_CTL, ctl);
476
477	for (ntries = 0; ntries < 1000; ntries++) {
478		DELAY(100);
479		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
480			break;
481	}
482#ifdef NFE_DEBUG
483	if (nfedebug >= 2 && ntries == 1000)
484		printf("could not write to PHY\n");
485#endif
486}
487
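/*
 * Interrupt handler: acknowledge the pending interrupt causes and
 * service link state changes as well as the Rx and Tx rings.
 */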
488int
489nfe_intr(void *arg)
490{
491	struct nfe_softc *sc = arg;
492	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
493	uint32_t r;
494
495	if ((r = NFE_READ(sc, NFE_IRQ_STATUS) & NFE_IRQ_WANTED) == 0)
496		return 0;	/* not for us */
497	NFE_WRITE(sc, NFE_IRQ_STATUS, r);
498
499	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));
500
501	if (r & NFE_IRQ_LINK) {
502		NFE_READ(sc, NFE_PHY_STATUS);
503		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
504		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
505	}
506
507	if (ifp->if_flags & IFF_RUNNING) {
508		/* check Rx ring */
509		nfe_rxeof(sc);
510
511		/* check Tx ring */
512		nfe_txeof(sc);
513	}
514
515	return 1;
516}
517
518int
519nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
520{
521	struct nfe_softc *sc = ifp->if_softc;
522	struct ifaddr *ifa = (struct ifaddr *)data;
523	struct ifreq *ifr = (struct ifreq *)data;
524	int s, error = 0;
525
526	s = splnet();
527
528	switch (cmd) {
529	case SIOCSIFADDR:
530		ifp->if_flags |= IFF_UP;
531		if (!(ifp->if_flags & IFF_RUNNING))
532			nfe_init(ifp);
533		if (ifa->ifa_addr->sa_family == AF_INET)
534			arp_ifinit(&sc->sc_arpcom, ifa);
535		break;
536
537	case SIOCSIFFLAGS:
538		if (ifp->if_flags & IFF_UP) {
539			if (ifp->if_flags & IFF_RUNNING)
540				error = ENETRESET;
541			else
542				nfe_init(ifp);
543		} else {
544			if (ifp->if_flags & IFF_RUNNING)
545				nfe_stop(ifp, 1);
546		}
547		break;
548
549	case SIOCSIFMEDIA:
550	case SIOCGIFMEDIA:
551		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
552		break;
553
554	default:
555		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
556	}
557
558	if (error == ENETRESET) {
559		if (ifp->if_flags & IFF_RUNNING)
560			nfe_iff(sc);
561		error = 0;
562	}
563
564	splx(s);
565	return error;
566}
567
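/*
 * Helpers to bus_dmamap_sync() a single Tx or Rx descriptor; the *_rsync
 * variants sync a range of Tx descriptors, splitting the operation in two
 * when the range wraps past the end of the ring.
 */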
568void
569nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
570{
571	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
572	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
573	    sizeof (struct nfe_desc32), ops);
574}
575
576void
577nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
578{
579	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
580	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
581	    sizeof (struct nfe_desc64), ops);
582}
583
584void
585nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
586{
587	if (end > start) {
588		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
589		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
590		    (caddr_t)&sc->txq.desc32[end] -
591		    (caddr_t)&sc->txq.desc32[start], ops);
592		return;
593	}
594	/* sync from 'start' to end of ring */
595	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
596	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
597	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
598	    (caddr_t)&sc->txq.desc32[start], ops);
599
600	/* sync from start of ring to 'end' */
601	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
602	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
603}
604
605void
606nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
607{
608	if (end > start) {
609		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
610		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
611		    (caddr_t)&sc->txq.desc64[end] -
612		    (caddr_t)&sc->txq.desc64[start], ops);
613		return;
614	}
615	/* sync from 'start' to end of ring */
616	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
617	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
618	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
619	    (caddr_t)&sc->txq.desc64[start], ops);
620
621	/* sync from start of ring to 'end' */
622	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
623	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
624}
625
626void
627nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
628{
629	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
630	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
631	    sizeof (struct nfe_desc32), ops);
632}
633
634void
635nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
636{
637	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
638	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
639	    sizeof (struct nfe_desc64), ops);
640}
641
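/*
 * Rx completion: walk the ring from rxq.cur, hand completed frames to the
 * stack with a freshly allocated cluster taking their place in the ring,
 * then return each descriptor to the chip by marking it NFE_RX_READY.
 */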
642void
643nfe_rxeof(struct nfe_softc *sc)
644{
645	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
646	struct nfe_desc32 *desc32;
647	struct nfe_desc64 *desc64;
648	struct nfe_rx_data *data;
649	struct mbuf *m, *mnew;
650	bus_addr_t physaddr;
651#if NVLAN > 0
652	uint32_t vtag;
653#endif
654	uint16_t flags;
655	int error, len;
656
657	for (;;) {
658		data = &sc->rxq.data[sc->rxq.cur];
659
660		if (sc->sc_flags & NFE_40BIT_ADDR) {
661			desc64 = &sc->rxq.desc64[sc->rxq.cur];
662			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
663
664			flags = letoh16(desc64->flags);
665			len = letoh16(desc64->length) & 0x3fff;
666#if NVLAN > 0
667			vtag = letoh32(desc64->physaddr[1]);
668#endif
669		} else {
670			desc32 = &sc->rxq.desc32[sc->rxq.cur];
671			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
672
673			flags = letoh16(desc32->flags);
674			len = letoh16(desc32->length) & 0x3fff;
675		}
676
677		if (flags & NFE_RX_READY)
678			break;
679
680		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
681			if (!(flags & NFE_RX_VALID_V1))
682				goto skip;
683
684			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
685				flags &= ~NFE_RX_ERROR;
686				len--;	/* fix buffer length */
687			}
688		} else {
689			if (!(flags & NFE_RX_VALID_V2))
690				goto skip;
691
692			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
693				flags &= ~NFE_RX_ERROR;
694				len--;	/* fix buffer length */
695			}
696		}
697
698		if (flags & NFE_RX_ERROR) {
699			ifp->if_ierrors++;
700			goto skip;
701		}
702
703		/*
704		 * Try to allocate a new mbuf for this ring element and load
705		 * it before processing the current mbuf. If the ring element
706		 * cannot be loaded, drop the received packet and reuse the
707		 * old mbuf. In the unlikely case that the old mbuf can't be
708		 * reloaded either, explicitly panic.
709		 */
710		mnew = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
711		if (mnew == NULL) {
712			ifp->if_ierrors++;
713			goto skip;
714		}
715		mnew->m_pkthdr.len = mnew->m_len = MCLBYTES;
716
717		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
718		    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
719		bus_dmamap_unload(sc->sc_dmat, data->map);
720
721		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, mnew,
722		    BUS_DMA_READ | BUS_DMA_NOWAIT);
723		if (error != 0) {
724			m_freem(mnew);
725
726			/* try to reload the old mbuf */
727			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map,
728			    m, BUS_DMA_READ | BUS_DMA_NOWAIT);
729			if (error != 0) {
730				/* very unlikely that it will fail.. */
731				/* very unlikely that it will fail */
732				    sc->sc_dev.dv_xname);
733			}
734			ifp->if_ierrors++;
735			goto skip;
736		}
737		physaddr = data->map->dm_segs[0].ds_addr;
738
739		/*
740		 * New mbuf successfully loaded, update Rx ring and continue
741		 * processing.
742		 */
743		m = data->m;
744		data->m = mnew;
745
746		/* finalize mbuf */
747		m->m_pkthdr.len = m->m_len = len;
748		m->m_pkthdr.rcvif = ifp;
749
750		if ((sc->sc_flags & NFE_HW_CSUM) &&
751		    (flags & NFE_RX_IP_CSUMOK)) {
752			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
753			if (flags & NFE_RX_UDP_CSUMOK)
754				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
755			if (flags & NFE_RX_TCP_CSUMOK)
756				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
757		}
758
759#if NVLAN > 0
760		if ((vtag & NFE_RX_VTAG) && (sc->sc_flags & NFE_HW_VLAN)) {
761			m->m_pkthdr.ether_vtag = vtag & 0xffff;
762			m->m_flags |= M_VLANTAG;
763		}
764#endif
765
766#if NBPFILTER > 0
767		if (ifp->if_bpf)
768			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
769#endif
770		ifp->if_ipackets++;
771		ether_input_mbuf(ifp, m);
772
773		/* update mapping address in h/w descriptor */
774		if (sc->sc_flags & NFE_40BIT_ADDR) {
775#if defined(__LP64__)
776			desc64->physaddr[0] = htole32(physaddr >> 32);
777#endif
778			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
779		} else {
780			desc32->physaddr = htole32(physaddr);
781		}
782
783skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
784			desc64->length = htole16(sc->rxq.bufsz);
785			desc64->flags = htole16(NFE_RX_READY);
786
787			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
788		} else {
789			desc32->length = htole16(sc->rxq.bufsz);
790			desc32->flags = htole16(NFE_RX_READY);
791
792			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
793		}
794
795		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
796	}
797}
798
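/*
 * Tx completion: reclaim descriptors between txq.next and txq.cur that
 * the chip has released (NFE_TX_VALID cleared), unloading the DMA map and
 * freeing the mbuf once the last fragment of a chain has been sent.
 */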
799void
800nfe_txeof(struct nfe_softc *sc)
801{
802	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
803	struct nfe_desc32 *desc32;
804	struct nfe_desc64 *desc64;
805	struct nfe_tx_data *data = NULL;
806	uint16_t flags;
807
808	while (sc->txq.next != sc->txq.cur) {
809		if (sc->sc_flags & NFE_40BIT_ADDR) {
810			desc64 = &sc->txq.desc64[sc->txq.next];
811			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);
812
813			flags = letoh16(desc64->flags);
814		} else {
815			desc32 = &sc->txq.desc32[sc->txq.next];
816			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);
817
818			flags = letoh16(desc32->flags);
819		}
820
821		if (flags & NFE_TX_VALID)
822			break;
823
824		data = &sc->txq.data[sc->txq.next];
825
826		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
827			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
828				goto skip;
829
830			if ((flags & NFE_TX_ERROR_V1) != 0) {
831				printf("%s: tx v1 error %b\n",
832				    sc->sc_dev.dv_xname, flags, NFE_V1_TXERR);
833				ifp->if_oerrors++;
834			} else
835				ifp->if_opackets++;
836		} else {
837			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
838				goto skip;
839
840			if ((flags & NFE_TX_ERROR_V2) != 0) {
841				printf("%s: tx v2 error %b\n",
842				    sc->sc_dev.dv_xname, flags, NFE_V2_TXERR);
843				ifp->if_oerrors++;
844			} else
845				ifp->if_opackets++;
846		}
847
848		if (data->m == NULL) {	/* should not get here */
849			printf("%s: last fragment bit w/o associated mbuf!\n",
850			    sc->sc_dev.dv_xname);
851			goto skip;
852		}
853
854		/* last fragment of the mbuf chain transmitted */
855		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
856		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
857		bus_dmamap_unload(sc->sc_dmat, data->active);
858		m_freem(data->m);
859		data->m = NULL;
860
861		ifp->if_timer = 0;
862
863skip:		sc->txq.queued--;
864		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
865	}
866
867	if (data != NULL) {	/* at least one slot freed */
868		ifp->if_flags &= ~IFF_OACTIVE;
869		nfe_start(ifp);
870	}
871}
872
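/*
 * Load an mbuf chain into the Tx ring, one descriptor per DMA segment.
 * Checksum and VLAN flags go on the first fragment only, the last-fragment
 * flag on the final descriptor, and the valid bit of the first descriptor
 * is set last, once the rest of the chain is in place.
 */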
873int
874nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
875{
876	struct nfe_desc32 *desc32;
877	struct nfe_desc64 *desc64;
878	struct nfe_tx_data *data;
879	bus_dmamap_t map;
880	uint16_t flags = 0;
881	uint32_t vtag = 0;
882	int error, i, first = sc->txq.cur;
883
884	map = sc->txq.data[first].map;
885
886	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
887	if (error != 0) {
888		printf("%s: can't map mbuf (error %d)\n",
889		    sc->sc_dev.dv_xname, error);
890		return error;
891	}
892
893	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
894		bus_dmamap_unload(sc->sc_dmat, map);
895		return ENOBUFS;
896	}
897
898#if NVLAN > 0
899	/* setup h/w VLAN tagging */
900	if (m0->m_flags & M_VLANTAG)
901		vtag = NFE_TX_VTAG | m0->m_pkthdr.ether_vtag;
902#endif
903	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
904		flags |= NFE_TX_IP_CSUM;
905	if (m0->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
906		flags |= NFE_TX_TCP_UDP_CSUM;
907
908	for (i = 0; i < map->dm_nsegs; i++) {
909		data = &sc->txq.data[sc->txq.cur];
910
911		if (sc->sc_flags & NFE_40BIT_ADDR) {
912			desc64 = &sc->txq.desc64[sc->txq.cur];
913#if defined(__LP64__)
914			desc64->physaddr[0] =
915			    htole32(map->dm_segs[i].ds_addr >> 32);
916#endif
917			desc64->physaddr[1] =
918			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
919			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
920			desc64->flags = htole16(flags);
921			desc64->vtag = htole32(vtag);
922		} else {
923			desc32 = &sc->txq.desc32[sc->txq.cur];
924
925			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
926			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
927			desc32->flags = htole16(flags);
928		}
929
930		if (map->dm_nsegs > 1) {
931			/*
932			 * Checksum flags and vtag belong to the first fragment
933			 * only.
934			 */
935			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
936			vtag = 0;
937
938			/*
939			 * Setting of the valid bit in the first descriptor is
940			 * deferred until the whole chain is fully set up.
941			 */
942			flags |= NFE_TX_VALID;
943		}
944
945		sc->txq.queued++;
946		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
947	}
948
949	/* the whole mbuf chain has been set up */
950	if (sc->sc_flags & NFE_40BIT_ADDR) {
951		/* fix last descriptor */
952		flags |= NFE_TX_LASTFRAG_V2;
953		desc64->flags = htole16(flags);
954
955		/* finally, set the valid bit in the first descriptor */
956		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
957	} else {
958		/* fix last descriptor */
959		if (sc->sc_flags & NFE_JUMBO_SUP)
960			flags |= NFE_TX_LASTFRAG_V2;
961		else
962			flags |= NFE_TX_LASTFRAG_V1;
963		desc32->flags = htole16(flags);
964
965		/* finally, set the valid bit in the first descriptor */
966		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
967	}
968
969	data->m = m0;
970	data->active = map;
971
972	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
973	    BUS_DMASYNC_PREWRITE);
974
975	return 0;
976}
977
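/*
 * Transmit start routine: drain the interface send queue into the Tx
 * ring, sync the descriptors that were filled, kick the chip with
 * NFE_RXTX_KICKTX and arm the watchdog timer.
 */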
978void
979nfe_start(struct ifnet *ifp)
980{
981	struct nfe_softc *sc = ifp->if_softc;
982	int old = sc->txq.cur;
983	struct mbuf *m0;
984
985	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
986		return;
987
988	for (;;) {
989		IFQ_POLL(&ifp->if_snd, m0);
990		if (m0 == NULL)
991			break;
992
993		if (nfe_encap(sc, m0) != 0) {
994			ifp->if_flags |= IFF_OACTIVE;
995			break;
996		}
997
998		/* packet put in h/w queue, remove from s/w queue */
999		IFQ_DEQUEUE(&ifp->if_snd, m0);
1000
1001#if NBPFILTER > 0
1002		if (ifp->if_bpf != NULL)
1003			bpf_mtap_ether(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
1004#endif
1005	}
1006	if (sc->txq.cur == old)	/* nothing sent */
1007		return;
1008
1009	if (sc->sc_flags & NFE_40BIT_ADDR)
1010		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1011	else
1012		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
1013
1014	/* kick Tx */
1015	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
1016
1017	/*
1018	 * Set a timeout in case the chip goes out to lunch.
1019	 */
1020	ifp->if_timer = 5;
1021}
1022
1023void
1024nfe_watchdog(struct ifnet *ifp)
1025{
1026	struct nfe_softc *sc = ifp->if_softc;
1027
1028	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1029
1030	nfe_init(ifp);
1031
1032	ifp->if_oerrors++;
1033}
1034
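/*
 * (Re)initialize the hardware: reset the Rx/Tx engine, program the ring
 * addresses and sizes, the MAC address, interrupt mitigation and the Rx
 * filter, select the media, then enable Rx, Tx and interrupts and start
 * the MII tick.
 */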
1035int
1036nfe_init(struct ifnet *ifp)
1037{
1038	struct nfe_softc *sc = ifp->if_softc;
1039	uint32_t tmp;
1040
1041	nfe_stop(ifp, 0);
1042
1043	NFE_WRITE(sc, NFE_TX_UNK, 0);
1044	NFE_WRITE(sc, NFE_STATUS, 0);
1045
1046	sc->rxtxctl = NFE_RXTX_BIT2;
1047	if (sc->sc_flags & NFE_40BIT_ADDR)
1048		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
1049	else if (sc->sc_flags & NFE_JUMBO_SUP)
1050		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
1051
1052	if (sc->sc_flags & NFE_HW_CSUM)
1053		sc->rxtxctl |= NFE_RXTX_RXCSUM;
1054	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1055		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
1056
1057	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
1058	DELAY(10);
1059	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1060
1061	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
1062		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
1063	else
1064		NFE_WRITE(sc, NFE_VTAG_CTL, 0);
1065
1066	NFE_WRITE(sc, NFE_SETUP_R6, 0);
1067
1068	/* set MAC address */
1069	nfe_set_macaddr(sc, sc->sc_arpcom.ac_enaddr);
1070
1071	/* tell MAC where rings are in memory */
1072#ifdef __LP64__
1073	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
1074#endif
1075	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
1076#ifdef __LP64__
1077	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
1078#endif
1079	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);
1080
1081	NFE_WRITE(sc, NFE_RING_SIZE,
1082	    (NFE_RX_RING_COUNT - 1) << 16 |
1083	    (NFE_TX_RING_COUNT - 1));
1084
1085	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);
1086
1087	/* force MAC to wakeup */
1088	tmp = NFE_READ(sc, NFE_PWR_STATE);
1089	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
1090	DELAY(10);
1091	tmp = NFE_READ(sc, NFE_PWR_STATE);
1092	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);
1093
1094#if 1
1095	/* configure interrupts coalescing/mitigation */
1096	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
1097#else
1098	/* no interrupt mitigation: one interrupt per packet */
1099	NFE_WRITE(sc, NFE_IMTIMER, 970);
1100#endif
1101
1102	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
1103	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
1104	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
1105
1106	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
1107	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
1108
1109	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
1110
1111	sc->rxtxctl &= ~NFE_RXTX_BIT2;
1112	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
1113	DELAY(10);
1114	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
1115
1116	/* program promiscuous mode and multicast filters */
1117	nfe_iff(sc);
1118
1119	nfe_ifmedia_upd(ifp);
1120
1121	/* enable Rx */
1122	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
1123
1124	/* enable Tx */
1125	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
1126
1127	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1128
1129	/* enable interrupts */
1130	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1131
1132	timeout_add_sec(&sc->sc_tick_ch, 1);
1133
1134	ifp->if_flags |= IFF_RUNNING;
1135	ifp->if_flags &= ~IFF_OACTIVE;
1136
1137	return 0;
1138}
1139
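/*
 * Stop the interface: cancel the tick timeout, take the PHY down and
 * abort Tx.  Rx and interrupts are left enabled when Wake-on-LAN is
 * armed.  Finally reset both rings.
 */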
1140void
1141nfe_stop(struct ifnet *ifp, int disable)
1142{
1143	struct nfe_softc *sc = ifp->if_softc;
1144
1145	timeout_del(&sc->sc_tick_ch);
1146
1147	ifp->if_timer = 0;
1148	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1149
1150	mii_down(&sc->sc_mii);
1151
1152	/* abort Tx */
1153	NFE_WRITE(sc, NFE_TX_CTL, 0);
1154
1155	if ((sc->sc_flags & NFE_WOL) == 0) {
1156		/* disable Rx */
1157		NFE_WRITE(sc, NFE_RX_CTL, 0);
1158
1159		/* disable interrupts */
1160		NFE_WRITE(sc, NFE_IRQ_MASK, 0);
1161	}
1162
1163	/* reset Tx and Rx rings */
1164	nfe_reset_tx_ring(sc, &sc->txq);
1165	nfe_reset_rx_ring(sc, &sc->rxq);
1166}
1167
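/*
 * Allocate the Rx descriptor ring in DMA-able memory and pre-load an mbuf
 * cluster into every slot, marking each descriptor NFE_RX_READY.
 */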
1168int
1169nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1170{
1171	struct nfe_desc32 *desc32;
1172	struct nfe_desc64 *desc64;
1173	struct nfe_rx_data *data;
1174	void **desc;
1175	bus_addr_t physaddr;
1176	int i, nsegs, error, descsize;
1177
1178	if (sc->sc_flags & NFE_40BIT_ADDR) {
1179		desc = (void **)&ring->desc64;
1180		descsize = sizeof (struct nfe_desc64);
1181	} else {
1182		desc = (void **)&ring->desc32;
1183		descsize = sizeof (struct nfe_desc32);
1184	}
1185
1186	ring->cur = ring->next = 0;
1187	ring->bufsz = MCLBYTES;
1188
1189	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
1190	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1191	if (error != 0) {
1192		printf("%s: could not create desc DMA map\n",
1193		    sc->sc_dev.dv_xname);
1194		goto fail;
1195	}
1196
1197	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
1198	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1199	if (error != 0) {
1200		printf("%s: could not allocate DMA memory\n",
1201		    sc->sc_dev.dv_xname);
1202		goto fail;
1203	}
1204
1205	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1206	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1207	if (error != 0) {
1208		printf("%s: can't map desc DMA memory\n",
1209		    sc->sc_dev.dv_xname);
1210		goto fail;
1211	}
1212
1213	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1214	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1215	if (error != 0) {
1216		printf("%s: could not load desc DMA map\n",
1217		    sc->sc_dev.dv_xname);
1218		goto fail;
1219	}
1220	ring->physaddr = ring->map->dm_segs[0].ds_addr;
1221
1222	/*
1223	 * Pre-allocate Rx buffers and populate Rx ring.
1224	 */
1225	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1226		data = &sc->rxq.data[i];
1227
1228		data->m = MCLGETI(NULL, MCLBYTES, NULL, M_DONTWAIT);
1229		if (data->m == NULL) {
1230			printf("%s: could not allocate rx mbuf\n",
1231			    sc->sc_dev.dv_xname);
1232			error = ENOMEM;
1233			goto fail;
1234		}
1235		data->m->m_pkthdr.len = data->m->m_len = MCLBYTES;
1236
1237		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1238		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
1239		if (error != 0) {
1240			printf("%s: could not create DMA map\n",
1241			    sc->sc_dev.dv_xname);
1242			goto fail;
1243		}
1244
1245		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, data->m,
1246		    BUS_DMA_READ | BUS_DMA_NOWAIT);
1247		if (error != 0) {
1248			printf("%s: could not load rx buf DMA map",
1249			    sc->sc_dev.dv_xname);
1250			goto fail;
1251		}
1252		physaddr = data->map->dm_segs[0].ds_addr;
1253
1254		if (sc->sc_flags & NFE_40BIT_ADDR) {
1255			desc64 = &sc->rxq.desc64[i];
1256#if defined(__LP64__)
1257			desc64->physaddr[0] = htole32(physaddr >> 32);
1258#endif
1259			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
1260			desc64->length = htole16(sc->rxq.bufsz);
1261			desc64->flags = htole16(NFE_RX_READY);
1262		} else {
1263			desc32 = &sc->rxq.desc32[i];
1264			desc32->physaddr = htole32(physaddr);
1265			desc32->length = htole16(sc->rxq.bufsz);
1266			desc32->flags = htole16(NFE_RX_READY);
1267		}
1268	}
1269
1270	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1271	    BUS_DMASYNC_PREWRITE);
1272
1273	return 0;
1274
1275fail:	nfe_free_rx_ring(sc, ring);
1276	return error;
1277}
1278
1279void
1280nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1281{
1282	int i;
1283
1284	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1285		if (sc->sc_flags & NFE_40BIT_ADDR) {
1286			ring->desc64[i].length = htole16(ring->bufsz);
1287			ring->desc64[i].flags = htole16(NFE_RX_READY);
1288		} else {
1289			ring->desc32[i].length = htole16(ring->bufsz);
1290			ring->desc32[i].flags = htole16(NFE_RX_READY);
1291		}
1292	}
1293
1294	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1295	    BUS_DMASYNC_PREWRITE);
1296
1297	ring->cur = ring->next = 0;
1298}
1299
1300void
1301nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1302{
1303	struct nfe_rx_data *data;
1304	void *desc;
1305	int i, descsize;
1306
1307	if (sc->sc_flags & NFE_40BIT_ADDR) {
1308		desc = ring->desc64;
1309		descsize = sizeof (struct nfe_desc64);
1310	} else {
1311		desc = ring->desc32;
1312		descsize = sizeof (struct nfe_desc32);
1313	}
1314
1315	if (desc != NULL) {
1316		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1317		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1318		bus_dmamap_unload(sc->sc_dmat, ring->map);
1319		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1320		    NFE_RX_RING_COUNT * descsize);
1321		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1322	}
1323
1324	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1325		data = &ring->data[i];
1326
1327		if (data->map != NULL) {
1328			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1329			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1330			bus_dmamap_unload(sc->sc_dmat, data->map);
1331			bus_dmamap_destroy(sc->sc_dmat, data->map);
1332		}
1333		if (data->m != NULL)
1334			m_freem(data->m);
1335	}
1336}
1337
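/*
 * Allocate the Tx descriptor ring and create one DMA map per slot, each
 * able to hold up to NFE_MAX_SCATTER segments of a jumbo-sized chain.
 */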
1338int
1339nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1340{
1341	int i, nsegs, error;
1342	void **desc;
1343	int descsize;
1344
1345	if (sc->sc_flags & NFE_40BIT_ADDR) {
1346		desc = (void **)&ring->desc64;
1347		descsize = sizeof (struct nfe_desc64);
1348	} else {
1349		desc = (void **)&ring->desc32;
1350		descsize = sizeof (struct nfe_desc32);
1351	}
1352
1353	ring->queued = 0;
1354	ring->cur = ring->next = 0;
1355
1356	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
1357	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
1358
1359	if (error != 0) {
1360		printf("%s: could not create desc DMA map\n",
1361		    sc->sc_dev.dv_xname);
1362		goto fail;
1363	}
1364
1365	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
1366	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
1367	if (error != 0) {
1368		printf("%s: could not allocate DMA memory\n",
1369		    sc->sc_dev.dv_xname);
1370		goto fail;
1371	}
1372
1373	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
1374	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
1375	if (error != 0) {
1376		printf("%s: can't map desc DMA memory\n",
1377		    sc->sc_dev.dv_xname);
1378		goto fail;
1379	}
1380
1381	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
1382	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
1383	if (error != 0) {
1384		printf("%s: could not load desc DMA map\n",
1385		    sc->sc_dev.dv_xname);
1386		goto fail;
1387	}
1388	ring->physaddr = ring->map->dm_segs[0].ds_addr;
1389
1390	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1391		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
1392		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
1393		    &ring->data[i].map);
1394		if (error != 0) {
1395			printf("%s: could not create DMA map\n",
1396			    sc->sc_dev.dv_xname);
1397			goto fail;
1398		}
1399	}
1400
1401	return 0;
1402
1403fail:	nfe_free_tx_ring(sc, ring);
1404	return error;
1405}
1406
1407void
1408nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1409{
1410	struct nfe_tx_data *data;
1411	int i;
1412
1413	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1414		if (sc->sc_flags & NFE_40BIT_ADDR)
1415			ring->desc64[i].flags = 0;
1416		else
1417			ring->desc32[i].flags = 0;
1418
1419		data = &ring->data[i];
1420
1421		if (data->m != NULL) {
1422			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1423			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1424			bus_dmamap_unload(sc->sc_dmat, data->active);
1425			m_freem(data->m);
1426			data->m = NULL;
1427		}
1428	}
1429
1430	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
1431	    BUS_DMASYNC_PREWRITE);
1432
1433	ring->queued = 0;
1434	ring->cur = ring->next = 0;
1435}
1436
1437void
1438nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1439{
1440	struct nfe_tx_data *data;
1441	void *desc;
1442	int i, descsize;
1443
1444	if (sc->sc_flags & NFE_40BIT_ADDR) {
1445		desc = ring->desc64;
1446		descsize = sizeof (struct nfe_desc64);
1447	} else {
1448		desc = ring->desc32;
1449		descsize = sizeof (struct nfe_desc32);
1450	}
1451
1452	if (desc != NULL) {
1453		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
1454		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1455		bus_dmamap_unload(sc->sc_dmat, ring->map);
1456		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
1457		    NFE_TX_RING_COUNT * descsize);
1458		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
1459	}
1460
1461	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1462		data = &ring->data[i];
1463
1464		if (data->m != NULL) {
1465			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
1466			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1467			bus_dmamap_unload(sc->sc_dmat, data->active);
1468			m_freem(data->m);
1469		}
1470	}
1471
1472	/* ... and now actually destroy the DMA mappings */
1473	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1474		data = &ring->data[i];
1475		if (data->map == NULL)
1476			continue;
1477		bus_dmamap_destroy(sc->sc_dmat, data->map);
1478	}
1479}
1480
1481int
1482nfe_ifmedia_upd(struct ifnet *ifp)
1483{
1484	struct nfe_softc *sc = ifp->if_softc;
1485	struct mii_data *mii = &sc->sc_mii;
1486	struct mii_softc *miisc;
1487
1488	if (mii->mii_instance != 0) {
1489		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1490			mii_phy_reset(miisc);
1491	}
1492	return mii_mediachg(mii);
1493}
1494
1495void
1496nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1497{
1498	struct nfe_softc *sc = ifp->if_softc;
1499	struct mii_data *mii = &sc->sc_mii;
1500
1501	mii_pollstat(mii);
1502	ifmr->ifm_status = mii->mii_media_status;
1503	ifmr->ifm_active = mii->mii_media_active;
1504}
1505
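/*
 * Program the Rx filter: promiscuous or all-multicast when requested (or
 * when a multicast range is configured), otherwise build a combined
 * address/mask pair covering the broadcast address and every address in
 * the multicast list.
 */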
1506void
1507nfe_iff(struct nfe_softc *sc)
1508{
1509	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1510	struct arpcom *ac = &sc->sc_arpcom;
1511	struct ether_multi *enm;
1512	struct ether_multistep step;
1513	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
1514	uint32_t filter;
1515	int i;
1516
1517	filter = NFE_RXFILTER_MAGIC;
1518	ifp->if_flags &= ~IFF_ALLMULTI;
1519
1520	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
1521		ifp->if_flags |= IFF_ALLMULTI;
1522		if (ifp->if_flags & IFF_PROMISC)
1523			filter |= NFE_PROMISC;
1524		else
1525			filter |= NFE_U2M;
1526		bzero(addr, ETHER_ADDR_LEN);
1527		bzero(mask, ETHER_ADDR_LEN);
1528	} else {
1529		filter |= NFE_U2M;
1530
1531		bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
1532		bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
1533
1534		ETHER_FIRST_MULTI(step, ac, enm);
1535		while (enm != NULL) {
1536			for (i = 0; i < ETHER_ADDR_LEN; i++) {
1537				addr[i] &=  enm->enm_addrlo[i];
1538				mask[i] &= ~enm->enm_addrlo[i];
1539			}
1540
1541			ETHER_NEXT_MULTI(step, enm);
1542		}
1543
1544		for (i = 0; i < ETHER_ADDR_LEN; i++)
1545			mask[i] |= addr[i];
1546	}
1547
1548	addr[0] |= 0x01;	/* make sure multicast bit is set */
1549
1550	NFE_WRITE(sc, NFE_MULTIADDR_HI,
1551	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1552	NFE_WRITE(sc, NFE_MULTIADDR_LO,
1553	    addr[5] <<  8 | addr[4]);
1554	NFE_WRITE(sc, NFE_MULTIMASK_HI,
1555	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
1556	NFE_WRITE(sc, NFE_MULTIMASK_LO,
1557	    mask[5] <<  8 | mask[4]);
1558	NFE_WRITE(sc, NFE_RXFILTER, filter);
1559}
1560
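/*
 * Read the station address from the MAC address registers; chips flagged
 * NFE_CORRECT_MACADDR store it in the opposite byte order from the older
 * ones.
 */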
1561void
1562nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
1563{
1564	uint32_t tmp;
1565
1566	if (sc->sc_flags & NFE_CORRECT_MACADDR) {
1567		tmp = NFE_READ(sc, NFE_MACADDR_HI);
1568		addr[0] = (tmp & 0xff);
1569		addr[1] = (tmp >>  8) & 0xff;
1570		addr[2] = (tmp >> 16) & 0xff;
1571		addr[3] = (tmp >> 24) & 0xff;
1572
1573		tmp = NFE_READ(sc, NFE_MACADDR_LO);
1574		addr[4] = (tmp & 0xff);
1575		addr[5] = (tmp >> 8) & 0xff;
1576
1577	} else {
1578		tmp = NFE_READ(sc, NFE_MACADDR_LO);
1579		addr[0] = (tmp >> 8) & 0xff;
1580		addr[1] = (tmp & 0xff);
1581
1582		tmp = NFE_READ(sc, NFE_MACADDR_HI);
1583		addr[2] = (tmp >> 24) & 0xff;
1584		addr[3] = (tmp >> 16) & 0xff;
1585		addr[4] = (tmp >>  8) & 0xff;
1586		addr[5] = (tmp & 0xff);
1587	}
1588}
1589
1590void
1591nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
1592{
1593	NFE_WRITE(sc, NFE_MACADDR_LO,
1594	    addr[5] <<  8 | addr[4]);
1595	NFE_WRITE(sc, NFE_MACADDR_HI,
1596	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
1597}
1598
1599void
1600nfe_tick(void *arg)
1601{
1602	struct nfe_softc *sc = arg;
1603	int s;
1604
1605	s = splnet();
1606	mii_tick(&sc->sc_mii);
1607	splx(s);
1608
1609	timeout_add_sec(&sc->sc_tick_ch, 1);
1610}
1611
1612#ifndef SMALL_KERNEL
1613int
1614nfe_wol(struct ifnet *ifp, int enable)
1615{
1616	struct nfe_softc *sc = ifp->if_softc;
1617
1618	if (enable) {
1619		sc->sc_flags |= NFE_WOL;
1620		NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);
1621	} else {
1622		sc->sc_flags &= ~NFE_WOL;
1623		NFE_WRITE(sc, NFE_WOL_CTL, 0);
1624	}
1625
1626	return 0;
1627}
1628#endif
1629