/*-
 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Atheros AR813x/AR815x PCIe Ethernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/alc/if_alc.c 217379 2011-01-13 21:49:14Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/alc/if_alcreg.h>
#include <dev/alc/if_alcvar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
#undef ALC_USE_CUSTOM_CSUM

#ifdef ALC_USE_CUSTOM_CSUM
#define	ALC_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
#else
#define	ALC_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#endif

MODULE_DEPEND(alc, pci, 1, 1, 1);
MODULE_DEPEND(alc, ether, 1, 1, 1);
MODULE_DEPEND(alc, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.alc.msi_disable", &msi_disable);
TUNABLE_INT("hw.alc.msix_disable", &msix_disable);
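/*
 * Example (a usage sketch): either knob can be set from /boot/loader.conf
 * before the driver loads, e.g. hw.alc.msi_disable="1" to force legacy
 * INTx interrupts; both default to 0 so MSI-X/MSI are tried first.
 */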

/*
 * Devices supported by this driver.
 */
static struct alc_ident alc_ident_table[] = {
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024,
		"Atheros AR8131 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024,
		"Atheros AR8132 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024,
		"Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024,
		"Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024,
		"Atheros AR8152 v1.1 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024,
		"Atheros AR8152 v2.0 PCIe Fast Ethernet" },
	{ 0, 0, 0, NULL}
};

static void	alc_aspm(struct alc_softc *, int);
static int	alc_attach(device_t);
static int	alc_check_boundary(struct alc_softc *);
static int	alc_detach(device_t);
static void	alc_disable_l0s_l1(struct alc_softc *);
static int	alc_dma_alloc(struct alc_softc *);
static void	alc_dma_free(struct alc_softc *);
static void	alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	alc_encap(struct alc_softc *, struct mbuf **);
static struct alc_ident *
		alc_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		alc_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static void	alc_get_macaddr(struct alc_softc *);
static void	alc_init(void *);
static void	alc_init_cmb(struct alc_softc *);
static void	alc_init_locked(struct alc_softc *);
static void	alc_init_rr_ring(struct alc_softc *);
static int	alc_init_rx_ring(struct alc_softc *);
static void	alc_init_smb(struct alc_softc *);
static void	alc_init_tx_ring(struct alc_softc *);
static void	alc_int_task(void *, int);
static int	alc_intr(void *);
static int	alc_ioctl(struct ifnet *, u_long, caddr_t);
static void	alc_mac_config(struct alc_softc *);
static int	alc_miibus_readreg(device_t, int, int);
static void	alc_miibus_statchg(device_t);
static int	alc_miibus_writereg(device_t, int, int, int);
static int	alc_mediachange(struct ifnet *);
static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);
static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
static void	alc_phy_down(struct alc_softc *);
static void	alc_phy_reset(struct alc_softc *);
static int	alc_probe(device_t);
static void	alc_reset(struct alc_softc *);
static int	alc_resume(device_t);
static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
static int	alc_rxintr(struct alc_softc *, int);
static void	alc_rxfilter(struct alc_softc *);
static void	alc_rxvlan(struct alc_softc *);
static void	alc_setlinkspeed(struct alc_softc *);
static void	alc_setwol(struct alc_softc *);
static int	alc_shutdown(device_t);
static void	alc_start(struct ifnet *);
static void	alc_start_locked(struct ifnet *);
static void	alc_start_queue(struct alc_softc *);
static void	alc_stats_clear(struct alc_softc *);
static void	alc_stats_update(struct alc_softc *);
static void	alc_stop(struct alc_softc *);
static void	alc_stop_mac(struct alc_softc *);
static void	alc_stop_queue(struct alc_softc *);
static int	alc_suspend(device_t);
static void	alc_sysctl_node(struct alc_softc *);
static void	alc_tick(void *);
static void	alc_txeof(struct alc_softc *);
static void	alc_watchdog(struct alc_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t alc_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		alc_probe),
	DEVMETHOD(device_attach,	alc_attach),
	DEVMETHOD(device_detach,	alc_detach),
	DEVMETHOD(device_shutdown,	alc_shutdown),
	DEVMETHOD(device_suspend,	alc_suspend),
	DEVMETHOD(device_resume,	alc_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	alc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	alc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	alc_miibus_statchg),

	{ NULL, NULL }
};

static driver_t alc_driver = {
	"alc",
	alc_methods,
	sizeof(struct alc_softc)
};

static devclass_t alc_devclass;

DRIVER_MODULE(alc, pci, alc_driver, alc_devclass, 0, 0);
DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec alc_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec alc_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec alc_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec alc_irq_spec_msix[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };
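/*
 * alc_dma_burst[] translates the 3-bit Max_Payload_Size /
 * Max_Read_Request_Size encodings read from the PCIe device control
 * register in alc_attach() into byte counts (128 << encoding); the
 * trailing 0 corresponds to a reserved encoding.
 */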

static int
alc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct alc_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	/*
	 * For the AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4).  Even though the AR8132 uses the same
	 * model/revision number as the F1 gigabit PHY, the PHY has no
	 * ability to establish a 1000baseT link.
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
	    reg == MII_EXTSR)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
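	/*
	 * Busy-wait until the MDIO unit clears the EXECUTE/BUSY bits;
	 * at 5us per iteration the total timeout is ALC_PHY_TIMEOUT * 5us.
	 */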
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

static int
alc_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct alc_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
alc_miibus_statchg(device_t dev)
{
	struct alc_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t reg;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->alc_miibus);
	ifp = sc->alc_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	alc_stop_queue(sc);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
		alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
	}
}

static void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	ALC_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		ALC_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->alc_miibus);

	mii_pollstat(mii);
	ALC_UNLOCK(sc);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static int
alc_mediachange(struct ifnet *ifp)
{
	struct alc_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	ALC_LOCK(sc);
	mii = device_get_softc(sc->alc_miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	ALC_UNLOCK(sc);

	return (error);
}

static struct alc_ident *
alc_find_ident(device_t dev)
{
	struct alc_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = alc_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
alc_probe(device_t dev)
{
	struct alc_ident *ident;

	ident = alc_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
alc_get_macaddr(struct alc_softc *sc)
{
	uint32_t ea[2], opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * An EEPROM was found; let TWSI reload the EEPROM
		 * configuration.  This will set the ethernet address of
		 * the controller.
		 */
		eeprom++;
		switch (sc->alc_ident->deviceid) {
		case DEVICEID_ATHEROS_AR8131:
		case DEVICEID_ATHEROS_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case DEVICEID_ATHEROS_AR8151:
		case DEVICEID_ATHEROS_AR8151_V2:
		case DEVICEID_ATHEROS_AR8152_B:
		case DEVICEID_ATHEROS_AR8152_B2:
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->alc_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		if (bootverbose)
			device_printf(sc->alc_dev, "EEPROM not found!\n");
	}
	if (eeprom != 0) {
		switch (sc->alc_ident->deviceid) {
		case DEVICEID_ATHEROS_AR8131:
		case DEVICEID_ATHEROS_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case DEVICEID_ATHEROS_AR8151:
		case DEVICEID_ATHEROS_AR8151_V2:
		case DEVICEID_ATHEROS_AR8152_B:
		case DEVICEID_ATHEROS_AR8152_B2:
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

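	/*
	 * The station address lives in PAR0/PAR1: the low 16 bits of
	 * PAR1 hold the first two octets and PAR0 holds the remaining
	 * four, most significant octet first.
	 */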
	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	/* Another magic from vendor. */
	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
	    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
	    PM_CFG_SERDES_PD_EX_L1);
	pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_L1_ENB;
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

static void
alc_phy_reset(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* DSP fixup, Vendor magic. */
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x000A);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x003B);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xFFF7);
		DELAY(20 * 1000);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);
}

static void
alc_phy_down(struct alc_softc *sc)
{

	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When the driver was reloaded after GPHY power down,
		 * accesses to PHY/MAC registers hung the system and only
		 * a cold boot recovered from it.  I'm not sure whether
		 * AR8151 v1.0 also requires this workaround, as I don't
		 * have an AR8151 v1.0 controller in hand.
		 * The only option left is to isolate the PHY and initiate
		 * a PHY power down, which in turn saves more power when
		 * the driver is unloaded.
		 */
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG,
		    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}

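/*
 * Reprogram ASPM (L0s/L1) and SerDes power saving according to the
 * current link state and media speed; the register values below follow
 * the vendor driver.
 */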
static void
alc_aspm(struct alc_softc *sc, int media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	ALC_LOCK_ASSERT(sc);

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
		linkcfg = CSR_READ_2(sc, sc->alc_expcap +
		    PCIR_EXPRESS_LINK_CTL);
	else
		linkcfg = 0;
	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= PM_CFG_SERDES_ENB | PM_CFG_RBER_ENB;
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except AR8152 B v1.0 */
		linkcfg &= ~0x80;
		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= 0x80;
		CSR_WRITE_2(sc, sc->alc_expcap + PCIR_EXPRESS_LINK_CTL,
		    linkcfg);
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			if (sc->alc_ident->deviceid ==
			    DEVICEID_ATHEROS_AR8152_B)
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->alc_ident->deviceid) {
				case DEVICEID_ATHEROS_AR8152_B:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case DEVICEID_ATHEROS_AR8152_B2:
				case DEVICEID_ATHEROS_AR8151_V2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

static int
alc_attach(device_t dev)
{
	struct alc_softc *sc;
	struct ifnet *ifp;
	char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint16_t burst;
	int base, error, i, msic, msixc, state;
	uint32_t cap, ctl, val;

	error = 0;
	sc = device_get_softc(dev);
	sc->alc_dev = dev;

	mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
	TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
	sc->alc_ident = alc_find_ident(dev);

	/* Map the device. */
	pci_enable_busmaster(dev);
	sc->alc_res_spec = alc_res_spec_mem;
	sc->alc_irq_spec = alc_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Set PHY address. */
	sc->alc_phyaddr = ALC_PHY_ADDR;

	/* Initialize DMA parameters. */
	sc->alc_dma_rd_burst = 0;
	sc->alc_dma_wr_burst = 0;
	sc->alc_rcb = DMA_CFG_RCB_64;
	if (pci_find_extcap(dev, PCIY_EXPRESS, &base) == 0) {
		sc->alc_flags |= ALC_FLAG_PCIE;
		sc->alc_expcap = base;
		burst = CSR_READ_2(sc, base + PCIR_EXPRESS_DEVICE_CTL);
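		/*
		 * Bits 14:12 of the PCIe device control register encode
		 * Max_Read_Request_Size and bits 7:5 Max_Payload_Size;
		 * both decoded values index alc_dma_burst[] above
		 * (128 << encoding).
		 */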
		sc->alc_dma_rd_burst =
		    (burst & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12;
		sc->alc_dma_wr_burst = (burst & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5;
		if (bootverbose) {
			device_printf(dev, "Read request size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_rd_burst]);
			device_printf(dev, "TLP payload size : %u bytes.\n",
			    alc_dma_burst[sc->alc_dma_wr_burst]);
		}
		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
			sc->alc_dma_rd_burst = 3;
		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
			sc->alc_dma_wr_burst = 3;
		/* Clear data link and flow-control protocol error. */
		val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
		val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
		CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
		    PCIE_PHYMISC_FORCE_RCV_DET);
		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
			    PCIE_PHYMISC2_SERDES_TH_MASK);
			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
		}
		/* Disable ASPM L0S and L1. */
		cap = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CAP);
		if ((cap & PCIM_LINK_CAP_ASPM) != 0) {
			ctl = CSR_READ_2(sc, base + PCIR_EXPRESS_LINK_CTL);
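			/*
			 * In the PCIe link control register, bit 3 selects a
			 * 128 byte read completion boundary (RCB) and bits
			 * 1:0 form the ASPM control field (bit 0 = L0s,
			 * bit 1 = L1), which is what the 0x08 and 0x03
			 * magic below tests.
			 */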
			if ((ctl & 0x08) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (bootverbose)
				device_printf(dev, "RCB %u bytes\n",
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			state = ctl & 0x03;
			if (state & 0x01)
				sc->alc_flags |= ALC_FLAG_L0S;
			if (state & 0x02)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (bootverbose)
				device_printf(sc->alc_dev, "ASPM %s %s\n",
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		} else {
			if (bootverbose)
				device_printf(sc->alc_dev,
				    "no ASPM support\n");
		}
	}

	/* Reset PHY. */
	alc_phy_reset(sc);

	/* Reset the ethernet controller. */
	alc_reset(sc);

	/*
	 * One odd thing is that the AR8132 uses the same PHY hardware (F1
	 * gigabit PHY) as the AR8131.  So atphy(4) on the AR8132 reports
	 * that the PHY supports 1000Mbps, but that's not true.  The PHY
	 * used in the AR8132 can't establish a gigabit link even though it
	 * shows the same PHY model/revision number as the AR8131.
	 */
	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8152_B:
	case DEVICEID_ATHEROS_AR8152_B2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case DEVICEID_ATHEROS_AR8132:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_ASPM_MON | ALC_FLAG_JUMBO;

	/*
	 * It seems that the AR813x/AR815x has a silicon bug in the SMB.
	 * In addition, Atheros said that enabling the SMB wouldn't improve
	 * performance.  However, I think it's bad to access lots of
	 * registers to extract MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use the Tx CMB.  It is known to have a silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_rev = pci_get_revid(dev);
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->alc_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->alc_chip_rev);
	}
	device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n",
	    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
	    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}
	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc == ALC_MSIX_MESSAGES &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == ALC_MSIX_MESSAGES) {
				device_printf(dev,
				    "Using %d MSIX message(s).\n", msixc);
				sc->alc_flags |= ALC_FLAG_MSIX;
				sc->alc_irq_spec = alc_irq_spec_msix;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 &&
		    msic == ALC_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == ALC_MSI_MESSAGES) {
				device_printf(dev,
				    "Using %d MSI message(s).\n", msic);
				sc->alc_flags |= ALC_FLAG_MSI;
				sc->alc_irq_spec = alc_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->alc_irq_spec, sc->alc_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	/* Create device sysctl node. */
	alc_sysctl_node(sc);

	if ((error = alc_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	alc_get_macaddr(sc);

	ifp = sc->alc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_init = alc_init;
	ifp->if_snd.ifq_drv_maxlen = ALC_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	ifp->if_hwassist = ALC_CSUM_FEATURES | CSUM_TSO;
	if (pci_find_extcap(dev, PCIY_PMG, &base) == 0) {
		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
		sc->alc_flags |= ALC_FLAG_PM;
		sc->alc_pmcap = base;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_attach(dev, &sc->alc_miibus, ifp, alc_mediachange,
	    alc_mediastatus, BMSR_DEFCAPMASK, sc->alc_phyaddr, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->alc_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * XXX
	 * It seems that enabling Tx checksum offloading makes more trouble.
	 * Sometimes the controller does not receive any frames when
	 * Tx checksum offloading is enabled.  I'm not sure whether this
	 * is a bug in the Tx checksum offloading logic or I got broken
	 * sample boards.  To be safe, don't enable Tx checksum offloading
	 * by default, but give users a chance to toggle it if they know
	 * their controllers work without problems.
	 */
	ifp->if_capenable &= ~IFCAP_TXCSUM;
	ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
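	/*
	 * For example (a usage sketch, assuming unit 0), Tx checksum
	 * offloading can be re-enabled at runtime with "ifconfig alc0
	 * txcsum", which ends up in alc_ioctl()'s capability handling.
	 */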

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Create local taskq. */
	sc->alc_tq = taskqueue_create_fast("alc_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->alc_tq);
	if (sc->alc_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->alc_dev));

	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
		msic = ALC_MSIX_MESSAGES;
	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
		msic = ALC_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->alc_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, alc_intr, NULL, sc,
		    &sc->alc_intrhand[i]);
		if (error != 0)
			break;
	}
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->alc_tq);
		sc->alc_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		alc_detach(dev);

	return (error);
}

static int
alc_detach(device_t dev)
{
	struct alc_softc *sc;
	struct ifnet *ifp;
	int i, msic;

	sc = device_get_softc(dev);

	ifp = sc->alc_ifp;
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		ALC_LOCK(sc);
		alc_stop(sc);
		ALC_UNLOCK(sc);
		callout_drain(&sc->alc_tick_ch);
		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
	}

	if (sc->alc_tq != NULL) {
		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
		taskqueue_free(sc->alc_tq);
		sc->alc_tq = NULL;
	}

	if (sc->alc_miibus != NULL) {
		device_delete_child(dev, sc->alc_miibus);
		sc->alc_miibus = NULL;
	}
	bus_generic_detach(dev);
	alc_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->alc_ifp = NULL;
	}

	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
		msic = ALC_MSIX_MESSAGES;
	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
		msic = ALC_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		if (sc->alc_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->alc_irq[i],
			    sc->alc_intrhand[i]);
			sc->alc_intrhand[i] = NULL;
		}
	}
	if (sc->alc_res[0] != NULL)
		alc_phy_down(sc);
	bus_release_resources(dev, sc->alc_irq_spec, sc->alc_irq);
	if ((sc->alc_flags & (ALC_FLAG_MSI | ALC_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->alc_res_spec, sc->alc_res);
	mtx_destroy(&sc->alc_mtx);

	return (0);
}

#define	ALC_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ALC_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

static void
alc_sysctl_node(struct alc_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct alc_hw_stats *stats;
	int error;

	stats = &sc->alc_stats;
	ctx = device_get_sysctl_ctx(sc->alc_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0,
	    sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0,
	    sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation");
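	/*
	 * Both moderation timers are adjustable at runtime, e.g.
	 * "sysctl dev.alc.0.int_rx_mod=100" (assuming unit 0); the value
	 * is validated by the sysctl_hw_alc_int_mod() handler.
	 */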
	/* Pull in device tunables. */
	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod);
	if (error == 0) {
		if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN ||
		    sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) {
			device_printf(sc->alc_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    ALC_IM_RX_TIMER_DEFAULT);
			sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
		}
	}
	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod);
	if (error == 0) {
		if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN ||
		    sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) {
			device_printf(sc->alc_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    ALC_IM_TX_TIMER_DEFAULT);
			sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
		}
	}
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0,
	    sysctl_hw_alc_proc_limit, "I",
	    "max number of Rx events to process");
	/* Pull in device tunables. */
	sc->alc_process_limit = ALC_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->alc_dev),
	    device_get_unit(sc->alc_dev), "process_limit",
	    &sc->alc_process_limit);
	if (error == 0) {
		if (sc->alc_process_limit < ALC_PROC_MIN ||
		    sc->alc_process_limit > ALC_PROC_MAX) {
			device_printf(sc->alc_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", ALC_PROC_DEFAULT);
			sc->alc_process_limit = ALC_PROC_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "ALC statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->rx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_lenerrs, "Frames with length mismatched");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->rx_bcast_bytes, "Good broadcast octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->rx_mcast_bytes, "Good multicast octets");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Too short frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->rx_pkts_64, "64 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->rx_pkts_65_127, "65 to 127 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->rx_pkts_128_255, "128 to 255 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->rx_pkts_256_511, "256 to 511 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->rx_pkts_512_1023, "512 to 1023 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->rx_pkts_1519_max, "1519 to max frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->rx_pkts_truncated, "Truncated frames due to MTU size");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs",
	    &stats->rx_rrs_errs, "Return status write-back errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered",
	    &stats->rx_pkts_filtered,
	    "Frames dropped due to address filtering");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames",
	    &stats->tx_control_frames, "Control frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defer, "Frames with excessive deferrals");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_excess_defer, "Frames with deferrals");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets",
	    &stats->tx_bcast_bytes, "Good broadcast octets");
	ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets",
	    &stats->tx_mcast_bytes, "Good multicast octets");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64",
	    &stats->tx_pkts_64, "64 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127",
	    &stats->tx_pkts_65_127, "65 to 127 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255",
	    &stats->tx_pkts_128_255, "128 to 255 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511",
	    &stats->tx_pkts_256_511, "256 to 511 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023",
	    &stats->tx_pkts_512_1023, "512 to 1023 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518",
	    &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max",
	    &stats->tx_pkts_1519_max, "1519 to max frames");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls",
	    &stats->tx_excess_colls, "Excessive collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to excessive collisions");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underrun, "FIFO underruns");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns",
	    &stats->tx_desc_underrun, "Descriptor write-back errors");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->tx_lenerrs, "Frames with length mismatched");
	ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs",
	    &stats->tx_pkts_truncated, "Truncated frames due to MTU size");
}

#undef ALC_SYSCTL_STAT_ADD32
#undef ALC_SYSCTL_STAT_ADD64

struct alc_dmamap_arg {
	bus_addr_t	alc_busaddr;
};

static void
alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct alc_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct alc_dmamap_arg *)arg;
	ctx->alc_busaddr = segs[0].ds_addr;
}

/*
 * Normal and high Tx descriptors share a single Tx high address.
 * The four Rx descriptor/return rings and the CMB share the same Rx
 * high address.
 */
static int
alc_check_boundary(struct alc_softc *sc)
{
	bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end;

	rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ;
	rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ;
	cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ;
	tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ;

	/* 4GB boundary crossing is not allowed. */
	if ((ALC_ADDR_HI(rx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) ||
	    (ALC_ADDR_HI(rr_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) ||
	    (ALC_ADDR_HI(cmb_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) ||
	    (ALC_ADDR_HI(tx_ring_end) !=
	    ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr)))
		return (EFBIG);
	/*
	 * Make sure Rx return descriptor/Rx descriptor/CMB use
	 * the same high address.
	 */
	if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) ||
	    (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end)))
		return (EFBIG);

	return (0);
}

static int
alc_dma_alloc(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	bus_addr_t lowaddr;
	struct alc_dmamap_arg ctx;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;
again:
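	/*
	 * First try full 64-bit DMA addressing.  If the rings turn out
	 * to cross a 4GB boundary (checked via alc_check_boundary()
	 * after the loads below), everything is freed, lowaddr is
	 * clamped to 32 bits and the allocation restarts from this label.
	 */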
	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->alc_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_parent_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Rx return descriptor ring. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_RR_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_RR_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    ALC_RR_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for coalescing message block. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_CMB_SZ,			/* maxsize */
	    1,				/* nsegments */
	    ALC_CMB_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_cmb_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create CMB DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for status message block. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_parent_tag, /* parent */
	    ALC_SMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_SMB_SZ,			/* maxsize */
	    1,				/* nsegments */
	    ALC_SMB_SZ,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_smb_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create SMB DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag,
	    (void **)&sc->alc_rdata.alc_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_tx_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring,
	    ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag,
	    (void **)&sc->alc_rdata.alc_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_rx_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring,
	    ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx return ring. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag,
	    (void **)&sc->alc_rdata.alc_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_rr_ring_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring,
	    ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for CMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag,
	    (void **)&sc->alc_rdata.alc_cmb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_cmb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for CMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag,
	    sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb,
	    ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for CMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr;

	/* Allocate DMA'able memory and load the DMA map for SMB. */
	error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag,
	    (void **)&sc->alc_rdata.alc_smb,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->alc_cdata.alc_smb_map);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not allocate DMA'able memory for SMB.\n");
		goto fail;
	}
	ctx.alc_busaddr = 0;
	error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag,
	    sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb,
	    ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.alc_busaddr == 0) {
		device_printf(sc->alc_dev,
		    "could not load DMA'able memory for SMB.\n");
		goto fail;
	}
	sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr;

	/* Make sure we've not crossed 4GB boundary. */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = alc_check_boundary(sc)) != 0) {
		device_printf(sc->alc_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		alc_dma_free(sc);
		/*
		 * Limit max allowable DMA address space to 32bit
		 * and try again.
		 */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create the Tx buffer parent tag.
	 * AR813x/AR815x allows 64bit DMA addressing of Tx/Rx buffers,
	 * so it needs a separate parent DMA tag, as the parent DMA
	 * address space could have been restricted to the 32bit address
	 * space by a 4GB boundary crossing above.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->alc_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_buffer_tag);
	if (error != 0) {
		device_printf(sc->alc_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    ALC_TSO_MAXSIZE,		/* maxsize */
	    ALC_MAXTXSEGS,		/* nsegments */
	    ALC_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_tx_tag);
	if (error != 0) {
		device_printf(sc->alc_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->alc_cdata.alc_buffer_tag, /* parent */
	    ALC_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->alc_cdata.alc_rx_tag);
	if (error != 0) {
		device_printf(sc->alc_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}
	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->alc_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
	    &sc->alc_cdata.alc_rx_sparemap)) != 0) {
		device_printf(sc->alc_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->alc_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
alc_dma_free(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int i;

	/* Tx buffers. */
	if (sc->alc_cdata.alc_tx_tag != NULL) {
		for (i = 0; i < ALC_TX_RING_CNT; i++) {
			txd = &sc->alc_cdata.alc_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag);
		sc->alc_cdata.alc_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->alc_cdata.alc_rx_tag != NULL) {
		for (i = 0; i < ALC_RX_RING_CNT; i++) {
			rxd = &sc->alc_cdata.alc_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
				    rxd->rx_dmamap);
1712				rxd->rx_dmamap = NULL;
1713			}
1714		}
1715		if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1716			bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag,
1717			    sc->alc_cdata.alc_rx_sparemap);
1718			sc->alc_cdata.alc_rx_sparemap = NULL;
1719		}
1720		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag);
1721		sc->alc_cdata.alc_rx_tag = NULL;
1722	}
1723	/* Tx descriptor ring. */
1724	if (sc->alc_cdata.alc_tx_ring_tag != NULL) {
1725		if (sc->alc_cdata.alc_tx_ring_map != NULL)
1726			bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag,
1727			    sc->alc_cdata.alc_tx_ring_map);
1728		if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1729		    sc->alc_rdata.alc_tx_ring != NULL)
1730			bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag,
1731			    sc->alc_rdata.alc_tx_ring,
1732			    sc->alc_cdata.alc_tx_ring_map);
1733		sc->alc_rdata.alc_tx_ring = NULL;
1734		sc->alc_cdata.alc_tx_ring_map = NULL;
1735		bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag);
1736		sc->alc_cdata.alc_tx_ring_tag = NULL;
1737	}
1738	/* Rx ring. */
1739	if (sc->alc_cdata.alc_rx_ring_tag != NULL) {
1740		if (sc->alc_cdata.alc_rx_ring_map != NULL)
1741			bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag,
1742			    sc->alc_cdata.alc_rx_ring_map);
1743		if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1744		    sc->alc_rdata.alc_rx_ring != NULL)
1745			bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag,
1746			    sc->alc_rdata.alc_rx_ring,
1747			    sc->alc_cdata.alc_rx_ring_map);
1748		sc->alc_rdata.alc_rx_ring = NULL;
1749		sc->alc_cdata.alc_rx_ring_map = NULL;
1750		bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag);
1751		sc->alc_cdata.alc_rx_ring_tag = NULL;
1752	}
1753	/* Rx return ring. */
1754	if (sc->alc_cdata.alc_rr_ring_tag != NULL) {
1755		if (sc->alc_cdata.alc_rr_ring_map != NULL)
1756			bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag,
1757			    sc->alc_cdata.alc_rr_ring_map);
1758		if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1759		    sc->alc_rdata.alc_rr_ring != NULL)
1760			bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag,
1761			    sc->alc_rdata.alc_rr_ring,
1762			    sc->alc_cdata.alc_rr_ring_map);
1763		sc->alc_rdata.alc_rr_ring = NULL;
1764		sc->alc_cdata.alc_rr_ring_map = NULL;
1765		bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag);
1766		sc->alc_cdata.alc_rr_ring_tag = NULL;
1767	}
1768	/* CMB block */
1769	if (sc->alc_cdata.alc_cmb_tag != NULL) {
1770		if (sc->alc_cdata.alc_cmb_map != NULL)
1771			bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag,
1772			    sc->alc_cdata.alc_cmb_map);
1773		if (sc->alc_cdata.alc_cmb_map != NULL &&
1774		    sc->alc_rdata.alc_cmb != NULL)
1775			bus_dmamem_free(sc->alc_cdata.alc_cmb_tag,
1776			    sc->alc_rdata.alc_cmb,
1777			    sc->alc_cdata.alc_cmb_map);
1778		sc->alc_rdata.alc_cmb = NULL;
1779		sc->alc_cdata.alc_cmb_map = NULL;
1780		bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag);
1781		sc->alc_cdata.alc_cmb_tag = NULL;
1782	}
1783	/* SMB block */
1784	if (sc->alc_cdata.alc_smb_tag != NULL) {
1785		if (sc->alc_cdata.alc_smb_map != NULL)
1786			bus_dmamap_unload(sc->alc_cdata.alc_smb_tag,
1787			    sc->alc_cdata.alc_smb_map);
1788		if (sc->alc_cdata.alc_smb_map != NULL &&
1789		    sc->alc_rdata.alc_smb != NULL)
1790			bus_dmamem_free(sc->alc_cdata.alc_smb_tag,
1791			    sc->alc_rdata.alc_smb,
1792			    sc->alc_cdata.alc_smb_map);
1793		sc->alc_rdata.alc_smb = NULL;
1794		sc->alc_cdata.alc_smb_map = NULL;
1795		bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag);
1796		sc->alc_cdata.alc_smb_tag = NULL;
1797	}
1798	if (sc->alc_cdata.alc_buffer_tag != NULL) {
1799		bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag);
1800		sc->alc_cdata.alc_buffer_tag = NULL;
1801	}
1802	if (sc->alc_cdata.alc_parent_tag != NULL) {
1803		bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag);
1804		sc->alc_cdata.alc_parent_tag = NULL;
1805	}
1806}
1807
1808static int
1809alc_shutdown(device_t dev)
1810{
1811
1812	return (alc_suspend(dev));
1813}
1814
1815/*
1816 * Note: this driver resets the link speed to 10/100Mbps by
1817 * restarting auto-negotiation in the suspend/shutdown phase, but
1818 * we don't know whether that auto-negotiation will succeed, as
1819 * the driver has no control after the power off/suspend operation.
1820 * If the renegotiation fails, WOL may not work. Running at 1Gbps
1821 * draws more power than the 375mA at 3.3V specified in the PCI
1822 * specification, and that could result in power to the ethernet
1823 * controller being shut down completely.
1824 *
1825 * TODO
1826 * Save current negotiated media speed/duplex/flow-control to
1827 * softc and restore the same link again after resuming. PHY
1828 * handling such as power down/resetting to 100Mbps may be better
1829 * handled in suspend method in phy driver.
1830 */
1831static void
1832alc_setlinkspeed(struct alc_softc *sc)
1833{
1834	struct mii_data *mii;
1835	int aneg, i;
1836
1837	mii = device_get_softc(sc->alc_miibus);
1838	mii_pollstat(mii);
1839	aneg = 0;
1840	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
1841	    (IFM_ACTIVE | IFM_AVALID)) {
1842		switch (IFM_SUBTYPE(mii->mii_media_active)) {
1843		case IFM_10_T:
1844		case IFM_100_TX:
1845			return;
1846		case IFM_1000_T:
1847			aneg++;
1848			break;
1849		default:
1850			break;
1851		}
1852	}
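	/*
	 * Restart auto-negotiation advertising only 10/100Mbps modes;
	 * writing 0 to MII_100T2CR drops the 1000BASE-T advertisement.
	 */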
1853	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0);
1854	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
1855	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1856	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
1857	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
1858	DELAY(1000);
1859	if (aneg != 0) {
1860		/*
1861		 * Poll the link state until alc(4) gets a 10/100Mbps link.
1862		 */
1863		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1864			mii_pollstat(mii);
1865			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
1866			    == (IFM_ACTIVE | IFM_AVALID)) {
1867				switch (IFM_SUBTYPE(
1868				    mii->mii_media_active)) {
1869				case IFM_10_T:
1870				case IFM_100_TX:
1871					alc_mac_config(sc);
1872					return;
1873				default:
1874					break;
1875				}
1876			}
1877			ALC_UNLOCK(sc);
1878			pause("alclnk", hz);
1879			ALC_LOCK(sc);
1880		}
1881		if (i == MII_ANEGTICKS_GIGE)
1882			device_printf(sc->alc_dev,
1883			    "establishing a link failed, WOL may not work!");
1884	}
1885	/*
1886	 * No link, force the MAC to have a 100Mbps, full-duplex link.
1887	 * This is the last resort and may or may not work.
1888	 */
1889	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1890	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1891	alc_mac_config(sc);
1892}
1893
1894static void
1895alc_setwol(struct alc_softc *sc)
1896{
1897	struct ifnet *ifp;
1898	uint32_t reg, pmcs;
1899	uint16_t pmstat;
1900
1901	ALC_LOCK_ASSERT(sc);
1902
1903	alc_disable_l0s_l1(sc);
1904	ifp = sc->alc_ifp;
1905	if ((sc->alc_flags & ALC_FLAG_PM) == 0) {
1906		/* Disable WOL. */
1907		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
1908		reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
1909		reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1910		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
1911		/* Force PHY power down. */
1912		alc_phy_down(sc);
1913		CSR_WRITE_4(sc, ALC_MASTER_CFG,
1914		    CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
1915		return;
1916	}
1917
1918	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
1919		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
1920			alc_setlinkspeed(sc);
1921		CSR_WRITE_4(sc, ALC_MASTER_CFG,
1922		    CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS);
1923	}
1924
1925	pmcs = 0;
1926	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
1927		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
1928	CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs);
1929	reg = CSR_READ_4(sc, ALC_MAC_CFG);
1930	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI |
1931	    MAC_CFG_BCAST);
1932	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
1933		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
1934	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1935		reg |= MAC_CFG_RX_ENB;
1936	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
1937
1938	reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC);
1939	reg |= PCIE_PHYMISC_FORCE_RCV_DET;
1940	CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg);
1941	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1942		/* WOL disabled, PHY power down. */
1943		alc_phy_down(sc);
1944		CSR_WRITE_4(sc, ALC_MASTER_CFG,
1945		    CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
1946	}
1947	/* Request PME. */
1948	pmstat = pci_read_config(sc->alc_dev,
1949	    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
1950	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1951	if ((ifp->if_capenable & IFCAP_WOL) != 0)
1952		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1953	pci_write_config(sc->alc_dev,
1954	    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
1955}
1956
1957static int
1958alc_suspend(device_t dev)
1959{
1960	struct alc_softc *sc;
1961
1962	sc = device_get_softc(dev);
1963
1964	ALC_LOCK(sc);
1965	alc_stop(sc);
1966	alc_setwol(sc);
1967	ALC_UNLOCK(sc);
1968
1969	return (0);
1970}
1971
1972static int
1973alc_resume(device_t dev)
1974{
1975	struct alc_softc *sc;
1976	struct ifnet *ifp;
1977	uint16_t pmstat;
1978
1979	sc = device_get_softc(dev);
1980
1981	ALC_LOCK(sc);
1982	if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
1983		/* Disable PME and clear PME status. */
1984		pmstat = pci_read_config(sc->alc_dev,
1985		    sc->alc_pmcap + PCIR_POWER_STATUS, 2);
1986		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
1987			pmstat &= ~PCIM_PSTAT_PMEENABLE;
1988			pci_write_config(sc->alc_dev,
1989			    sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
1990		}
1991	}
1992	/* Reset PHY. */
1993	alc_phy_reset(sc);
1994	ifp = sc->alc_ifp;
1995	if ((ifp->if_flags & IFF_UP) != 0) {
1996		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1997		alc_init_locked(sc);
1998	}
1999	ALC_UNLOCK(sc);
2000
2001	return (0);
2002}
2003
2004static int
2005alc_encap(struct alc_softc *sc, struct mbuf **m_head)
2006{
2007	struct alc_txdesc *txd, *txd_last;
2008	struct tx_desc *desc;
2009	struct mbuf *m;
2010	struct ip *ip;
2011	struct tcphdr *tcp;
2012	bus_dma_segment_t txsegs[ALC_MAXTXSEGS];
2013	bus_dmamap_t map;
2014	uint32_t cflags, hdrlen, ip_off, poff, vtag;
2015	int error, idx, nsegs, prod;
2016
2017	ALC_LOCK_ASSERT(sc);
2018
2019	M_ASSERTPKTHDR((*m_head));
2020
2021	m = *m_head;
2022	ip = NULL;
2023	tcp = NULL;
2024	ip_off = poff = 0;
2025	if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) {
2026		/*
2027		 * AR813x/AR815x requires the offset of the TCP/UDP header
2028		 * in its Tx descriptor to perform Tx checksum offloading.
2029		 * TSO also requires the TCP header offset and modification
2030		 * of the IP/TCP headers. These operations take many CPU
2031		 * cycles on FreeBSD so a fast host CPU is required for
2032		 * smooth TSO performance.
2033		 */
2034		struct ether_header *eh;
2035
2036		if (M_WRITABLE(m) == 0) {
2037			/* Get a writable copy. */
2038			m = m_dup(*m_head, M_DONTWAIT);
2039			/* Release original mbufs. */
2040			m_freem(*m_head);
2041			if (m == NULL) {
2042				*m_head = NULL;
2043				return (ENOBUFS);
2044			}
2045			*m_head = m;
2046		}
2047
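		/*
		 * Pull the Ethernet/IP/TCP headers into the first mbuf so
		 * the header fields below can be read and updated via
		 * mtod() without crossing an mbuf boundary.
		 */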
2048		ip_off = sizeof(struct ether_header);
2049		m = m_pullup(m, ip_off);
2050		if (m == NULL) {
2051			*m_head = NULL;
2052			return (ENOBUFS);
2053		}
2054		eh = mtod(m, struct ether_header *);
2055		/*
2056		 * Check if hardware VLAN insertion is off.
2057		 * Additional check for LLC/SNAP frame?
2058		 */
2059		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2060			ip_off = sizeof(struct ether_vlan_header);
2061			m = m_pullup(m, ip_off);
2062			if (m == NULL) {
2063				*m_head = NULL;
2064				return (ENOBUFS);
2065			}
2066		}
2067		m = m_pullup(m, ip_off + sizeof(struct ip));
2068		if (m == NULL) {
2069			*m_head = NULL;
2070			return (ENOBUFS);
2071		}
2072		ip = (struct ip *)(mtod(m, char *) + ip_off);
2073		poff = ip_off + (ip->ip_hl << 2);
2074		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2075			m = m_pullup(m, poff + sizeof(struct tcphdr));
2076			if (m == NULL) {
2077				*m_head = NULL;
2078				return (ENOBUFS);
2079			}
2080			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
2081			m = m_pullup(m, poff + (tcp->th_off << 2));
2082			if (m == NULL) {
2083				*m_head = NULL;
2084				return (ENOBUFS);
2085			}
2086			/*
2087			 * Due to strict adherence to the Microsoft NDIS
2088			 * Large Send specification, the hardware expects
2089			 * a pseudo TCP checksum inserted by the upper
2090			 * stack. Unfortunately the pseudo TCP checksum
2091			 * that NDIS refers to does not include the TCP
2092			 * payload length, so the driver has to recompute
2093			 * the pseudo checksum here. Hopefully this isn't
2094			 * much of a burden on modern CPUs.
2095			 *
2096			 * Reset the IP checksum and recompute the TCP
2097			 * pseudo checksum as the NDIS specification says.
2098			 */
2099			ip = (struct ip *)(mtod(m, char *) + ip_off);
2100			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
2101			ip->ip_sum = 0;
2102			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
2103			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2104		}
2105		*m_head = m;
2106	}
2107
2108	prod = sc->alc_cdata.alc_tx_prod;
2109	txd = &sc->alc_cdata.alc_txdesc[prod];
2110	txd_last = txd;
2111	map = txd->tx_dmamap;
2112
2113	error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
2114	    *m_head, txsegs, &nsegs, 0);
2115	if (error == EFBIG) {
2116		m = m_collapse(*m_head, M_DONTWAIT, ALC_MAXTXSEGS);
2117		if (m == NULL) {
2118			m_freem(*m_head);
2119			*m_head = NULL;
2120			return (ENOMEM);
2121		}
2122		*m_head = m;
2123		error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
2124		    *m_head, txsegs, &nsegs, 0);
2125		if (error != 0) {
2126			m_freem(*m_head);
2127			*m_head = NULL;
2128			return (error);
2129		}
2130	} else if (error != 0)
2131		return (error);
2132	if (nsegs == 0) {
2133		m_freem(*m_head);
2134		*m_head = NULL;
2135		return (EIO);
2136	}
2137
2138	/* Check descriptor overrun. */
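	/*
	 * The "- 3" keeps a few descriptors in reserve, presumably so
	 * that the ring can never be filled completely.
	 */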
2139	if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
2140		bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map);
2141		return (ENOBUFS);
2142	}
2143	bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE);
2144
2145	m = *m_head;
2146	cflags = TD_ETHERNET;
2147	vtag = 0;
2148	desc = NULL;
2149	idx = 0;
2150	/* Configure VLAN hardware tag insertion. */
2151	if ((m->m_flags & M_VLANTAG) != 0) {
2152		vtag = htons(m->m_pkthdr.ether_vtag);
2153		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
2154		cflags |= TD_INS_VLAN_TAG;
2155	}
2156	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2157		/* Request TSO and set MSS. */
2158		cflags |= TD_TSO | TD_TSO_DESCV1;
2159		cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) &
2160		    TD_MSS_MASK;
2161		/* Set TCP header offset. */
2162		cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) &
2163		    TD_TCPHDR_OFFSET_MASK;
2164		/*
2165		 * AR813x/AR815x requires that the first buffer hold
2166		 * only the IP/TCP header data. The payload must be
2167		 * handled in the remaining descriptors.
2168		 */
2169		hdrlen = poff + (tcp->th_off << 2);
2170		desc = &sc->alc_rdata.alc_tx_ring[prod];
2171		desc->len = htole32(TX_BYTES(hdrlen | vtag));
2172		desc->flags = htole32(cflags);
2173		desc->addr = htole64(txsegs[0].ds_addr);
2174		sc->alc_cdata.alc_tx_cnt++;
2175		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
2176		if (m->m_len - hdrlen > 0) {
2177			/* Handle remaining payload of the first fragment. */
2178			desc = &sc->alc_rdata.alc_tx_ring[prod];
2179			desc->len = htole32(TX_BYTES((m->m_len - hdrlen) |
2180			    vtag));
2181			desc->flags = htole32(cflags);
2182			desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
2183			sc->alc_cdata.alc_tx_cnt++;
2184			ALC_DESC_INC(prod, ALC_TX_RING_CNT);
2185		}
2186		/* Handle remaining fragments. */
2187		idx = 1;
2188	} else if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
2189		/* Configure Tx checksum offload. */
2190#ifdef ALC_USE_CUSTOM_CSUM
2191		cflags |= TD_CUSTOM_CSUM;
2192		/* Set checksum start offset. */
2193		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
2194		    TD_PLOAD_OFFSET_MASK;
2195		/* Set checksum insertion position of TCP/UDP. */
2196		cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) <<
2197		    TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK;
2198#else
2199		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2200			cflags |= TD_IPCSUM;
2201		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2202			cflags |= TD_TCPCSUM;
2203		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2204			cflags |= TD_UDPCSUM;
2205		/* Set TCP/UDP header offset. */
2206		cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) &
2207		    TD_L4HDR_OFFSET_MASK;
2208#endif
2209	}
2210	for (; idx < nsegs; idx++) {
2211		desc = &sc->alc_rdata.alc_tx_ring[prod];
2212		desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag);
2213		desc->flags = htole32(cflags);
2214		desc->addr = htole64(txsegs[idx].ds_addr);
2215		sc->alc_cdata.alc_tx_cnt++;
2216		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
2217	}
2218	/* Update producer index. */
2219	sc->alc_cdata.alc_tx_prod = prod;
2220
2221	/* Finally set EOP on the last descriptor. */
2222	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
2223	desc = &sc->alc_rdata.alc_tx_ring[prod];
2224	desc->flags |= htole32(TD_EOP);
2225
2226	/* Swap dmamap of the first and the last. */
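	/*
	 * The mbuf pointer is stored at the EOP descriptor, so hand the
	 * loaded dmamap over to that slot as well; alc_txeof() unloads
	 * the map from the same descriptor where it frees the mbuf.
	 */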
2227	txd = &sc->alc_cdata.alc_txdesc[prod];
2228	map = txd_last->tx_dmamap;
2229	txd_last->tx_dmamap = txd->tx_dmamap;
2230	txd->tx_dmamap = map;
2231	txd->tx_m = m;
2232
2233	return (0);
2234}
2235
2236static void
2237alc_start(struct ifnet *ifp)
2238{
2239	struct alc_softc *sc;
2240
2241	sc = ifp->if_softc;
2242	ALC_LOCK(sc);
2243	alc_start_locked(ifp);
2244	ALC_UNLOCK(sc);
2245}
2246
2247static void
2248alc_start_locked(struct ifnet *ifp)
2249{
2250	struct alc_softc *sc;
2251	struct mbuf *m_head;
2252	int enq;
2253
2254	sc = ifp->if_softc;
2255
2256	ALC_LOCK_ASSERT(sc);
2257
2258	/* Reclaim transmitted frames. */
2259	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
2260		alc_txeof(sc);
2261
2262	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2263	    IFF_DRV_RUNNING || (sc->alc_flags & ALC_FLAG_LINK) == 0)
2264		return;
2265
2266	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
2267		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2268		if (m_head == NULL)
2269			break;
2270		/*
2271		 * Pack the data into the transmit ring. If we
2272		 * don't have room, set the OACTIVE flag and wait
2273		 * for the NIC to drain the ring.
2274		 */
2275		if (alc_encap(sc, &m_head)) {
2276			if (m_head == NULL)
2277				break;
2278			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2279			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2280			break;
2281		}
2282
2283		enq++;
2284		/*
2285		 * If there's a BPF listener, bounce a copy of this frame
2286		 * to him.
2287		 */
2288		ETHER_BPF_MTAP(ifp, m_head);
2289	}
2290
2291	if (enq > 0) {
2292		/* Sync descriptors. */
2293		bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2294		    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
2295		/* Kick. Assume we're using normal Tx priority queue. */
2296		CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
2297		    (sc->alc_cdata.alc_tx_prod <<
2298		    MBOX_TD_PROD_LO_IDX_SHIFT) &
2299		    MBOX_TD_PROD_LO_IDX_MASK);
2300		/* Set a timeout in case the chip goes out to lunch. */
2301		sc->alc_watchdog_timer = ALC_TX_TIMEOUT;
2302	}
2303}
2304
2305static void
2306alc_watchdog(struct alc_softc *sc)
2307{
2308	struct ifnet *ifp;
2309
2310	ALC_LOCK_ASSERT(sc);
2311
2312	if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer)
2313		return;
2314
2315	ifp = sc->alc_ifp;
2316	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
2317		if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n");
2318		ifp->if_oerrors++;
2319		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2320		alc_init_locked(sc);
2321		return;
2322	}
2323	if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n");
2324	ifp->if_oerrors++;
2325	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2326	alc_init_locked(sc);
2327	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2328		alc_start_locked(ifp);
2329}
2330
2331static int
2332alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2333{
2334	struct alc_softc *sc;
2335	struct ifreq *ifr;
2336	struct mii_data *mii;
2337	int error, mask;
2338
2339	sc = ifp->if_softc;
2340	ifr = (struct ifreq *)data;
2341	error = 0;
2342	switch (cmd) {
2343	case SIOCSIFMTU:
2344		if (ifr->ifr_mtu < ETHERMIN ||
2345		    ifr->ifr_mtu > (sc->alc_ident->max_framelen -
2346		    sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) ||
2347		    ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 &&
2348		    ifr->ifr_mtu > ETHERMTU))
2349			error = EINVAL;
2350		else if (ifp->if_mtu != ifr->ifr_mtu) {
2351			ALC_LOCK(sc);
2352			ifp->if_mtu = ifr->ifr_mtu;
2353			/* AR813x/AR815x has a 13-bit MSS field. */
2354			if (ifp->if_mtu > ALC_TSO_MTU &&
2355			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
2356				ifp->if_capenable &= ~IFCAP_TSO4;
2357				ifp->if_hwassist &= ~CSUM_TSO;
2358				VLAN_CAPABILITIES(ifp);
2359			}
2360			ALC_UNLOCK(sc);
2361		}
2362		break;
2363	case SIOCSIFFLAGS:
2364		ALC_LOCK(sc);
2365		if ((ifp->if_flags & IFF_UP) != 0) {
2366			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2367			    ((ifp->if_flags ^ sc->alc_if_flags) &
2368			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2369				alc_rxfilter(sc);
2370			else
2371				alc_init_locked(sc);
2372		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2373			alc_stop(sc);
2374		sc->alc_if_flags = ifp->if_flags;
2375		ALC_UNLOCK(sc);
2376		break;
2377	case SIOCADDMULTI:
2378	case SIOCDELMULTI:
2379		ALC_LOCK(sc);
2380		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2381			alc_rxfilter(sc);
2382		ALC_UNLOCK(sc);
2383		break;
2384	case SIOCSIFMEDIA:
2385	case SIOCGIFMEDIA:
2386		mii = device_get_softc(sc->alc_miibus);
2387		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2388		break;
2389	case SIOCSIFCAP:
2390		ALC_LOCK(sc);
2391		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2392		if ((mask & IFCAP_TXCSUM) != 0 &&
2393		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2394			ifp->if_capenable ^= IFCAP_TXCSUM;
2395			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2396				ifp->if_hwassist |= ALC_CSUM_FEATURES;
2397			else
2398				ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
2399		}
2400		if ((mask & IFCAP_TSO4) != 0 &&
2401		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
2402			ifp->if_capenable ^= IFCAP_TSO4;
2403			if ((ifp->if_capenable & IFCAP_TSO4) != 0) {
2404				/* AR813x/AR815x has a 13-bit MSS field. */
2405				if (ifp->if_mtu > ALC_TSO_MTU) {
2406					ifp->if_capenable &= ~IFCAP_TSO4;
2407					ifp->if_hwassist &= ~CSUM_TSO;
2408				} else
2409					ifp->if_hwassist |= CSUM_TSO;
2410			} else
2411				ifp->if_hwassist &= ~CSUM_TSO;
2412		}
2413		if ((mask & IFCAP_WOL_MCAST) != 0 &&
2414		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
2415			ifp->if_capenable ^= IFCAP_WOL_MCAST;
2416		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2417		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
2418			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2419		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2420		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
2421			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2422			alc_rxvlan(sc);
2423		}
2424		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2425		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2426			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2427		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2428		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
2429			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2430		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
2431			ifp->if_capenable &=
2432			    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
2433		ALC_UNLOCK(sc);
2434		VLAN_CAPABILITIES(ifp);
2435		break;
2436	default:
2437		error = ether_ioctl(ifp, cmd, data);
2438		break;
2439	}
2440
2441	return (error);
2442}
2443
2444static void
2445alc_mac_config(struct alc_softc *sc)
2446{
2447	struct mii_data *mii;
2448	uint32_t reg;
2449
2450	ALC_LOCK_ASSERT(sc);
2451
2452	mii = device_get_softc(sc->alc_miibus);
2453	reg = CSR_READ_4(sc, ALC_MAC_CFG);
2454	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2455	    MAC_CFG_SPEED_MASK);
2456	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
2457	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
2458	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2)
2459		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
2460	/* Reprogram MAC with resolved speed/duplex. */
2461	switch (IFM_SUBTYPE(mii->mii_media_active)) {
2462	case IFM_10_T:
2463	case IFM_100_TX:
2464		reg |= MAC_CFG_SPEED_10_100;
2465		break;
2466	case IFM_1000_T:
2467		reg |= MAC_CFG_SPEED_1000;
2468		break;
2469	}
2470	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2471		reg |= MAC_CFG_FULL_DUPLEX;
2472		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2473			reg |= MAC_CFG_TX_FC;
2474		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2475			reg |= MAC_CFG_RX_FC;
2476	}
2477	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2478}
2479
2480static void
2481alc_stats_clear(struct alc_softc *sc)
2482{
2483	struct smb sb, *smb;
2484	uint32_t *reg;
2485	int i;
2486
2487	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2488		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2489		    sc->alc_cdata.alc_smb_map,
2490		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2491		smb = sc->alc_rdata.alc_smb;
2492		/* Update done, clear. */
2493		smb->updated = 0;
2494		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2495		    sc->alc_cdata.alc_smb_map,
2496		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2497	} else {
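		/*
		 * The SMB DMA block is broken on these controllers, so
		 * read the MIB registers directly; the counters appear
		 * to be clear-on-read, which is all we need here.
		 */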
2498		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2499		    reg++) {
2500			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2501			i += sizeof(uint32_t);
2502		}
2503		/* Read Tx statistics. */
2504		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2505		    reg++) {
2506			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2507			i += sizeof(uint32_t);
2508		}
2509	}
2510}
2511
2512static void
2513alc_stats_update(struct alc_softc *sc)
2514{
2515	struct alc_hw_stats *stat;
2516	struct smb sb, *smb;
2517	struct ifnet *ifp;
2518	uint32_t *reg;
2519	int i;
2520
2521	ALC_LOCK_ASSERT(sc);
2522
2523	ifp = sc->alc_ifp;
2524	stat = &sc->alc_stats;
2525	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2526		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2527		    sc->alc_cdata.alc_smb_map,
2528		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2529		smb = sc->alc_rdata.alc_smb;
2530		if (smb->updated == 0)
2531			return;
2532	} else {
2533		smb = &sb;
2534		/* Read Rx statistics. */
2535		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2536		    reg++) {
2537			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2538			i += sizeof(uint32_t);
2539		}
2540		/* Read Tx statistics. */
2541		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2542		    reg++) {
2543			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2544			i += sizeof(uint32_t);
2545		}
2546	}
2547
2548	/* Rx stats. */
2549	stat->rx_frames += smb->rx_frames;
2550	stat->rx_bcast_frames += smb->rx_bcast_frames;
2551	stat->rx_mcast_frames += smb->rx_mcast_frames;
2552	stat->rx_pause_frames += smb->rx_pause_frames;
2553	stat->rx_control_frames += smb->rx_control_frames;
2554	stat->rx_crcerrs += smb->rx_crcerrs;
2555	stat->rx_lenerrs += smb->rx_lenerrs;
2556	stat->rx_bytes += smb->rx_bytes;
2557	stat->rx_runts += smb->rx_runts;
2558	stat->rx_fragments += smb->rx_fragments;
2559	stat->rx_pkts_64 += smb->rx_pkts_64;
2560	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2561	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2562	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2563	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2564	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2565	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2566	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2567	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2568	stat->rx_rrs_errs += smb->rx_rrs_errs;
2569	stat->rx_alignerrs += smb->rx_alignerrs;
2570	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2571	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2572	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2573
2574	/* Tx stats. */
2575	stat->tx_frames += smb->tx_frames;
2576	stat->tx_bcast_frames += smb->tx_bcast_frames;
2577	stat->tx_mcast_frames += smb->tx_mcast_frames;
2578	stat->tx_pause_frames += smb->tx_pause_frames;
2579	stat->tx_excess_defer += smb->tx_excess_defer;
2580	stat->tx_control_frames += smb->tx_control_frames;
2581	stat->tx_deferred += smb->tx_deferred;
2582	stat->tx_bytes += smb->tx_bytes;
2583	stat->tx_pkts_64 += smb->tx_pkts_64;
2584	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2585	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2586	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2587	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2588	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2589	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2590	stat->tx_single_colls += smb->tx_single_colls;
2591	stat->tx_multi_colls += smb->tx_multi_colls;
2592	stat->tx_late_colls += smb->tx_late_colls;
2593	stat->tx_excess_colls += smb->tx_excess_colls;
2594	stat->tx_abort += smb->tx_abort;
2595	stat->tx_underrun += smb->tx_underrun;
2596	stat->tx_desc_underrun += smb->tx_desc_underrun;
2597	stat->tx_lenerrs += smb->tx_lenerrs;
2598	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2599	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2600	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2601
2602	/* Update counters in ifnet. */
2603	ifp->if_opackets += smb->tx_frames;
2604
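	/*
	 * Collisions are estimated: frames with multiple collisions are
	 * counted at least twice and each aborted frame is assumed to
	 * have hit the default retry limit.
	 */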
2605	ifp->if_collisions += smb->tx_single_colls +
2606	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
2607	    smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;
2608
2609	/*
2610	 * XXX
2611	 * The tx_pkts_truncated counter looks suspicious. It constantly
2612	 * increments with no sign of Tx errors. This may indicate that
2613	 * the counter name is not the correct one, so I've removed the
2614	 * counter from the output errors.
2615	 */
2616	ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
2617	    smb->tx_underrun;
2618
2619	ifp->if_ipackets += smb->rx_frames;
2620
2621	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2622	    smb->rx_runts + smb->rx_pkts_truncated +
2623	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
2624	    smb->rx_alignerrs;
2625
2626	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2627		/* Update done, clear. */
2628		smb->updated = 0;
2629		bus_dmamap_sync(sc->alc_cdata.alc_smb_tag,
2630		    sc->alc_cdata.alc_smb_map,
2631		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2632	}
2633}
2634
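/*
 * Interrupt filter routine. It runs in primary interrupt context, so
 * it only masks further interrupts and defers the real work to
 * taskqueue context in alc_int_task().
 */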
2635static int
2636alc_intr(void *arg)
2637{
2638	struct alc_softc *sc;
2639	uint32_t status;
2640
2641	sc = (struct alc_softc *)arg;
2642
2643	status = CSR_READ_4(sc, ALC_INTR_STATUS);
2644	if ((status & ALC_INTRS) == 0)
2645		return (FILTER_STRAY);
2646	/* Disable interrupts. */
2647	CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT);
2648	taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
2649
2650	return (FILTER_HANDLED);
2651}
2652
2653static void
2654alc_int_task(void *arg, int pending)
2655{
2656	struct alc_softc *sc;
2657	struct ifnet *ifp;
2658	uint32_t status;
2659	int more;
2660
2661	sc = (struct alc_softc *)arg;
2662	ifp = sc->alc_ifp;
2663
2664	status = CSR_READ_4(sc, ALC_INTR_STATUS);
2665	ALC_LOCK(sc);
2666	if (sc->alc_morework != 0) {
2667		sc->alc_morework = 0;
2668		status |= INTR_RX_PKT;
2669	}
2670	if ((status & ALC_INTRS) == 0)
2671		goto done;
2672
2673	/* Acknowledge interrupts but still disable interrupts. */
2674	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
2675
2676	more = 0;
2677	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2678		if ((status & INTR_RX_PKT) != 0) {
2679			more = alc_rxintr(sc, sc->alc_process_limit);
2680			if (more == EAGAIN)
2681				sc->alc_morework = 1;
2682			else if (more == EIO) {
2683				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2684				alc_init_locked(sc);
2685				ALC_UNLOCK(sc);
2686				return;
2687			}
2688		}
2689		if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
2690		    INTR_TXQ_TO_RST)) != 0) {
2691			if ((status & INTR_DMA_RD_TO_RST) != 0)
2692				device_printf(sc->alc_dev,
2693				    "DMA read error! -- resetting\n");
2694			if ((status & INTR_DMA_WR_TO_RST) != 0)
2695				device_printf(sc->alc_dev,
2696				    "DMA write error! -- resetting\n");
2697			if ((status & INTR_TXQ_TO_RST) != 0)
2698				device_printf(sc->alc_dev,
2699				    "TxQ reset! -- resetting\n");
2700			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2701			alc_init_locked(sc);
2702			ALC_UNLOCK(sc);
2703			return;
2704		}
2705		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
2706		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2707			alc_start_locked(ifp);
2708	}
2709
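	/*
	 * If the Rx processing limit was hit, or new events were raised
	 * while we were working, reschedule the task rather than looping
	 * here with interrupts still disabled.
	 */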
2710	if (more == EAGAIN ||
2711	    (CSR_READ_4(sc, ALC_INTR_STATUS) & ALC_INTRS) != 0) {
2712		ALC_UNLOCK(sc);
2713		taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task);
2714		return;
2715	}
2716
2717done:
2718	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2719		/* Re-enable interrupts if we're running. */
2720		CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
2721	}
2722	ALC_UNLOCK(sc);
2723}
2724
2725static void
2726alc_txeof(struct alc_softc *sc)
2727{
2728	struct ifnet *ifp;
2729	struct alc_txdesc *txd;
2730	uint32_t cons, prod;
2731	int prog;
2732
2733	ALC_LOCK_ASSERT(sc);
2734
2735	ifp = sc->alc_ifp;
2736
2737	if (sc->alc_cdata.alc_tx_cnt == 0)
2738		return;
2739	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
2740	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE);
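	/*
	 * Fetch the Tx consumer index either from the CMB, which the
	 * hardware updates via DMA, or from the mailbox register on
	 * controllers with the CMB bug.
	 */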
2741	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2742		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2743		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD);
2744		prod = sc->alc_rdata.alc_cmb->cons;
2745	} else
2746		prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2747	/* Assume we're using normal Tx priority queue. */
2748	prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2749	    MBOX_TD_CONS_LO_IDX_SHIFT;
2750	cons = sc->alc_cdata.alc_tx_cons;
2751	/*
2752	 * Go through our Tx list and free mbufs for those
2753	 * frames which have been transmitted.
2754	 */
2755	for (prog = 0; cons != prod; prog++,
2756	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
2757		if (sc->alc_cdata.alc_tx_cnt <= 0)
2758			break;
2759		prog++;
2760		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2761		sc->alc_cdata.alc_tx_cnt--;
2762		txd = &sc->alc_cdata.alc_txdesc[cons];
2763		if (txd->tx_m != NULL) {
2764			/* Reclaim transmitted mbufs. */
2765			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
2766			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2767			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
2768			    txd->tx_dmamap);
2769			m_freem(txd->tx_m);
2770			txd->tx_m = NULL;
2771		}
2772	}
2773
2774	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2775		bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag,
2776		    sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD);
2777	sc->alc_cdata.alc_tx_cons = cons;
2778	/*
2779	 * Unarm the watchdog timer only when there are no pending
2780	 * frames in the Tx queue.
2781	 */
2782	if (sc->alc_cdata.alc_tx_cnt == 0)
2783		sc->alc_watchdog_timer = 0;
2784}
2785
2786static int
2787alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
2788{
2789	struct mbuf *m;
2790	bus_dma_segment_t segs[1];
2791	bus_dmamap_t map;
2792	int nsegs;
2793
2794	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2795	if (m == NULL)
2796		return (ENOBUFS);
2797	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
2798#ifndef __NO_STRICT_ALIGNMENT
2799	m_adj(m, sizeof(uint64_t));
2800#endif
2801
2802	if (bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_rx_tag,
2803	    sc->alc_cdata.alc_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2804		m_freem(m);
2805		return (ENOBUFS);
2806	}
2807	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2808
2809	if (rxd->rx_m != NULL) {
2810		bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2811		    BUS_DMASYNC_POSTREAD);
2812		bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap);
2813	}
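	/*
	 * The load into the spare map succeeded, so swap it with the
	 * descriptor's map; a load failure therefore never leaves the
	 * ring entry without a mapped buffer.
	 */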
2814	map = rxd->rx_dmamap;
2815	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2816	sc->alc_cdata.alc_rx_sparemap = map;
2817	bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap,
2818	    BUS_DMASYNC_PREREAD);
2819	rxd->rx_m = m;
2820	rxd->rx_desc->addr = htole64(segs[0].ds_addr);
2821	return (0);
2822}
2823
2824static int
2825alc_rxintr(struct alc_softc *sc, int count)
2826{
2827	struct ifnet *ifp;
2828	struct rx_rdesc *rrd;
2829	uint32_t nsegs, status;
2830	int rr_cons, prog;
2831
2832	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2833	    sc->alc_cdata.alc_rr_ring_map,
2834	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2835	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2836	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE);
2837	rr_cons = sc->alc_cdata.alc_rr_cons;
2838	ifp = sc->alc_ifp;
2839	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;) {
2840		if (count-- <= 0)
2841			break;
2842		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2843		status = le32toh(rrd->status);
2844		if ((status & RRD_VALID) == 0)
2845			break;
2846		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
2847		if (nsegs == 0) {
2848			/* This should not happen! */
2849			device_printf(sc->alc_dev,
2850			    "unexpected segment count -- resetting\n");
2851			return (EIO);
2852		}
2853		alc_rxeof(sc, rrd);
2854		/* Clear Rx return status. */
2855		rrd->status = 0;
2856		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2857		sc->alc_cdata.alc_rx_cons += nsegs;
2858		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
2859		prog += nsegs;
2860	}
2861
2862	if (prog > 0) {
2863		/* Update the consumer index. */
2864		sc->alc_cdata.alc_rr_cons = rr_cons;
2865		/* Sync Rx return descriptors. */
2866		bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
2867		    sc->alc_cdata.alc_rr_ring_map,
2868		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2869		/*
2870		 * Sync updated Rx descriptors so that the controller
2871		 * sees the modified buffer addresses.
2872		 */
2873		bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
2874		    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
2875		/*
2876		 * Let the controller know about the availability of new
2877		 * Rx buffers. Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT
2878		 * descriptors, it may be possible to update
2879		 * ALC_MBOX_RD0_PROD_IDX only when Rx buffer pre-fetching
2880		 * is required. In addition we already set
2881		 * ALC_RX_RD_FREE_THRESH to RX_RD_FREE_THRESH_LO_DEFAULT
2882		 * descriptors. However it still seems that pre-fetching
2883		 * needs more experimentation.
2884		 */
2885		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
2886		    sc->alc_cdata.alc_rx_cons);
2887	}
2888
2889	return (count > 0 ? 0 : EAGAIN);
2890}
2891
2892#ifndef __NO_STRICT_ALIGNMENT
2893static struct mbuf *
2894alc_fixup_rx(struct ifnet *ifp, struct mbuf *m)
2895{
2896	struct mbuf *n;
2897	int i;
2898	uint16_t *src, *dst;
2899
2900	src = mtod(m, uint16_t *);
2901	dst = src - 3;
2902
2903	if (m->m_next == NULL) {
2904		for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2905			*dst++ = *src++;
2906		m->m_data -= 6;
2907		return (m);
2908	}
2909	/*
2910	 * Prepend a new mbuf to the received mbuf chain and copy the
2911	 * ethernet header out of the chain. This saves lots of CPU
2912	 * cycles for jumbo frames.
2913	 */
2914	MGETHDR(n, M_DONTWAIT, MT_DATA);
2915	if (n == NULL) {
2916		ifp->if_iqdrops++;
2917		m_freem(m);
2918		return (NULL);
2919	}
2920	bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
2921	m->m_data += ETHER_HDR_LEN;
2922	m->m_len -= ETHER_HDR_LEN;
2923	n->m_len = ETHER_HDR_LEN;
2924	M_MOVE_PKTHDR(n, m);
2925	n->m_next = m;
2926	return (n);
2927}
2928#endif
2929
2930/* Receive a frame. */
2931static void
2932alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
2933{
2934	struct alc_rxdesc *rxd;
2935	struct ifnet *ifp;
2936	struct mbuf *mp, *m;
2937	uint32_t rdinfo, status, vtag;
2938	int count, nsegs, rx_cons;
2939
2940	ifp = sc->alc_ifp;
2941	status = le32toh(rrd->status);
2942	rdinfo = le32toh(rrd->rdinfo);
2943	rx_cons = RRD_RD_IDX(rdinfo);
2944	nsegs = RRD_RD_CNT(rdinfo);
2945
2946	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2947	if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) {
2948		/*
2949		 * We want to pass the following frames to the upper
2950		 * layer regardless of the error status in the Rx
2951		 * return ring:
2952		 *
2953		 *  o IP/TCP/UDP checksum is bad.
2954		 *  o frame length and protocol specific length
2955		 *    do not match.
2956		 *
2957		 * Force the network stack to compute the checksum
2958		 * for errored frames.
2959		 */
2960		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
2961		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
2962		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
2963			return;
2964	}
2965
2966	for (count = 0; count < nsegs; count++,
2967	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
2968		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
2969		mp = rxd->rx_m;
2970		/* Add a new receive buffer to the ring. */
2971		if (alc_newbuf(sc, rxd) != 0) {
2972			ifp->if_iqdrops++;
2973			/* Reuse Rx buffers. */
2974			if (sc->alc_cdata.alc_rxhead != NULL)
2975				m_freem(sc->alc_cdata.alc_rxhead);
2976			break;
2977		}
2978
2979		/*
2980		 * Assume we've received a full-sized frame.
2981		 * The actual size is fixed up when we encounter the
2982		 * end of a multi-segment frame.
2983		 */
2984		mp->m_len = sc->alc_buf_size;
2985
2986		/* Chain received mbufs. */
2987		if (sc->alc_cdata.alc_rxhead == NULL) {
2988			sc->alc_cdata.alc_rxhead = mp;
2989			sc->alc_cdata.alc_rxtail = mp;
2990		} else {
2991			mp->m_flags &= ~M_PKTHDR;
2992			sc->alc_cdata.alc_rxprev_tail =
2993			    sc->alc_cdata.alc_rxtail;
2994			sc->alc_cdata.alc_rxtail->m_next = mp;
2995			sc->alc_cdata.alc_rxtail = mp;
2996		}
2997
2998		if (count == nsegs - 1) {
2999			/* Last desc. for this frame. */
3000			m = sc->alc_cdata.alc_rxhead;
3001			m->m_flags |= M_PKTHDR;
3002			/*
3003			 * The L1C/L2C controller seems to have no way to
3004			 * strip the CRC bytes in hardware.
3005			 */
3006			m->m_pkthdr.len =
3007			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
3008			if (nsegs > 1) {
3009				/* Set last mbuf size. */
3010				mp->m_len = sc->alc_cdata.alc_rxlen -
3011				    (nsegs - 1) * sc->alc_buf_size;
3012				/* Remove the CRC bytes in chained mbufs. */
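				/*
				 * If the last mbuf holds nothing but
				 * (part of) the CRC, drop it entirely
				 * and trim the rest of the CRC from
				 * the previous mbuf.
				 */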
3013				if (mp->m_len <= ETHER_CRC_LEN) {
3014					sc->alc_cdata.alc_rxtail =
3015					    sc->alc_cdata.alc_rxprev_tail;
3016					sc->alc_cdata.alc_rxtail->m_len -=
3017					    (ETHER_CRC_LEN - mp->m_len);
3018					sc->alc_cdata.alc_rxtail->m_next = NULL;
3019					m_freem(mp);
3020				} else {
3021					mp->m_len -= ETHER_CRC_LEN;
3022				}
3023			} else
3024				m->m_len = m->m_pkthdr.len;
3025			m->m_pkthdr.rcvif = ifp;
3026			/*
3027			 * Due to hardware bugs, Rx checksum offloading
3028			 * was intentionally disabled.
3029			 */
3030			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
3031			    (status & RRD_VLAN_TAG) != 0) {
3032				vtag = RRD_VLAN(le32toh(rrd->vtag));
3033				m->m_pkthdr.ether_vtag = ntohs(vtag);
3034				m->m_flags |= M_VLANTAG;
3035			}
3036#ifndef __NO_STRICT_ALIGNMENT
3037			m = alc_fixup_rx(ifp, m);
3038			if (m != NULL)
3039#endif
3040			{
3041			/* Pass it on. */
3042			ALC_UNLOCK(sc);
3043			(*ifp->if_input)(ifp, m);
3044			ALC_LOCK(sc);
3045			}
3046		}
3047	}
3048	/* Reset mbuf chains. */
3049	ALC_RXCHAIN_RESET(sc);
3050}
3051
3052static void
3053alc_tick(void *arg)
3054{
3055	struct alc_softc *sc;
3056	struct mii_data *mii;
3057
3058	sc = (struct alc_softc *)arg;
3059
3060	ALC_LOCK_ASSERT(sc);
3061
3062	mii = device_get_softc(sc->alc_miibus);
3063	mii_tick(mii);
3064	alc_stats_update(sc);
3065	/*
3066	 * alc(4) does not rely on Tx completion interrupts to reclaim
3067	 * transferred buffers. Instead Tx completion interrupts are used
3068	 * as hints for scheduling the Tx task. So it's necessary to
3069	 * release transmitted buffers by kicking the Tx completion handler
3070	 * here. This limits the maximum reclamation delay to one hz tick.
3071	 */
3072	alc_txeof(sc);
3073	alc_watchdog(sc);
3074	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);
3075}
3076
3077static void
3078alc_reset(struct alc_softc *sc)
3079{
3080	uint32_t reg;
3081	int i;
3082
3083	reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF;
3084	reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
3085	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
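	/* MASTER_RESET is self-clearing; poll until the chip deasserts it. */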
3086	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
3087		DELAY(10);
3088		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
3089			break;
3090	}
3091	if (i == 0)
3092		device_printf(sc->alc_dev, "master reset timeout!\n");
3093
3094	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
3095		if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
3096			break;
3097		DELAY(10);
3098	}
3099
3100	if (i == 0)
3101		device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg);
3102}
3103
3104static void
3105alc_init(void *xsc)
3106{
3107	struct alc_softc *sc;
3108
3109	sc = (struct alc_softc *)xsc;
3110	ALC_LOCK(sc);
3111	alc_init_locked(sc);
3112	ALC_UNLOCK(sc);
3113}
3114
3115static void
3116alc_init_locked(struct alc_softc *sc)
3117{
3118	struct ifnet *ifp;
3119	struct mii_data *mii;
3120	uint8_t eaddr[ETHER_ADDR_LEN];
3121	bus_addr_t paddr;
3122	uint32_t reg, rxf_hi, rxf_lo;
3123
3124	ALC_LOCK_ASSERT(sc);
3125
3126	ifp = sc->alc_ifp;
3127	mii = device_get_softc(sc->alc_miibus);
3128
3129	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
3130		return;
3131	/*
3132	 * Cancel any pending I/O.
3133	 */
3134	alc_stop(sc);
3135	/*
3136	 * Reset the chip to a known state.
3137	 */
3138	alc_reset(sc);
3139
3140	/* Initialize Rx descriptors. */
3141	if (alc_init_rx_ring(sc) != 0) {
3142		device_printf(sc->alc_dev, "no memory for Rx buffers.\n");
3143		alc_stop(sc);
3144		return;
3145	}
3146	alc_init_rr_ring(sc);
3147	alc_init_tx_ring(sc);
3148	alc_init_cmb(sc);
3149	alc_init_smb(sc);
3150
3151	/* Reprogram the station address. */
3152	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3153	CSR_WRITE_4(sc, ALC_PAR0,
3154	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
3155	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
3156	/*
3157	 * Clear the WOL status and disable all WOL features, as WOL
3158	 * would interfere with Rx operation in normal environments.
3159	 */
3160	CSR_READ_4(sc, ALC_WOL_CFG);
3161	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
3162	/* Set Tx descriptor base addresses. */
3163	paddr = sc->alc_rdata.alc_tx_ring_paddr;
3164	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
3165	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
3166	/* We don't use high priority ring. */
3167	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
3168	/* Set Tx descriptor counter. */
3169	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
3170	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
3171	/* Set Rx descriptor base addresses. */
3172	paddr = sc->alc_rdata.alc_rx_ring_paddr;
3173	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
3174	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
3175	/* We use one Rx ring. */
3176	CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
3177	CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
3178	CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
3179	/* Set Rx descriptor counter. */
3180	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
3181	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
3182
3183	/*
3184	 * Let the hardware split jumbo frames that do not fit in a
3185	 * single buffer into buffer-sized chunks. The Rx return
3186	 * descriptor holds a counter that indicates how many fragments
3187	 * were made by the hardware. The buffer size should be a
3188	 * multiple of 8 bytes. Since the hardware limits the buffer
3189	 * size, always use the maximum value.
3190	 * For strict-alignment architectures make sure to reduce the
3191	 * buffer size by 8 bytes to make room for the alignment fixup.
3192	 */
3193#ifndef __NO_STRICT_ALIGNMENT
3194	sc->alc_buf_size = RX_BUF_SIZE_MAX - sizeof(uint64_t);
3195#else
3196	sc->alc_buf_size = RX_BUF_SIZE_MAX;
3197#endif
3198	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
3199
3200	paddr = sc->alc_rdata.alc_rr_ring_paddr;
3201	/* Set Rx return descriptor base addresses. */
3202	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
3203	/* We use one Rx return ring. */
3204	CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
3205	CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
3206	CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
3207	/* Set Rx return descriptor counter. */
3208	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
3209	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
3210	paddr = sc->alc_rdata.alc_cmb_paddr;
3211	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
3212	paddr = sc->alc_rdata.alc_smb_paddr;
3213	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
3214	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
3215
3216	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
3217		/* Reconfigure SRAM - Vendor magic. */
3218		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
3219		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
3220		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
3221		CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
3222		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
3223		CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
3224		CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
3225		CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
3226	}
3227
3228	/* Tell hardware that we're ready to load DMA blocks. */
3229	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
3230
3231	/* Configure interrupt moderation timer. */
3232	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
3233	reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
3234	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
3235	/*
3236	 * We don't want automatic interrupt clearing, as the interrupt
3237	 * taskqueue needs to see the interrupt status.
3238	 */
3239	reg = MASTER_SA_TIMER_ENB;
3240	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
3241		reg |= MASTER_IM_RX_TIMER_ENB;
3242	if (ALC_USECS(sc->alc_int_tx_mod) != 0)
3243		reg |= MASTER_IM_TX_TIMER_ENB;
3244	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
3245	/*
3246	 * Disable interrupt re-trigger timer. We don't want automatic
3247	 * re-triggering of un-ACKed interrupts.
3248	 */
3249	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
3250	/* Configure CMB. */
3251	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
3252		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
3253		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
3254	} else
3255		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
3256	/*
3257	 * The hardware can be configured to issue an SMB interrupt at
3258	 * a programmed interval. Since there is a callout that is
3259	 * invoked every hz in the driver, we use that instead of
3260	 * relying on the periodic SMB interrupt.
3261	 */
3262	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
3263	/* Clear MAC statistics. */
3264	alc_stats_clear(sc);
3265
3266	/*
3267	 * Always use the maximum frame size the controller can support.
3268	 * Otherwise received frames with a frame length larger than the
3269	 * alc(4) MTU would be silently dropped in hardware, which would
3270	 * make path-MTU discovery hard as the sender wouldn't get any
3271	 * responses from the receiver. alc(4) supports multi-fragment
3272	 * frames on the Rx path, so it has no issue reassembling
3273	 * fragmented frames. Using the maximum frame size also removes
3274	 * the need to reinitialize the hardware when the interface MTU
3275	 * configuration is changed.
3276	 *
3277	 * Be conservative in what you do, be liberal in what you
3278	 * accept from others - RFC 793.
3279	 */
3280	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);
3281
3282	/* Disable header split(?) */
3283	CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
3284
3285	/* Configure IPG/IFG parameters. */
3286	CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
3287	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
3288	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
3289	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
3290	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
3291	/* Set parameters for half-duplex media. */
3292	CSR_WRITE_4(sc, ALC_HDPX_CFG,
3293	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
3294	    HDPX_CFG_LCOL_MASK) |
3295	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
3296	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
3297	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
3298	    HDPX_CFG_ABEBT_MASK) |
3299	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
3300	    HDPX_CFG_JAMIPG_MASK));
3301	/*
3302	 * Set the TSO/checksum offload threshold. For frames larger
3303	 * than this threshold, the hardware won't do TSO/checksum
3304	 * offloading.
3305	 */
3306	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
3307	    (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
3308	    TSO_OFFLOAD_THRESH_MASK);
	/* Configure TxQ. */
	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2)
		reg >>= 1;
	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
	    TXQ_CFG_TD_BURST_MASK;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);

	/* Configure Rx free descriptor pre-fetching. */
	CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
	    ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
	    RX_RD_FREE_THRESH_HI_MASK) |
	    ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
	    RX_RD_FREE_THRESH_LO_MASK));

	/*
	 * Configure flow control parameters.
	 * XON  : 80% of Rx FIFO
	 * XOFF : 30% of Rx FIFO
	 */
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) {
		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 8) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		     RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		     RX_FIFO_PAUSE_THRESH_HI_MASK));
	}
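	/*
	 * Worked example (illustrative, with a hypothetical FIFO length
	 * readback): if ALC_SRAM_RX_FIFO_LEN returned 672 units, the
	 * thresholds above would come out as rxf_hi = (672 * 8) / 10 =
	 * 537 (XON) and rxf_lo = (672 * 3) / 10 = 201 (XOFF).
	 */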
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2)
		CSR_WRITE_4(sc, ALC_SERDES_LOCK,
		    CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
		    SERDES_PHY_CLK_SLOWDOWN);

	/* Disable RSS until I understand L1C/L2C's RSS logic. */
	CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
	CSR_WRITE_4(sc, ALC_RSS_CPU, 0);

	/* Configure RxQ. */
	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK;
	reg |= RXQ_CFG_RSS_MODE_DIS;
	if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
		reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);

	/* Configure DMA parameters. */
	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
	reg |= sc->alc_rcb;
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		reg |= DMA_CFG_CMB_ENB;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
		reg |= DMA_CFG_SMB_ENB;
	else
		reg |= DMA_CFG_SMB_DIS;
	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
	    DMA_CFG_RD_BURST_SHIFT;
	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
	    DMA_CFG_WR_BURST_SHIFT;
	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK;
	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
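	/*
	 * Note on the field-packing idiom used throughout: shifting a
	 * default into position and then masking clips out-of-range
	 * values. Illustrative example with made-up numbers: a 4-bit
	 * field at shift 20 (mask 0x00F00000) holding the value 4 packs
	 * as (4 << 20) & 0x00F00000 == 0x00400000.
	 */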
	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  Actual reconfiguration of the MAC for the resolved speed and
	 *  duplex follows once link establishment is detected.
	 *  AR813x/AR815x always perform checksum computation regardless
	 *  of the MAC_CFG_RXCSUM_ENB bit. The controller is also known
	 *  to have a bug in the protocol field of the Rx return
	 *  structure, so these controllers can't handle fragmented
	 *  frames. Disable Rx checksum offloading until there is a
	 *  newer controller with a sane implementation.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2)
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	/* Set up the receive filter. */
	alc_rxfilter(sc);
	alc_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
	sc->alc_flags &= ~ALC_FLAG_LINK;
	/* Switch to the current media. */
	mii_mediachg(mii);

	callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
alc_stop(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;

	ALC_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->alc_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->alc_flags &= ~ALC_FLAG_LINK;
	callout_stop(&sc->alc_tick_ch);
	sc->alc_watchdog_timer = 0;
	alc_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	alc_stop_queue(sc);
	/* Disable DMA. */
	reg = CSR_READ_4(sc, ALC_DMA_CFG);
	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
	reg |= DMA_CFG_SMB_DIS;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	DELAY(1000);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);
	/* Clear interrupts which might be touched in the taskq handler. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);

	/* Reclaim Rx buffers that have been processed. */
	if (sc->alc_cdata.alc_rxhead != NULL)
		m_freem(sc->alc_cdata.alc_rxhead);
	ALC_RXCHAIN_RESET(sc);
	/*
	 * Free Tx/Rx mbufs still in the queues.
	 */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->alc_cdata.alc_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->alc_cdata.alc_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

static void
alc_stop_mac(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	ALC_LOCK_ASSERT(sc);

	/* Disable Rx/Tx MAC. */
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
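	/*
	 * Poll until the MAC reports idle; each iteration waits 10us,
	 * so the loop gives the hardware up to ALC_TIMEOUT * 10us
	 * before giving up.
	 */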
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if (reg == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable Rx/Tx MAC(0x%08x)!\n", reg);
}

static void
alc_start_queue(struct alc_softc *sc)
{
	uint32_t qcfg[] = {
		0,
		RXQ_CFG_QUEUE0_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
		RXQ_CFG_ENB
	};
	uint32_t cfg;

	ALC_LOCK_ASSERT(sc);

	/* Enable RxQ. */
	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
	cfg &= ~RXQ_CFG_ENB;
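	/*
	 * qcfg[] maps a count of Rx queues to the matching enable bits;
	 * since the driver keeps RSS disabled, only queue 0 is enabled
	 * here.
	 */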
	cfg |= qcfg[1];
	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
	/* Enable TxQ. */
	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
	cfg |= TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
}

static void
alc_stop_queue(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	ALC_LOCK_ASSERT(sc);

	/* Disable RxQ. */
	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((reg & RXQ_CFG_ENB) != 0) {
		reg &= ~RXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
	}
	/* Disable TxQ. */
	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
	if ((reg & TXQ_CFG_ENB) != 0) {
		reg &= ~TXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
	}
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		device_printf(sc->alc_dev,
		    "could not disable RxQ/TxQ (0x%08x)!\n", reg);
}

static void
alc_init_tx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_txdesc *txd;
	int i;

	ALC_LOCK_ASSERT(sc);

	sc->alc_cdata.alc_tx_prod = 0;
	sc->alc_cdata.alc_tx_cons = 0;
	sc->alc_cdata.alc_tx_cnt = 0;

	rd = &sc->alc_rdata;
	bzero(rd->alc_tx_ring, ALC_TX_RING_SZ);
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag,
	    sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

static int
alc_init_rx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_rxdesc *rxd;
	int i;

	ALC_LOCK_ASSERT(sc);

	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
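	/*
	 * The consumer starts at the last descriptor; the same index is
	 * handed to the hardware as the initial producer value in the
	 * mailbox write below.
	 */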
	sc->alc_morework = 0;
	rd = &sc->alc_rdata;
	bzero(rd->alc_rx_ring, ALC_RX_RING_SZ);
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->alc_rx_ring[i];
		if (alc_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	/*
	 * Since the controller does not update Rx descriptors, the
	 * driver does not have to read Rx descriptors back, so
	 * BUS_DMASYNC_PREWRITE is enough to ensure coherence.
	 */
	bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag,
	    sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE);
	/* Let the controller know the availability of new Rx buffers. */
	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);

	return (0);
}

static void
alc_init_rr_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	ALC_LOCK_ASSERT(sc);

	sc->alc_cdata.alc_rr_cons = 0;
	ALC_RXCHAIN_RESET(sc);

	rd = &sc->alc_rdata;
	bzero(rd->alc_rr_ring, ALC_RR_RING_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag,
	    sc->alc_cdata.alc_rr_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
alc_init_cmb(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	ALC_LOCK_ASSERT(sc);

	rd = &sc->alc_rdata;
	bzero(rd->alc_cmb, ALC_CMB_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
alc_init_smb(struct alc_softc *sc)
{
	struct alc_ring_data *rd;

	ALC_LOCK_ASSERT(sc);

	rd = &sc->alc_rdata;
	bzero(rd->alc_smb, ALC_SMB_SZ);
	bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
alc_rxvlan(struct alc_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;

	ALC_LOCK_ASSERT(sc);

	ifp = sc->alc_ifp;
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	else
		reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}

static void
alc_rxfilter(struct alc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	ALC_LOCK_ASSERT(sc);

	ifp = sc->alc_ifp;

	bzero(mchash, sizeof(mchash));
	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= MAC_CFG_BCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= MAC_CFG_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = 0xFFFFFFFF;
		mchash[1] = 0xFFFFFFFF;
		goto chipit;
	}

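	/*
	 * Hash each multicast address into a 64-bit filter: bit 31 of
	 * the big-endian CRC selects the mchash word and bits 30:26
	 * select the bit within it. Illustrative example: a CRC of
	 * 0xC0000000 would set bit 16 of mchash[1], since
	 * 0xC0000000 >> 31 == 1 and (0xC0000000 >> 26) & 0x1f == 16.
	 */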
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	}
	if_maddr_runlock(ifp);

chipit:
	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALC_PROC_MIN, ALC_PROC_MAX));
}

static int
sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX));
}
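/*
 * Usage sketch (illustrative, not a verbatim copy of this driver's
 * sysctl node setup; the softc field name is hypothetical): the
 * range-checked handlers above are meant to be attached with
 * SYSCTL_ADD_PROC(), e.g.
 *
 *	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_mod",
 *	    CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_mod, 0,
 *	    sysctl_hw_alc_int_mod, "I", "alc interrupt moderation");
 *
 * A write to the OID then lands in sysctl_int_range(), which rejects
 * values outside [ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX] with EINVAL.
 */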
3765