1/*	$NetBSD: if_alc.c,v 1.53 2022/09/17 13:55:35 thorpej Exp $	*/
2/*	$OpenBSD: if_alc.c,v 1.1 2009/08/08 09:31:13 kevlo Exp $	*/
3/*-
4 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice unmodified, this list of conditions, and the following
12 *    disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30/* Driver for Atheros AR813x/AR815x PCIe Ethernet. */
31
32#ifdef _KERNEL_OPT
33#include "vlan.h"
34#endif
35
36#include <sys/param.h>
37#include <sys/proc.h>
38#include <sys/endian.h>
39#include <sys/systm.h>
40#include <sys/types.h>
41#include <sys/sockio.h>
42#include <sys/mbuf.h>
43#include <sys/queue.h>
44#include <sys/kernel.h>
45#include <sys/device.h>
46#include <sys/callout.h>
47#include <sys/socket.h>
48#include <sys/module.h>
49
50#include <sys/bus.h>
51
52#include <net/bpf.h>
53#include <net/if.h>
54#include <net/if_dl.h>
55#include <net/if_llc.h>
56#include <net/if_media.h>
57#include <net/if_ether.h>
58
59#ifdef INET
60#include <netinet/in.h>
61#include <netinet/in_systm.h>
62#include <netinet/in_var.h>
63#include <netinet/ip.h>
64#endif
65
66#include <net/if_types.h>
67#include <net/if_vlanvar.h>
68
69#include <dev/mii/mii.h>
70#include <dev/mii/miivar.h>
71
72#include <dev/pci/pcireg.h>
73#include <dev/pci/pcivar.h>
74#include <dev/pci/pcidevs.h>
75
76#include <dev/pci/if_alcreg.h>
77
78/*
79 * Devices supported by this driver.
80 */
81static const struct alc_ident alc_ident_table[] = {
82	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8131, 9 * 1024,
83		"Atheros AR8131 PCIe Gigabit Ethernet" },
84	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8132, 9 * 1024,
85		"Atheros AR8132 PCIe Fast Ethernet" },
86	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151, 6 * 1024,
87		"Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
88	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151_V2, 6 * 1024,
89		"Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
90	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B, 6 * 1024,
91		"Atheros AR8152 v1.1 PCIe Fast Ethernet" },
92	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B2, 6 * 1024,
93		"Atheros AR8152 v2.0 PCIe Fast Ethernet" },
94	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8161, 9 * 1024,
95		"Atheros AR8161 PCIe Gigabit Ethernet" },
96	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8162, 9 * 1024,
97		"Atheros AR8162 PCIe Fast Ethernet" },
98	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8171, 9 * 1024,
99		"Atheros AR8171 PCIe Gigabit Ethernet" },
100	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8172, 9 * 1024,
101		"Atheros AR8172 PCIe Fast Ethernet" },
102	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2200, 9 * 1024,
103		"Killer E2200 Gigabit Ethernet" },
104	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2400, 9 * 1024,
105		"Killer E2400 Gigabit Ethernet" },
106	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_E2500, 9 * 1024,
107		"Killer E2500 Gigabit Ethernet" },
108	{ 0, 0, 0, NULL },
109};
110
111static int	alc_match(device_t, cfdata_t, void *);
112static void	alc_attach(device_t, device_t, void *);
113static int	alc_detach(device_t, int);
114
115static int	alc_init(struct ifnet *);
116static int	alc_init_backend(struct ifnet *, bool);
117static void	alc_start(struct ifnet *);
118static int	alc_ioctl(struct ifnet *, u_long, void *);
119static void	alc_watchdog(struct ifnet *);
120static int	alc_mediachange(struct ifnet *);
121static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);
122
123static void	alc_aspm(struct alc_softc *, int, int);
124static void	alc_aspm_813x(struct alc_softc *, int);
125static void	alc_aspm_816x(struct alc_softc *, int);
126static void	alc_disable_l0s_l1(struct alc_softc *);
127static int	alc_dma_alloc(struct alc_softc *);
128static void	alc_dma_free(struct alc_softc *);
129static void	alc_dsp_fixup(struct alc_softc *, int);
130static int	alc_encap(struct alc_softc *, struct mbuf **);
131static const struct alc_ident *
132		alc_find_ident(struct pci_attach_args *);
133static void	alc_get_macaddr(struct alc_softc *);
134static void	alc_get_macaddr_813x(struct alc_softc *);
135static void	alc_get_macaddr_816x(struct alc_softc *);
136static void	alc_get_macaddr_par(struct alc_softc *);
137static void	alc_init_cmb(struct alc_softc *);
138static void	alc_init_rr_ring(struct alc_softc *);
139static int	alc_init_rx_ring(struct alc_softc *, bool);
140static void	alc_init_smb(struct alc_softc *);
141static void	alc_init_tx_ring(struct alc_softc *);
142static int	alc_intr(void *);
143static void	alc_mac_config(struct alc_softc *);
144static int	alc_mii_readreg_813x(struct alc_softc *, int, int, uint16_t *);
145static int	alc_mii_readreg_816x(struct alc_softc *, int, int, uint16_t *);
146static int	alc_mii_writereg_813x(struct alc_softc *, int, int, uint16_t);
147static int	alc_mii_writereg_816x(struct alc_softc *, int, int, uint16_t);
148static int	alc_miibus_readreg(device_t, int, int, uint16_t *);
149static void	alc_miibus_statchg(struct ifnet *);
150static int	alc_miibus_writereg(device_t, int, int, uint16_t);
151static int	alc_miidbg_readreg(struct alc_softc *, int, uint16_t *);
152static int	alc_miidbg_writereg(struct alc_softc *, int, uint16_t);
153static int	alc_miiext_readreg(struct alc_softc *, int, int, uint16_t *);
154static int	alc_miiext_writereg(struct alc_softc *, int, int, uint16_t);
155static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *, bool);
156static void	alc_phy_down(struct alc_softc *);
157static void	alc_phy_reset(struct alc_softc *);
158static void	alc_phy_reset_813x(struct alc_softc *);
159static void	alc_phy_reset_816x(struct alc_softc *);
160static void	alc_reset(struct alc_softc *);
161static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
162static int	alc_rxintr(struct alc_softc *);
163static void	alc_iff(struct alc_softc *);
164static void	alc_rxvlan(struct alc_softc *);
165static void	alc_start_queue(struct alc_softc *);
166static void	alc_stats_clear(struct alc_softc *);
167static void	alc_stats_update(struct alc_softc *);
168static void	alc_stop(struct ifnet *, int);
169static void	alc_stop_mac(struct alc_softc *);
170static void	alc_stop_queue(struct alc_softc *);
171static void	alc_tick(void *);
172static void	alc_txeof(struct alc_softc *);
173static void	alc_init_pcie(struct alc_softc *);
174
175static const uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0, 0 };
176
177CFATTACH_DECL_NEW(alc, sizeof(struct alc_softc),
178    alc_match, alc_attach, alc_detach, NULL);
179
180int alcdebug = 0;
181#define	DPRINTF(x)	do { if (alcdebug) printf x; } while (0)
182
183#define ALC_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)
184
185static int
186alc_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
187{
188	struct alc_softc *sc = device_private(dev);
189	int v;
190
191	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
192		v = alc_mii_readreg_816x(sc, phy, reg, val);
193	else
194		v = alc_mii_readreg_813x(sc, phy, reg, val);
195	return (v);
196}
197
198static int
199alc_mii_readreg_813x(struct alc_softc *sc, int phy, int reg, uint16_t *val)
200{
201	uint32_t v;
202	int i;
203
204	if (phy != sc->alc_phyaddr)
205		return -1;
206
207	/*
208	 * For AR8132 fast ethernet controller, do not report 1000baseT
209	 * capability to mii(4). Even though AR8132 uses the same
210	 * model/revision number of F1 gigabit PHY, the PHY has no
211	 * ability to establish 1000baseT link.
212	 */
213	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 && reg == MII_EXTSR) {
214		*val = 0;
215		return 0;
216	}
217
218	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
219	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
220	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
221		DELAY(5);
222		v = CSR_READ_4(sc, ALC_MDIO);
223		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
224			break;
225	}
226
227	if (i == 0) {
228		printf("%s: phy read timeout: phy %d, reg %d\n",
229		    device_xname(sc->sc_dev), phy, reg);
230		return ETIMEDOUT;
231	}
232
233	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
234	return 0;
235}
236
237static int
238alc_mii_readreg_816x(struct alc_softc *sc, int phy, int reg, uint16_t *val)
239{
240	uint32_t clk, v;
241	int i;
242
243	if (phy != sc->alc_phyaddr)
244		return -1;
245
246	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
247		clk = MDIO_CLK_25_128;
248	else
249		clk = MDIO_CLK_25_4;
250	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
251	    MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
252	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
253		DELAY(5);
254		v = CSR_READ_4(sc, ALC_MDIO);
255		if ((v & MDIO_OP_BUSY) == 0)
256			break;
257	}
258
259	if (i == 0) {
260		printf("%s: phy read timeout: phy %d, reg %d\n",
261		    device_xname(sc->sc_dev), phy, reg);
262		return ETIMEDOUT;
263	}
264
265	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
266	return 0;
267}
268
269static int
270alc_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
271{
272	struct alc_softc *sc = device_private(dev);
273	int rv;
274
275	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
276		rv = alc_mii_writereg_816x(sc, phy, reg, val);
277	else
278		rv = alc_mii_writereg_813x(sc, phy, reg, val);
279
280	return rv;
281}
282
283static int
284alc_mii_writereg_813x(struct alc_softc *sc, int phy, int reg, uint16_t val)
285{
286	uint32_t v;
287	int i;
288
289	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
290	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
291	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
292	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
293		DELAY(5);
294		v = CSR_READ_4(sc, ALC_MDIO);
295		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
296			break;
297	}
298
299	if (i == 0) {
300		printf("%s: phy write timeout: phy %d, reg %d\n",
301		    device_xname(sc->sc_dev), phy, reg);
302		return ETIMEDOUT;
303	}
304
305	return 0;
306}
307
308static int
309alc_mii_writereg_816x(struct alc_softc *sc, int phy, int reg, uint16_t val)
310{
311	uint32_t clk, v;
312	int i;
313
314	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
315		clk = MDIO_CLK_25_128;
316	else
317		clk = MDIO_CLK_25_4;
318	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
319	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
320	    MDIO_SUP_PREAMBLE | clk);
321	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
322		DELAY(5);
323		v = CSR_READ_4(sc, ALC_MDIO);
324		if ((v & MDIO_OP_BUSY) == 0)
325			break;
326	}
327
328	if (i == 0) {
329		printf("%s: phy write timeout: phy %d, reg %d\n",
330		    device_xname(sc->sc_dev), phy, reg);
331		return ETIMEDOUT;
332	}
333
334	return 0;
335}
336
337static void
338alc_miibus_statchg(struct ifnet *ifp)
339{
340	struct alc_softc *sc = ifp->if_softc;
341	struct mii_data *mii = &sc->sc_miibus;
342	uint32_t reg;
343
344	if ((ifp->if_flags & IFF_RUNNING) == 0)
345		return;
346
347	sc->alc_flags &= ~ALC_FLAG_LINK;
348	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
349	    (IFM_ACTIVE | IFM_AVALID)) {
350		switch (IFM_SUBTYPE(mii->mii_media_active)) {
351		case IFM_10_T:
352		case IFM_100_TX:
353			sc->alc_flags |= ALC_FLAG_LINK;
354			break;
355		case IFM_1000_T:
356			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
357				sc->alc_flags |= ALC_FLAG_LINK;
358			break;
359		default:
360			break;
361		}
362	}
363	/* Stop Rx/Tx MACs. */
364	alc_stop_mac(sc);
365
366	/* Program MACs with resolved speed/duplex/flow-control. */
367	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
368		alc_start_queue(sc);
369		alc_mac_config(sc);
370		/* Re-enable Tx/Rx MACs. */
371		reg = CSR_READ_4(sc, ALC_MAC_CFG);
372		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
373		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
374	}
375	alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active));
376	alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active));
377}
378
379static int
380alc_miidbg_readreg(struct alc_softc *sc, int reg, uint16_t *val)
381{
382	int rv;
383
384	rv = alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
385	    reg);
386	if (rv != 0)
387		return rv;
388
389	return (alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
390		ALC_MII_DBG_DATA, val));
391}
392
393static int
394alc_miidbg_writereg(struct alc_softc *sc, int reg, uint16_t val)
395{
396	int rv;
397
398	rv = alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
399	    reg);
400	if (rv != 0)
401		return rv;
402
403	rv = alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
404	    val);
405
406	return rv;
407}
408
409static int
410alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg, uint16_t *val)
411{
412	uint32_t clk, v;
413	int i;
414
415	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
416	    EXT_MDIO_DEVADDR(devaddr));
417	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
418		clk = MDIO_CLK_25_128;
419	else
420		clk = MDIO_CLK_25_4;
421	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
422	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
423	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
424		DELAY(5);
425		v = CSR_READ_4(sc, ALC_MDIO);
426		if ((v & MDIO_OP_BUSY) == 0)
427			break;
428	}
429
430	if (i == 0) {
431		printf("%s: phy ext read timeout: %d\n",
432		    device_xname(sc->sc_dev), reg);
433		return ETIMEDOUT;
434	}
435
436	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
437	return 0;
438}
439
440static int
441alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, uint16_t val)
442{
443	uint32_t clk, v;
444	int i;
445
446	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
447	    EXT_MDIO_DEVADDR(devaddr));
448	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
449		clk = MDIO_CLK_25_128;
450	else
451		clk = MDIO_CLK_25_4;
452	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
453	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) |
454	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
455	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
456		DELAY(5);
457		v = CSR_READ_4(sc, ALC_MDIO);
458		if ((v & MDIO_OP_BUSY) == 0)
459			break;
460	}
461
462	if (i == 0) {
463		printf("%s: phy ext write timeout: reg %d\n",
464		    device_xname(sc->sc_dev), reg);
465		return ETIMEDOUT;
466	}
467
468	return 0;
469}
470
471static void
472alc_dsp_fixup(struct alc_softc *sc, int media)
473{
474	uint16_t agc, len, val;
475
476	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
477		return;
478	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0)
479		return;
480
481	/*
482	 * Vendor PHY magic.
483	 * 1000BT/AZ, wrong cable length
484	 */
485	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
486		alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6, &len);
487		len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) &
488		    EXT_CLDCTL6_CAB_LEN_MASK;
489		/* XXX: used to be (alc >> shift) & mask which is 0 */
490		alc_miidbg_readreg(sc, MII_DBG_AGC, &agc);
491		agc &= DBG_AGC_2_VGA_MASK;
492		agc >>= DBG_AGC_2_VGA_SHIFT;
493		if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G &&
494		    agc > DBG_AGC_LONG1G_LIMT) ||
495		    (media == IFM_100_TX && len > DBG_AGC_LONG100M_LIMT &&
496		    agc > DBG_AGC_LONG1G_LIMT)) {
497			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
498			    DBG_AZ_ANADECT_LONG);
499			alc_miiext_readreg(sc, MII_EXT_ANEG,
500			    MII_EXT_ANEG_AFE, &val);
501			val |= ANEG_AFEE_10BT_100M_TH;
502			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
503			    val);
504		} else {
505			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
506			    DBG_AZ_ANADECT_DEFAULT);
507			alc_miiext_readreg(sc, MII_EXT_ANEG,
508			    MII_EXT_ANEG_AFE, &val);
509			val &= ~ANEG_AFEE_10BT_100M_TH;
510			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
511			    val);
512		}
513		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
514		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
515			if (media == IFM_1000_T) {
516				/*
517				 * Giga link threshold, raise the tolerance of
518				 * noise 50%.
519				 */
520				alc_miidbg_readreg(sc, MII_DBG_MSE20DB, &val);
521				val &= ~DBG_MSE20DB_TH_MASK;
522				val |= (DBG_MSE20DB_TH_HI <<
523				    DBG_MSE20DB_TH_SHIFT);
524				alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
525			} else if (media == IFM_100_TX)
526				alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
527				    DBG_MSE16DB_UP);
528		}
529	} else {
530		alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, &val);
531		val &= ~ANEG_AFEE_10BT_100M_TH;
532		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val);
533		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
534		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
535			alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
536			    DBG_MSE16DB_DOWN);
537			alc_miidbg_readreg(sc, MII_DBG_MSE20DB, &val);
538			val &= ~DBG_MSE20DB_TH_MASK;
539			val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT);
540			alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
541		}
542	}
543}
544
545static void
546alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
547{
548	struct alc_softc *sc = ifp->if_softc;
549	struct mii_data *mii = &sc->sc_miibus;
550
551	if ((ifp->if_flags & IFF_UP) == 0)
552		return;
553
554	mii_pollstat(mii);
555	ifmr->ifm_status = mii->mii_media_status;
556	ifmr->ifm_active = mii->mii_media_active;
557}
558
559static int
560alc_mediachange(struct ifnet *ifp)
561{
562	struct alc_softc *sc = ifp->if_softc;
563	struct mii_data *mii = &sc->sc_miibus;
564	int error;
565
566	if (mii->mii_instance != 0) {
567		struct mii_softc *miisc;
568
569		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
570			mii_phy_reset(miisc);
571	}
572	error = mii_mediachg(mii);
573
574	return (error);
575}
576
577static const struct alc_ident *
578alc_find_ident(struct pci_attach_args *pa)
579{
580	const struct alc_ident *ident;
581	uint16_t vendor, devid;
582
583	vendor = PCI_VENDOR(pa->pa_id);
584	devid = PCI_PRODUCT(pa->pa_id);
585	for (ident = alc_ident_table; ident->name != NULL; ident++) {
586		if (vendor == ident->vendorid && devid == ident->deviceid)
587			return (ident);
588	}
589
590	return (NULL);
591}
592
593static int
594alc_match(device_t dev, cfdata_t match, void *aux)
595{
596	struct pci_attach_args *pa = aux;
597
598	return alc_find_ident(pa) != NULL;
599}
600
601static void
602alc_get_macaddr(struct alc_softc *sc)
603{
604
605	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
606		alc_get_macaddr_816x(sc);
607	else
608		alc_get_macaddr_813x(sc);
609}
610
611static void
612alc_get_macaddr_813x(struct alc_softc *sc)
613{
614	uint32_t opt;
615	uint16_t val;
616	int eeprom, i;
617
618	eeprom = 0;
619	opt = CSR_READ_4(sc, ALC_OPT_CFG);
620	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
621	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
622		/*
623		 * EEPROM found, let TWSI reload EEPROM configuration.
624		 * This will set ethernet address of controller.
625		 */
626		eeprom++;
627		switch (sc->alc_ident->deviceid) {
628		case PCI_PRODUCT_ATTANSIC_AR8131:
629		case PCI_PRODUCT_ATTANSIC_AR8132:
630			if ((opt & OPT_CFG_CLK_ENB) == 0) {
631				opt |= OPT_CFG_CLK_ENB;
632				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
633				CSR_READ_4(sc, ALC_OPT_CFG);
634				DELAY(1000);
635			}
636			break;
637		case PCI_PRODUCT_ATTANSIC_AR8151:
638		case PCI_PRODUCT_ATTANSIC_AR8151_V2:
639		case PCI_PRODUCT_ATTANSIC_AR8152_B:
640		case PCI_PRODUCT_ATTANSIC_AR8152_B2:
641			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
642			    ALC_MII_DBG_ADDR, 0x00);
643			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
644			    ALC_MII_DBG_DATA, &val);
645			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
646			    ALC_MII_DBG_DATA, val & 0xFF7F);
647			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
648			    ALC_MII_DBG_ADDR, 0x3B);
649			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
650			    ALC_MII_DBG_DATA, &val);
651			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
652			    ALC_MII_DBG_DATA, val | 0x0008);
653			DELAY(20);
654			break;
655		}
656
657		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
658		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
659		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
660		CSR_READ_4(sc, ALC_WOL_CFG);
661
662		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
663		    TWSI_CFG_SW_LD_START);
664		for (i = 100; i > 0; i--) {
665			DELAY(1000);
666			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
667			    TWSI_CFG_SW_LD_START) == 0)
668				break;
669		}
670		if (i == 0)
671			printf("%s: reloading EEPROM timeout!\n",
672			    device_xname(sc->sc_dev));
673	} else {
674		if (alcdebug)
675			printf("%s: EEPROM not found!\n", device_xname(sc->sc_dev));
676	}
677	if (eeprom != 0) {
678		switch (sc->alc_ident->deviceid) {
679		case PCI_PRODUCT_ATTANSIC_AR8131:
680		case PCI_PRODUCT_ATTANSIC_AR8132:
681			if ((opt & OPT_CFG_CLK_ENB) != 0) {
682				opt &= ~OPT_CFG_CLK_ENB;
683				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
684				CSR_READ_4(sc, ALC_OPT_CFG);
685				DELAY(1000);
686			}
687			break;
688		case PCI_PRODUCT_ATTANSIC_AR8151:
689		case PCI_PRODUCT_ATTANSIC_AR8151_V2:
690		case PCI_PRODUCT_ATTANSIC_AR8152_B:
691		case PCI_PRODUCT_ATTANSIC_AR8152_B2:
692			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
693			    ALC_MII_DBG_ADDR, 0x00);
694			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
695			    ALC_MII_DBG_DATA, &val);
696			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
697			    ALC_MII_DBG_DATA, val | 0x0080);
698			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
699			    ALC_MII_DBG_ADDR, 0x3B);
700			alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
701			    ALC_MII_DBG_DATA, &val);
702			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
703			    ALC_MII_DBG_DATA, val & 0xFFF7);
704			DELAY(20);
705			break;
706		}
707	}
708
709	alc_get_macaddr_par(sc);
710}
711
712static void
713alc_get_macaddr_816x(struct alc_softc *sc)
714{
715	uint32_t reg;
716	int i, reloaded;
717
718	reloaded = 0;
719	/* Try to reload station address via TWSI. */
720	for (i = 100; i > 0; i--) {
721		reg = CSR_READ_4(sc, ALC_SLD);
722		if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
723			break;
724		DELAY(1000);
725	}
726	if (i != 0) {
727		CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
728		for (i = 100; i > 0; i--) {
729			DELAY(1000);
730			reg = CSR_READ_4(sc, ALC_SLD);
731			if ((reg & SLD_START) == 0)
732				break;
733		}
734		if (i != 0)
735			reloaded++;
736		else if (alcdebug)
737			printf("%s: reloading station address via TWSI timed out!\n",
738			    device_xname(sc->sc_dev));
739	}
740
741	/* Try to reload station address from EEPROM or FLASH. */
742	if (reloaded == 0) {
743		reg = CSR_READ_4(sc, ALC_EEPROM_LD);
744		if ((reg & (EEPROM_LD_EEPROM_EXIST |
745		    EEPROM_LD_FLASH_EXIST)) != 0) {
746			for (i = 100; i > 0; i--) {
747				reg = CSR_READ_4(sc, ALC_EEPROM_LD);
748				if ((reg & (EEPROM_LD_PROGRESS |
749				    EEPROM_LD_START)) == 0)
750					break;
751				DELAY(1000);
752			}
753			if (i != 0) {
754				CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
755				    EEPROM_LD_START);
756				for (i = 100; i > 0; i--) {
757					DELAY(1000);
758					reg = CSR_READ_4(sc, ALC_EEPROM_LD);
759					if ((reg & EEPROM_LD_START) == 0)
760						break;
761				}
762			} else if (alcdebug)
763				printf("%s: reloading EEPROM/FLASH timed out!\n",
764				  device_xname(sc->sc_dev));
765		}
766	}
767
768	alc_get_macaddr_par(sc);
769}
770
771static void
772alc_get_macaddr_par(struct alc_softc *sc)
773{
774	uint32_t ea[2];
775
776	ea[0] = CSR_READ_4(sc, ALC_PAR0);
777	ea[1] = CSR_READ_4(sc, ALC_PAR1);
778	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
779	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
780	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
781	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
782	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
783	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
784}
785
786static void
787alc_disable_l0s_l1(struct alc_softc *sc)
788{
789	uint32_t pmcfg;
790
791	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
792		/* Another magic from vendor. */
793		pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
794		pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
795		    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
796		    PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1);
797		pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB |
798		    PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB;
799		CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
800	}
801}
802
803static void
804alc_phy_reset(struct alc_softc *sc)
805{
806
807	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
808		alc_phy_reset_816x(sc);
809	else
810		alc_phy_reset_813x(sc);
811}
812
813static void
814alc_phy_reset_813x(struct alc_softc *sc)
815{
816	uint16_t data;
817
818	/* Reset magic from Linux. */
819	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
820	CSR_READ_2(sc, ALC_GPHY_CFG);
821	DELAY(10 * 1000);
822
823	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
824	    GPHY_CFG_SEL_ANA_RESET);
825	CSR_READ_2(sc, ALC_GPHY_CFG);
826	DELAY(10 * 1000);
827
828	/* DSP fixup, Vendor magic. */
829	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
830		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
831		    ALC_MII_DBG_ADDR, 0x000A);
832		alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
833		    ALC_MII_DBG_DATA, &data);
834		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
835		    ALC_MII_DBG_DATA, data & 0xDFFF);
836	}
837	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
838	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
839	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
840	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
841		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
842		    ALC_MII_DBG_ADDR, 0x003B);
843		alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
844		    ALC_MII_DBG_DATA, &data);
845		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
846		    ALC_MII_DBG_DATA, data & 0xFFF7);
847		DELAY(20 * 1000);
848	}
849	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151) {
850		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
851		    ALC_MII_DBG_ADDR, 0x0029);
852		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
853		    ALC_MII_DBG_DATA, 0x929D);
854	}
855	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
856	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132 ||
857	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
858	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
859		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
860		    ALC_MII_DBG_ADDR, 0x0029);
861		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
862		    ALC_MII_DBG_DATA, 0xB6DD);
863	}
864
865	/* Load DSP codes, vendor magic. */
866	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
867	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
868	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
869	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
870	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
871	    ALC_MII_DBG_DATA, data);
872
873	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
874	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
875	    ANA_SERDES_EN_LCKDT;
876	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
877	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
878	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
879	    ALC_MII_DBG_DATA, data);
880
881	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
882	    ANA_LONG_CABLE_TH_100_MASK) |
883	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
884	    ANA_SHORT_CABLE_TH_100_SHIFT) |
885	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
886	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
887	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
888	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
889	    ALC_MII_DBG_DATA, data);
890
891	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
892	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
893	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
894	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
895	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
896	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
897	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
898	    ALC_MII_DBG_DATA, data);
899
900	data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
901	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
902	    ANA_OEN_125M;
903	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
904	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
905	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
906	    ALC_MII_DBG_DATA, data);
907	DELAY(1000);
908
909	/* Disable hibernation. */
910	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
911	    0x0029);
912	alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
913	    ALC_MII_DBG_DATA, &data);
914	data &= ~0x8000;
915	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
916	    data);
917
918	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
919	    0x000B);
920	alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
921	    ALC_MII_DBG_DATA, &data);
922	data &= ~0x8000;
923	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
924	    data);
925}
926
927static void
928alc_phy_reset_816x(struct alc_softc *sc)
929{
930	uint32_t val;
931	uint16_t phyval;
932
933	val = CSR_READ_4(sc, ALC_GPHY_CFG);
934	val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
935	    GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
936	    GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
937	val |= GPHY_CFG_SEL_ANA_RESET;
938#ifdef notyet
939	val |= GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN | GPHY_CFG_SEL_ANA_RESET;
940#else
941	/* Disable PHY hibernation. */
942	val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
943#endif
944	CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
945	DELAY(10);
946	CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
947	DELAY(800);
948
949	/* Vendor PHY magic. */
950#ifdef notyet
951	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, DBG_LEGCYPS_DEFAULT);
952	alc_miidbg_writereg(sc, MII_DBG_SYSMODCTL, DBG_SYSMODCTL_DEFAULT);
953	alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_VDRVBIAS,
954	    EXT_VDRVBIAS_DEFAULT);
955#else
956	/* Disable PHY hibernation. */
957	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
958	    DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
959	alc_miidbg_writereg(sc, MII_DBG_HIBNEG,
960	    DBG_HIBNEG_DEFAULT & ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
961	alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
962#endif
963
964	/* XXX Disable EEE. */
965	val = CSR_READ_4(sc, ALC_LPI_CTL);
966	val &= ~LPI_CTL_ENB;
967	CSR_WRITE_4(sc, ALC_LPI_CTL, val);
968	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);
969
970	/* PHY power saving. */
971	alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
972	alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
973	alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
974	alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
975	alc_miidbg_readreg(sc, MII_DBG_GREENCFG2, &phyval);
976	phyval &= ~DBG_GREENCFG2_GATE_DFSE_EN;
977	alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, phyval);
978
979	/* RTL8139C, 120m issue. */
980	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
981	    ANEG_NLP78_120M_DEFAULT);
982	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
983	    ANEG_S3DIG10_DEFAULT);
984
985	if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
986		/* Turn off half amplitude. */
987		alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, &phyval);
988		phyval |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
989		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, phyval);
990		/* Turn off Green feature. */
991		alc_miidbg_readreg(sc, MII_DBG_GREENCFG2, &phyval);
992		phyval |= DBG_GREENCFG2_BP_GREEN;
993		alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, phyval);
994		/* Turn off half bias. */
995		alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, &phyval);
996		val |= EXT_CLDCTL5_BP_VD_HLFBIAS;
997		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, phyval);
998	}
999}
1000
1001static void
1002alc_phy_down(struct alc_softc *sc)
1003{
1004	uint32_t gphy;
1005
1006	switch (sc->alc_ident->deviceid) {
1007	case PCI_PRODUCT_ATTANSIC_AR8161:
1008	case PCI_PRODUCT_ATTANSIC_E2200:
1009	case PCI_PRODUCT_ATTANSIC_E2400:
1010	case PCI_PRODUCT_ATTANSIC_E2500:
1011	case PCI_PRODUCT_ATTANSIC_AR8162:
1012	case PCI_PRODUCT_ATTANSIC_AR8171:
1013	case PCI_PRODUCT_ATTANSIC_AR8172:
1014		gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
1015		gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
1016		    GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
1017		gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
1018		    GPHY_CFG_SEL_ANA_RESET;
1019		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
1020		CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
1021		break;
1022	case PCI_PRODUCT_ATTANSIC_AR8151:
1023	case PCI_PRODUCT_ATTANSIC_AR8151_V2:
1024	case PCI_PRODUCT_ATTANSIC_AR8152_B:
1025	case PCI_PRODUCT_ATTANSIC_AR8152_B2:
1026		/*
1027		 * GPHY power down caused more problems on AR8151 v2.0.
1028		 * When driver is reloaded after GPHY power down,
1029		 * accesses to PHY/MAC registers hung the system. Only
1030		 * cold boot recovered from it.  I'm not sure whether
1031		 * AR8151 v1.0 also requires this one though.  I don't
1032		 * have AR8151 v1.0 controller in hand.
1033		 * The only option left is to isolate the PHY and
1034		 * initiates power down the PHY which in turn saves
1035		 * more power when driver is unloaded.
1036		 */
1037		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
1038		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
1039		break;
1040	default:
1041		/* Force PHY down. */
1042		CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
1043		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
1044		    GPHY_CFG_PWDOWN_HW);
1045		DELAY(1000);
1046		break;
1047	}
1048}
1049
1050static void
1051alc_aspm(struct alc_softc *sc, int init, int media)
1052{
1053
1054	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
1055		alc_aspm_816x(sc, init);
1056	else
1057		alc_aspm_813x(sc, media);
1058}
1059
1060static void
1061alc_aspm_813x(struct alc_softc *sc, int media)
1062{
1063	uint32_t pmcfg;
1064	uint16_t linkcfg;
1065
1066	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
1067	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
1068	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
1069		linkcfg = CSR_READ_2(sc, sc->alc_expcap +
1070		    PCIE_LCSR);
1071	else
1072		linkcfg = 0;
1073	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
1074	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
1075	pmcfg |= PM_CFG_MAC_ASPM_CHK;
1076	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
1077	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
1078
1079	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
1080		/* Disable extended sync except AR8152 B v1.0 */
1081		linkcfg &= ~0x80;
1082		if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
1083		    sc->alc_rev == ATHEROS_AR8152_B_V10)
1084			linkcfg |= 0x80;
1085		CSR_WRITE_2(sc, sc->alc_expcap + PCIE_LCSR,
1086		    linkcfg);
1087		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
1088		    PM_CFG_HOTRST);
1089		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
1090		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
1091		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
1092		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
1093		    PM_CFG_PM_REQ_TIMER_SHIFT);
1094		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
1095	}
1096
1097	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
1098		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
1099			pmcfg |= PM_CFG_ASPM_L0S_ENB;
1100		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
1101			pmcfg |= PM_CFG_ASPM_L1_ENB;
1102		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
1103			if (sc->alc_ident->deviceid ==
1104			    PCI_PRODUCT_ATTANSIC_AR8152_B)
1105				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
1106			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
1107			    PM_CFG_SERDES_PLL_L1_ENB |
1108			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
1109			pmcfg |= PM_CFG_CLK_SWH_L1;
1110			if (media == IFM_100_TX || media == IFM_1000_T) {
1111				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
1112				switch (sc->alc_ident->deviceid) {
1113				case PCI_PRODUCT_ATTANSIC_AR8152_B:
1114					pmcfg |= (7 <<
1115					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
1116					break;
1117				case PCI_PRODUCT_ATTANSIC_AR8152_B2:
1118				case PCI_PRODUCT_ATTANSIC_AR8151_V2:
1119					pmcfg |= (4 <<
1120					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
1121					break;
1122				default:
1123					pmcfg |= (15 <<
1124					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
1125					break;
1126				}
1127			}
1128		} else {
1129			pmcfg |= PM_CFG_SERDES_L1_ENB |
1130			    PM_CFG_SERDES_PLL_L1_ENB |
1131			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
1132			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
1133			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
1134		}
1135	} else {
1136		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
1137		    PM_CFG_SERDES_PLL_L1_ENB);
1138		pmcfg |= PM_CFG_CLK_SWH_L1;
1139		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
1140			pmcfg |= PM_CFG_ASPM_L1_ENB;
1141	}
1142	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
1143}
1144
1145static void
1146alc_aspm_816x(struct alc_softc *sc, int init)
1147{
1148	uint32_t pmcfg;
1149
1150	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
1151	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK;
1152	pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT;
1153	pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
1154	pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT;
1155	pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK;
1156	pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT;
1157	pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV;
1158	pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S |
1159	    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB |
1160	    PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
1161	    PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB |
1162	    PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST);
1163	if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
1164	    (sc->alc_rev & 0x01) != 0)
1165		pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB;
1166	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
1167		/* Link up, enable both L0s, L1s. */
1168		pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
1169		    PM_CFG_MAC_ASPM_CHK;
1170	} else {
1171		if (init != 0)
1172			pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
1173			    PM_CFG_MAC_ASPM_CHK;
1174		else if ((sc->sc_ec.ec_if.if_flags & IFF_RUNNING) != 0)
1175			pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK;
1176	}
1177	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
1178}
1179
1180static void
1181alc_init_pcie(struct alc_softc *sc)
1182{
1183	const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
1184	uint32_t cap, ctl, val;
1185	int state;
1186
1187	/* Clear data link and flow-control protocol error. */
1188	val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
1189	val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
1190	CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
1191
1192	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
1193		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
1194		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
1195		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
1196		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
1197		    PCIE_PHYMISC_FORCE_RCV_DET);
1198		if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
1199		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
1200			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
1201			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
1202			    PCIE_PHYMISC2_SERDES_TH_MASK);
1203			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
1204			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
1205			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
1206		}
1207		/* Disable ASPM L0S and L1. */
1208		cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1209		    sc->alc_expcap + PCIE_LCAP) >> 16;
1210		if ((cap & PCIE_LCAP_ASPM) != 0) {
1211			ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1212			    sc->alc_expcap + PCIE_LCSR) >> 16;
1213			if ((ctl & 0x08) != 0)
1214				sc->alc_rcb = DMA_CFG_RCB_128;
1215			if (alcdebug)
1216				printf("%s: RCB %u bytes\n",
1217				    device_xname(sc->sc_dev),
1218				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
1219			state = ctl & 0x03;
1220			if (state & 0x01)
1221				sc->alc_flags |= ALC_FLAG_L0S;
1222			if (state & 0x02)
1223				sc->alc_flags |= ALC_FLAG_L1S;
1224			if (alcdebug)
1225				printf("%s: ASPM %s %s\n",
1226				    device_xname(sc->sc_dev),
1227				    aspm_state[state],
1228				    state == 0 ? "disabled" : "enabled");
1229			alc_disable_l0s_l1(sc);
1230		} else {
1231			aprint_debug_dev(sc->sc_dev, "no ASPM support\n");
1232		}
1233	} else {
1234		val = CSR_READ_4(sc, ALC_PDLL_TRNS1);
1235		val &= ~PDLL_TRNS1_D3PLLOFF_ENB;
1236		CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val);
1237		val = CSR_READ_4(sc, ALC_MASTER_CFG);
1238		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
1239		    (sc->alc_rev & 0x01) != 0) {
1240			if ((val & MASTER_WAKEN_25M) == 0 ||
1241			    (val & MASTER_CLK_SEL_DIS) == 0) {
1242				val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS;
1243				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
1244			}
1245		} else {
1246			if ((val & MASTER_WAKEN_25M) == 0 ||
1247			    (val & MASTER_CLK_SEL_DIS) != 0) {
1248				val |= MASTER_WAKEN_25M;
1249				val &= ~MASTER_CLK_SEL_DIS;
1250				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
1251			}
1252		}
1253	}
1254	alc_aspm(sc, 1, IFM_UNKNOWN);
1255}
1256
1257static void
1258alc_attach(device_t parent, device_t self, void *aux)
1259{
1260
1261	struct alc_softc *sc = device_private(self);
1262	struct pci_attach_args *pa = aux;
1263	pci_chipset_tag_t pc = pa->pa_pc;
1264	pci_intr_handle_t ih;
1265	const char *intrstr;
1266	struct ifnet *ifp;
1267	struct mii_data * const mii = &sc->sc_miibus;
1268	pcireg_t memtype;
1269	uint16_t burst;
1270	int base, mii_flags, error = 0;
1271	char intrbuf[PCI_INTRSTR_LEN];
1272
1273	sc->alc_ident = alc_find_ident(pa);
1274	sc->alc_rev = PCI_REVISION(pa->pa_class);
1275
1276	aprint_naive("\n");
1277	aprint_normal(": %s\n", sc->alc_ident->name);
1278
1279	sc->sc_dev = self;
1280	sc->sc_pct = pa->pa_pc;
1281	sc->sc_pcitag = pa->pa_tag;
1282
1283	if (pci_dma64_available(pa))
1284		sc->sc_dmat = pa->pa_dmat64;
1285	else
1286		sc->sc_dmat = pa->pa_dmat;
1287
1288	/*
1289	 * Allocate IO memory
1290	 */
1291	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
1292	switch (memtype) {
1293	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1294	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
1295	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1296		break;
1297	default:
1298		aprint_error_dev(self, "invalid base address register\n");
1299		break;
1300	}
1301
1302	if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
1303	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
1304		aprint_error_dev(self, "could not map mem space\n");
1305		return;
1306	}
1307
1308	if (pci_intr_map(pa, &ih) != 0) {
1309		printf(": can't map interrupt\n");
1310		goto fail;
1311	}
1312
1313	/*
1314	 * Allocate IRQ
1315	 */
1316	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
1317	sc->sc_irq_handle = pci_intr_establish_xname(pc, ih, IPL_NET, alc_intr,
1318	    sc, device_xname(self));
1319	if (sc->sc_irq_handle == NULL) {
1320		printf(": could not establish interrupt");
1321		if (intrstr != NULL)
1322			printf(" at %s", intrstr);
1323		printf("\n");
1324		goto fail;
1325	}
1326	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
1327
1328	/* Set PHY address. */
1329	sc->alc_phyaddr = ALC_PHY_ADDR;
1330
1331	/* Initialize DMA parameters. */
1332	sc->alc_dma_rd_burst = 0;
1333	sc->alc_dma_wr_burst = 0;
1334	sc->alc_rcb = DMA_CFG_RCB_64;
1335	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
1336	    &base, NULL)) {
1337		sc->alc_flags |= ALC_FLAG_PCIE;
1338		sc->alc_expcap = base;
1339		burst = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1340		    base + PCIE_DCSR) >> 16;
1341		sc->alc_dma_rd_burst = (burst & 0x7000) >> 12;
1342		sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5;
1343		if (alcdebug) {
1344			printf("%s: Read request size : %u bytes.\n",
1345			    device_xname(sc->sc_dev),
1346			    alc_dma_burst[sc->alc_dma_rd_burst]);
1347			printf("%s: TLP payload size : %u bytes.\n",
1348			    device_xname(sc->sc_dev),
1349			    alc_dma_burst[sc->alc_dma_wr_burst]);
1350		}
1351		if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024)
1352			sc->alc_dma_rd_burst = 3;
1353		if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024)
1354			sc->alc_dma_wr_burst = 3;
1355		/*
1356		 * Force maximum payload size to 128 bytes for
1357		 * E2200/E2400/E2500.
1358		 * Otherwise it triggers DMA write error.
1359		 */
1360		if ((sc->alc_flags & ALC_FLAG_E2X00) != 0)
1361			sc->alc_dma_wr_burst = 0;
1362		alc_init_pcie(sc);
1363	}
1364
1365	/* Reset PHY. */
1366	alc_phy_reset(sc);
1367
1368	/* Reset the ethernet controller. */
1369	alc_stop_mac(sc);
1370	alc_reset(sc);
1371
1372	/*
1373	 * One odd thing is AR8132 uses the same PHY hardware(F1
1374	 * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports
1375	 * the PHY supports 1000Mbps but that's not true. The PHY
1376	 * used in AR8132 can't establish gigabit link even if it
1377	 * shows the same PHY model/revision number of AR8131.
1378	 */
1379	switch (sc->alc_ident->deviceid) {
1380	case PCI_PRODUCT_ATTANSIC_E2200:
1381	case PCI_PRODUCT_ATTANSIC_E2400:
1382	case PCI_PRODUCT_ATTANSIC_E2500:
1383		sc->alc_flags |= ALC_FLAG_E2X00;
1384		/* FALLTHROUGH */
1385	case PCI_PRODUCT_ATTANSIC_AR8161:
1386		if (PCI_SUBSYS_ID(pci_conf_read(
1387		   sc->sc_pct, sc->sc_pcitag, PCI_SUBSYS_ID_REG)) == 0x0091 &&
1388		   sc->alc_rev == 0)
1389			sc->alc_flags |= ALC_FLAG_LINK_WAR;
1390		/* FALLTHROUGH */
1391	case PCI_PRODUCT_ATTANSIC_AR8171:
1392		sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
1393		break;
1394	case PCI_PRODUCT_ATTANSIC_AR8162:
1395	case PCI_PRODUCT_ATTANSIC_AR8172:
1396		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY;
1397		break;
1398	case PCI_PRODUCT_ATTANSIC_AR8152_B:
1399	case PCI_PRODUCT_ATTANSIC_AR8152_B2:
1400		sc->alc_flags |= ALC_FLAG_APS;
1401		/* FALLTHROUGH */
1402	case PCI_PRODUCT_ATTANSIC_AR8132:
1403		sc->alc_flags |= ALC_FLAG_FASTETHER;
1404		break;
1405	case PCI_PRODUCT_ATTANSIC_AR8151:
1406	case PCI_PRODUCT_ATTANSIC_AR8151_V2:
1407		sc->alc_flags |= ALC_FLAG_APS;
1408		/* FALLTHROUGH */
1409	default:
1410		break;
1411	}
1412	sc->alc_flags |= ALC_FLAG_JUMBO;
1413
1414	/*
1415	 * It seems that AR813x/AR815x has silicon bug for SMB. In
1416	 * addition, Atheros said that enabling SMB wouldn't improve
1417	 * performance. However I think it's bad to access lots of
1418	 * registers to extract MAC statistics.
1419	 */
1420	sc->alc_flags |= ALC_FLAG_SMB_BUG;
1421	/*
1422	 * Don't use Tx CMB. It is known to have silicon bug.
1423	 */
1424	sc->alc_flags |= ALC_FLAG_CMB_BUG;
1425	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
1426	    MASTER_CHIP_REV_SHIFT;
1427	if (alcdebug) {
1428		printf("%s: PCI device revision : 0x%04x\n",
1429		    device_xname(sc->sc_dev), sc->alc_rev);
1430		printf("%s: Chip id/revision : 0x%04x\n",
1431		    device_xname(sc->sc_dev), sc->alc_chip_rev);
1432		printf("%s: %u Tx FIFO, %u Rx FIFO\n", device_xname(sc->sc_dev),
1433		    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
1434		    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
1435	}
1436
1437	error = alc_dma_alloc(sc);
1438	if (error)
1439		goto fail;
1440
1441	callout_init(&sc->sc_tick_ch, 0);
1442	callout_setfunc(&sc->sc_tick_ch, alc_tick, sc);
1443
1444	/* Load station address. */
1445	alc_get_macaddr(sc);
1446
1447	aprint_normal_dev(self, "Ethernet address %s\n",
1448	    ether_sprintf(sc->alc_eaddr));
1449
1450	ifp = &sc->sc_ec.ec_if;
1451	ifp->if_softc = sc;
1452	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1453	ifp->if_init = alc_init;
1454	ifp->if_ioctl = alc_ioctl;
1455	ifp->if_start = alc_start;
1456	ifp->if_stop = alc_stop;
1457	ifp->if_watchdog = alc_watchdog;
1458	IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
1459	IFQ_SET_READY(&ifp->if_snd);
1460	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
1461
1462	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
1463
1464#ifdef ALC_CHECKSUM
1465	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1466				IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1467				IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
1468#endif
1469
1470#if NVLAN > 0
1471	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1472	sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
1473#endif
1474
1475	/*
1476	 * XXX
1477	 * It seems enabling Tx checksum offloading makes more trouble.
1478	 * Sometimes the controller does not receive any frames when
1479	 * Tx checksum offloading is enabled. I'm not sure whether this
1480	 * is a bug in Tx checksum offloading logic or I got broken
1481	 * sample boards. To safety, don't enable Tx checksum offloading
1482	 * by default but give chance to users to toggle it if they know
1483	 * their controllers work without problems.
1484	 * Fortunately, Tx checksum offloading for AR816x family
1485	 * seems to work.
1486	 */
1487	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
1488		ifp->if_capenable &= ~IFCAP_CSUM_IPv4_Tx;
1489		ifp->if_capabilities &= ~ALC_CSUM_FEATURES;
1490	}
1491
1492	/* Set up MII bus. */
1493	mii->mii_ifp = ifp;
1494	mii->mii_readreg = alc_miibus_readreg;
1495	mii->mii_writereg = alc_miibus_writereg;
1496	mii->mii_statchg = alc_miibus_statchg;
1497
1498	sc->sc_ec.ec_mii = mii;
1499	ifmedia_init(&mii->mii_media, 0, alc_mediachange, alc_mediastatus);
1500	mii_flags = 0;
1501	if ((sc->alc_flags & ALC_FLAG_JUMBO) != 0)
1502		mii_flags |= MIIF_DOPAUSE;
1503	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
1504		MII_OFFSET_ANY, mii_flags);
1505
1506	if (LIST_FIRST(&mii->mii_phys) == NULL) {
1507		printf("%s: no PHY found!\n", device_xname(sc->sc_dev));
1508		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL,
1509		    0, NULL);
1510		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
1511	} else
1512		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
1513
1514	if_attach(ifp);
1515	if_deferred_start_init(ifp, NULL);
1516	ether_ifattach(ifp, sc->alc_eaddr);
1517
1518	if (!pmf_device_register(self, NULL, NULL))
1519		aprint_error_dev(self, "couldn't establish power handler\n");
1520	else
1521		pmf_class_network_register(self, ifp);
1522
1523	return;
1524fail:
1525	alc_dma_free(sc);
1526	if (sc->sc_irq_handle != NULL) {
1527		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
1528		sc->sc_irq_handle = NULL;
1529	}
1530	if (sc->sc_mem_size) {
1531		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
1532		sc->sc_mem_size = 0;
1533	}
1534}
1535
1536static int
1537alc_detach(device_t self, int flags)
1538{
1539	struct alc_softc *sc = device_private(self);
1540	struct ifnet *ifp = &sc->sc_ec.ec_if;
1541	int s;
1542
1543	s = splnet();
1544	alc_stop(ifp, 0);
1545	splx(s);
1546
1547	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
1548
1549	ether_ifdetach(ifp);
1550	if_detach(ifp);
1551	alc_dma_free(sc);
1552
1553	/* Delete all remaining media. */
1554	ifmedia_fini(&sc->sc_miibus.mii_media);
1555
1556	alc_phy_down(sc);
1557	if (sc->sc_irq_handle != NULL) {
1558		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
1559		sc->sc_irq_handle = NULL;
1560	}
1561	if (sc->sc_mem_size) {
1562		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
1563		sc->sc_mem_size = 0;
1564	}
1565
1566	return (0);
1567}
1568
1569static int
1570alc_dma_alloc(struct alc_softc *sc)
1571{
1572	struct alc_txdesc *txd;
1573	struct alc_rxdesc *rxd;
1574	int nsegs, error, i;
1575
1576	/*
1577	 * Create DMA stuffs for TX ring
1578	 */
1579	error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
1580	    ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
1581	if (error) {
1582		sc->alc_cdata.alc_tx_ring_map = NULL;
1583		return (ENOBUFS);
1584	}
1585
1586	/* Allocate DMA'able memory for TX ring */
1587	error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
1588	    PAGE_SIZE, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
1589	    &nsegs, BUS_DMA_NOWAIT);
1590	if (error) {
1591		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
1592		    device_xname(sc->sc_dev));
1593		return error;
1594	}
1595
1596	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
1597	    nsegs, ALC_TX_RING_SZ, (void **)&sc->alc_rdata.alc_tx_ring,
1598	    BUS_DMA_NOWAIT);
1599	if (error)
1600		return (ENOBUFS);
1601
1602	/* Load the DMA map for Tx ring. */
1603	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
1604	    sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
1605	if (error) {
1606		printf("%s: could not load DMA'able memory for Tx ring.\n",
1607		    device_xname(sc->sc_dev));
1608		bus_dmamem_free(sc->sc_dmat,
1609		    &sc->alc_rdata.alc_tx_ring_seg, 1);
1610		return error;
1611	}
1612
1613	sc->alc_rdata.alc_tx_ring_paddr =
1614	    sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;
1615
1616	/*
1617	 * Create DMA stuffs for RX ring
1618	 */
1619	error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
1620	    ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
1621	if (error)
1622		return (ENOBUFS);
1623
1624	/* Allocate DMA'able memory for RX ring */
1625	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
1626	    PAGE_SIZE, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
1627	    &nsegs, BUS_DMA_NOWAIT);
1628	if (error) {
1629		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
1630		    device_xname(sc->sc_dev));
1631		return error;
1632	}
1633
1634	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
1635	    nsegs, ALC_RX_RING_SZ, (void **)&sc->alc_rdata.alc_rx_ring,
1636	    BUS_DMA_NOWAIT);
1637	if (error)
1638		return (ENOBUFS);
1639
1640	/* Load the DMA map for Rx ring. */
1641	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
1642	    sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
1643	if (error) {
1644		printf("%s: could not load DMA'able memory for Rx ring.\n",
1645		    device_xname(sc->sc_dev));
1646		bus_dmamem_free(sc->sc_dmat,
1647		    &sc->alc_rdata.alc_rx_ring_seg, 1);
1648		return error;
1649	}
1650
1651	sc->alc_rdata.alc_rx_ring_paddr =
1652	    sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;
1653
1654	/*
1655	 * Create DMA stuffs for RX return ring
1656	 */
1657	error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
1658	    ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
1659	if (error)
1660		return (ENOBUFS);
1661
1662	/* Allocate DMA'able memory for RX return ring */
1663	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
1664	    PAGE_SIZE, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
1665	    &nsegs, BUS_DMA_NOWAIT);
1666	if (error) {
1667		printf("%s: could not allocate DMA'able memory for Rx "
1668		    "return ring.\n", device_xname(sc->sc_dev));
1669		return error;
1670	}
1671
1672	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
1673	    nsegs, ALC_RR_RING_SZ, (void **)&sc->alc_rdata.alc_rr_ring,
1674	    BUS_DMA_NOWAIT);
1675	if (error)
1676		return (ENOBUFS);
1677
1678	/*  Load the DMA map for Rx return ring. */
1679	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
1680	    sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
1681	if (error) {
1682		printf("%s: could not load DMA'able memory for Rx return ring."
1683		    "\n", device_xname(sc->sc_dev));
1684		bus_dmamem_free(sc->sc_dmat,
1685		    &sc->alc_rdata.alc_rr_ring_seg, 1);
1686		return error;
1687	}
1688
1689	sc->alc_rdata.alc_rr_ring_paddr =
1690	    sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;
1691
1692	/*
1693	 * All of the memory we allocated for the Rx ring / Rx Return
1694	 * ring need to be in the same 4GB segment.  Make sure this is
1695	 * so.
1696	 *
1697	 * XXX We don't care WHAT 4GB segment they're in, just that
1698	 * XXX they're all in the same one.  Need some bus_dma API
1699	 * XXX help to make this easier to enforce when we actually
1700	 * XXX perform the allocation.
1701	 */
1702	if (ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr) !=
1703	    ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) {
1704		aprint_error_dev(sc->sc_dev,
1705		    "Rx control data allocation constraints failed\n");
1706		return ENOBUFS;
1707	}
1708
1709	/*
1710	 * Create DMA stuffs for CMB block
1711	 */
1712	error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
1713	    ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
1714	    &sc->alc_cdata.alc_cmb_map);
1715	if (error)
1716		return (ENOBUFS);
1717
1718	/* Allocate DMA'able memory for CMB block */
1719	error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
1720	    PAGE_SIZE, 0, &sc->alc_rdata.alc_cmb_seg, 1,
1721	    &nsegs, BUS_DMA_NOWAIT);
1722	if (error) {
1723		printf("%s: could not allocate DMA'able memory for "
1724		    "CMB block\n", device_xname(sc->sc_dev));
1725		return error;
1726	}
1727
1728	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
1729	    nsegs, ALC_CMB_SZ, (void **)&sc->alc_rdata.alc_cmb,
1730	    BUS_DMA_NOWAIT);
1731	if (error)
1732		return (ENOBUFS);
1733
1734	/*  Load the DMA map for CMB block. */
1735	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
1736	    sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
1737	    BUS_DMA_WAITOK);
1738	if (error) {
1739		printf("%s: could not load DMA'able memory for CMB block\n",
1740		    device_xname(sc->sc_dev));
1741		bus_dmamem_free(sc->sc_dmat,
1742		    &sc->alc_rdata.alc_cmb_seg, 1);
1743		return error;
1744	}
1745
1746	sc->alc_rdata.alc_cmb_paddr =
1747	    sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;
1748
1749	/*
1750	 * Create DMA stuffs for SMB block
1751	 */
1752	error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
1753	    ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
1754	    &sc->alc_cdata.alc_smb_map);
1755	if (error)
1756		return (ENOBUFS);
1757
1758	/* Allocate DMA'able memory for SMB block */
1759	error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
1760	    PAGE_SIZE, 0, &sc->alc_rdata.alc_smb_seg, 1,
1761	    &nsegs, BUS_DMA_NOWAIT);
1762	if (error) {
1763		printf("%s: could not allocate DMA'able memory for "
1764		    "SMB block\n", device_xname(sc->sc_dev));
1765		return error;
1766	}
1767
1768	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
1769	    nsegs, ALC_SMB_SZ, (void **)&sc->alc_rdata.alc_smb,
1770	    BUS_DMA_NOWAIT);
1771	if (error)
1772		return (ENOBUFS);
1773
1774	/*  Load the DMA map for SMB block */
1775	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
1776	    sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
1777	    BUS_DMA_WAITOK);
1778	if (error) {
1779		printf("%s: could not load DMA'able memory for SMB block\n",
1780		    device_xname(sc->sc_dev));
1781		bus_dmamem_free(sc->sc_dmat,
1782		    &sc->alc_rdata.alc_smb_seg, 1);
1783		return error;
1784	}
1785
1786	sc->alc_rdata.alc_smb_paddr =
1787	    sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;
1788
1789
1790	/* Create DMA maps for Tx buffers. */
1791	for (i = 0; i < ALC_TX_RING_CNT; i++) {
1792		txd = &sc->alc_cdata.alc_txdesc[i];
1793		txd->tx_m = NULL;
1794		txd->tx_dmamap = NULL;
1795		error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
1796		    ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
1797		    &txd->tx_dmamap);
1798		if (error) {
1799			printf("%s: could not create Tx dmamap.\n",
1800			    device_xname(sc->sc_dev));
1801			return error;
1802		}
1803	}
1804
1805	/* Create DMA maps for Rx buffers. */
1806	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1807	    BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap);
1808	if (error) {
1809		printf("%s: could not create spare Rx dmamap.\n",
1810		    device_xname(sc->sc_dev));
1811		return error;
1812	}
1813
1814	for (i = 0; i < ALC_RX_RING_CNT; i++) {
1815		rxd = &sc->alc_cdata.alc_rxdesc[i];
1816		rxd->rx_m = NULL;
1817		rxd->rx_dmamap = NULL;
1818		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1819		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
1820		if (error) {
1821			printf("%s: could not create Rx dmamap.\n",
1822			    device_xname(sc->sc_dev));
1823			return error;
1824		}
1825	}
1826
1827	return (0);
1828}
1829
1830static void
1831alc_dma_free(struct alc_softc *sc)
1832{
1833	struct alc_txdesc *txd;
1834	struct alc_rxdesc *rxd;
1835	int i;
1836
1837	/* Tx buffers */
1838	for (i = 0; i < ALC_TX_RING_CNT; i++) {
1839		txd = &sc->alc_cdata.alc_txdesc[i];
1840		if (txd->tx_dmamap != NULL) {
1841			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
1842			txd->tx_dmamap = NULL;
1843		}
1844	}
1845	/* Rx buffers */
1846	for (i = 0; i < ALC_RX_RING_CNT; i++) {
1847		rxd = &sc->alc_cdata.alc_rxdesc[i];
1848		if (rxd->rx_dmamap != NULL) {
1849			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
1850			rxd->rx_dmamap = NULL;
1851		}
1852	}
1853	if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1854		bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap);
1855		sc->alc_cdata.alc_rx_sparemap = NULL;
1856	}
1857
1858	/* Tx ring. */
1859	if (sc->alc_cdata.alc_tx_ring_map != NULL)
1860		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map);
1861	if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1862	    sc->alc_rdata.alc_tx_ring != NULL)
1863		bus_dmamem_free(sc->sc_dmat,
1864		    &sc->alc_rdata.alc_tx_ring_seg, 1);
1865	sc->alc_rdata.alc_tx_ring = NULL;
1866	sc->alc_cdata.alc_tx_ring_map = NULL;
1867
1868	/* Rx ring. */
1869	if (sc->alc_cdata.alc_rx_ring_map != NULL)
1870		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map);
1871	if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1872	    sc->alc_rdata.alc_rx_ring != NULL)
1873		bus_dmamem_free(sc->sc_dmat,
1874		    &sc->alc_rdata.alc_rx_ring_seg, 1);
1875	sc->alc_rdata.alc_rx_ring = NULL;
1876	sc->alc_cdata.alc_rx_ring_map = NULL;
1877
1878	/* Rx return ring. */
1879	if (sc->alc_cdata.alc_rr_ring_map != NULL)
1880		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map);
1881	if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1882	    sc->alc_rdata.alc_rr_ring != NULL)
1883		bus_dmamem_free(sc->sc_dmat,
1884		    &sc->alc_rdata.alc_rr_ring_seg, 1);
1885	sc->alc_rdata.alc_rr_ring = NULL;
1886	sc->alc_cdata.alc_rr_ring_map = NULL;
1887
1888	/* CMB block */
1889	if (sc->alc_cdata.alc_cmb_map != NULL)
1890		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map);
1891	if (sc->alc_cdata.alc_cmb_map != NULL &&
1892	    sc->alc_rdata.alc_cmb != NULL)
1893		bus_dmamem_free(sc->sc_dmat,
1894		    &sc->alc_rdata.alc_cmb_seg, 1);
1895	sc->alc_rdata.alc_cmb = NULL;
1896	sc->alc_cdata.alc_cmb_map = NULL;
1897
1898	/* SMB block */
1899	if (sc->alc_cdata.alc_smb_map != NULL)
1900		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map);
1901	if (sc->alc_cdata.alc_smb_map != NULL &&
1902	    sc->alc_rdata.alc_smb != NULL)
1903		bus_dmamem_free(sc->sc_dmat,
1904		    &sc->alc_rdata.alc_smb_seg, 1);
1905	sc->alc_rdata.alc_smb = NULL;
1906	sc->alc_cdata.alc_smb_map = NULL;
1907}
1908
1909static int
1910alc_encap(struct alc_softc *sc, struct mbuf **m_head)
1911{
1912	struct alc_txdesc *txd, *txd_last;
1913	struct tx_desc *desc;
1914	struct mbuf *m;
1915	bus_dmamap_t map;
1916	uint32_t cflags, poff, vtag;
1917	int error, idx, nsegs, prod;
1918
1919	m = *m_head;
1920	cflags = vtag = 0;
1921	poff = 0;
1922
1923	prod = sc->alc_cdata.alc_tx_prod;
1924	txd = &sc->alc_cdata.alc_txdesc[prod];
1925	txd_last = txd;
1926	map = txd->tx_dmamap;
1927
1928	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);
1929
1930	if (error == EFBIG) {
1931		error = 0;
1932
1933		*m_head = m_pullup(*m_head, MHLEN);
1934		if (*m_head == NULL) {
1935			printf("%s: can't defrag TX mbuf\n",
1936			    device_xname(sc->sc_dev));
1937			return ENOBUFS;
1938		}
1939
1940		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
1941		    BUS_DMA_NOWAIT);
1942
1943		if (error != 0) {
1944			printf("%s: could not load defragged TX mbuf\n",
1945			    device_xname(sc->sc_dev));
1946			m_freem(*m_head);
1947			*m_head = NULL;
1948			return error;
1949		}
1950	} else if (error) {
1951		printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
1952		return (error);
1953	}
1954
1955	nsegs = map->dm_nsegs;
1956
1957	if (nsegs == 0) {
1958		m_freem(*m_head);
1959		*m_head = NULL;
1960		return (EIO);
1961	}
1962
1963	/* Check descriptor overrun. */
1964	if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
1965		bus_dmamap_unload(sc->sc_dmat, map);
1966		return (ENOBUFS);
1967	}
1968	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1969	    BUS_DMASYNC_PREWRITE);
1970
1971	m = *m_head;
1972	desc = NULL;
1973	idx = 0;
1974#if NVLAN > 0
1975	/* Configure VLAN hardware tag insertion. */
1976	if (vlan_has_tag(m)) {
1977		vtag = htons(vlan_get_tag(m));
1978		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
1979		cflags |= TD_INS_VLAN_TAG;
1980	}
1981#endif
1982	/* Configure Tx checksum offload. */
1983	if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
1984		cflags |= TD_CUSTOM_CSUM;
1985		/* Set checksum start offset. */
1986		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
1987		    TD_PLOAD_OFFSET_MASK;
1988	}
1989	for (; idx < nsegs; idx++) {
1990		desc = &sc->alc_rdata.alc_tx_ring[prod];
1991		desc->len =
1992		    htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag);
1993		desc->flags = htole32(cflags);
1994		desc->addr = htole64(map->dm_segs[idx].ds_addr);
1995		sc->alc_cdata.alc_tx_cnt++;
1996		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1997	}
1998	/* Update producer index. */
1999	sc->alc_cdata.alc_tx_prod = prod;
2000
2001	/* Finally set EOP on the last descriptor. */
2002	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
2003	desc = &sc->alc_rdata.alc_tx_ring[prod];
2004	desc->flags |= htole32(TD_EOP);
2005
2006	/* Swap dmamap of the first and the last. */
2007	txd = &sc->alc_cdata.alc_txdesc[prod];
2008	map = txd_last->tx_dmamap;
2009	txd_last->tx_dmamap = txd->tx_dmamap;
2010	txd->tx_dmamap = map;
2011	txd->tx_m = m;
2012
2013	return (0);
2014}
2015
2016static void
2017alc_start(struct ifnet *ifp)
2018{
2019	struct alc_softc *sc = ifp->if_softc;
2020	struct mbuf *m_head;
2021	int enq;
2022
2023	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2024		return;
2025	if ((sc->alc_flags & ALC_FLAG_LINK) == 0)
2026		return;
2027
2028	/* Reclaim transmitted frames. */
2029	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
2030		alc_txeof(sc);
2031
2032	enq = 0;
2033	for (;;) {
2034		IFQ_DEQUEUE(&ifp->if_snd, m_head);
2035		if (m_head == NULL)
2036			break;
2037
2038		/*
2039		 * Pack the data into the transmit ring. If we
2040		 * don't have room, set the OACTIVE flag and wait
2041		 * for the NIC to drain the ring.
2042		 */
2043		if (alc_encap(sc, &m_head)) {
2044			if (m_head == NULL)
2045				break;
2046			ifp->if_flags |= IFF_OACTIVE;
2047			break;
2048		}
2049		enq = 1;
2050
2051		/*
2052		 * If there's a BPF listener, bounce a copy of this frame
2053		 * to him.
2054		 */
2055		bpf_mtap(ifp, m_head, BPF_D_OUT);
2056	}
2057
2058	if (enq) {
2059		/* Sync descriptors. */
2060		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2061		    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
2062		    BUS_DMASYNC_PREWRITE);
2063		/* Kick. Assume we're using normal Tx priority queue. */
2064		CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
2065		    (sc->alc_cdata.alc_tx_prod <<
2066		    MBOX_TD_PROD_LO_IDX_SHIFT) &
2067		    MBOX_TD_PROD_LO_IDX_MASK);
2068		/* Set a timeout in case the chip goes out to lunch. */
2069		ifp->if_timer = ALC_TX_TIMEOUT;
2070	}
2071}
2072
2073static void
2074alc_watchdog(struct ifnet *ifp)
2075{
2076	struct alc_softc *sc = ifp->if_softc;
2077
2078	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
2079		printf("%s: watchdog timeout (missed link)\n",
2080		    device_xname(sc->sc_dev));
2081		if_statinc(ifp, if_oerrors);
2082		alc_init_backend(ifp, false);
2083		return;
2084	}
2085
2086	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
2087	if_statinc(ifp, if_oerrors);
2088	alc_init_backend(ifp, false);
2089	alc_start(ifp);
2090}
2091
2092static int
2093alc_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2094{
2095	struct alc_softc *sc = ifp->if_softc;
2096	int s, error = 0;
2097
2098	s = splnet();
2099
2100	switch (cmd) {
2101	case SIOCSIFADDR:
2102		error = ether_ioctl(ifp, cmd, data);
2103		ifp->if_flags |= IFF_UP;
2104		if (!(ifp->if_flags & IFF_RUNNING))
2105			alc_init(ifp);
2106		break;
2107
2108	case SIOCSIFFLAGS:
2109		error = ether_ioctl(ifp, cmd, data);
2110		if (ifp->if_flags & IFF_UP) {
2111			if (ifp->if_flags & IFF_RUNNING)
2112				error = ENETRESET;
2113			else
2114				alc_init(ifp);
2115		} else {
2116			if (ifp->if_flags & IFF_RUNNING)
2117				alc_stop(ifp, 0);
2118		}
2119		break;
2120
2121	default:
2122		error = ether_ioctl(ifp, cmd, data);
2123		break;
2124	}
2125
2126	if (error == ENETRESET) {
2127		if (ifp->if_flags & IFF_RUNNING)
2128			alc_iff(sc);
2129		error = 0;
2130	}
2131
2132	splx(s);
2133	return (error);
2134}
2135
2136static void
2137alc_mac_config(struct alc_softc *sc)
2138{
2139	struct mii_data *mii;
2140	uint32_t reg;
2141
2142	mii = &sc->sc_miibus;
2143	reg = CSR_READ_4(sc, ALC_MAC_CFG);
2144	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
2145	    MAC_CFG_SPEED_MASK);
2146	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
2147	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
2148	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
2149		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
2150	/* Reprogram MAC with resolved speed/duplex. */
2151	switch (IFM_SUBTYPE(mii->mii_media_active)) {
2152	case IFM_10_T:
2153	case IFM_100_TX:
2154		reg |= MAC_CFG_SPEED_10_100;
2155		break;
2156	case IFM_1000_T:
2157		reg |= MAC_CFG_SPEED_1000;
2158		break;
2159	}
2160	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2161		reg |= MAC_CFG_FULL_DUPLEX;
2162		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2163			reg |= MAC_CFG_TX_FC;
2164		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2165			reg |= MAC_CFG_RX_FC;
2166	}
2167	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2168}
2169
2170static void
2171alc_stats_clear(struct alc_softc *sc)
2172{
2173	struct smb sb, *smb;
2174	uint32_t *reg;
2175	int i;
2176
2177	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2178		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2179		    sc->alc_cdata.alc_smb_map->dm_mapsize,
2180		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2181		smb = sc->alc_rdata.alc_smb;
2182		/* Update done, clear. */
2183		smb->updated = 0;
2184		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2185		    sc->alc_cdata.alc_smb_map->dm_mapsize,
2186		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2187	} else {
2188		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2189		    reg++) {
2190			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2191			i += sizeof(uint32_t);
2192		}
2193		/* Read Tx statistics. */
2194		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2195		    reg++) {
2196			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2197			i += sizeof(uint32_t);
2198		}
2199	}
2200}
2201
2202static void
2203alc_stats_update(struct alc_softc *sc)
2204{
2205	struct ifnet *ifp = &sc->sc_ec.ec_if;
2206	struct alc_hw_stats *stat;
2207	struct smb sb, *smb;
2208	uint32_t *reg;
2209	int i;
2210
2211	stat = &sc->alc_stats;
2212	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2213		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2214		    sc->alc_cdata.alc_smb_map->dm_mapsize,
2215		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2216		smb = sc->alc_rdata.alc_smb;
2217		if (smb->updated == 0)
2218			return;
2219	} else {
2220		smb = &sb;
2221		/* Read Rx statistics. */
2222		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
2223		    reg++) {
2224			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
2225			i += sizeof(uint32_t);
2226		}
2227		/* Read Tx statistics. */
2228		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
2229		    reg++) {
2230			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
2231			i += sizeof(uint32_t);
2232		}
2233	}
2234
2235	/* Rx stats. */
2236	stat->rx_frames += smb->rx_frames;
2237	stat->rx_bcast_frames += smb->rx_bcast_frames;
2238	stat->rx_mcast_frames += smb->rx_mcast_frames;
2239	stat->rx_pause_frames += smb->rx_pause_frames;
2240	stat->rx_control_frames += smb->rx_control_frames;
2241	stat->rx_crcerrs += smb->rx_crcerrs;
2242	stat->rx_lenerrs += smb->rx_lenerrs;
2243	stat->rx_bytes += smb->rx_bytes;
2244	stat->rx_runts += smb->rx_runts;
2245	stat->rx_fragments += smb->rx_fragments;
2246	stat->rx_pkts_64 += smb->rx_pkts_64;
2247	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2248	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2249	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2250	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2251	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2252	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2253	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2254	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2255	stat->rx_rrs_errs += smb->rx_rrs_errs;
2256	stat->rx_alignerrs += smb->rx_alignerrs;
2257	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2258	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2259	stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2260
2261	/* Tx stats. */
2262	stat->tx_frames += smb->tx_frames;
2263	stat->tx_bcast_frames += smb->tx_bcast_frames;
2264	stat->tx_mcast_frames += smb->tx_mcast_frames;
2265	stat->tx_pause_frames += smb->tx_pause_frames;
2266	stat->tx_excess_defer += smb->tx_excess_defer;
2267	stat->tx_control_frames += smb->tx_control_frames;
2268	stat->tx_deferred += smb->tx_deferred;
2269	stat->tx_bytes += smb->tx_bytes;
2270	stat->tx_pkts_64 += smb->tx_pkts_64;
2271	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2272	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2273	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2274	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2275	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2276	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2277	stat->tx_single_colls += smb->tx_single_colls;
2278	stat->tx_multi_colls += smb->tx_multi_colls;
2279	stat->tx_late_colls += smb->tx_late_colls;
2280	stat->tx_excess_colls += smb->tx_excess_colls;
2281	stat->tx_underrun += smb->tx_underrun;
2282	stat->tx_desc_underrun += smb->tx_desc_underrun;
2283	stat->tx_lenerrs += smb->tx_lenerrs;
2284	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2285	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2286	stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2287
2288	/* Update counters in ifnet. */
2289	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2290
2291	if_statadd_ref(nsr, if_opackets, smb->tx_frames);
2292
2293	if_statadd_ref(nsr, if_collisions,
2294	    smb->tx_single_colls +
2295	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
2296	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);
2297
2298	if_statadd_ref(nsr, if_oerrors,
2299	    smb->tx_late_colls + smb->tx_excess_colls +
2300	    smb->tx_underrun + smb->tx_pkts_truncated);
2301
2302	if_statadd_ref(nsr, if_ierrors,
2303	    smb->rx_crcerrs + smb->rx_lenerrs +
2304	    smb->rx_runts + smb->rx_pkts_truncated +
2305	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
2306	    smb->rx_alignerrs);
2307
2308	IF_STAT_PUTREF(ifp);
2309
2310	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
2311		/* Update done, clear. */
2312		smb->updated = 0;
2313		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2314		sc->alc_cdata.alc_smb_map->dm_mapsize,
2315		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2316	}
2317}
2318
2319static int
2320alc_intr(void *arg)
2321{
2322	struct alc_softc *sc = arg;
2323	struct ifnet *ifp = &sc->sc_ec.ec_if;
2324	uint32_t status;
2325
2326	status = CSR_READ_4(sc, ALC_INTR_STATUS);
2327	if ((status & ALC_INTRS) == 0)
2328		return (0);
2329
2330	/* Acknowledge and disable interrupts. */
2331	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
2332
2333	if (ifp->if_flags & IFF_RUNNING) {
2334		if (status & INTR_RX_PKT) {
2335			int error;
2336
2337			error = alc_rxintr(sc);
2338			if (error) {
2339				alc_init_backend(ifp, false);
2340				return (0);
2341			}
2342		}
2343
2344		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
2345		    INTR_TXQ_TO_RST)) {
2346			if (status & INTR_DMA_RD_TO_RST)
2347				printf("%s: DMA read error! -- resetting\n",
2348				    device_xname(sc->sc_dev));
2349			if (status & INTR_DMA_WR_TO_RST)
2350				printf("%s: DMA write error! -- resetting\n",
2351				    device_xname(sc->sc_dev));
2352			if (status & INTR_TXQ_TO_RST)
2353				printf("%s: TxQ reset! -- resetting\n",
2354				    device_xname(sc->sc_dev));
2355			alc_init_backend(ifp, false);
2356			return (0);
2357		}
2358
2359		alc_txeof(sc);
2360		if_schedule_deferred_start(ifp);
2361	}
2362
2363	/* Re-enable interrupts. */
2364	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
2365	return (1);
2366}
2367
2368static void
2369alc_txeof(struct alc_softc *sc)
2370{
2371	struct ifnet *ifp = &sc->sc_ec.ec_if;
2372	struct alc_txdesc *txd;
2373	uint32_t cons, prod;
2374	int prog;
2375
2376	if (sc->alc_cdata.alc_tx_cnt == 0)
2377		return;
2378	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2379	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
2380	    BUS_DMASYNC_POSTREAD);
2381	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2382		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2383		    sc->alc_cdata.alc_cmb_map->dm_mapsize,
2384		    BUS_DMASYNC_POSTREAD);
2385		prod = sc->alc_rdata.alc_cmb->cons;
2386	} else
2387		prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
2388	/* Assume we're using normal Tx priority queue. */
2389	prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
2390	    MBOX_TD_CONS_LO_IDX_SHIFT;
2391	cons = sc->alc_cdata.alc_tx_cons;
2392	/*
2393	 * Go through our Tx list and free mbufs for those
2394	 * frames which have been transmitted.
2395	 */
2396	for (prog = 0; cons != prod; prog++,
2397	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
2398		if (sc->alc_cdata.alc_tx_cnt <= 0)
2399			break;
2400		prog++;
2401		ifp->if_flags &= ~IFF_OACTIVE;
2402		sc->alc_cdata.alc_tx_cnt--;
2403		txd = &sc->alc_cdata.alc_txdesc[cons];
2404		if (txd->tx_m != NULL) {
2405			/* Reclaim transmitted mbufs. */
2406			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
2407			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2408			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2409			m_freem(txd->tx_m);
2410			txd->tx_m = NULL;
2411		}
2412	}
2413
2414	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2415	    bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2416		sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREREAD);
2417	sc->alc_cdata.alc_tx_cons = cons;
2418	/*
2419	 * Unarm watchdog timer only when there is no pending
2420	 * frames in Tx queue.
2421	 */
2422	if (sc->alc_cdata.alc_tx_cnt == 0)
2423		ifp->if_timer = 0;
2424}
2425
2426static int
2427alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, bool init)
2428{
2429	struct mbuf *m;
2430	bus_dmamap_t map;
2431	int error;
2432
2433	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2434	if (m == NULL)
2435		return (ENOBUFS);
2436	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
2437	if (!(m->m_flags & M_EXT)) {
2438		m_freem(m);
2439		return (ENOBUFS);
2440	}
2441
2442	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
2443
2444	error = bus_dmamap_load_mbuf(sc->sc_dmat,
2445	    sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);
2446
2447	if (error != 0) {
2448		m_freem(m);
2449
2450		if (init)
2451			printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
2452
2453		return (error);
2454	}
2455
2456	if (rxd->rx_m != NULL) {
2457		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2458		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2459		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2460	}
2461	map = rxd->rx_dmamap;
2462	rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
2463	sc->alc_cdata.alc_rx_sparemap = map;
2464	bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0, rxd->rx_dmamap->dm_mapsize,
2465	    BUS_DMASYNC_PREREAD);
2466	rxd->rx_m = m;
2467	rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2468	return (0);
2469}
2470
2471static int
2472alc_rxintr(struct alc_softc *sc)
2473{
2474	struct ifnet *ifp = &sc->sc_ec.ec_if;
2475	struct rx_rdesc *rrd;
2476	uint32_t nsegs, status;
2477	int rr_cons, prog;
2478
2479	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2480	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
2481	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2482	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2483	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
2484	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2485	rr_cons = sc->alc_cdata.alc_rr_cons;
2486	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
2487		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
2488		status = le32toh(rrd->status);
2489		if ((status & RRD_VALID) == 0)
2490			break;
2491		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
2492		if (nsegs == 0) {
2493			/* This should not happen! */
2494			if (alcdebug)
2495				printf("%s: unexpected segment count -- "
2496				    "resetting\n", device_xname(sc->sc_dev));
2497			return (EIO);
2498		}
2499		alc_rxeof(sc, rrd);
2500		/* Clear Rx return status. */
2501		rrd->status = 0;
2502		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
2503		sc->alc_cdata.alc_rx_cons += nsegs;
2504		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
2505		prog += nsegs;
2506	}
2507
2508	if (prog > 0) {
2509		/* Update the consumer index. */
2510		sc->alc_cdata.alc_rr_cons = rr_cons;
2511		/* Sync Rx return descriptors. */
2512		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2513		    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
2514		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2515		/*
2516		 * Sync updated Rx descriptors such that controller see
2517		 * modified buffer addresses.
2518		 */
2519		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2520		    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
2521		    BUS_DMASYNC_PREWRITE);
2522		/*
2523		 * Let controller know availability of new Rx buffers.
2524		 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors
2525		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
2526		 * only when Rx buffer pre-fetching is required. In
2527		 * addition we already set ALC_RX_RD_FREE_THRESH to
2528		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However
2529		 * it still seems that pre-fetching needs more
2530		 * experimentation.
2531		 */
2532		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
2533		    sc->alc_cdata.alc_rx_cons);
2534	}
2535
2536	return (0);
2537}
2538
2539/* Receive a frame. */
2540static void
2541alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
2542{
2543	struct ifnet *ifp = &sc->sc_ec.ec_if;
2544	struct alc_rxdesc *rxd;
2545	struct mbuf *mp, *m;
2546	uint32_t rdinfo, status;
2547	int count, nsegs, rx_cons;
2548
2549	status = le32toh(rrd->status);
2550	rdinfo = le32toh(rrd->rdinfo);
2551	rx_cons = RRD_RD_IDX(rdinfo);
2552	nsegs = RRD_RD_CNT(rdinfo);
2553
2554	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
2555	if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
2556		/*
2557		 * We want to pass the following frames to upper
2558		 * layer regardless of error status of Rx return
2559		 * ring.
2560		 *
2561		 *  o IP/TCP/UDP checksum is bad.
2562		 *  o frame length and protocol specific length
2563		 *     does not match.
2564		 *
2565		 *  Force network stack compute checksum for
2566		 *  errored frames.
2567		 */
2568		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
2569		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
2570		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
2571			return;
2572	}
2573
2574	for (count = 0; count < nsegs; count++,
2575	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
2576		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
2577		mp = rxd->rx_m;
2578		/* Add a new receive buffer to the ring. */
2579		if (alc_newbuf(sc, rxd, false) != 0) {
2580			if_statinc(ifp, if_iqdrops);
2581			/* Reuse Rx buffers. */
2582			if (sc->alc_cdata.alc_rxhead != NULL)
2583				m_freem(sc->alc_cdata.alc_rxhead);
2584			break;
2585		}
2586
2587		/*
2588		 * Assume we've received a full sized frame.
2589		 * Actual size is fixed when we encounter the end of
2590		 * multi-segmented frame.
2591		 */
2592		mp->m_len = sc->alc_buf_size;
2593
2594		/* Chain received mbufs. */
2595		if (sc->alc_cdata.alc_rxhead == NULL) {
2596			sc->alc_cdata.alc_rxhead = mp;
2597			sc->alc_cdata.alc_rxtail = mp;
2598		} else {
2599			m_remove_pkthdr(mp);
2600			sc->alc_cdata.alc_rxprev_tail =
2601			    sc->alc_cdata.alc_rxtail;
2602			sc->alc_cdata.alc_rxtail->m_next = mp;
2603			sc->alc_cdata.alc_rxtail = mp;
2604		}
2605
2606		if (count == nsegs - 1) {
2607			/* Last desc. for this frame. */
2608			m = sc->alc_cdata.alc_rxhead;
2609			KASSERT(m->m_flags & M_PKTHDR);
2610			/*
2611			 * It seems that L1C/L2C controller has no way
2612			 * to tell hardware to strip CRC bytes.
2613			 */
2614			m->m_pkthdr.len =
2615			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
2616			if (nsegs > 1) {
2617				/* Set last mbuf size. */
2618				mp->m_len = sc->alc_cdata.alc_rxlen -
2619				    (nsegs - 1) * sc->alc_buf_size;
2620				/* Remove the CRC bytes in chained mbufs. */
2621				if (mp->m_len <= ETHER_CRC_LEN) {
2622					sc->alc_cdata.alc_rxtail =
2623					    sc->alc_cdata.alc_rxprev_tail;
2624					sc->alc_cdata.alc_rxtail->m_len -=
2625					    (ETHER_CRC_LEN - mp->m_len);
2626					sc->alc_cdata.alc_rxtail->m_next = NULL;
2627					m_freem(mp);
2628				} else {
2629					mp->m_len -= ETHER_CRC_LEN;
2630				}
2631			} else
2632				m->m_len = m->m_pkthdr.len;
2633			m_set_rcvif(m, ifp);
2634#if NVLAN > 0
2635			/*
2636			 * Due to hardware bugs, Rx checksum offloading
2637			 * was intentionally disabled.
2638			 */
2639			if (status & RRD_VLAN_TAG) {
2640				uint32_t vtag = RRD_VLAN(le32toh(rrd->vtag));
2641				vlan_set_tag(m, ntohs(vtag));
2642			}
2643#endif
2644
2645			/* Pass it on. */
2646			if_percpuq_enqueue(ifp->if_percpuq, m);
2647		}
2648	}
2649	/* Reset mbuf chains. */
2650	ALC_RXCHAIN_RESET(sc);
2651}
2652
2653static void
2654alc_tick(void *xsc)
2655{
2656	struct alc_softc *sc = xsc;
2657	struct mii_data *mii = &sc->sc_miibus;
2658	int s;
2659
2660	s = splnet();
2661	mii_tick(mii);
2662	alc_stats_update(sc);
2663	splx(s);
2664
2665	callout_schedule(&sc->sc_tick_ch, hz);
2666}
2667
2668static void
2669alc_osc_reset(struct alc_softc *sc)
2670{
2671	uint32_t reg;
2672
2673	reg = CSR_READ_4(sc, ALC_MISC3);
2674	reg &= ~MISC3_25M_BY_SW;
2675	reg |= MISC3_25M_NOTO_INTNL;
2676	CSR_WRITE_4(sc, ALC_MISC3, reg);
2677
2678	reg = CSR_READ_4(sc, ALC_MISC);
2679	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) {
2680		/*
2681		 * Restore over-current protection default value.
2682		 * This value could be reset by MAC reset.
2683		 */
2684		reg &= ~MISC_PSW_OCP_MASK;
2685		reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT);
2686		reg &= ~MISC_INTNLOSC_OPEN;
2687		CSR_WRITE_4(sc, ALC_MISC, reg);
2688		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
2689		reg = CSR_READ_4(sc, ALC_MISC2);
2690		reg &= ~MISC2_CALB_START;
2691		CSR_WRITE_4(sc, ALC_MISC2, reg);
2692		CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START);
2693
2694	} else {
2695		reg &= ~MISC_INTNLOSC_OPEN;
2696		/* Disable isolate for revision A devices. */
2697		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
2698			reg &= ~MISC_ISO_ENB;
2699		CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN);
2700		CSR_WRITE_4(sc, ALC_MISC, reg);
2701	}
2702
2703	DELAY(20);
2704}
2705
2706static void
2707alc_reset(struct alc_softc *sc)
2708{
2709	uint32_t pmcfg, reg;
2710	int i;
2711
2712	pmcfg = 0;
2713	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2714		/* Reset workaround. */
2715		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1);
2716		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
2717		    (sc->alc_rev & 0x01) != 0) {
2718			/* Disable L0s/L1s before reset. */
2719			pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
2720			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
2721			    != 0) {
2722				pmcfg &= ~(PM_CFG_ASPM_L0S_ENB |
2723				    PM_CFG_ASPM_L1_ENB);
2724				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
2725			}
2726		}
2727	}
2728	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2729	reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
2730	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2731
2732	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2733		for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2734			DELAY(10);
2735			if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0)
2736				break;
2737		}
2738		if (i == 0)
2739			printf("%s: MAC reset timeout!\n", device_xname(sc->sc_dev));
2740	}
2741	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2742		DELAY(10);
2743		if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2744			break;
2745	}
2746	if (i == 0)
2747		printf("%s: master reset timeout!\n", device_xname(sc->sc_dev));
2748
2749	for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2750		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2751		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC |
2752		    IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
2753			break;
2754		DELAY(10);
2755	}
2756	if (i == 0)
2757		printf("%s: reset timeout(0x%08x)!\n",
2758		    device_xname(sc->sc_dev), reg);
2759
2760	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2761		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
2762		    (sc->alc_rev & 0x01) != 0) {
2763			reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2764			reg |= MASTER_CLK_SEL_DIS;
2765			CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2766			/* Restore L0s/L1s config. */
2767			if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB))
2768			    != 0)
2769				CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
2770		}
2771
2772		alc_osc_reset(sc);
2773		reg = CSR_READ_4(sc, ALC_MISC3);
2774		reg &= ~MISC3_25M_BY_SW;
2775		reg |= MISC3_25M_NOTO_INTNL;
2776		CSR_WRITE_4(sc, ALC_MISC3, reg);
2777		reg = CSR_READ_4(sc, ALC_MISC);
2778		reg &= ~MISC_INTNLOSC_OPEN;
2779		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1)
2780			reg &= ~MISC_ISO_ENB;
2781		CSR_WRITE_4(sc, ALC_MISC, reg);
2782		DELAY(20);
2783	}
2784	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
2785	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
2786	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2)
2787		CSR_WRITE_4(sc, ALC_SERDES_LOCK,
2788		    CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
2789		    SERDES_PHY_CLK_SLOWDOWN);
2790}
2791
2792static int
2793alc_init(struct ifnet *ifp)
2794{
2795
2796	return alc_init_backend(ifp, true);
2797}
2798
2799static int
2800alc_init_backend(struct ifnet *ifp, bool init)
2801{
2802	struct alc_softc *sc = ifp->if_softc;
2803	struct mii_data *mii;
2804	uint8_t eaddr[ETHER_ADDR_LEN];
2805	bus_addr_t paddr;
2806	uint32_t reg, rxf_hi, rxf_lo;
2807	int error;
2808
2809	/*
2810	 * Cancel any pending I/O.
2811	 */
2812	alc_stop(ifp, 0);
2813	/*
2814	 * Reset the chip to a known state.
2815	 */
2816	alc_reset(sc);
2817
2818	/* Initialize Rx descriptors. */
2819	error = alc_init_rx_ring(sc, init);
2820	if (error != 0) {
2821		printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
2822		alc_stop(ifp, 0);
2823		return (error);
2824	}
2825	alc_init_rr_ring(sc);
2826	alc_init_tx_ring(sc);
2827	alc_init_cmb(sc);
2828	alc_init_smb(sc);
2829
2830	/* Enable all clocks. */
2831	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2832		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB |
2833		    CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB |
2834		    CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB |
2835		    CLK_GATING_RXMAC_ENB);
2836		if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0)
2837			CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER,
2838			    IDLE_DECISN_TIMER_DEFAULT_1MS);
2839	} else
2840		CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);
2841
2842	/* Reprogram the station address. */
2843	memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
2844	CSR_WRITE_4(sc, ALC_PAR0, (uint32_t)eaddr[2] << 24
2845	    | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2846	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
2847	/*
2848	 * Clear WOL status and disable all WOL feature as WOL
2849	 * would interfere Rx operation under normal environments.
2850	 */
2851	CSR_READ_4(sc, ALC_WOL_CFG);
2852	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2853	/* Set Tx descriptor base addresses. */
2854	paddr = sc->alc_rdata.alc_tx_ring_paddr;
2855	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2856	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2857	/* We don't use high priority ring. */
2858	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2859	/* Set Tx descriptor counter. */
2860	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2861	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2862	/* Set Rx descriptor base addresses. */
2863	paddr = sc->alc_rdata.alc_rx_ring_paddr;
2864	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2865	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2866	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2867		/* We use one Rx ring. */
2868		CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2869		CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2870		CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2871	}
2872	/* Set Rx descriptor counter. */
2873	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2874	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2875
2876	/*
2877	 * Let hardware split jumbo frames into alc_max_buf_sized chunks.
2878	 * if it do not fit the buffer size. Rx return descriptor holds
2879	 * a counter that indicates how many fragments were made by the
2880	 * hardware. The buffer size should be multiple of 8 bytes.
2881	 * Since hardware has limit on the size of buffer size, always
2882	 * use the maximum value.
2883	 * For strict-alignment architectures make sure to reduce buffer
2884	 * size by 8 bytes to make room for alignment fixup.
2885	 */
2886	sc->alc_buf_size = RX_BUF_SIZE_MAX;
2887	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2888
2889	paddr = sc->alc_rdata.alc_rr_ring_paddr;
2890	/* Set Rx return descriptor base addresses. */
2891	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2892	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2893		/* We use one Rx return ring. */
2894		CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2895		CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2896		CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2897	}
2898	/* Set Rx return descriptor counter. */
2899	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2900	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2901	paddr = sc->alc_rdata.alc_cmb_paddr;
2902	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2903	paddr = sc->alc_rdata.alc_smb_paddr;
2904	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2905	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2906
2907	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
2908		/* Reconfigure SRAM - Vendor magic. */
2909		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
2910		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
2911		CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
2912		CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
2913		CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
2914		CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
2915		CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
2916		CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
2917	}
2918
2919	/* Tell hardware that we're ready to load DMA blocks. */
2920	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2921
2922	/* Configure interrupt moderation timer. */
2923	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
2924	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
2925	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2926	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
2927		reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2928	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2929	/*
2930	 * We don't want to automatic interrupt clear as task queue
2931	 * for the interrupt should know interrupt status.
2932	 */
2933	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
2934	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
2935	reg |= MASTER_SA_TIMER_ENB;
2936	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2937		reg |= MASTER_IM_RX_TIMER_ENB;
2938	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 &&
2939	    ALC_USECS(sc->alc_int_tx_mod) != 0)
2940		reg |= MASTER_IM_TX_TIMER_ENB;
2941	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2942	/*
2943	 * Disable interrupt re-trigger timer. We don't want automatic
2944	 * re-triggering of un-ACKed interrupts.
2945	 */
2946	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
2947	/* Configure CMB. */
2948	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
2949		CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3);
2950		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER,
2951		    ALC_USECS(sc->alc_int_tx_mod));
2952	} else {
2953		if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
2954			CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
2955			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
2956		} else
2957			CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
2958	}
2959	/*
2960	 * Hardware can be configured to issue SMB interrupt based
2961	 * on programmed interval. Since there is a callout that is
2962	 * invoked for every hz in driver we use that instead of
2963	 * relying on periodic SMB interrupt.
2964	 */
2965	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
2966	/* Clear MAC statistics. */
2967	alc_stats_clear(sc);
2968
2969	/*
2970	 * Always use maximum frame size that controller can support.
2971	 * Otherwise received frames that has larger frame length
2972	 * than alc(4) MTU would be silently dropped in hardware. This
2973	 * would make path-MTU discovery hard as sender wouldn't get
2974	 * any responses from receiver. alc(4) supports
2975	 * multi-fragmented frames on Rx path so it has no issue on
2976	 * assembling fragmented frames. Using maximum frame size also
2977	 * removes the need to reinitialize hardware when interface
2978	 * MTU configuration was changed.
2979	 *
2980	 * Be conservative in what you do, be liberal in what you
2981	 * accept from others - RFC 793.
2982	 */
2983	CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);
2984
2985	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
2986		/* Disable header split(?) */
2987		CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
2988
2989		/* Configure IPG/IFG parameters. */
2990		CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
2991		    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) &
2992		    IPG_IFG_IPGT_MASK) |
2993		    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) &
2994		    IPG_IFG_MIFG_MASK) |
2995		    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) &
2996		    IPG_IFG_IPG1_MASK) |
2997		    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) &
2998		    IPG_IFG_IPG2_MASK));
2999		/* Set parameters for half-duplex media. */
3000		CSR_WRITE_4(sc, ALC_HDPX_CFG,
3001		    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
3002		    HDPX_CFG_LCOL_MASK) |
3003		    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
3004		    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
3005		    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
3006		    HDPX_CFG_ABEBT_MASK) |
3007		    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
3008		    HDPX_CFG_JAMIPG_MASK));
3009	}
3010
3011	/*
3012	 * Set TSO/checksum offload threshold. For frames that is
3013	 * larger than this threshold, hardware wouldn't do
3014	 * TSO/checksum offloading.
3015	 */
3016	reg = (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
3017	    TSO_OFFLOAD_THRESH_MASK;
3018	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
3019		reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB;
3020	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg);
3021	/* Configure TxQ. */
3022	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
3023	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
3024	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
3025	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
3026		reg >>= 1;
3027	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
3028	    TXQ_CFG_TD_BURST_MASK;
3029	reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB;
3030	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
3031	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
3032		reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT |
3033		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT |
3034		    TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT |
3035		    HQTD_CFG_BURST_ENB);
3036		CSR_WRITE_4(sc, ALC_HQTD_CFG, reg);
3037		reg = WRR_PRI_RESTRICT_NONE;
3038		reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT |
3039		    WRR_PRI_DEFAULT << WRR_PRI1_SHIFT |
3040		    WRR_PRI_DEFAULT << WRR_PRI2_SHIFT |
3041		    WRR_PRI_DEFAULT << WRR_PRI3_SHIFT);
3042		CSR_WRITE_4(sc, ALC_WRR, reg);
3043	} else {
3044		/* Configure Rx free descriptor pre-fetching. */
3045		CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
3046		    ((RX_RD_FREE_THRESH_HI_DEFAULT <<
3047		    RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) |
3048		    ((RX_RD_FREE_THRESH_LO_DEFAULT <<
3049		    RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK));
3050	}
3051
3052	/*
3053	 * Configure flow control parameters.
3054	 * XON  : 80% of Rx FIFO
3055	 * XOFF : 30% of Rx FIFO
3056	 */
3057	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
3058		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
3059		reg &= SRAM_RX_FIFO_LEN_MASK;
3060		reg *= 8;
3061		if (reg > 8 * 1024)
3062			reg -= RX_FIFO_PAUSE_816X_RSVD;
3063		else
3064			reg -= RX_BUF_SIZE_MAX;
3065		reg /= 8;
3066		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
3067		    ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
3068		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
3069		    (((RX_FIFO_PAUSE_816X_RSVD / 8) <<
3070		    RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
3071		    RX_FIFO_PAUSE_THRESH_HI_MASK));
3072	} else if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
3073	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132) {
3074		reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
3075		rxf_hi = (reg * 8) / 10;
3076		rxf_lo = (reg * 3) / 10;
3077		CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
3078		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
3079		     RX_FIFO_PAUSE_THRESH_LO_MASK) |
3080		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
3081		     RX_FIFO_PAUSE_THRESH_HI_MASK));
3082	}
3083
3084	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
3085		/* Disable RSS until I understand L1C/L2C's RSS logic. */
3086		CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
3087		CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
3088	}
3089
3090	/* Configure RxQ. */
3091	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
3092	    RXQ_CFG_RD_BURST_MASK;
3093	reg |= RXQ_CFG_RSS_MODE_DIS;
3094	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
3095		reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT <<
3096		    RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) &
3097		    RXQ_CFG_816X_IDT_TBL_SIZE_MASK;
3098		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
3099			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
3100	} else {
3101		if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 &&
3102		    sc->alc_ident->deviceid != PCI_PRODUCT_ATTANSIC_AR8151_V2)
3103			reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
3104	}
3105	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3106
3107	/* Configure DMA parameters. */
3108	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
3109	reg |= sc->alc_rcb;
3110	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
3111		reg |= DMA_CFG_CMB_ENB;
3112	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
3113		reg |= DMA_CFG_SMB_ENB;
3114	else
3115		reg |= DMA_CFG_SMB_DIS;
3116	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
3117	    DMA_CFG_RD_BURST_SHIFT;
3118	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
3119	    DMA_CFG_WR_BURST_SHIFT;
3120	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
3121	    DMA_CFG_RD_DELAY_CNT_MASK;
3122	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
3123	    DMA_CFG_WR_DELAY_CNT_MASK;
3124	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
3125		switch (AR816X_REV(sc->alc_rev)) {
3126		case AR816X_REV_A0:
3127		case AR816X_REV_A1:
3128			reg |= DMA_CFG_RD_CHNL_SEL_2;
3129			break;
3130		case AR816X_REV_B0:
3131			/* FALLTHROUGH */
3132		default:
3133			reg |= DMA_CFG_RD_CHNL_SEL_4;
3134			break;
3135		}
3136	}
3137	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3138
3139	/*
3140	 * Configure Tx/Rx MACs.
3141	 *  - Auto-padding for short frames.
3142	 *  - Enable CRC generation.
3143	 *  Actual reconfiguration of MAC for resolved speed/duplex
3144	 *  is followed after detection of link establishment.
3145	 *  AR813x/AR815x always does checksum computation regardless
3146	 *  of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to
3147	 *  have bug in protocol field in Rx return structure so
3148	 *  these controllers can't handle fragmented frames. Disable
3149	 *  Rx checksum offloading until there is a newer controller
3150	 *  that has sane implementation.
3151	 */
3152	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
3153	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
3154	    MAC_CFG_PREAMBLE_MASK);
3155	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 ||
3156	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
3157	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
3158	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
3159		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
3160	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
3161		reg |= MAC_CFG_SPEED_10_100;
3162	else
3163		reg |= MAC_CFG_SPEED_1000;
3164	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3165
3166	/* Set up the receive filter. */
3167	alc_iff(sc);
3168	alc_rxvlan(sc);
3169
3170	/* Acknowledge all pending interrupts and clear it. */
3171	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
3172	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3173	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
3174
3175	sc->alc_flags &= ~ALC_FLAG_LINK;
3176	/* Switch to the current media. */
3177	mii = &sc->sc_miibus;
3178	mii_mediachg(mii);
3179
3180	callout_schedule(&sc->sc_tick_ch, hz);
3181
3182	ifp->if_flags |= IFF_RUNNING;
3183	ifp->if_flags &= ~IFF_OACTIVE;
3184
3185	return (0);
3186}
3187
3188static void
3189alc_stop(struct ifnet *ifp, int disable)
3190{
3191	struct alc_softc *sc = ifp->if_softc;
3192	struct alc_txdesc *txd;
3193	struct alc_rxdesc *rxd;
3194	uint32_t reg;
3195	int i;
3196
3197	callout_stop(&sc->sc_tick_ch);
3198
3199	/*
3200	 * Mark the interface down and cancel the watchdog timer.
3201	 */
3202	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3203	ifp->if_timer = 0;
3204
3205	sc->alc_flags &= ~ALC_FLAG_LINK;
3206
3207	alc_stats_update(sc);
3208
3209	mii_down(&sc->sc_miibus);
3210
3211	/* Disable interrupts. */
3212	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
3213	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3214
3215	/* Disable DMA. */
3216	reg = CSR_READ_4(sc, ALC_DMA_CFG);
3217	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
3218	reg |= DMA_CFG_SMB_DIS;
3219	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
3220	DELAY(1000);
3221
3222	/* Stop Rx/Tx MACs. */
3223	alc_stop_mac(sc);
3224
3225	/* Disable interrupts which might be touched in taskq handler. */
3226	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
3227
3228	/* Disable L0s/L1s */
3229	alc_aspm(sc, 0, IFM_UNKNOWN);
3230
3231	/* Reclaim Rx buffers that have been processed. */
3232	if (sc->alc_cdata.alc_rxhead != NULL)
3233		m_freem(sc->alc_cdata.alc_rxhead);
3234	ALC_RXCHAIN_RESET(sc);
3235	/*
3236	 * Free Tx/Rx mbufs still in the queues.
3237	 */
3238	for (i = 0; i < ALC_RX_RING_CNT; i++) {
3239		rxd = &sc->alc_cdata.alc_rxdesc[i];
3240		if (rxd->rx_m != NULL) {
3241			bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
3242			    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3243			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
3244			m_freem(rxd->rx_m);
3245			rxd->rx_m = NULL;
3246		}
3247	}
3248	for (i = 0; i < ALC_TX_RING_CNT; i++) {
3249		txd = &sc->alc_cdata.alc_txdesc[i];
3250		if (txd->tx_m != NULL) {
3251			bus_dmamap_sync(sc->sc_dmat, txd->tx_dmamap, 0,
3252			    txd->tx_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3253			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
3254			m_freem(txd->tx_m);
3255			txd->tx_m = NULL;
3256		}
3257	}
3258}
3259
3260static void
3261alc_stop_mac(struct alc_softc *sc)
3262{
3263	uint32_t reg;
3264	int i;
3265
3266	alc_stop_queue(sc);
3267	/* Disable Rx/Tx MAC. */
3268	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3269	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
3270		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
3271		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3272	}
3273	for (i = ALC_TIMEOUT; i > 0; i--) {
3274		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3275		if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0)
3276			break;
3277		DELAY(10);
3278	}
3279	if (i == 0)
3280		printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n",
3281		    device_xname(sc->sc_dev), reg);
3282}
3283
3284static void
3285alc_start_queue(struct alc_softc *sc)
3286{
3287	uint32_t qcfg[] = {
3288		0,
3289		RXQ_CFG_QUEUE0_ENB,
3290		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
3291		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
3292		RXQ_CFG_ENB
3293	};
3294	uint32_t cfg;
3295
3296	/* Enable RxQ. */
3297	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
3298	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
3299		cfg &= ~RXQ_CFG_ENB;
3300		cfg |= qcfg[1];
3301	} else
3302		cfg |= RXQ_CFG_QUEUE0_ENB;
3303	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
3304	/* Enable TxQ. */
3305	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
3306	cfg |= TXQ_CFG_ENB;
3307	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
3308}
3309
3310static void
3311alc_stop_queue(struct alc_softc *sc)
3312{
3313	uint32_t reg;
3314	int i;
3315
3316	/* Disable RxQ. */
3317	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
3318	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
3319		if ((reg & RXQ_CFG_ENB) != 0) {
3320			reg &= ~RXQ_CFG_ENB;
3321			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3322		}
3323	} else {
3324		if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) {
3325			reg &= ~RXQ_CFG_QUEUE0_ENB;
3326			CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
3327		}
3328	}
3329	/* Disable TxQ. */
3330	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
3331	if ((reg & TXQ_CFG_ENB) != 0) {
3332		reg &= ~TXQ_CFG_ENB;
3333		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
3334	}
3335	DELAY(40);
3336	for (i = ALC_TIMEOUT; i > 0; i--) {
3337		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
3338		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
3339			break;
3340		DELAY(10);
3341	}
3342	if (i == 0)
3343		printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
3344		    device_xname(sc->sc_dev), reg);
3345}
3346
3347static void
3348alc_init_tx_ring(struct alc_softc *sc)
3349{
3350	struct alc_ring_data *rd;
3351	struct alc_txdesc *txd;
3352	int i;
3353
3354	sc->alc_cdata.alc_tx_prod = 0;
3355	sc->alc_cdata.alc_tx_cons = 0;
3356	sc->alc_cdata.alc_tx_cnt = 0;
3357
3358	rd = &sc->alc_rdata;
3359	memset(rd->alc_tx_ring, 0, ALC_TX_RING_SZ);
3360	for (i = 0; i < ALC_TX_RING_CNT; i++) {
3361		txd = &sc->alc_cdata.alc_txdesc[i];
3362		txd->tx_m = NULL;
3363	}
3364
3365	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
3366	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3367}
3368
3369static int
3370alc_init_rx_ring(struct alc_softc *sc, bool init)
3371{
3372	struct alc_ring_data *rd;
3373	struct alc_rxdesc *rxd;
3374	int i;
3375
3376	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
3377	rd = &sc->alc_rdata;
3378	memset(rd->alc_rx_ring, 0, ALC_RX_RING_SZ);
3379	for (i = 0; i < ALC_RX_RING_CNT; i++) {
3380		rxd = &sc->alc_cdata.alc_rxdesc[i];
3381		rxd->rx_m = NULL;
3382		rxd->rx_desc = &rd->alc_rx_ring[i];
3383		if (alc_newbuf(sc, rxd, init) != 0)
3384			return (ENOBUFS);
3385	}
3386
3387	/*
3388	 * Since controller does not update Rx descriptors, driver
3389	 * does have to read Rx descriptors back so BUS_DMASYNC_PREWRITE
3390	 * is enough to ensure coherence.
3391	 */
3392	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
3393	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3394	/* Let controller know availability of new Rx buffers. */
3395	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
3396
3397	return (0);
3398}
3399
3400static void
3401alc_init_rr_ring(struct alc_softc *sc)
3402{
3403	struct alc_ring_data *rd;
3404
3405	sc->alc_cdata.alc_rr_cons = 0;
3406	ALC_RXCHAIN_RESET(sc);
3407
3408	rd = &sc->alc_rdata;
3409	memset(rd->alc_rr_ring, 0, ALC_RR_RING_SZ);
3410	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
3411	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
3412	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3413}
3414
3415static void
3416alc_init_cmb(struct alc_softc *sc)
3417{
3418	struct alc_ring_data *rd;
3419
3420	rd = &sc->alc_rdata;
3421	memset(rd->alc_cmb, 0, ALC_CMB_SZ);
3422	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
3423	    sc->alc_cdata.alc_cmb_map->dm_mapsize,
3424	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3425}
3426
3427static void
3428alc_init_smb(struct alc_softc *sc)
3429{
3430	struct alc_ring_data *rd;
3431
3432	rd = &sc->alc_rdata;
3433	memset(rd->alc_smb, 0, ALC_SMB_SZ);
3434	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
3435	    sc->alc_cdata.alc_smb_map->dm_mapsize,
3436	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3437}
3438
3439static void
3440alc_rxvlan(struct alc_softc *sc)
3441{
3442	uint32_t reg;
3443
3444	reg = CSR_READ_4(sc, ALC_MAC_CFG);
3445	if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
3446		reg |= MAC_CFG_VLAN_TAG_STRIP;
3447	else
3448		reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3449	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
3450}
3451
3452static void
3453alc_iff(struct alc_softc *sc)
3454{
3455	struct ethercom *ec = &sc->sc_ec;
3456	struct ifnet *ifp = &ec->ec_if;
3457	struct ether_multi *enm;
3458	struct ether_multistep step;
3459	uint32_t crc;
3460	uint32_t mchash[2];
3461	uint32_t rxcfg;
3462
3463	rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
3464	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3465	ifp->if_flags &= ~IFF_ALLMULTI;
3466
3467	/*
3468	 * Always accept broadcast frames.
3469	 */
3470	rxcfg |= MAC_CFG_BCAST;
3471
3472	/* Program new filter. */
3473	if ((ifp->if_flags & IFF_PROMISC) != 0)
3474		goto update;
3475
3476	memset(mchash, 0, sizeof(mchash));
3477
3478	ETHER_LOCK(ec);
3479	ETHER_FIRST_MULTI(step, ec, enm);
3480	while (enm != NULL) {
3481		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3482			/* XXX Use ETHER_F_ALLMULTI in future. */
3483			ifp->if_flags |= IFF_ALLMULTI;
3484			ETHER_UNLOCK(ec);
3485			goto update;
3486		}
3487		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
3488		mchash[crc >> 31] |= 1U << ((crc >> 26) & 0x1f);
3489		ETHER_NEXT_MULTI(step, enm);
3490	}
3491	ETHER_UNLOCK(ec);
3492
3493update:
3494	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3495		if (ifp->if_flags & IFF_PROMISC) {
3496			rxcfg |= MAC_CFG_PROMISC;
3497			/* XXX Use ETHER_F_ALLMULTI in future. */
3498			ifp->if_flags |= IFF_ALLMULTI;
3499		} else
3500			rxcfg |= MAC_CFG_ALLMULTI;
3501		mchash[0] = mchash[1] = 0xFFFFFFFF;
3502	}
3503	CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
3504	CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
3505	CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
3506}
3507
3508MODULE(MODULE_CLASS_DRIVER, if_alc, "pci");
3509
3510#ifdef _MODULE
3511#include "ioconf.c"
3512#endif
3513
3514static int
3515if_alc_modcmd(modcmd_t cmd, void *opaque)
3516{
3517	int error = 0;
3518
3519	switch (cmd) {
3520	case MODULE_CMD_INIT:
3521#ifdef _MODULE
3522		error = config_init_component(cfdriver_ioconf_if_alc,
3523		    cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
3524#endif
3525		return error;
3526	case MODULE_CMD_FINI:
3527#ifdef _MODULE
3528		error = config_fini_component(cfdriver_ioconf_if_alc,
3529		    cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
3530#endif
3531		return error;
3532	default:
3533		return ENOTTY;
3534	}
3535}
3536