1/*	$NetBSD: if_age.c,v 1.73 2022/09/17 13:51:09 thorpej Exp $ */
2/*	$OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $	*/
3
4/*-
5 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice unmodified, this list of conditions, and the following
13 *    disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */
32
33#include <sys/cdefs.h>
34__KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.73 2022/09/17 13:51:09 thorpej Exp $");
35
36#include "vlan.h"
37
38#include <sys/param.h>
39#include <sys/proc.h>
40#include <sys/endian.h>
41#include <sys/systm.h>
42#include <sys/types.h>
43#include <sys/sockio.h>
44#include <sys/mbuf.h>
45#include <sys/queue.h>
46#include <sys/kernel.h>
47#include <sys/device.h>
48#include <sys/callout.h>
49#include <sys/socket.h>
50
51#include <net/if.h>
52#include <net/if_dl.h>
53#include <net/if_media.h>
54#include <net/if_ether.h>
55
56#ifdef INET
57#include <netinet/in.h>
58#include <netinet/in_systm.h>
59#include <netinet/in_var.h>
60#include <netinet/ip.h>
61#endif
62
63#include <net/if_types.h>
64#include <net/if_vlanvar.h>
65
66#include <net/bpf.h>
67
68#include <dev/mii/mii.h>
69#include <dev/mii/miivar.h>
70
71#include <dev/pci/pcireg.h>
72#include <dev/pci/pcivar.h>
73#include <dev/pci/pcidevs.h>
74
75#include <dev/pci/if_agereg.h>
76
77static int	age_match(device_t, cfdata_t, void *);
78static void	age_attach(device_t, device_t, void *);
79static int	age_detach(device_t, int);
80
81static bool	age_resume(device_t, const pmf_qual_t *);
82
83static int	age_miibus_readreg(device_t, int, int, uint16_t *);
84static int	age_miibus_writereg(device_t, int, int, uint16_t);
85static void	age_miibus_statchg(struct ifnet *);
86
87static int	age_init(struct ifnet *);
88static int	age_ioctl(struct ifnet *, u_long, void *);
89static void	age_start(struct ifnet *);
90static void	age_watchdog(struct ifnet *);
91static bool	age_shutdown(device_t, int);
92static void	age_mediastatus(struct ifnet *, struct ifmediareq *);
93static int	age_mediachange(struct ifnet *);
94
95static int	age_intr(void *);
96static int	age_dma_alloc(struct age_softc *);
97static void	age_dma_free(struct age_softc *);
98static void	age_get_macaddr(struct age_softc *, uint8_t[]);
99static void	age_phy_reset(struct age_softc *);
100
101static int	age_encap(struct age_softc *, struct mbuf *);
102static void	age_init_tx_ring(struct age_softc *);
103static int	age_init_rx_ring(struct age_softc *);
104static void	age_init_rr_ring(struct age_softc *);
105static void	age_init_cmb_block(struct age_softc *);
106static void	age_init_smb_block(struct age_softc *);
107static int	age_newbuf(struct age_softc *, struct age_rxdesc *, int);
108static void	age_mac_config(struct age_softc *);
109static void	age_txintr(struct age_softc *, int);
110static void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);
111static void	age_rxintr(struct age_softc *, int);
112static void	age_tick(void *);
113static void	age_reset(struct age_softc *);
114static void	age_stop(struct ifnet *, int);
115static void	age_stats_update(struct age_softc *);
116static void	age_stop_txmac(struct age_softc *);
117static void	age_stop_rxmac(struct age_softc *);
118static void	age_rxvlan(struct age_softc *sc);
119static void	age_rxfilter(struct age_softc *);
120
121CFATTACH_DECL_NEW(age, sizeof(struct age_softc),
122    age_match, age_attach, age_detach, NULL);
123
124int agedebug = 0;
125#define	DPRINTF(x)	do { if (agedebug) printf x; } while (0)
126
127#define AGE_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)
128
129static int
130age_match(device_t dev, cfdata_t match, void *aux)
131{
132	struct pci_attach_args *pa = aux;
133
134	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC &&
135	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA);
136}
137
138static void
139age_attach(device_t parent, device_t self, void *aux)
140{
141	struct age_softc *sc = device_private(self);
142	struct pci_attach_args *pa = aux;
143	pci_intr_handle_t ih;
144	const char *intrstr;
145	struct ifnet *ifp = &sc->sc_ec.ec_if;
146	struct mii_data * const mii = &sc->sc_miibus;
147	pcireg_t memtype;
148	int error = 0;
149	char intrbuf[PCI_INTRSTR_LEN];
150
151	aprint_naive("\n");
152	aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n");
153
154	sc->sc_dev = self;
155	sc->sc_pct = pa->pa_pc;
156	sc->sc_pcitag = pa->pa_tag;
157
158	if (pci_dma64_available(pa))
159		sc->sc_dmat = pa->pa_dmat64;
160	else
161		sc->sc_dmat = pa->pa_dmat;
162
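	/*
	 * Note that even with the 64-bit DMA tag, all of the control
	 * blocks allocated in age_dma_alloc() must share a single 4GB
	 * segment: the chip appears to provide only one high-address
	 * register (AGE_DESC_ADDR_HI) for its descriptor rings, and
	 * age_dma_alloc() checks for this.
	 */
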
163	/*
164	 * Allocate IO memory
165	 */
166	memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR);
167	switch (memtype) {
168	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
169	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
170	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
171		break;
172	default:
173		aprint_error_dev(self, "invalid base address register\n");
174		break;
175	}
176
177	if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
178	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) {
179		aprint_error_dev(self, "could not map mem space\n");
180		return;
181	}
182
183	if (pci_intr_map(pa, &ih) != 0) {
184		aprint_error_dev(self, "could not map interrupt\n");
185		goto fail;
186	}
187
188	/*
189	 * Allocate IRQ
190	 */
191	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
192	sc->sc_irq_handle = pci_intr_establish_xname(sc->sc_pct, ih, IPL_NET,
193	    age_intr, sc, device_xname(self));
194	if (sc->sc_irq_handle == NULL) {
195		aprint_error_dev(self, "could not establish interrupt");
196		if (intrstr != NULL)
197			aprint_error(" at %s", intrstr);
198		aprint_error("\n");
199		goto fail;
200	}
201	aprint_normal_dev(self, "%s\n", intrstr);
202
203	/* Set PHY address. */
204	sc->age_phyaddr = AGE_PHY_ADDR;
205
206	/* Reset PHY. */
207	age_phy_reset(sc);
208
209	/* Reset the ethernet controller. */
210	age_reset(sc);
211
212	/* Get PCI and chip id/revision. */
213	sc->age_rev = PCI_REVISION(pa->pa_class);
214	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
215	    MASTER_CHIP_REV_SHIFT;
216
	aprint_debug_dev(self, "PCI device revision: 0x%04x\n", sc->age_rev);
	aprint_debug_dev(self, "Chip id/revision: 0x%04x\n", sc->age_chip_rev);
219
220	if (agedebug) {
221		aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n",
222		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
223		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
224	}
225
226	/* Set max allowable DMA size. */
227	sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
228	sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
229
	/* Allocate DMA resources. */
231	error = age_dma_alloc(sc);
232	if (error)
233		goto fail;
234
235	callout_init(&sc->sc_tick_ch, 0);
236	callout_setfunc(&sc->sc_tick_ch, age_tick, sc);
237
238	/* Load station address. */
239	age_get_macaddr(sc, sc->sc_enaddr);
240
241	aprint_normal_dev(self, "Ethernet address %s\n",
242	    ether_sprintf(sc->sc_enaddr));
243
244	ifp->if_softc = sc;
245	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
246	ifp->if_init = age_init;
247	ifp->if_ioctl = age_ioctl;
248	ifp->if_start = age_start;
249	ifp->if_stop = age_stop;
250	ifp->if_watchdog = age_watchdog;
251	ifp->if_baudrate = IF_Gbps(1);
252	IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
253	IFQ_SET_READY(&ifp->if_snd);
254	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
255
256	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
257
258	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx |
259				IFCAP_CSUM_TCPv4_Rx |
260				IFCAP_CSUM_UDPv4_Rx;
261#ifdef AGE_CHECKSUM
262	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx |
263				IFCAP_CSUM_TCPv4_Tx |
264				IFCAP_CSUM_UDPv4_Tx;
265#endif
266
267#if NVLAN > 0
268	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
269	sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
270#endif
271
272	/* Set up MII bus. */
273	mii->mii_ifp = ifp;
274	mii->mii_readreg = age_miibus_readreg;
275	mii->mii_writereg = age_miibus_writereg;
276	mii->mii_statchg = age_miibus_statchg;
277
278	sc->sc_ec.ec_mii = mii;
279	ifmedia_init(&mii->mii_media, 0, age_mediachange, age_mediastatus);
280	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
281	   MII_OFFSET_ANY, MIIF_DOPAUSE);
282
283	if (LIST_FIRST(&mii->mii_phys) == NULL) {
284		aprint_error_dev(self, "no PHY found!\n");
285		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
286		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
287	} else
288		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
289
290	if_attach(ifp);
291	if_deferred_start_init(ifp, NULL);
292	ether_ifattach(ifp, sc->sc_enaddr);
293
294	if (pmf_device_register1(self, NULL, age_resume, age_shutdown))
295		pmf_class_network_register(self, ifp);
296	else
297		aprint_error_dev(self, "couldn't establish power handler\n");
298
299	return;
300
301fail:
302	age_dma_free(sc);
303	if (sc->sc_irq_handle != NULL) {
304		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
305		sc->sc_irq_handle = NULL;
306	}
307	if (sc->sc_mem_size) {
308		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
309		sc->sc_mem_size = 0;
310	}
311}
312
313static int
314age_detach(device_t self, int flags)
315{
316	struct age_softc *sc = device_private(self);
317	struct ifnet *ifp = &sc->sc_ec.ec_if;
318	int s;
319
320	pmf_device_deregister(self);
321	s = splnet();
322	age_stop(ifp, 0);
323	splx(s);
324
325	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
326
327	ether_ifdetach(ifp);
328	if_detach(ifp);
329	age_dma_free(sc);
330
331	/* Delete all remaining media. */
332	ifmedia_fini(&sc->sc_miibus.mii_media);
333
334	if (sc->sc_irq_handle != NULL) {
335		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
336		sc->sc_irq_handle = NULL;
337	}
338	if (sc->sc_mem_size) {
339		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
340		sc->sc_mem_size = 0;
341	}
342	return 0;
343}
344
345/*
346 *	Read a PHY register on the MII of the L1.
347 */
348static int
349age_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
350{
351	struct age_softc *sc = device_private(dev);
352	uint32_t v;
353	int i;
354
355	if (phy != sc->age_phyaddr)
356		return -1;
357
358	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
359	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
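	/*
	 * The MDIO transaction runs asynchronously: poll until the
	 * controller clears the EXECUTE/BUSY bits (up to AGE_PHY_TIMEOUT
	 * iterations of 1us each), then extract the result from the data
	 * field of the same register.
	 */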
360	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
361		DELAY(1);
362		v = CSR_READ_4(sc, AGE_MDIO);
363		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
364			break;
365	}
366
367	if (i == 0) {
368		printf("%s: phy read timeout: phy %d, reg %d\n",
369			device_xname(sc->sc_dev), phy, reg);
370		return ETIMEDOUT;
371	}
372
373	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
374	return 0;
375}
376
377/*
378 *	Write a PHY register on the MII of the L1.
379 */
380static int
381age_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
382{
383	struct age_softc *sc = device_private(dev);
384	uint32_t v;
385	int i;
386
387	if (phy != sc->age_phyaddr)
388		return -1;
389
390	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
391	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
392	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
393
394	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
395		DELAY(1);
396		v = CSR_READ_4(sc, AGE_MDIO);
397		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
398			break;
399	}
400
401	if (i == 0) {
402		printf("%s: phy write timeout: phy %d, reg %d\n",
403		    device_xname(sc->sc_dev), phy, reg);
404		return ETIMEDOUT;
405	}
406
407	return 0;
408}
409
410/*
411 *	Callback from MII layer when media changes.
412 */
413static void
414age_miibus_statchg(struct ifnet *ifp)
415{
416	struct age_softc *sc = ifp->if_softc;
417	struct mii_data *mii = &sc->sc_miibus;
418
419	if ((ifp->if_flags & IFF_RUNNING) == 0)
420		return;
421
422	sc->age_flags &= ~AGE_FLAG_LINK;
423	if ((mii->mii_media_status & IFM_AVALID) != 0) {
424		switch (IFM_SUBTYPE(mii->mii_media_active)) {
425		case IFM_10_T:
426		case IFM_100_TX:
427		case IFM_1000_T:
428			sc->age_flags |= AGE_FLAG_LINK;
429			break;
430		default:
431			break;
432		}
433	}
434
435	/* Stop Rx/Tx MACs. */
436	age_stop_rxmac(sc);
437	age_stop_txmac(sc);
438
439	/* Program MACs with resolved speed/duplex/flow-control. */
440	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
441		uint32_t reg;
442
443		age_mac_config(sc);
444		reg = CSR_READ_4(sc, AGE_MAC_CFG);
445		/* Restart DMA engine and Tx/Rx MAC. */
446		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
447		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
448		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
449		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
450	}
451}
452
453/*
454 *	Get the current interface media status.
455 */
456static void
457age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
458{
459	struct age_softc *sc = ifp->if_softc;
460	struct mii_data *mii = &sc->sc_miibus;
461
462	mii_pollstat(mii);
463	ifmr->ifm_status = mii->mii_media_status;
464	ifmr->ifm_active = mii->mii_media_active;
465}
466
467/*
468 *	Set hardware to newly-selected media.
469 */
470static int
471age_mediachange(struct ifnet *ifp)
472{
473	struct age_softc *sc = ifp->if_softc;
474	struct mii_data *mii = &sc->sc_miibus;
475	int error;
476
477	if (mii->mii_instance != 0) {
478		struct mii_softc *miisc;
479
480		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
481			mii_phy_reset(miisc);
482	}
483	error = mii_mediachg(mii);
484
485	return error;
486}
487
488static int
489age_intr(void *arg)
490{
491	struct age_softc *sc = arg;
492	struct ifnet *ifp = &sc->sc_ec.ec_if;
493	struct cmb *cmb;
494	uint32_t status;
495
496	status = CSR_READ_4(sc, AGE_INTR_STATUS);
497	if (status == 0 || (status & AGE_INTRS) == 0)
498		return 0;
499
500	cmb = sc->age_rdata.age_cmb_block;
501	if (cmb == NULL) {
		/*
		 * Happens when bringing up the interface
		 * without having a carrier.  Ack the interrupt.
		 */
505		CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
506		return 0;
507	}
508
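	/*
	 * The chip DMAs its interrupt status and the current Tx
	 * consumer / Rx return producer indices into the CMB block in
	 * host memory, so they are read from there instead of from
	 * device registers on every interrupt.
	 */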
509	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
510	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
511	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
512	status = le32toh(cmb->intr_status);
513	/* ACK/reenable interrupts */
514	CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
515	while ((status & AGE_INTRS) != 0) {
516		sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
517		    TPD_CONS_SHIFT;
518		sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
519		    RRD_PROD_SHIFT;
520
521		/* Let hardware know CMB was served. */
522		cmb->intr_status = 0;
523		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
524		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
525		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
526
527		if (ifp->if_flags & IFF_RUNNING) {
528			if (status & INTR_CMB_RX)
529				age_rxintr(sc, sc->age_rr_prod);
530
531			if (status & INTR_CMB_TX)
532				age_txintr(sc, sc->age_tpd_cons);
533
534			if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
535				if (status & INTR_DMA_RD_TO_RST)
536					printf("%s: DMA read error! -- "
537					    "resetting\n",
538					    device_xname(sc->sc_dev));
539				if (status & INTR_DMA_WR_TO_RST)
540					printf("%s: DMA write error! -- "
541					    "resetting\n",
542					    device_xname(sc->sc_dev));
543				age_init(ifp);
544			}
545
546			if_schedule_deferred_start(ifp);
547
548			if (status & INTR_SMB)
549				age_stats_update(sc);
550		}
		/* Check whether more interrupts have come in. */
552		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
553		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
554		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
555		status = le32toh(cmb->intr_status);
556	}
557
558	return 1;
559}
560
561static void
562age_get_macaddr(struct age_softc *sc, uint8_t eaddr[])
563{
564	uint32_t ea[2], reg;
565	int i, vpdc;
566
567	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
568	if ((reg & SPI_VPD_ENB) != 0) {
569		/* Get VPD stored in TWSI EEPROM. */
570		reg &= ~SPI_VPD_ENB;
571		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
572	}
573
574	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag,
575	    PCI_CAP_VPD, &vpdc, NULL)) {
576		/*
577		 * PCI VPD capability found, let TWSI reload EEPROM.
578		 * This will set Ethernet address of controller.
579		 */
580		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
581		    TWSI_CTRL_SW_LD_START);
582		for (i = 100; i > 0; i--) {
583			DELAY(1000);
584			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
585			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
586				break;
587		}
588		if (i == 0)
			printf("%s: EEPROM reload timed out!\n",
590			    device_xname(sc->sc_dev));
591	} else {
592		if (agedebug)
593			printf("%s: PCI VPD capability not found!\n",
594			    device_xname(sc->sc_dev));
595	}
596
597	ea[0] = CSR_READ_4(sc, AGE_PAR0);
598	ea[1] = CSR_READ_4(sc, AGE_PAR1);
599
600	eaddr[0] = (ea[1] >> 8) & 0xFF;
601	eaddr[1] = (ea[1] >> 0) & 0xFF;
602	eaddr[2] = (ea[0] >> 24) & 0xFF;
603	eaddr[3] = (ea[0] >> 16) & 0xFF;
604	eaddr[4] = (ea[0] >> 8) & 0xFF;
605	eaddr[5] = (ea[0] >> 0) & 0xFF;
606}
607
608static void
609age_phy_reset(struct age_softc *sc)
610{
611	uint16_t reg, pn;
612	int i, linkup;
613
614	/* Reset PHY. */
615	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
616	DELAY(2000);
617	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
618	DELAY(2000);
619
620#define ATPHY_DBG_ADDR		0x1D
621#define ATPHY_DBG_DATA		0x1E
622#define ATPHY_CDTC		0x16
623#define PHY_CDTC_ENB		0x0001
624#define PHY_CDTC_POFF		8
625#define ATPHY_CDTS		0x1C
626#define PHY_CDTS_STAT_OK	0x0000
627#define PHY_CDTS_STAT_SHORT	0x0100
628#define PHY_CDTS_STAT_OPEN	0x0200
629#define PHY_CDTS_STAT_INVAL	0x0300
630#define PHY_CDTS_STAT_MASK	0x0300
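/*
 * The registers above appear to be the Atheros PHY's cable-diagnostic
 * interface: ATPHY_CDTC selects a wire pair and starts the test, and
 * ATPHY_CDTS reports the result.  The loop below probes all four pairs
 * and treats any result other than "open" as an attached cable.
 */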
631
632	/* Check power saving mode. Magic from Linux. */
633	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
634	for (linkup = 0, pn = 0; pn < 4; pn++) {
635		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
636		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
637		for (i = 200; i > 0; i--) {
638			DELAY(1000);
639			age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
640			    ATPHY_CDTC, &reg);
641			if ((reg & PHY_CDTC_ENB) == 0)
642				break;
643		}
644		DELAY(1000);
645		age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
646		    ATPHY_CDTS, &reg);
647		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
648			linkup++;
649			break;
650		}
651	}
652	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR,
653	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
654	if (linkup == 0) {
655		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
656		    ATPHY_DBG_ADDR, 0);
657		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
658		    ATPHY_DBG_DATA, 0x124E);
659		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
660		    ATPHY_DBG_ADDR, 1);
661		age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
662		    ATPHY_DBG_DATA, &reg);
663		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
664		    ATPHY_DBG_DATA, reg | 0x03);
665		/* XXX */
666		DELAY(1500 * 1000);
667		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
668		    ATPHY_DBG_ADDR, 0);
669		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
670		    ATPHY_DBG_DATA, 0x024E);
671	}
672
673#undef ATPHY_DBG_ADDR
674#undef ATPHY_DBG_DATA
675#undef ATPHY_CDTC
676#undef PHY_CDTC_ENB
677#undef PHY_CDTC_POFF
678#undef ATPHY_CDTS
679#undef PHY_CDTS_STAT_OK
680#undef PHY_CDTS_STAT_SHORT
681#undef PHY_CDTS_STAT_OPEN
682#undef PHY_CDTS_STAT_INVAL
683#undef PHY_CDTS_STAT_MASK
684}
685
686static int
687age_dma_alloc(struct age_softc *sc)
688{
689	struct age_txdesc *txd;
690	struct age_rxdesc *rxd;
691	int nsegs, error, i;
692
	/*
	 * Create DMA resources for the Tx ring.
	 */
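	/*
	 * Each ring/block below follows the same pattern: create a DMA
	 * map, allocate DMA-safe memory, map it into kernel virtual
	 * address space, load the map to obtain the bus address, and
	 * record that address so age_init() can program it into the
	 * chip.
	 */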
696	error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
697	    AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
698	if (error) {
699		sc->age_cdata.age_tx_ring_map = NULL;
700		return ENOBUFS;
701	}
702
703	/* Allocate DMA'able memory for TX ring */
704	error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
705	    PAGE_SIZE, 0, &sc->age_rdata.age_tx_ring_seg, 1,
706	    &nsegs, BUS_DMA_NOWAIT);
707	if (error) {
708		printf("%s: could not allocate DMA'able memory for Tx ring, "
709		    "error = %i\n", device_xname(sc->sc_dev), error);
710		return error;
711	}
712
713	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
714	    nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring,
715	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
716	if (error)
717		return ENOBUFS;
718
719	memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ);
720
721	/*  Load the DMA map for Tx ring. */
722	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
723	    sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
724	if (error) {
725		printf("%s: could not load DMA'able memory for Tx ring, "
726		    "error = %i\n", device_xname(sc->sc_dev), error);
727		bus_dmamem_free(sc->sc_dmat,
728		    &sc->age_rdata.age_tx_ring_seg, 1);
729		return error;
730	}
731
732	sc->age_rdata.age_tx_ring_paddr =
733	    sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;
734
	/*
	 * Create DMA resources for the Rx ring.
	 */
738	error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
739	    AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
740	if (error) {
741		sc->age_cdata.age_rx_ring_map = NULL;
742		return ENOBUFS;
743	}
744
745	/* Allocate DMA'able memory for RX ring */
746	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
747	    PAGE_SIZE, 0, &sc->age_rdata.age_rx_ring_seg, 1,
748	    &nsegs, BUS_DMA_NOWAIT);
749	if (error) {
750		printf("%s: could not allocate DMA'able memory for Rx ring, "
751		    "error = %i.\n", device_xname(sc->sc_dev), error);
752		return error;
753	}
754
755	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
756	    nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring,
757	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
758	if (error)
759		return ENOBUFS;
760
761	memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ);
762
763	/* Load the DMA map for Rx ring. */
764	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
765	    sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
766	if (error) {
767		printf("%s: could not load DMA'able memory for Rx ring, "
768		    "error = %i.\n", device_xname(sc->sc_dev), error);
769		bus_dmamem_free(sc->sc_dmat,
770		    &sc->age_rdata.age_rx_ring_seg, 1);
771		return error;
772	}
773
774	sc->age_rdata.age_rx_ring_paddr =
775	    sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;
776
	/*
	 * Create DMA resources for the Rx return ring.
	 */
780	error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
781	    AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
782	if (error) {
783		sc->age_cdata.age_rr_ring_map = NULL;
784		return ENOBUFS;
785	}
786
787	/* Allocate DMA'able memory for RX return ring */
788	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
789	    PAGE_SIZE, 0, &sc->age_rdata.age_rr_ring_seg, 1,
790	    &nsegs, BUS_DMA_NOWAIT);
791	if (error) {
792		printf("%s: could not allocate DMA'able memory for Rx "
793		    "return ring, error = %i.\n",
794		    device_xname(sc->sc_dev), error);
795		return error;
796	}
797
798	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
799	    nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring,
800	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
801	if (error)
802		return ENOBUFS;
803
804	memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ);
805
806	/*  Load the DMA map for Rx return ring. */
807	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
808	    sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_NOWAIT);
809	if (error) {
810		printf("%s: could not load DMA'able memory for Rx return ring, "
811		    "error = %i\n", device_xname(sc->sc_dev), error);
812		bus_dmamem_free(sc->sc_dmat,
813		    &sc->age_rdata.age_rr_ring_seg, 1);
814		return error;
815	}
816
817	sc->age_rdata.age_rr_ring_paddr =
818	    sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr;
819
	/*
	 * Create DMA resources for the CMB block.
	 */
823	error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1,
824	    AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
825	    &sc->age_cdata.age_cmb_block_map);
826	if (error) {
827		sc->age_cdata.age_cmb_block_map = NULL;
828		return ENOBUFS;
829	}
830
831	/* Allocate DMA'able memory for CMB block */
832	error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ,
833	    PAGE_SIZE, 0, &sc->age_rdata.age_cmb_block_seg, 1,
834	    &nsegs, BUS_DMA_NOWAIT);
835	if (error) {
836		printf("%s: could not allocate DMA'able memory for "
837		    "CMB block, error = %i\n", device_xname(sc->sc_dev), error);
838		return error;
839	}
840
841	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg,
842	    nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block,
843	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
844	if (error)
845		return ENOBUFS;
846
847	memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
848
849	/*  Load the DMA map for CMB block. */
850	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map,
851	    sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL,
852	    BUS_DMA_NOWAIT);
853	if (error) {
854		printf("%s: could not load DMA'able memory for CMB block, "
855		    "error = %i\n", device_xname(sc->sc_dev), error);
856		bus_dmamem_free(sc->sc_dmat,
857		    &sc->age_rdata.age_cmb_block_seg, 1);
858		return error;
859	}
860
861	sc->age_rdata.age_cmb_block_paddr =
862	    sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr;
863
	/*
	 * Create DMA resources for the SMB block.
	 */
867	error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1,
868	    AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
869	    &sc->age_cdata.age_smb_block_map);
870	if (error) {
871		sc->age_cdata.age_smb_block_map = NULL;
872		return ENOBUFS;
873	}
874
875	/* Allocate DMA'able memory for SMB block */
876	error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ,
877	    PAGE_SIZE, 0, &sc->age_rdata.age_smb_block_seg, 1,
878	    &nsegs, BUS_DMA_NOWAIT);
879	if (error) {
880		printf("%s: could not allocate DMA'able memory for "
881		    "SMB block, error = %i\n", device_xname(sc->sc_dev), error);
882		return error;
883	}
884
885	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg,
886	    nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block,
887	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
888	if (error)
889		return ENOBUFS;
890
891	memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ);
892
893	/*  Load the DMA map for SMB block */
894	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map,
895	    sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL,
896	    BUS_DMA_NOWAIT);
897	if (error) {
898		printf("%s: could not load DMA'able memory for SMB block, "
899		    "error = %i\n", device_xname(sc->sc_dev), error);
900		bus_dmamem_free(sc->sc_dmat,
901		    &sc->age_rdata.age_smb_block_seg, 1);
902		return error;
903	}
904
905	sc->age_rdata.age_smb_block_paddr =
906	    sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr;
907
908	/*
909	 * All of the memory we allocated above needs to be within
910	 * the same 4GB segment.  Make sure this is so.
911	 *
912	 * XXX We don't care WHAT 4GB segment they're in, just that
913	 * XXX they're all in the same one.  Need some bus_dma API
914	 * XXX help to make this easier to enforce when we actually
915	 * XXX perform the allocation.
916	 */
917	if (! (AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr) ==
918	       AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)
919
920	    && AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr) ==
921	       AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)
922
923	    && AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr) ==
924	       AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)
925
926	    && AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr) ==
927	       AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr))) {
928		aprint_error_dev(sc->sc_dev,
929		    "control data allocation constraints failed\n");
930		return ENOBUFS;
931	}
932
933	/* Create DMA maps for Tx buffers. */
934	for (i = 0; i < AGE_TX_RING_CNT; i++) {
935		txd = &sc->age_cdata.age_txdesc[i];
936		txd->tx_m = NULL;
937		txd->tx_dmamap = NULL;
938		error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE,
939		    AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
940		    &txd->tx_dmamap);
941		if (error) {
942			txd->tx_dmamap = NULL;
943			printf("%s: could not create Tx dmamap, error = %i.\n",
944			    device_xname(sc->sc_dev), error);
945			return error;
946		}
947	}
948
949	/* Create DMA maps for Rx buffers. */
950	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
951	    BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
952	if (error) {
953		sc->age_cdata.age_rx_sparemap = NULL;
954		printf("%s: could not create spare Rx dmamap, error = %i.\n",
955		    device_xname(sc->sc_dev), error);
956		return error;
957	}
958	for (i = 0; i < AGE_RX_RING_CNT; i++) {
959		rxd = &sc->age_cdata.age_rxdesc[i];
960		rxd->rx_m = NULL;
961		rxd->rx_dmamap = NULL;
962		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
963		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
964		if (error) {
965			rxd->rx_dmamap = NULL;
966			printf("%s: could not create Rx dmamap, error = %i.\n",
967			    device_xname(sc->sc_dev), error);
968			return error;
969		}
970	}
971
972	return 0;
973}
974
975static void
976age_dma_free(struct age_softc *sc)
977{
978	struct age_txdesc *txd;
979	struct age_rxdesc *rxd;
980	int i;
981
982	/* Tx buffers */
983	for (i = 0; i < AGE_TX_RING_CNT; i++) {
984		txd = &sc->age_cdata.age_txdesc[i];
985		if (txd->tx_dmamap != NULL) {
986			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
987			txd->tx_dmamap = NULL;
988		}
989	}
990	/* Rx buffers */
991	for (i = 0; i < AGE_RX_RING_CNT; i++) {
992		rxd = &sc->age_cdata.age_rxdesc[i];
993		if (rxd->rx_dmamap != NULL) {
994			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
995			rxd->rx_dmamap = NULL;
996		}
997	}
998	if (sc->age_cdata.age_rx_sparemap != NULL) {
999		bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
1000		sc->age_cdata.age_rx_sparemap = NULL;
1001	}
1002
1003	/* Tx ring. */
1004	if (sc->age_cdata.age_tx_ring_map != NULL)
1005		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
1006	if (sc->age_cdata.age_tx_ring_map != NULL &&
1007	    sc->age_rdata.age_tx_ring != NULL)
1008		bus_dmamem_free(sc->sc_dmat,
1009		    &sc->age_rdata.age_tx_ring_seg, 1);
1010	sc->age_rdata.age_tx_ring = NULL;
1011	sc->age_cdata.age_tx_ring_map = NULL;
1012
1013	/* Rx ring. */
1014	if (sc->age_cdata.age_rx_ring_map != NULL)
1015		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
1016	if (sc->age_cdata.age_rx_ring_map != NULL &&
1017	    sc->age_rdata.age_rx_ring != NULL)
1018		bus_dmamem_free(sc->sc_dmat,
1019		    &sc->age_rdata.age_rx_ring_seg, 1);
1020	sc->age_rdata.age_rx_ring = NULL;
1021	sc->age_cdata.age_rx_ring_map = NULL;
1022
1023	/* Rx return ring. */
1024	if (sc->age_cdata.age_rr_ring_map != NULL)
1025		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
1026	if (sc->age_cdata.age_rr_ring_map != NULL &&
1027	    sc->age_rdata.age_rr_ring != NULL)
1028		bus_dmamem_free(sc->sc_dmat,
1029		    &sc->age_rdata.age_rr_ring_seg, 1);
1030	sc->age_rdata.age_rr_ring = NULL;
1031	sc->age_cdata.age_rr_ring_map = NULL;
1032
1033	/* CMB block */
1034	if (sc->age_cdata.age_cmb_block_map != NULL)
1035		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
1036	if (sc->age_cdata.age_cmb_block_map != NULL &&
1037	    sc->age_rdata.age_cmb_block != NULL)
1038		bus_dmamem_free(sc->sc_dmat,
1039		    &sc->age_rdata.age_cmb_block_seg, 1);
1040	sc->age_rdata.age_cmb_block = NULL;
1041	sc->age_cdata.age_cmb_block_map = NULL;
1042
1043	/* SMB block */
1044	if (sc->age_cdata.age_smb_block_map != NULL)
1045		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
1046	if (sc->age_cdata.age_smb_block_map != NULL &&
1047	    sc->age_rdata.age_smb_block != NULL)
1048		bus_dmamem_free(sc->sc_dmat,
1049		    &sc->age_rdata.age_smb_block_seg, 1);
1050	sc->age_rdata.age_smb_block = NULL;
1051	sc->age_cdata.age_smb_block_map = NULL;
1052}
1053
1054static void
1055age_start(struct ifnet *ifp)
1056{
1057	struct age_softc *sc = ifp->if_softc;
1058	struct mbuf *m_head;
1059	int enq, error;
1060
1061	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1062		return;
1063	if ((sc->age_flags & AGE_FLAG_LINK) == 0)
1064		return;
1065
1066	enq = 0;
1067	for (;;) {
1068		IFQ_POLL(&ifp->if_snd, m_head);
1069		if (m_head == NULL)
1070			break;
1071
1072		/*
1073		 * Pack the data into the transmit ring. If we
1074		 * don't have room, set the OACTIVE flag and wait
1075		 * for the NIC to drain the ring.
1076		 */
1077		if ((error = age_encap(sc, m_head)) != 0) {
1078			if (error == EFBIG) {
1079				/* This is fatal for the packet. */
1080				IFQ_DEQUEUE(&ifp->if_snd, m_head);
1081				m_freem(m_head);
1082				if_statinc(ifp, if_oerrors);
1083				continue;
1084			}
1085			ifp->if_flags |= IFF_OACTIVE;
1086			break;
1087		}
1088		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1089		enq = 1;
1090
1091		/*
1092		 * If there's a BPF listener, bounce a copy of this frame
1093		 * to him.
1094		 */
1095		bpf_mtap(ifp, m_head, BPF_D_OUT);
1096	}
1097
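	/*
	 * The descriptors queued above are handed to the chip with a
	 * single mailbox write: AGE_COMMIT_MBOX() publishes the updated
	 * producer index once for the whole burst rather than once per
	 * packet.
	 */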
1098	if (enq) {
1099		/* Update mbox. */
1100		AGE_COMMIT_MBOX(sc);
1101		/* Set a timeout in case the chip goes out to lunch. */
1102		ifp->if_timer = AGE_TX_TIMEOUT;
1103	}
1104}
1105
1106static void
1107age_watchdog(struct ifnet *ifp)
1108{
1109	struct age_softc *sc = ifp->if_softc;
1110
1111	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
1112		printf("%s: watchdog timeout (missed link)\n",
1113		    device_xname(sc->sc_dev));
1114		if_statinc(ifp, if_oerrors);
1115		age_init(ifp);
1116		return;
1117	}
1118
1119	if (sc->age_cdata.age_tx_cnt == 0) {
1120		printf("%s: watchdog timeout (missed Tx interrupts) "
1121		    "-- recovering\n", device_xname(sc->sc_dev));
1122		age_start(ifp);
1123		return;
1124	}
1125
1126	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1127	if_statinc(ifp, if_oerrors);
1128	age_init(ifp);
1129	age_start(ifp);
1130}
1131
1132static bool
1133age_shutdown(device_t self, int howto)
1134{
1135	struct age_softc *sc;
1136	struct ifnet *ifp;
1137
1138	sc = device_private(self);
1139	ifp = &sc->sc_ec.ec_if;
1140	age_stop(ifp, 1);
1141
1142	return true;
1143}
1144
1145static int
1146age_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1147{
1148	struct age_softc *sc = ifp->if_softc;
1149	int s, error;
1150
1151	s = splnet();
1152
1153	error = ether_ioctl(ifp, cmd, data);
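	/*
	 * ENETRESET from ether_ioctl() means the multicast filter has
	 * changed; reprogram the hardware filter only while the
	 * interface is running, otherwise age_init() will do it later.
	 */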
1154	if (error == ENETRESET) {
1155		if (ifp->if_flags & IFF_RUNNING)
1156			age_rxfilter(sc);
1157		error = 0;
1158	}
1159
1160	splx(s);
1161	return error;
1162}
1163
1164static void
1165age_mac_config(struct age_softc *sc)
1166{
1167	struct mii_data *mii;
1168	uint32_t reg;
1169
1170	mii = &sc->sc_miibus;
1171
1172	reg = CSR_READ_4(sc, AGE_MAC_CFG);
1173	reg &= ~MAC_CFG_FULL_DUPLEX;
1174	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
1175	reg &= ~MAC_CFG_SPEED_MASK;
1176
1177	/* Reprogram MAC with resolved speed/duplex. */
1178	switch (IFM_SUBTYPE(mii->mii_media_active)) {
1179	case IFM_10_T:
1180	case IFM_100_TX:
1181		reg |= MAC_CFG_SPEED_10_100;
1182		break;
1183	case IFM_1000_T:
1184		reg |= MAC_CFG_SPEED_1000;
1185		break;
1186	}
1187	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1188		reg |= MAC_CFG_FULL_DUPLEX;
1189		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1190			reg |= MAC_CFG_TX_FC;
1191		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1192			reg |= MAC_CFG_RX_FC;
1193	}
1194
1195	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
1196}
1197
1198static bool
1199age_resume(device_t dv, const pmf_qual_t *qual)
1200{
1201	struct age_softc *sc = device_private(dv);
1202	uint16_t cmd;
1203
	/*
	 * Clear the INTx emulation disable bit that some hardware
	 * sets on resume.  From Linux.
	 */
1208	cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
1209	if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) {
1210		cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE;
1211		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
1212		    PCI_COMMAND_STATUS_REG, cmd);
1213	}
1214
1215	return true;
1216}
1217
1218static int
1219age_encap(struct age_softc *sc, struct mbuf * const m)
1220{
1221	struct age_txdesc *txd, *txd_last;
1222	struct tx_desc *desc;
1223	bus_dmamap_t map;
1224	uint32_t cflags, poff, vtag;
1225	int error, i, nsegs, prod;
1226
1227	cflags = vtag = 0;
1228	poff = 0;
1229
1230	prod = sc->age_cdata.age_tx_prod;
1231	txd = &sc->age_cdata.age_txdesc[prod];
1232	txd_last = txd;
1233	map = txd->tx_dmamap;
1234
1235	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
1236	if (error == EFBIG) {
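		/*
		 * The mbuf chain needs more DMA segments than the Tx map
		 * allows; m_defrag() is expected to compact it into the
		 * same head mbuf (hence the KASSERT below), after which
		 * the load is retried once before the packet is dropped.
		 */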
1237		struct mbuf *mnew = m_defrag(m, M_NOWAIT);
1238		if (mnew != NULL) {
1239			KASSERT(m == mnew);
1240			error = bus_dmamap_load_mbuf(sc->sc_dmat, map, mnew,
1241			    BUS_DMA_NOWAIT);
1242		} else {
1243			/* Just drop if we can't defrag. */
1244			error = EFBIG;
1245		}
1246		if (error) {
1247			if (error == EFBIG) {
1248				printf("%s: Tx packet consumes too many "
1249				    "DMA segments, dropping...\n",
1250				    device_xname(sc->sc_dev));
1251			}
1252			return error;
1253		}
1254	} else if (error) {
1255		return error;
1256	}
1257
1258	nsegs = map->dm_nsegs;
1259	KASSERT(nsegs != 0);
1260
1261	/* Check descriptor overrun. */
1262	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
1263		bus_dmamap_unload(sc->sc_dmat, map);
1264		return ENOBUFS;
1265	}
1266	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1267	    BUS_DMASYNC_PREWRITE);
1268
1269	/* Configure Tx IP/TCP/UDP checksum offload. */
1270	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
1271		cflags |= AGE_TD_CSUM;
1272		if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0)
1273			cflags |= AGE_TD_TCPCSUM;
1274		if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
1275			cflags |= AGE_TD_UDPCSUM;
1276		/* Set checksum start offset. */
1277		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
1278	}
1279
1280#if NVLAN > 0
1281	/* Configure VLAN hardware tag insertion. */
1282	if (vlan_has_tag(m)) {
1283		vtag = AGE_TX_VLAN_TAG(htons(vlan_get_tag(m)));
1284		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
1285		cflags |= AGE_TD_INSERT_VLAN_TAG;
1286	}
1287#endif
1288
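	/*
	 * Build one Tx descriptor per DMA segment: the 64-bit buffer
	 * address, the segment length combined with the VLAN tag word,
	 * and the checksum/VLAN flags.  AGE_TD_EOP is set on the last
	 * descriptor of the frame after the loop.
	 */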
1289	desc = NULL;
1290	KASSERT(nsegs > 0);
1291	for (i = 0; ; i++) {
1292		desc = &sc->age_rdata.age_tx_ring[prod];
1293		desc->addr = htole64(map->dm_segs[i].ds_addr);
1294		desc->len =
1295		    htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
1296		desc->flags = htole32(cflags);
1297		sc->age_cdata.age_tx_cnt++;
1298		if (i == (nsegs - 1))
1299			break;
1300
1301		/* Sync this descriptor and go to the next one */
1302		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
1303		    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
1304		    BUS_DMASYNC_PREWRITE);
1305		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
1306	}
1307
1308	/* Set EOP on the last descriptor and sync it. */
1309	desc->flags |= htole32(AGE_TD_EOP);
1310	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
1311	    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
1312	    BUS_DMASYNC_PREWRITE);
1313
1314	if (nsegs > 1) {
1315		/* Swap dmamap of the first and the last. */
1316		txd = &sc->age_cdata.age_txdesc[prod];
1317		map = txd_last->tx_dmamap;
1318		txd_last->tx_dmamap = txd->tx_dmamap;
1319		txd->tx_dmamap = map;
1320		txd->tx_m = m;
1321		KASSERT(txd_last->tx_m == NULL);
1322	} else {
1323		KASSERT(txd_last == &sc->age_cdata.age_txdesc[prod]);
1324		txd_last->tx_m = m;
1325	}
1326
1327	/* Update producer index. */
1328	AGE_DESC_INC(prod, AGE_TX_RING_CNT);
1329	sc->age_cdata.age_tx_prod = prod;
1330
1331	return 0;
1332}
1333
1334static void
1335age_txintr(struct age_softc *sc, int tpd_cons)
1336{
1337	struct ifnet *ifp = &sc->sc_ec.ec_if;
1338	struct age_txdesc *txd;
1339	int cons, prog;
1340
1341	if (sc->age_cdata.age_tx_cnt <= 0) {
1342		if (ifp->if_timer != 0)
1343			printf("timer running without packets\n");
1344		if (sc->age_cdata.age_tx_cnt)
1345			printf("age_tx_cnt corrupted\n");
1346	}
1347
1348	/*
1349	 * Go through our Tx list and free mbufs for those
1350	 * frames which have been transmitted.
1351	 */
1352	cons = sc->age_cdata.age_tx_cons;
1353	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
1354		if (sc->age_cdata.age_tx_cnt <= 0)
1355			break;
1356		prog++;
1357		ifp->if_flags &= ~IFF_OACTIVE;
1358		sc->age_cdata.age_tx_cnt--;
1359		txd = &sc->age_cdata.age_txdesc[cons];
		/*
		 * Clear the Tx descriptors.  This is not required, but
		 * it helps debugging in case of Tx issues.
		 */
1364		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
1365		    cons * sizeof(struct tx_desc), sizeof(struct tx_desc),
1366		    BUS_DMASYNC_POSTWRITE);
1367		txd->tx_desc->addr = 0;
1368		txd->tx_desc->len = 0;
1369		txd->tx_desc->flags = 0;
1370
1371		if (txd->tx_m == NULL)
1372			continue;
1373		/* Reclaim transmitted mbufs. */
1374		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1375		m_freem(txd->tx_m);
1376		txd->tx_m = NULL;
1377	}
1378
1379	if (prog > 0) {
1380		sc->age_cdata.age_tx_cons = cons;
1381
1382		/*
1383		 * Unarm watchdog timer only when there are no pending
1384		 * Tx descriptors in queue.
1385		 */
1386		if (sc->age_cdata.age_tx_cnt == 0)
1387			ifp->if_timer = 0;
1388	}
1389}
1390
1391/* Receive a frame. */
1392static void
1393age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
1394{
1395	struct ifnet *ifp = &sc->sc_ec.ec_if;
1396	struct age_rxdesc *rxd;
1397	struct rx_desc *desc;
1398	struct mbuf *mp, *m;
1399	uint32_t status, index;
1400	int count, nsegs, pktlen;
1401	int rx_cons;
1402
1403	status = le32toh(rxrd->flags);
1404	index = le32toh(rxrd->index);
1405	rx_cons = AGE_RX_CONS(index);
1406	nsegs = AGE_RX_NSEGS(index);
1407
1408	sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
1409	if ((status & AGE_RRD_ERROR) != 0 &&
1410	    (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
1411	    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring:
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol-specific length
		 *    do not match.
		 */
1421		sc->age_cdata.age_rx_cons += nsegs;
1422		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
1423		return;
1424	}
1425
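	/*
	 * A received frame may span several Rx buffers.  The Rx return
	 * descriptor gives the index of the first Rx descriptor used and
	 * the segment count; the loop below walks those descriptors,
	 * refills each one and chains the old mbufs into a single packet.
	 */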
1426	pktlen = 0;
1427	for (count = 0; count < nsegs; count++,
1428	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
1429		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
1430		mp = rxd->rx_m;
1431		desc = rxd->rx_desc;
1432		/* Add a new receive buffer to the ring. */
1433		if (age_newbuf(sc, rxd, 0) != 0) {
1434			if_statinc(ifp, if_iqdrops);
1435			/* Reuse Rx buffers. */
1436			if (sc->age_cdata.age_rxhead != NULL) {
1437				m_freem(sc->age_cdata.age_rxhead);
1438				AGE_RXCHAIN_RESET(sc);
1439			}
1440			break;
1441		}
1442
1443		/* The length of the first mbuf is computed last. */
1444		if (count != 0) {
1445			mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
1446			pktlen += mp->m_len;
1447		}
1448
1449		/* Chain received mbufs. */
1450		if (sc->age_cdata.age_rxhead == NULL) {
1451			sc->age_cdata.age_rxhead = mp;
1452			sc->age_cdata.age_rxtail = mp;
1453		} else {
1454			m_remove_pkthdr(mp);
1455			sc->age_cdata.age_rxprev_tail =
1456			    sc->age_cdata.age_rxtail;
1457			sc->age_cdata.age_rxtail->m_next = mp;
1458			sc->age_cdata.age_rxtail = mp;
1459		}
1460
1461		if (count == nsegs - 1) {
			/*
			 * It seems that the L1 controller has no way
			 * to tell the hardware to strip the CRC bytes.
			 */
1466			sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
1467			if (nsegs > 1) {
1468				/* Remove the CRC bytes in chained mbufs. */
1469				pktlen -= ETHER_CRC_LEN;
1470				if (mp->m_len <= ETHER_CRC_LEN) {
1471					sc->age_cdata.age_rxtail =
1472					    sc->age_cdata.age_rxprev_tail;
1473					sc->age_cdata.age_rxtail->m_len -=
1474					    (ETHER_CRC_LEN - mp->m_len);
1475					sc->age_cdata.age_rxtail->m_next = NULL;
1476					m_freem(mp);
1477				} else {
1478					mp->m_len -= ETHER_CRC_LEN;
1479				}
1480			}
1481
1482			m = sc->age_cdata.age_rxhead;
1483			KASSERT(m->m_flags & M_PKTHDR);
1484			m_set_rcvif(m, ifp);
1485			m->m_pkthdr.len = sc->age_cdata.age_rxlen;
1486			/* Set the first mbuf length. */
1487			m->m_len = sc->age_cdata.age_rxlen - pktlen;
1488
			/*
			 * Set checksum information.
			 * It seems that the L1 controller can compute a
			 * partial checksum.  The partial checksum value
			 * could be used to accelerate checksum computation
			 * for fragmented TCP/UDP packets; the upper network
			 * stack already takes advantage of it in the IP
			 * reassembly stage.  But I'm not sure about the
			 * correctness of the partial hardware checksum
			 * assistance due to the lack of a data sheet.  If
			 * it is proven to work on the L1 I'll enable it.
			 */
1501			if (status & AGE_RRD_IPV4) {
1502				if (status & AGE_RRD_IPCSUM_NOK)
1503					m->m_pkthdr.csum_flags |=
1504					    M_CSUM_IPv4_BAD;
1505				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
1506				    (status & AGE_RRD_TCP_UDPCSUM_NOK)) {
1507					m->m_pkthdr.csum_flags |=
1508					    M_CSUM_TCP_UDP_BAD;
1509				}
				/*
				 * Don't mark TCP/UDP frames as having a bad
				 * checksum, since fragmented frames may always
				 * have the bad-checksum bit set in the
				 * descriptor status.
				 */
1515			}
1516#if NVLAN > 0
1517			/* Check for VLAN tagged frames. */
1518			if (status & AGE_RRD_VLAN) {
1519				uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
1520				vlan_set_tag(m, AGE_RX_VLAN_TAG(vtag));
1521			}
1522#endif
1523
1524			/* Pass it on. */
1525			if_percpuq_enqueue(ifp->if_percpuq, m);
1526
1527			/* Reset mbuf chains. */
1528			AGE_RXCHAIN_RESET(sc);
1529		}
1530	}
1531
1532	if (count != nsegs) {
1533		sc->age_cdata.age_rx_cons += nsegs;
1534		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
1535	} else
1536		sc->age_cdata.age_rx_cons = rx_cons;
1537}
1538
1539static void
1540age_rxintr(struct age_softc *sc, int rr_prod)
1541{
1542	struct rx_rdesc *rxrd;
1543	int rr_cons, nsegs, pktlen, prog;
1544
1545	rr_cons = sc->age_cdata.age_rr_cons;
1546	if (rr_cons == rr_prod)
1547		return;
1548
1549	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
1550	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
1551	    BUS_DMASYNC_POSTREAD);
1552
1553	for (prog = 0; rr_cons != rr_prod; prog++) {
1554		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
1555		nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
1556		if (nsegs == 0)
1557			break;
		/*
		 * Check the number of segments against the received bytes.
		 * A non-matching value would indicate that the hardware
		 * is still trying to update the Rx return descriptors.
		 * I'm not sure whether this check is really needed.
		 */
1564		pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
1565		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
1566		    (MCLBYTES - ETHER_ALIGN)))
1567			break;
1568
1569		/* Received a frame. */
1570		age_rxeof(sc, rxrd);
1571
1572		/* Clear return ring. */
1573		rxrd->index = 0;
1574		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
1575	}
1576
1577	if (prog > 0) {
1578		/* Update the consumer index. */
1579		sc->age_cdata.age_rr_cons = rr_cons;
1580
1581		/* Sync descriptors. */
1582		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
1583		    sc->age_cdata.age_rr_ring_map->dm_mapsize,
1584		    BUS_DMASYNC_PREWRITE);
1585
1586		/* Notify hardware availability of new Rx buffers. */
1587		AGE_COMMIT_MBOX(sc);
1588	}
1589}
1590
1591static void
1592age_tick(void *xsc)
1593{
1594	struct age_softc *sc = xsc;
1595	struct mii_data *mii = &sc->sc_miibus;
1596	int s;
1597
1598	s = splnet();
1599	mii_tick(mii);
1600	splx(s);
1601
1602	callout_schedule(&sc->sc_tick_ch, hz);
1603}
1604
1605static void
1606age_reset(struct age_softc *sc)
1607{
1608	uint32_t reg;
1609	int i;
1610
1611	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
1612	CSR_READ_4(sc, AGE_MASTER_CFG);
1613	DELAY(1000);
1614	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1615		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1616			break;
1617		DELAY(10);
1618	}
1619
1620	if (i == 0)
1621		printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
1622		    reg);
1623
1624	/* Initialize PCIe module. From Linux. */
1625	CSR_WRITE_4(sc, 0x12FC, 0x6500);
1626	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
1627}
1628
1629static int
1630age_init(struct ifnet *ifp)
1631{
1632	struct age_softc *sc = ifp->if_softc;
1633	struct mii_data *mii;
1634	uint8_t eaddr[ETHER_ADDR_LEN];
1635	bus_addr_t paddr;
1636	uint32_t reg, fsize;
1637	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
1638	int error;
1639
1640	/*
1641	 * Cancel any pending I/O.
1642	 */
1643	age_stop(ifp, 0);
1644
1645	/*
1646	 * Reset the chip to a known state.
1647	 */
1648	age_reset(sc);
1649
1650	/* Initialize descriptors. */
1651	error = age_init_rx_ring(sc);
1652	if (error != 0) {
1653		printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
1654		age_stop(ifp, 0);
1655		return error;
1656	}
1657	age_init_rr_ring(sc);
1658	age_init_tx_ring(sc);
1659	age_init_cmb_block(sc);
1660	age_init_smb_block(sc);
1661
1662	/* Reprogram the station address. */
1663	memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
1664	CSR_WRITE_4(sc, AGE_PAR0,
1665	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
1666	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
1667
1668	/* Set descriptor base addresses. */
1669	paddr = sc->age_rdata.age_tx_ring_paddr;
1670	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
1671	paddr = sc->age_rdata.age_rx_ring_paddr;
1672	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
1673	paddr = sc->age_rdata.age_rr_ring_paddr;
1674	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
1675	paddr = sc->age_rdata.age_tx_ring_paddr;
1676	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
1677	paddr = sc->age_rdata.age_cmb_block_paddr;
1678	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
1679	paddr = sc->age_rdata.age_smb_block_paddr;
1680	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
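	/*
	 * Only the low 32 bits of each ring/block address are programmed
	 * individually; the shared high 32 bits went into
	 * AGE_DESC_ADDR_HI above, which is why all control data must lie
	 * within the same 4GB segment (checked in age_dma_alloc()).
	 */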
1681
1682	/* Set Rx/Rx return descriptor counter. */
1683	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
1684	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
1685	    DESC_RRD_CNT_MASK) |
1686	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
1687
1688	/* Set Tx descriptor counter. */
1689	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
1690	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
1691
1692	/* Tell hardware that we're ready to load descriptors. */
1693	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
1694
	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register.  However, the Tx producer and
	 * the Rx return consumer/Rx producer are all shared, so it is
	 * hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management, we could have independent
	 * Tx/Rx handlers, and the Rx handler in turn could run without
	 * any locking.
	 */
1706	AGE_COMMIT_MBOX(sc);
1707
1708	/* Configure IPG/IFG parameters. */
1709	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
1710	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
1711	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
1712	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
1713	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
1714
1715	/* Set parameters for half-duplex media. */
1716	CSR_WRITE_4(sc, AGE_HDPX_CFG,
1717	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
1718	    HDPX_CFG_LCOL_MASK) |
1719	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
1720	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
1721	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
1722	    HDPX_CFG_ABEBT_MASK) |
1723	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
1724	     HDPX_CFG_JAMIPG_MASK));
1725
1726	/* Configure interrupt moderation timer. */
1727	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
1728	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
1729	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
1730	reg &= ~MASTER_MTIMER_ENB;
1731	if (AGE_USECS(sc->age_int_mod) == 0)
1732		reg &= ~MASTER_ITIMER_ENB;
1733	else
1734		reg |= MASTER_ITIMER_ENB;
1735	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
1736	if (agedebug)
1737		printf("%s: interrupt moderation is %d us.\n",
1738		    device_xname(sc->sc_dev), sc->age_int_mod);
1739	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
1740
	/* Set the max frame size; don't let the MTU be less than ETHERMTU. */
1742	if (ifp->if_mtu < ETHERMTU)
1743		sc->age_max_frame_size = ETHERMTU;
1744	else
1745		sc->age_max_frame_size = ifp->if_mtu;
1746	sc->age_max_frame_size += ETHER_HDR_LEN +
1747	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
1748	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
1749
1750	/* Configure jumbo frame. */
1751	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
1752	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
1753	    (((fsize / sizeof(uint64_t)) <<
1754	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
1755	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
1756	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
1757	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
1758	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));
1759
1760	/* Configure flow-control parameters. From Linux. */
1761	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for the old L1.
		 * It is not known which hardware revision requires this.
		 */
1766		CSR_WRITE_4(sc, 0x12FC, 0x6500);
1767		/*
1768		 * Another magic workaround for flow-control mode
1769		 * change. From Linux.
1770		 */
1771		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
1772	}
	/*
	 * TODO
	 *  Understand the pause parameter relationship between the FIFO
	 *  size and the number of Rx descriptors/Rx return descriptors.
	 *
	 *  The magic parameters came from Linux.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));

	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);

	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set CMB/SMB timer and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request SMB updates every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits, as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  Start with full-duplex/1000Mbps media; the MAC is reconfigured
	 *  once the link has been established.
	 */
	CSR_WRITE_4(sc, AGE_MAC_CFG,
	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK));

	/* Set up the receive filter. */
	age_rxfilter(sc);
	age_rxvlan(sc);

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg |= MAC_CFG_RXCSUM_ENB;

	/* Ack and clear all pending interrupts, then set the interrupt mask. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
	CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);

	/* Finally enable Tx/Rx MAC. */
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);

	sc->age_flags &= ~AGE_FLAG_LINK;

	/* Switch to the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

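/*
 * Stop the interface: cancel the tick callout, mark the interface down,
 * mask interrupts, stop CMB/SMB updates, halt the Rx/Tx MACs, DMA engines
 * and queues, and free any mbufs still held by the Rx/Tx rings.
 */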
static void
age_stop(struct ifnet *ifp, int disable)
{
	struct age_softc *sc = ifp->if_softc;
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	uint32_t reg;
	int i;

	callout_stop(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	sc->age_flags &= ~AGE_FLAG_LINK;

	mii_down(&sc->sc_miibus);

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);

	/* Stop CMB/SMB updates. */
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);

	/* Stop Rx/Tx MAC. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Stop DMA. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));

	/* Stop TxQ/RxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
		    device_xname(sc->sc_dev), reg);

	/* Reclaim Rx buffers that have been processed. */
	if (sc->age_cdata.age_rxhead != NULL)
		m_freem(sc->age_cdata.age_rxhead);
	AGE_RXCHAIN_RESET(sc);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

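/*
 * Harvest the hardware statistics from the SMB (statistics message block)
 * the chip posts to host memory, accumulate them in the softc and fold
 * the relevant counters into the ifnet statistics.
 */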
static void
age_stats_update(struct age_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_stats *stat;
	struct smb *smb;

	stat = &sc->age_stat;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	smb = sc->age_rdata.age_smb_block;
	if (smb->updated == 0)
		return;

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_desc_oflows += smb->rx_desc_oflows;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	if_statadd_ref(nsr, if_opackets, smb->tx_frames);

	if_statadd_ref(nsr, if_collisions,
	    smb->tx_single_colls +
	    smb->tx_multi_colls + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT);

	if_statadd_ref(nsr, if_oerrors,
	    smb->tx_excess_colls +
	    smb->tx_late_colls + smb->tx_underrun +
	    smb->tx_pkts_truncated);

	if_statadd_ref(nsr, if_ierrors,
	    smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
	    smb->rx_alignerrs);

	IF_STAT_PUTREF(ifp);

	/* Update done, clear. */
	smb->updated = 0;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

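/*
 * Disable the Tx MAC and the DMA read engine that feeds it, then wait
 * for both to report idle.
 */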
static void
age_stop_txmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_TX_ENB) != 0) {
		reg &= ~MAC_CFG_TX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Tx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_RD_ENB) != 0) {
		reg &= ~DMA_CFG_RD_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping TxMAC timed out!\n",
		    device_xname(sc->sc_dev));
}

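/*
 * Disable the Rx MAC and the DMA write engine behind it, then wait for
 * both to report idle.
 */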
static void
age_stop_rxmac(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	if ((reg & MAC_CFG_RX_ENB) != 0) {
		reg &= ~MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
	/* Stop Rx DMA engine. */
	reg = CSR_READ_4(sc, AGE_DMA_CFG);
	if ((reg & DMA_CFG_WR_ENB) != 0) {
		reg &= ~DMA_CFG_WR_ENB;
		CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
	}
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
		    (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: stopping RxMAC timed out!\n",
		    device_xname(sc->sc_dev));
}

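/*
 * Reset the Tx producer/consumer state, clear the Tx descriptor ring and
 * point each software descriptor at its hardware counterpart.
 */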
static void
age_init_tx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_txdesc *txd;
	int i;

	sc->age_cdata.age_tx_prod = 0;
	sc->age_cdata.age_tx_cons = 0;
	sc->age_cdata.age_tx_cnt = 0;

	rd = &sc->age_rdata;
	memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_desc = &rd->age_tx_ring[i];
		txd->tx_m = NULL;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

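/*
 * Clear the Rx descriptor ring and load a fresh mbuf cluster into every
 * slot; returns ENOBUFS if an mbuf or DMA mapping cannot be obtained.
 */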
static int
age_init_rx_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;
	struct age_rxdesc *rxd;
	int i;

	sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
	rd = &sc->age_rdata;
	memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->age_rx_ring[i];
		if (age_newbuf(sc, rxd, 1) != 0)
			return ENOBUFS;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
	    sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

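/*
 * Reset the Rx return ring consumer index and the partial Rx mbuf chain,
 * then clear the ring.
 */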
static void
age_init_rr_ring(struct age_softc *sc)
{
	struct age_ring_data *rd;

	sc->age_cdata.age_rr_cons = 0;
	AGE_RXCHAIN_RESET(sc);

	rd = &sc->age_rdata;
	memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

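/* Clear the coalescing message block (CMB) shared with the chip. */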
static void
age_init_cmb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

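/* Clear the statistics message block (SMB) shared with the chip. */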
static void
age_init_smb_block(struct age_softc *sc)
{
	struct age_ring_data *rd;

	rd = &sc->age_rdata;
	memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
	    sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

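/*
 * Attach a fresh mbuf cluster to an Rx descriptor.  The cluster is loaded
 * into the spare DMA map first so that, if the load fails, the descriptor
 * keeps its current mbuf and mapping.
 */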
static int
age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
{
	struct rx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		 m_freem(m);
		 return ENOBUFS;
	}

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
		return error;
	}

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
		    rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
	}
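	/* Swap the freshly loaded spare map into the descriptor. */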
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
	sc->age_cdata.age_rx_sparemap = map;
	rxd->rx_m = m;

	desc = rxd->rx_desc;
	desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
	desc->len =
	    htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
	    AGE_RD_LEN_SHIFT);

	return 0;
}

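/*
 * Enable or disable hardware VLAN tag stripping according to the
 * ETHERCAP_VLAN_HWTAGGING capability setting.
 */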
static void
age_rxvlan(struct age_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

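/*
 * Program the Rx filter: always accept broadcast, fall back to promiscuous
 * or all-multicast mode when requested or when a multicast range is
 * present, and otherwise build the 64-bit multicast hash from the CRC of
 * each address.
 */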
static void
age_rxfilter(struct age_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	/* Program new filter. */
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		goto update;

	memset(mchash, 0, sizeof(mchash));

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* XXX Use ETHER_F_ALLMULTI in future. */
			ifp->if_flags |= IFF_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto update;
		}
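		/*
		 * The upper 6 bits of the big-endian CRC select one of the
		 * 64 hash bits split across the AGE_MAR0/AGE_MAR1 registers.
		 */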
		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		mchash[crc >> 31] |= 1U << ((crc >> 26) & 0x1f);
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

update:
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if (ifp->if_flags & IFF_PROMISC) {
			rxcfg |= MAC_CFG_PROMISC;
			/* XXX Use ETHER_F_ALLMULTI in future. */
			ifp->if_flags |= IFF_ALLMULTI;
		} else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	}
	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
}
