1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
28 *
29 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
30 */
31
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/bus.h>
35#include <sys/endian.h>
36#include <sys/kernel.h>
37#include <sys/lock.h>
38#include <sys/malloc.h>
39#include <sys/mbuf.h>
40#include <sys/mutex.h>
41#include <sys/rman.h>
42#include <sys/module.h>
43#include <sys/queue.h>
44#include <sys/socket.h>
45#include <sys/sockio.h>
46#include <sys/sysctl.h>
47#include <sys/taskqueue.h>
48
49#include <net/bpf.h>
50#include <net/if.h>
51#include <net/if_var.h>
52#include <net/if_arp.h>
53#include <net/ethernet.h>
54#include <net/if_dl.h>
55#include <net/if_media.h>
56#include <net/if_types.h>
57#include <net/if_vlan_var.h>
58
59#include <netinet/in.h>
60#include <netinet/in_systm.h>
61#include <netinet/ip.h>
62#include <netinet/tcp.h>
63
64#include <dev/mii/mii.h>
65#include <dev/mii/miivar.h>
66#include <dev/pci/pcireg.h>
67#include <dev/pci/pcivar.h>
68
69#include <machine/bus.h>
70
71#include "miibus_if.h"
72
73#include "if_aereg.h"
74#include "if_aevar.h"
75
/*
 * Devices supported by this driver.
 */
static struct ae_dev {
	uint16_t	vendorid;	/* PCI vendor ID to match. */
	uint16_t	deviceid;	/* PCI device ID to match. */
	const char	*name;		/* Probe description string. */
} ae_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
		"Attansic Technology Corp, L2 FastEthernet" },
};
/* Number of entries in the device table above. */
#define	AE_DEVS_COUNT nitems(ae_devs)
88
/* Memory-mapped register window at BAR(0). */
static struct resource_spec ae_res_spec_mem[] = {
	{ SYS_RES_MEMORY,       PCIR_BAR(0),    RF_ACTIVE },
	{ -1,			0,		0 }
};
/* Legacy (INTx) interrupt: rid 0, may be shared. */
static struct resource_spec ae_res_spec_irq[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};
/* MSI interrupt: rid 1, exclusive (used after pci_alloc_msi()). */
static struct resource_spec ae_res_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};
101
/*
 * Forward declarations for device methods and internal helpers.
 */
static int	ae_probe(device_t dev);
static int	ae_attach(device_t dev);
static void	ae_pcie_init(ae_softc_t *sc);
static void	ae_phy_reset(ae_softc_t *sc);
static void	ae_phy_init(ae_softc_t *sc);
static int	ae_reset(ae_softc_t *sc);
static void	ae_init(void *arg);
static int	ae_init_locked(ae_softc_t *sc);
static int	ae_detach(device_t dev);
static int	ae_miibus_readreg(device_t dev, int phy, int reg);
static int	ae_miibus_writereg(device_t dev, int phy, int reg, int val);
static void	ae_miibus_statchg(device_t dev);
static void	ae_mediastatus(if_t ifp, struct ifmediareq *ifmr);
static int	ae_mediachange(if_t ifp);
static void	ae_retrieve_address(ae_softc_t *sc);
static void	ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error);
static int	ae_alloc_rings(ae_softc_t *sc);
static void	ae_dma_free(ae_softc_t *sc);
static int	ae_shutdown(device_t dev);
static int	ae_suspend(device_t dev);
static void	ae_powersave_disable(ae_softc_t *sc);
static void	ae_powersave_enable(ae_softc_t *sc);
static int	ae_resume(device_t dev);
static unsigned int	ae_tx_avail_size(ae_softc_t *sc);
static int	ae_encap(ae_softc_t *sc, struct mbuf **m_head);
static void	ae_start(if_t ifp);
static void	ae_start_locked(if_t ifp);
static void	ae_link_task(void *arg, int pending);
static void	ae_stop_rxmac(ae_softc_t *sc);
static void	ae_stop_txmac(ae_softc_t *sc);
static void	ae_mac_config(ae_softc_t *sc);
static int	ae_intr(void *arg);
static void	ae_int_task(void *arg, int pending);
static void	ae_tx_intr(ae_softc_t *sc);
static void	ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
static void	ae_rx_intr(ae_softc_t *sc);
static void	ae_watchdog(ae_softc_t *sc);
static void	ae_tick(void *arg);
static void	ae_rxfilter(ae_softc_t *sc);
static void	ae_rxvlan(ae_softc_t *sc);
static int	ae_ioctl(if_t ifp, u_long cmd, caddr_t data);
static void	ae_stop(ae_softc_t *sc);
static int	ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
static int	ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
static int	ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static int	ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static void	ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
static void	ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
static void	ae_init_tunables(ae_softc_t *sc);
152
153static device_method_t ae_methods[] = {
154	/* Device interface. */
155	DEVMETHOD(device_probe,		ae_probe),
156	DEVMETHOD(device_attach,	ae_attach),
157	DEVMETHOD(device_detach,	ae_detach),
158	DEVMETHOD(device_shutdown,	ae_shutdown),
159	DEVMETHOD(device_suspend,	ae_suspend),
160	DEVMETHOD(device_resume,	ae_resume),
161
162	/* MII interface. */
163	DEVMETHOD(miibus_readreg,	ae_miibus_readreg),
164	DEVMETHOD(miibus_writereg,	ae_miibus_writereg),
165	DEVMETHOD(miibus_statchg,	ae_miibus_statchg),
166	{ NULL, NULL }
167};
168static driver_t ae_driver = {
169        "ae",
170        ae_methods,
171        sizeof(ae_softc_t)
172};
173
/* Register the driver on the PCI bus and attach miibus below it. */
DRIVER_MODULE(ae, pci, ae_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, ae, ae_devs,
    nitems(ae_devs));
DRIVER_MODULE(miibus, ae, miibus_driver, 0, 0);
MODULE_DEPEND(ae, pci, 1, 1, 1);
MODULE_DEPEND(ae, ether, 1, 1, 1);
MODULE_DEPEND(ae, miibus, 1, 1, 1);

/*
 * Tunables.
 */
static int msi_disable = 0;	/* Set hw.ae.msi_disable=1 to force INTx. */
TUNABLE_INT("hw.ae.msi_disable", &msi_disable);
187
/* Accessors for the memory-mapped register window (BAR 0). */
#define	AE_READ_4(sc, reg) \
	bus_read_4((sc)->mem[0], (reg))
#define	AE_READ_2(sc, reg) \
	bus_read_2((sc)->mem[0], (reg))
#define	AE_READ_1(sc, reg) \
	bus_read_1((sc)->mem[0], (reg))
#define	AE_WRITE_4(sc, reg, val) \
	bus_write_4((sc)->mem[0], (reg), (val))
#define	AE_WRITE_2(sc, reg, val) \
	bus_write_2((sc)->mem[0], (reg), (val))
#define	AE_WRITE_1(sc, reg, val) \
	bus_write_1((sc)->mem[0], (reg), (val))
/* PHY access via MDIO; the PHY address is hardwired to 0. */
#define	AE_PHY_READ(sc, reg) \
	ae_miibus_readreg(sc->dev, 0, reg)
#define	AE_PHY_WRITE(sc, reg, val) \
	ae_miibus_writereg(sc->dev, 0, reg, val)
/*
 * True (invalid) when the address held in eaddr[0..1] is all-zeros or
 * all-ones; eaddr[1] holds only the two high-order bytes.
 */
#define	AE_CHECK_EADDR_VALID(eaddr) \
	((eaddr[0] == 0 && eaddr[1] == 0) || \
	(eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
/* Convert between host VLAN tag layout and the chip's descriptor layout. */
#define	AE_RXD_VLAN(vtag) \
	(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define	AE_TXD_VLAN(vtag) \
	(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
211
212static int
213ae_probe(device_t dev)
214{
215	uint16_t deviceid, vendorid;
216	int i;
217
218	vendorid = pci_get_vendor(dev);
219	deviceid = pci_get_device(dev);
220
221	/*
222	 * Search through the list of supported devs for matching one.
223	 */
224	for (i = 0; i < AE_DEVS_COUNT; i++) {
225		if (vendorid == ae_devs[i].vendorid &&
226		    deviceid == ae_devs[i].deviceid) {
227			device_set_desc(dev, ae_devs[i].name);
228			return (BUS_PROBE_DEFAULT);
229		}
230	}
231	return (ENXIO);
232}
233
/*
 * Attach the adapter: allocate bus resources, reset the chip, read the
 * MAC address, set up DMA rings, the ifnet, the MII bus, the helper
 * taskqueue and the interrupt handler.  On any failure control falls
 * through to the "fail" label and ae_detach() unwinds whatever part of
 * the attach already succeeded.
 */
static int
ae_attach(device_t dev)
{
	ae_softc_t *sc;
	if_t ifp;
	uint8_t chiprev;
	uint32_t pcirev;
	int nmsi, pmc;
	int error;

	sc = device_get_softc(dev); /* Automatically allocated and zeroed
				       on attach. */
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	sc->dev = dev;

	/*
	 * Initialize mutexes and tasks.
	 */
	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
	TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
	TASK_INIT(&sc->link_task, 0, ae_link_task, sc);

	pci_enable_busmaster(dev);		/* Enable bus mastering. */

	sc->spec_mem = ae_res_spec_mem;

	/*
	 * Allocate memory-mapped registers.
	 */
	error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
	if (error != 0) {
		device_printf(dev, "could not allocate memory resources.\n");
		sc->spec_mem = NULL;
		goto fail;
	}

	/*
	 * Retrieve PCI and chip revisions.
	 */
	pcirev = pci_get_revid(dev);
	chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
	    AE_MASTER_REVNUM_MASK;
	if (bootverbose) {
		device_printf(dev, "pci device revision: %#04x\n", pcirev);
		device_printf(dev, "chip id: %#02x\n", chiprev);
	}
	nmsi = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count: %d.\n", nmsi);

	/*
	 * Allocate interrupt resources.  Try a single MSI vector first
	 * (unless disabled via hw.ae.msi_disable) and fall back to INTx.
	 */
	if (msi_disable == 0 && nmsi == 1) {
		error = pci_alloc_msi(dev, &nmsi);
		if (error == 0) {
			device_printf(dev, "Using MSI messages.\n");
			sc->spec_irq = ae_res_spec_msi;
			error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
			if (error != 0) {
				device_printf(dev, "MSI allocation failed.\n");
				sc->spec_irq = NULL;
				pci_release_msi(dev);
			} else {
				sc->flags |= AE_FLAG_MSI;
			}
		}
	}
	if (sc->spec_irq == NULL) {
		sc->spec_irq = ae_res_spec_irq;
		error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
		if (error != 0) {
			device_printf(dev, "could not allocate IRQ resources.\n");
			sc->spec_irq = NULL;
			goto fail;
		}
	}

	ae_init_tunables(sc);

	ae_phy_reset(sc);		/* Reset PHY. */
	error = ae_reset(sc);		/* Reset the controller itself. */
	if (error != 0)
		goto fail;

	ae_pcie_init(sc);

	ae_retrieve_address(sc);	/* Load MAC address. */

	error = ae_alloc_rings(sc);	/* Allocate ring buffers. */
	if (error != 0)
		goto fail;

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, ae_ioctl);
	if_setstartfn(ifp, ae_start);
	if_setinitfn(ifp, ae_init);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING);
	if_sethwassist(ifp, 0);
	if_setsendqlen(ifp, ifqmaxlen);
	if_setsendqready(ifp);
	/* Advertise WOL only if the device has a PM capability. */
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
		sc->flags |= AE_FLAG_PMG;
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/*
	 * Configure and attach MII bus.
	 */
	error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange,
	    ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT,
	    MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->eaddr);
	/* Tell the upper layer(s) we support long frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/*
	 * Create and run all helper tasks.
	 */
	sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->tq);
	if (sc->tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	/*
	 * Configure interrupt handlers.
	 */
	error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    ae_intr, NULL, sc, &sc->intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->tq);
		sc->tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* ae_detach() copes with a partially completed attach. */
	if (error != 0)
		ae_detach(dev);

	return (error);
}
399
400#define	AE_SYSCTL(stx, parent, name, desc, ptr)	\
401	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, name, CTLFLAG_RD, ptr, 0, desc)
402
403static void
404ae_init_tunables(ae_softc_t *sc)
405{
406	struct sysctl_ctx_list *ctx;
407	struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
408	struct ae_stats *ae_stats;
409
410	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
411	ae_stats = &sc->stats;
412
413	ctx = device_get_sysctl_ctx(sc->dev);
414	root = device_get_sysctl_tree(sc->dev);
415	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
416	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ae statistics");
417
418	/*
419	 * Receiver statistcics.
420	 */
421	stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
422	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
423	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "bcast",
424	    "broadcast frames", &ae_stats->rx_bcast);
425	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "mcast",
426	    "multicast frames", &ae_stats->rx_mcast);
427	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "pause",
428	    "PAUSE frames", &ae_stats->rx_pause);
429	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "control",
430	    "control frames", &ae_stats->rx_ctrl);
431	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "crc_errors",
432	    "frames with CRC errors", &ae_stats->rx_crcerr);
433	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "code_errors",
434	    "frames with invalid opcode", &ae_stats->rx_codeerr);
435	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "runt",
436	    "runt frames", &ae_stats->rx_runt);
437	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "frag",
438	    "fragmented frames", &ae_stats->rx_frag);
439	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "align_errors",
440	    "frames with alignment errors", &ae_stats->rx_align);
441	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "truncated",
442	    "frames truncated due to Rx FIFO inderrun", &ae_stats->rx_trunc);
443
444	/*
445	 * Receiver statistcics.
446	 */
447	stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
448	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
449	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "bcast",
450	    "broadcast frames", &ae_stats->tx_bcast);
451	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "mcast",
452	    "multicast frames", &ae_stats->tx_mcast);
453	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "pause",
454	    "PAUSE frames", &ae_stats->tx_pause);
455	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "control",
456	    "control frames", &ae_stats->tx_ctrl);
457	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "defers",
458	    "deferrals occuried", &ae_stats->tx_defer);
459	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "exc_defers",
460	    "excessive deferrals occuried", &ae_stats->tx_excdefer);
461	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "singlecols",
462	    "single collisions occuried", &ae_stats->tx_singlecol);
463	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "multicols",
464	    "multiple collisions occuried", &ae_stats->tx_multicol);
465	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "latecols",
466	    "late collisions occuried", &ae_stats->tx_latecol);
467	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "aborts",
468	    "transmit aborts due collisions", &ae_stats->tx_abortcol);
469	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "underruns",
470	    "Tx FIFO underruns", &ae_stats->tx_underrun);
471}
472
/*
 * Program the PCIe link-training/DLL control registers with the
 * driver's default magic values.
 */
static void
ae_pcie_init(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
}
480
/*
 * Enable the internal PHY, then busy-wait 1 ms for it to come up.
 */
static void
ae_phy_reset(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
	DELAY(1000);	/* XXX: pause(9) ? */
}
488
/*
 * Soft-reset the controller and wait until both the reset bit clears
 * and all internal state machines report idle.  Returns 0 on success
 * or ENXIO when either poll loop times out.
 */
static int
ae_reset(ae_softc_t *sc)
{
	int i;

	/*
	 * Issue a soft reset.
	 */
	AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
	bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	/*
	 * Wait for reset to complete.
	 */
	for (i = 0; i < AE_RESET_TIMEOUT; i++) {
		if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
			break;
		DELAY(10);
	}
	if (i == AE_RESET_TIMEOUT) {
		device_printf(sc->dev, "reset timeout.\n");
		return (ENXIO);
	}

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		device_printf(sc->dev, "could not enter idle state.\n");
		return (ENXIO);
	}
	return (0);
}
528
529static void
530ae_init(void *arg)
531{
532	ae_softc_t *sc;
533
534	sc = (ae_softc_t *)arg;
535	AE_LOCK(sc);
536	ae_init_locked(sc);
537	AE_UNLOCK(sc);
538}
539
/*
 * PHY initialization hook.  Currently a no-op: the link status change
 * interrupt enable below is compiled out.
 */
static void
ae_phy_init(ae_softc_t *sc)
{

	/*
	 * Enable link status change interrupt.
	 * XXX magic numbers.
	 */
#ifdef notyet
	AE_PHY_WRITE(sc, 18, 0xc00);
#endif
}
552
/*
 * Bring the interface up: reset the chip, program the MAC address,
 * ring base addresses and sizes, timing/flow-control parameters, then
 * enable DMA, interrupts and the Tx/Rx MACs.  Called with the softc
 * lock held.  Returns 0 on success, ENXIO if the chip reports link
 * failure after DMA enable; a no-op if already running.
 */
static int
ae_init_locked(ae_softc_t *sc)
{
	if_t ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t val;
	bus_addr_t addr;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return (0);
	mii = device_get_softc(sc->miibus);

	ae_stop(sc);
	ae_reset(sc);
	ae_pcie_init(sc);		/* Initialize PCIE stuff. */
	ae_phy_init(sc);
	ae_powersave_disable(sc);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Set the MAC address.
	 */
	bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN);
	val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
	AE_WRITE_4(sc, AE_EADDR0_REG, val);
	val = eaddr[0] << 8 | eaddr[1];
	AE_WRITE_4(sc, AE_EADDR1_REG, val);

	/* NOTE(review): 1536 is presumably the per-Rx-buffer stride —
	 * confirm against ae_alloc_rings(), which uses the same constant. */
	bzero(sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING);
	bzero(sc->txd_base, AE_TXD_BUFSIZE_DEFAULT);
	bzero(sc->txs_base, AE_TXS_COUNT_DEFAULT * 4);
	/*
	 * Set ring buffers base addresses.
	 */
	addr = sc->dma_rxd_busaddr;
	AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
	AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txd_busaddr;
	AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txs_busaddr;
	AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

	/*
	 * Configure ring buffers sizes.
	 */
	AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
	AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
	AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

	/*
	 * Configure interframe gap parameters.
	 */
	val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
	    AE_IFG_TXIPG_MASK) |
	    ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
	    AE_IFG_RXIPG_MASK) |
	    ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
	    AE_IFG_IPGR1_MASK) |
	    ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
	    AE_IFG_IPGR2_MASK);
	AE_WRITE_4(sc, AE_IFG_REG, val);

	/*
	 * Configure half-duplex operation.
	 */
	val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
	    AE_HDPX_LCOL_MASK) |
	    ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
	    AE_HDPX_RETRY_MASK) |
	    ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
	    AE_HDPX_ABEBT_MASK) |
	    ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
	    AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
	AE_WRITE_4(sc, AE_HDPX_REG, val);

	/*
	 * Configure interrupt moderate timer.
	 */
	AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
	val = AE_READ_4(sc, AE_MASTER_REG);
	val |= AE_MASTER_IMT_EN;
	AE_WRITE_4(sc, AE_MASTER_REG, val);

	/*
	 * Configure interrupt clearing timer.
	 */
	AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

	/*
	 * Configure MTU.
	 */
	val = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN;
	AE_WRITE_2(sc, AE_MTU_REG, val);

	/*
	 * Configure cut-through threshold.
	 */
	AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

	/*
	 * Configure flow control.
	 */
	AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
	AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
	    (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
	    (AE_RXD_COUNT_DEFAULT / 12));

	/*
	 * Init mailboxes.
	 */
	sc->txd_cur = sc->rxd_cur = 0;
	sc->txs_ack = sc->txd_ack = 0;
	sc->rxd_cur = 0;
	AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);

	sc->tx_inproc = 0;	/* Number of packets the chip processes now. */
	sc->flags |= AE_FLAG_TXAVAIL;	/* Free Tx's available. */

	/*
	 * Enable DMA.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

	/*
	 * Check if everything is OK.
	 */
	val = AE_READ_4(sc, AE_ISR_REG);
	if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
		device_printf(sc->dev, "Initialization failed.\n");
		return (ENXIO);
	}

	/*
	 * Clear interrupt status.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
	AE_WRITE_4(sc, AE_ISR_REG, 0x0);

	/*
	 * Enable interrupts.
	 */
	val = AE_READ_4(sc, AE_MASTER_REG);
	AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
	AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

	/*
	 * Disable WOL.
	 */
	AE_WRITE_4(sc, AE_WOL_REG, 0);

	/*
	 * Configure MAC.
	 */
	val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
	    AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
	    AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
	    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
	    ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
	    AE_MAC_PREAMBLE_MASK);
	AE_WRITE_4(sc, AE_MAC_REG, val);

	/*
	 * Configure Rx MAC.
	 */
	ae_rxfilter(sc);
	ae_rxvlan(sc);

	/*
	 * Enable Tx/Rx.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

	sc->flags &= ~AE_FLAG_LINK;
	mii_mediachg(mii);	/* Switch to the current media. */

	callout_reset(&sc->tick_ch, hz, ae_tick, sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

#ifdef AE_DEBUG
	device_printf(sc->dev, "Initialization complete.\n");
#endif

	return (0);
}
751
/*
 * Detach the driver.  Also used by ae_attach() to unwind a partial
 * attach, so every teardown step is guarded against state that was
 * never set up.  Ordering matters: stop the chip and drain the
 * callout/tasks before detaching the ifnet and freeing resources.
 */
static int
ae_detach(device_t dev)
{
	struct ae_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
	ifp = sc->ifp;
	if (device_is_attached(dev)) {
		AE_LOCK(sc);
		sc->flags |= AE_FLAG_DETACH;	/* Stop further task enqueues. */
		ae_stop(sc);
		AE_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		taskqueue_drain(sc->tq, &sc->int_task);
		taskqueue_drain(taskqueue_swi, &sc->link_task);
		ether_ifdetach(ifp);
	}
	if (sc->tq != NULL) {
		taskqueue_drain(sc->tq, &sc->int_task);
		taskqueue_free(sc->tq);
		sc->tq = NULL;
	}
	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_generic_detach(sc->dev);
	ae_dma_free(sc);
	if (sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
		sc->intrhand = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->ifp = NULL;
	}
	if (sc->spec_irq != NULL)
		bus_release_resources(dev, sc->spec_irq, sc->irq);
	if (sc->spec_mem != NULL)
		bus_release_resources(dev, sc->spec_mem, sc->mem);
	if ((sc->flags & AE_FLAG_MSI) != 0)
		pci_release_msi(dev);
	mtx_destroy(&sc->mtx);

	return (0);
}
800
/*
 * miibus read method: issue an MDIO read for PHY register 'reg' (the
 * 'phy' argument is unused; the PHY address is fixed) and poll until
 * the chip clears the START/BUSY bits.  Returns the register value on
 * success or 0 on timeout.
 */
static int
ae_miibus_readreg(device_t dev, int phy, int reg)
{
	ae_softc_t *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	/*
	 * Locking is done in upper layers.
	 */

	val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, val);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		val = AE_READ_4(sc, AE_MDIO_REG);
		if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->dev, "phy read timeout: %d.\n", reg);
		return (0);
	}
	/* Extract the data field from the last MDIO register read. */
	return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
}
835
/*
 * miibus write method: issue an MDIO write of 'val' to PHY register
 * 'reg' and poll until the chip clears the START/BUSY bits.  A timeout
 * is only logged; 0 is returned either way.
 */
static int
ae_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	ae_softc_t *sc;
	uint32_t aereg;
	int i;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	/*
	 * Locking is done in upper layers.
	 */

	aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
	    ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, aereg);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		aereg = AE_READ_4(sc, AE_MDIO_REG);
		if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->dev, "phy write timeout: %d.\n", reg);
	}
	return (0);
}
870
871static void
872ae_miibus_statchg(device_t dev)
873{
874	ae_softc_t *sc;
875
876	sc = device_get_softc(dev);
877	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
878}
879
880static void
881ae_mediastatus(if_t ifp, struct ifmediareq *ifmr)
882{
883	ae_softc_t *sc;
884	struct mii_data *mii;
885
886	sc = if_getsoftc(ifp);
887	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
888
889	AE_LOCK(sc);
890	mii = device_get_softc(sc->miibus);
891	mii_pollstat(mii);
892	ifmr->ifm_status = mii->mii_media_status;
893	ifmr->ifm_active = mii->mii_media_active;
894	AE_UNLOCK(sc);
895}
896
897static int
898ae_mediachange(if_t ifp)
899{
900	ae_softc_t *sc;
901	struct mii_data *mii;
902	struct mii_softc *mii_sc;
903	int error;
904
905	/* XXX: check IFF_UP ?? */
906	sc = if_getsoftc(ifp);
907	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
908	AE_LOCK(sc);
909	mii = device_get_softc(sc->miibus);
910	LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
911		PHY_RESET(mii_sc);
912	error = mii_mediachg(mii);
913	AE_UNLOCK(sc);
914
915	return (error);
916}
917
918static int
919ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
920{
921	int error;
922	uint32_t val;
923
924	KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));
925
926	/*
927	 * Not sure why, but Linux does this.
928	 */
929	val = AE_READ_4(sc, AE_SPICTL_REG);
930	if ((val & AE_SPICTL_VPD_EN) != 0) {
931		val &= ~AE_SPICTL_VPD_EN;
932		AE_WRITE_4(sc, AE_SPICTL_REG, val);
933	}
934	error = pci_find_cap(sc->dev, PCIY_VPD, vpdc);
935	return (error);
936}
937
/*
 * Read one 32-bit word from the chip's VPD area (registers start at
 * offset 0x100, 'reg' is a word index).  The result is returned in
 * *word.  Returns 0 on success or ETIMEDOUT if the chip never raises
 * the DONE bit.
 */
static int
ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
{
	uint32_t val;
	int i;

	AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);	/* Clear register value. */

	/*
	 * VPD registers start at offset 0x100. Read them.
	 */
	val = 0x100 + reg * 4;
	AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
	    AE_VPD_CAP_ADDR_MASK);
	for (i = 0; i < AE_VPD_TIMEOUT; i++) {
		DELAY(2000);
		val = AE_READ_4(sc, AE_VPD_CAP_REG);
		if ((val & AE_VPD_CAP_DONE) != 0)
			break;
	}
	if (i == AE_VPD_TIMEOUT) {
		device_printf(sc->dev, "timeout reading VPD register %d.\n",
		    reg);
		return (ETIMEDOUT);
	}
	*word = AE_READ_4(sc, AE_VPD_DATA_REG);
	return (0);
}
966
/*
 * Recover the factory Ethernet address from the VPD EEPROM.  The VPD
 * area holds (signature+register, value) word pairs; we scan for the
 * pairs addressing the EADDR registers and collect both halves into
 * eaddr[0] (low 4 bytes) and eaddr[1] (high 2 bytes).  Returns 0 on
 * success, an errno if the EEPROM is absent, unreadable, incomplete
 * (ENOENT) or holds an invalid address (EINVAL).
 */
static int
ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{
	uint32_t word, reg, val;
	int error;
	int found;
	int vpdc;
	int i;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));

	/*
	 * Check for EEPROM.
	 */
	error = ae_check_eeprom_present(sc, &vpdc);
	if (error != 0)
		return (error);

	/*
	 * Read the VPD configuration space.
	 * Each register is prefixed with signature,
	 * so we can check if it is valid.
	 */
	for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
		error = ae_vpd_read_word(sc, i, &word);
		if (error != 0)
			break;

		/*
		 * Check signature.
		 */
		if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
			break;
		reg = word >> AE_VPD_REG_SHIFT;
		i++;	/* Move to the next word. */

		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
			continue;

		error = ae_vpd_read_word(sc, i, &val);
		if (error != 0)
			break;
		if (reg == AE_EADDR0_REG)
			eaddr[0] = val;
		else
			eaddr[1] = val;
		found++;
	}

	/* Both EADDR words must have been seen for a complete address. */
	if (found < 2)
		return (ENOENT);

	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "VPD ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}
1029
1030static int
1031ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
1032{
1033
1034	/*
1035	 * BIOS is supposed to set this.
1036	 */
1037	eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
1038	eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
1039	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
1040
1041	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
1042		if (bootverbose)
1043			device_printf(sc->dev,
1044			    "Ethernet address registers are invalid.\n");
1045		return (EINVAL);
1046	}
1047	return (0);
1048}
1049
/*
 * Determine the station address: try the VPD (EEPROM) first, then the
 * address registers programmed by the BIOS, and finally fall back to a
 * randomly generated locally-administered address.
 */
static void
ae_retrieve_address(ae_softc_t *sc)
{
	uint32_t eaddr[2] = {0, 0};
	int error;

	/*
	 * Check for EEPROM.
	 */
	error = ae_get_vpd_eaddr(sc, eaddr);
	if (error != 0)
		error = ae_get_reg_eaddr(sc, eaddr);
	if (error != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Generating random ethernet address.\n");
		eaddr[0] = arc4random();

		/*
		 * Set OUI to ASUSTek COMPUTER INC.
		 */
		sc->eaddr[0] = 0x02;	/* U/L bit set. */
		sc->eaddr[1] = 0x1f;
		sc->eaddr[2] = 0xc6;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	} else {
		/* eaddr[1] supplies bytes 0-1, eaddr[0] bytes 2-5 (MSB first). */
		sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
		sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
		sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	}
}
1086
1087static void
1088ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1089{
1090	bus_addr_t *addr = arg;
1091
1092	if (error != 0)
1093		return;
1094	KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
1095	    nsegs));
1096	*addr = segs[0].ds_addr;
1097}
1098
1099static int
1100ae_alloc_rings(ae_softc_t *sc)
1101{
1102	bus_addr_t busaddr;
1103	int error;
1104
1105	/*
1106	 * Create parent DMA tag.
1107	 */
1108	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
1109	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1110	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
1111	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
1112	    &sc->dma_parent_tag);
1113	if (error != 0) {
1114		device_printf(sc->dev, "could not creare parent DMA tag.\n");
1115		return (error);
1116	}
1117
1118	/*
1119	 * Create DMA tag for TxD.
1120	 */
1121	error = bus_dma_tag_create(sc->dma_parent_tag,
1122	    8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1123	    NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
1124	    AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
1125	    &sc->dma_txd_tag);
1126	if (error != 0) {
1127		device_printf(sc->dev, "could not creare TxD DMA tag.\n");
1128		return (error);
1129	}
1130
1131	/*
1132	 * Create DMA tag for TxS.
1133	 */
1134	error = bus_dma_tag_create(sc->dma_parent_tag,
1135	    8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1136	    NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
1137	    AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
1138	    &sc->dma_txs_tag);
1139	if (error != 0) {
1140		device_printf(sc->dev, "could not creare TxS DMA tag.\n");
1141		return (error);
1142	}
1143
1144	/*
1145	 * Create DMA tag for RxD.
1146	 */
1147	error = bus_dma_tag_create(sc->dma_parent_tag,
1148	    128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1149	    NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 1,
1150	    AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 0, NULL, NULL,
1151	    &sc->dma_rxd_tag);
1152	if (error != 0) {
1153		device_printf(sc->dev, "could not creare TxS DMA tag.\n");
1154		return (error);
1155	}
1156
1157	/*
1158	 * Allocate TxD DMA memory.
1159	 */
1160	error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
1161	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1162	    &sc->dma_txd_map);
1163	if (error != 0) {
1164		device_printf(sc->dev,
1165		    "could not allocate DMA memory for TxD ring.\n");
1166		return (error);
1167	}
1168	error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
1169	    AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1170	if (error != 0 || busaddr == 0) {
1171		device_printf(sc->dev,
1172		    "could not load DMA map for TxD ring.\n");
1173		return (error);
1174	}
1175	sc->dma_txd_busaddr = busaddr;
1176
1177	/*
1178	 * Allocate TxS DMA memory.
1179	 */
1180	error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
1181	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1182	    &sc->dma_txs_map);
1183	if (error != 0) {
1184		device_printf(sc->dev,
1185		    "could not allocate DMA memory for TxS ring.\n");
1186		return (error);
1187	}
1188	error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
1189	    AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1190	if (error != 0 || busaddr == 0) {
1191		device_printf(sc->dev,
1192		    "could not load DMA map for TxS ring.\n");
1193		return (error);
1194	}
1195	sc->dma_txs_busaddr = busaddr;
1196
1197	/*
1198	 * Allocate RxD DMA memory.
1199	 */
1200	error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
1201	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1202	    &sc->dma_rxd_map);
1203	if (error != 0) {
1204		device_printf(sc->dev,
1205		    "could not allocate DMA memory for RxD ring.\n");
1206		return (error);
1207	}
1208	error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
1209	    sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING,
1210	    ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
1211	if (error != 0 || busaddr == 0) {
1212		device_printf(sc->dev,
1213		    "could not load DMA map for RxD ring.\n");
1214		return (error);
1215	}
1216	sc->dma_rxd_busaddr = busaddr + AE_RXD_PADDING;
1217	sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + AE_RXD_PADDING);
1218
1219	return (0);
1220}
1221
/*
 * Tear down everything set up by ae_alloc_rings(): for each ring, unload
 * the DMA map (only if it was loaded), free the DMA memory (only if it
 * was allocated) and destroy the tag.  The parent tag must be destroyed
 * last, after all of its child tags.  Safe to call on a partially
 * initialized softc.
 */
static void
ae_dma_free(ae_softc_t *sc)
{

	if (sc->dma_txd_tag != NULL) {
		if (sc->dma_txd_busaddr != 0)
			bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
		if (sc->txd_base != NULL)
			bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
			    sc->dma_txd_map);
		bus_dma_tag_destroy(sc->dma_txd_tag);
		sc->dma_txd_tag = NULL;
		sc->txd_base = NULL;
		sc->dma_txd_busaddr = 0;
	}
	if (sc->dma_txs_tag != NULL) {
		if (sc->dma_txs_busaddr != 0)
			bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
		if (sc->txs_base != NULL)
			bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
			    sc->dma_txs_map);
		bus_dma_tag_destroy(sc->dma_txs_tag);
		sc->dma_txs_tag = NULL;
		sc->txs_base = NULL;
		sc->dma_txs_busaddr = 0;
	}
	if (sc->dma_rxd_tag != NULL) {
		if (sc->dma_rxd_busaddr != 0)
			bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
		if (sc->rxd_base_dma != NULL)
			bus_dmamem_free(sc->dma_rxd_tag, sc->rxd_base_dma,
			    sc->dma_rxd_map);
		bus_dma_tag_destroy(sc->dma_rxd_tag);
		sc->dma_rxd_tag = NULL;
		sc->rxd_base_dma = NULL;
		sc->dma_rxd_busaddr = 0;
	}
	if (sc->dma_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->dma_parent_tag);
		sc->dma_parent_tag = NULL;
	}
}
1264
1265static int
1266ae_shutdown(device_t dev)
1267{
1268	ae_softc_t *sc;
1269	int error;
1270
1271	sc = device_get_softc(dev);
1272	KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
1273
1274	error = ae_suspend(dev);
1275	AE_LOCK(sc);
1276	ae_powersave_enable(sc);
1277	AE_UNLOCK(sc);
1278	return (error);
1279}
1280
1281static void
1282ae_powersave_disable(ae_softc_t *sc)
1283{
1284	uint32_t val;
1285
1286	AE_LOCK_ASSERT(sc);
1287
1288	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
1289	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
1290	if (val & AE_PHY_DBG_POWERSAVE) {
1291		val &= ~AE_PHY_DBG_POWERSAVE;
1292		AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
1293		DELAY(1000);
1294	}
1295}
1296
/*
 * Put the PHY into power-save mode via its debug register interface.
 */
static void
ae_powersave_enable(ae_softc_t *sc)
{
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	/*
	 * XXX magic numbers.
	 *
	 * NOTE(review): the second write below targets AE_PHY_DBG_ADDR even
	 * though the value was read from AE_PHY_DBG_DATA; it looks like it
	 * may have been intended for the DATA register -- confirm against
	 * vendor initialization code before changing.
	 */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
}
1315
/*
 * Program power-management state before suspend/shutdown: set up
 * wake-on-LAN (magic packet when the link is up, link-change otherwise)
 * if enabled, or put the PHY into power-save mode if not, then enable
 * PME generation in PCI power-management config space.
 */
static void
ae_pm_init(ae_softc_t *sc)
{
	if_t ifp;
	uint32_t val;
	uint16_t pmstat;
	struct mii_data *mii;
	int pmc;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((sc->flags & AE_FLAG_PMG) == 0) {
		/* Disable WOL entirely. */
		AE_WRITE_4(sc, AE_WOL_REG, 0);
		return;
	}

	/*
	 * Configure WOL if enabled.
	 */
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
		mii = device_get_softc(sc->miibus);
		mii_pollstat(mii);
		if ((mii->mii_media_status & IFM_AVALID) != 0 &&
		    (mii->mii_media_status & IFM_ACTIVE) != 0) {
			/* Link is up: wake on magic packet. */
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC | \
			    AE_WOL_MAGIC_PME);

			/*
			 * Configure MAC.
			 */
			val = AE_MAC_RX_EN | AE_MAC_CLK_PHY | \
			    AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD | \
			    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & \
			    AE_HALFBUF_MASK) | \
			    ((AE_MAC_PREAMBLE_DEFAULT << \
			    AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) | \
			    AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
			if ((IFM_OPTIONS(mii->mii_media_active) & \
			    IFM_FDX) != 0)
				val |= AE_MAC_FULL_DUPLEX;
			AE_WRITE_4(sc, AE_MAC_REG, val);

		} else {	/* No link. */
			/* Wake on link change instead; MAC stays disabled. */
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG | \
			    AE_WOL_LNKCHG_PME);
			AE_WRITE_4(sc, AE_MAC_REG, 0);
		}
	} else {
		ae_powersave_enable(sc);
	}

	/*
	 * PCIE hacks. Magic numbers.
	 */
	val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
	val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
	AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
	val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
	val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);

	/*
	 * Configure PME.
	 */
	if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
}
1390
1391static int
1392ae_suspend(device_t dev)
1393{
1394	ae_softc_t *sc;
1395
1396	sc = device_get_softc(dev);
1397
1398	AE_LOCK(sc);
1399	ae_stop(sc);
1400	ae_pm_init(sc);
1401	AE_UNLOCK(sc);
1402
1403	return (0);
1404}
1405
1406static int
1407ae_resume(device_t dev)
1408{
1409	ae_softc_t *sc;
1410
1411	sc = device_get_softc(dev);
1412	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1413
1414	AE_LOCK(sc);
1415	AE_READ_4(sc, AE_WOL_REG);	/* Clear WOL status. */
1416	if ((if_getflags(sc->ifp) & IFF_UP) != 0)
1417		ae_init_locked(sc);
1418	AE_UNLOCK(sc);
1419
1420	return (0);
1421}
1422
1423static unsigned int
1424ae_tx_avail_size(ae_softc_t *sc)
1425{
1426	unsigned int avail;
1427
1428	if (sc->txd_cur >= sc->txd_ack)
1429		avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
1430	else
1431		avail = sc->txd_ack - sc->txd_cur;
1432
1433	return (avail);
1434}
1435
/*
 * Copy a frame into the TxD byte ring.  Each frame is stored as an
 * ae_txd_t header immediately followed by the frame data, padded to a
 * 4-byte boundary; one TxS status slot is consumed per frame.  Returns
 * ENOBUFS (leaving the mbuf untouched) if the ring has no room.
 */
static int
ae_encap(ae_softc_t *sc, struct mbuf **m_head)
{
	struct mbuf *m0;
	ae_txd_t *hdr;
	unsigned int to_end;
	uint16_t len;

	AE_LOCK_ASSERT(sc);

	m0 = *m_head;
	len = m0->m_pkthdr.len;

	/* Header + data + up-to-3 bytes of alignment padding must fit. */
	if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
	    len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
#ifdef AE_DEBUG
		if_printf(sc->ifp, "No free Tx available.\n");
#endif
		return ENOBUFS;
	}

	hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
	bzero(hdr, sizeof(*hdr));
	/* Skip header size. */
	sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
	/* Space available to the end of the ring */
	to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
	if (to_end >= len) {
		m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
	} else {
		/* Frame wraps: copy the remainder to the start of the ring. */
		m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
		    sc->txd_cur));
		m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
	}

	/*
	 * Set TxD flags and parameters.
	 */
	if ((m0->m_flags & M_VLANTAG) != 0) {
		/* Have the hardware insert the VLAN tag on transmit. */
		hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
		hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
	} else {
		hdr->len = htole16(len);
	}

	/*
	 * Set current TxD position and round up to a 4-byte boundary.
	 */
	sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
	if (sc->txd_cur == sc->txd_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;
#ifdef AE_DEBUG
	if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
#endif

	/*
	 * Update TxS position and check if there are empty TxS available.
	 */
	sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
	sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
	if (sc->txs_cur == sc->txs_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;

	/*
	 * Synchronize DMA memory.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
1509
1510static void
1511ae_start(if_t ifp)
1512{
1513	ae_softc_t *sc;
1514
1515	sc = if_getsoftc(ifp);
1516	AE_LOCK(sc);
1517	ae_start_locked(ifp);
1518	AE_UNLOCK(sc);
1519}
1520
/*
 * Dequeue frames from the interface send queue and copy them into the
 * TxD ring until the queue empties or the ring fills, then hand the new
 * producer index to the hardware.
 */
static void
ae_start_locked(if_t ifp)
{
	ae_softc_t *sc;
	unsigned int count;
	struct mbuf *m0;
	int error;

	sc = if_getsoftc(ifp);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK_ASSERT(sc);

#ifdef AE_DEBUG
	if_printf(ifp, "Start called.\n");
#endif

	/* Nothing to do unless running, not blocked, and link is up. */
	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0)
		return;

	count = 0;
	while (!if_sendq_empty(ifp)) {
		m0 = if_dequeue(ifp);
		if (m0 == NULL)
			break;	/* Nothing to do. */

		error = ae_encap(sc, &m0);
		if (error != 0) {
			if (m0 != NULL) {
				/* Ring full: requeue and back-pressure. */
				if_sendq_prepend(ifp, m0);
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
#ifdef AE_DEBUG
				if_printf(ifp, "Setting OACTIVE.\n");
#endif
			}
			break;
		}
		count++;
		sc->tx_inproc++;

		/* Bounce a copy of the frame to BPF. */
		ETHER_BPF_MTAP(ifp, m0);

		/* ae_encap() copied the data; the mbuf can be freed now. */
		m_freem(m0);
	}

	if (count > 0) {	/* Something was dequeued. */
		/* txd_cur is a byte offset; the mailbox takes it / 4. */
		AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
		sc->wd_timer = AE_TX_TIMEOUT;	/* Load watchdog. */
#ifdef AE_DEBUG
		if_printf(ifp, "%d packets dequeued.\n", count);
		if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
#endif
	}
}
1576
/*
 * Deferred link-state change handler: re-evaluates the PHY's media
 * status, stops the MACs, and (if the link is up at a supported speed)
 * reconfigures the MAC and restarts the DMA and MAC engines.
 */
static void
ae_link_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	if_t ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);

	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		AE_UNLOCK(sc);	/* XXX: could happen? */
		return;
	}

	/* Recompute the link flag from the current media status. */
	sc->flags &= ~AE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch(IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	if ((sc->flags & AE_FLAG_LINK) != 0) {
		ae_mac_config(sc);

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
	AE_UNLOCK(sc);
}
1634
1635static void
1636ae_stop_rxmac(ae_softc_t *sc)
1637{
1638	uint32_t val;
1639	int i;
1640
1641	AE_LOCK_ASSERT(sc);
1642
1643	/*
1644	 * Stop Rx MAC engine.
1645	 */
1646	val = AE_READ_4(sc, AE_MAC_REG);
1647	if ((val & AE_MAC_RX_EN) != 0) {
1648		val &= ~AE_MAC_RX_EN;
1649		AE_WRITE_4(sc, AE_MAC_REG, val);
1650	}
1651
1652	/*
1653	 * Stop Rx DMA engine.
1654	 */
1655	if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
1656		AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);
1657
1658	/*
1659	 * Wait for IDLE state.
1660	 */
1661	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1662		val = AE_READ_4(sc, AE_IDLE_REG);
1663		if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
1664			break;
1665		DELAY(100);
1666	}
1667	if (i == AE_IDLE_TIMEOUT)
1668		device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
1669}
1670
1671static void
1672ae_stop_txmac(ae_softc_t *sc)
1673{
1674	uint32_t val;
1675	int i;
1676
1677	AE_LOCK_ASSERT(sc);
1678
1679	/*
1680	 * Stop Tx MAC engine.
1681	 */
1682	val = AE_READ_4(sc, AE_MAC_REG);
1683	if ((val & AE_MAC_TX_EN) != 0) {
1684		val &= ~AE_MAC_TX_EN;
1685		AE_WRITE_4(sc, AE_MAC_REG, val);
1686	}
1687
1688	/*
1689	 * Stop Tx DMA engine.
1690	 */
1691	if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
1692		AE_WRITE_1(sc, AE_DMAREAD_REG, 0);
1693
1694	/*
1695	 * Wait for IDLE state.
1696	 */
1697	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
1698		val = AE_READ_4(sc, AE_IDLE_REG);
1699		if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
1700			break;
1701		DELAY(100);
1702	}
1703	if (i == AE_IDLE_TIMEOUT)
1704		device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
1705}
1706
1707static void
1708ae_mac_config(ae_softc_t *sc)
1709{
1710	struct mii_data *mii;
1711	uint32_t val;
1712
1713	AE_LOCK_ASSERT(sc);
1714
1715	mii = device_get_softc(sc->miibus);
1716	val = AE_READ_4(sc, AE_MAC_REG);
1717	val &= ~AE_MAC_FULL_DUPLEX;
1718	/* XXX disable AE_MAC_TX_FLOW_EN? */
1719
1720	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
1721		val |= AE_MAC_FULL_DUPLEX;
1722
1723	AE_WRITE_4(sc, AE_MAC_REG, val);
1724}
1725
1726static int
1727ae_intr(void *arg)
1728{
1729	ae_softc_t *sc;
1730	uint32_t val;
1731
1732	sc = (ae_softc_t *)arg;
1733	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
1734
1735	val = AE_READ_4(sc, AE_ISR_REG);
1736	if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
1737		return (FILTER_STRAY);
1738
1739	/* Disable interrupts. */
1740	AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);
1741
1742	/* Schedule interrupt processing. */
1743	taskqueue_enqueue(sc->tq, &sc->int_task);
1744
1745	return (FILTER_HANDLED);
1746}
1747
/*
 * Interrupt taskqueue handler: reads and acknowledges the interrupt
 * status, handles fatal events by reinitializing the chip, dispatches
 * Tx/Rx completions, and re-enables interrupts.
 */
static void
ae_int_task(void *arg, int pending)
{
	ae_softc_t *sc;
	if_t ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;

	AE_LOCK(sc);

	ifp = sc->ifp;

	val = AE_READ_4(sc, AE_ISR_REG);	/* Read interrupt status. */
	if (val == 0) {
		AE_UNLOCK(sc);
		return;
	}

	/*
	 * Clear interrupts and disable them.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

#ifdef AE_DEBUG
	if_printf(ifp, "Interrupt received: 0x%08x\n", val);
#endif

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		/* Fatal DMA timeouts / link loss: reinitialize the chip. */
		if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
		    AE_ISR_PHY_LINKDOWN)) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			ae_init_locked(sc);
			AE_UNLOCK(sc);
			return;
		}
		if ((val & AE_ISR_TX_EVENT) != 0)
			ae_tx_intr(sc);
		if ((val & AE_ISR_RX_EVENT) != 0)
			ae_rx_intr(sc);
		/*
		 * Re-enable interrupts.
		 */
		AE_WRITE_4(sc, AE_ISR_REG, 0);

		/* Restart transmission if Tx ring space was reclaimed. */
		if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
			if (!if_sendq_empty(ifp))
				ae_start_locked(ifp);
		}
	}

	AE_UNLOCK(sc);
}
1801
1802static void
1803ae_tx_intr(ae_softc_t *sc)
1804{
1805	if_t ifp;
1806	ae_txd_t *txd;
1807	ae_txs_t *txs;
1808	uint16_t flags;
1809
1810	AE_LOCK_ASSERT(sc);
1811
1812	ifp = sc->ifp;
1813
1814#ifdef AE_DEBUG
1815	if_printf(ifp, "Tx interrupt occuried.\n");
1816#endif
1817
1818	/*
1819	 * Syncronize DMA buffers.
1820	 */
1821	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
1822	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1823	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1824	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1825
1826	for (;;) {
1827		txs = sc->txs_base + sc->txs_ack;
1828		flags = le16toh(txs->flags);
1829		if ((flags & AE_TXS_UPDATE) == 0)
1830			break;
1831		txs->flags = htole16(flags & ~AE_TXS_UPDATE);
1832		/* Update stats. */
1833		ae_update_stats_tx(flags, &sc->stats);
1834
1835		/*
1836		 * Update TxS position.
1837		 */
1838		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
1839		sc->flags |= AE_FLAG_TXAVAIL;
1840
1841		txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
1842		if (txs->len != txd->len)
1843			device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
1844			    le16toh(txs->len), le16toh(txd->len));
1845
1846		/*
1847		 * Move txd ack and align on 4-byte boundary.
1848		 */
1849		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
1850		    sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
1851
1852		if ((flags & AE_TXS_SUCCESS) != 0)
1853			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1854		else
1855			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1856
1857		sc->tx_inproc--;
1858	}
1859
1860	if ((sc->flags & AE_FLAG_TXAVAIL) != 0)
1861		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
1862	if (sc->tx_inproc < 0) {
1863		if_printf(ifp, "Received stray Tx interrupt(s).\n");
1864		sc->tx_inproc = 0;
1865	}
1866
1867	if (sc->tx_inproc == 0)
1868		sc->wd_timer = 0;	/* Unarm watchdog. */
1869
1870	/*
1871	 * Syncronize DMA buffers.
1872	 */
1873	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
1874	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1875	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
1876	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1877}
1878
1879static void
1880ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
1881{
1882	if_t ifp;
1883	struct mbuf *m;
1884	unsigned int size;
1885	uint16_t flags;
1886
1887	AE_LOCK_ASSERT(sc);
1888
1889	ifp = sc->ifp;
1890	flags = le16toh(rxd->flags);
1891
1892#ifdef AE_DEBUG
1893	if_printf(ifp, "Rx interrupt occuried.\n");
1894#endif
1895	size = le16toh(rxd->len) - ETHER_CRC_LEN;
1896	if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
1897		if_printf(ifp, "Runt frame received.");
1898		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1899		return;
1900	}
1901
1902	m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
1903	if (m == NULL) {
1904		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1905		return;
1906	}
1907
1908	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 &&
1909	    (flags & AE_RXD_HAS_VLAN) != 0) {
1910		m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
1911		m->m_flags |= M_VLANTAG;
1912	}
1913
1914	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1915	/*
1916	 * Pass it through.
1917	 */
1918	AE_UNLOCK(sc);
1919	if_input(ifp, m);
1920	AE_LOCK(sc);
1921}
1922
/*
 * Rx completion handler: walk the RxD ring, handing every descriptor the
 * hardware marked with AE_RXD_UPDATE to ae_rxeof(), then report the new
 * consumer index back to the chip.
 */
static void
ae_rx_intr(ae_softc_t *sc)
{
	ae_rxd_t *rxd;
	if_t ifp;
	uint16_t flags;
	int count;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	/*
	 * Syncronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (count = 0;; count++) {
		rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
		flags = le16toh(rxd->flags);
		if ((flags & AE_RXD_UPDATE) == 0)
			break;
		/* Clear the update bit so the descriptor can be reused. */
		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
		/* Update stats. */
		ae_update_stats_rx(flags, &sc->stats);

		/*
		 * Update position index.
		 */
		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;

		if ((flags & AE_RXD_SUCCESS) != 0)
			ae_rxeof(sc, rxd);
		else
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
	}

	if (count > 0) {
		bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Update Rx index.
		 */
		AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
	}
}
1972
/*
 * Tx watchdog, driven once a second from ae_tick(): when the armed timer
 * expires, assume the transmitter hung and reinitialize the interface.
 */
static void
ae_watchdog(ae_softc_t *sc)
{
	if_t ifp;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	if (sc->wd_timer == 0 || --sc->wd_timer != 0)
		return;		/* Nothing to do. */

	if ((sc->flags & AE_FLAG_LINK) == 0)
		if_printf(ifp, "watchdog timeout (missed link).\n");
	else
		if_printf(ifp, "watchdog timeout - resetting.\n");

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	ae_init_locked(sc);
	if (!if_sendq_empty(ifp))
		ae_start_locked(ifp);
}
1996
1997static void
1998ae_tick(void *arg)
1999{
2000	ae_softc_t *sc;
2001	struct mii_data *mii;
2002
2003	sc = (ae_softc_t *)arg;
2004	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
2005	AE_LOCK_ASSERT(sc);
2006
2007	mii = device_get_softc(sc->miibus);
2008	mii_tick(mii);
2009	ae_watchdog(sc);	/* Watchdog check. */
2010	callout_reset(&sc->tick_ch, hz, ae_tick, sc);
2011}
2012
2013static void
2014ae_rxvlan(ae_softc_t *sc)
2015{
2016	if_t ifp;
2017	uint32_t val;
2018
2019	AE_LOCK_ASSERT(sc);
2020	ifp = sc->ifp;
2021	val = AE_READ_4(sc, AE_MAC_REG);
2022	val &= ~AE_MAC_RMVLAN_EN;
2023	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2024		val |= AE_MAC_RMVLAN_EN;
2025	AE_WRITE_4(sc, AE_MAC_REG, val);
2026}
2027
2028static u_int
2029ae_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2030{
2031	uint32_t crc, *mchash = arg;
2032
2033	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
2034	mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2035
2036	return (1);
2037}
2038
2039static void
2040ae_rxfilter(ae_softc_t *sc)
2041{
2042	if_t ifp;
2043	uint32_t mchash[2];
2044	uint32_t rxcfg;
2045
2046	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
2047
2048	AE_LOCK_ASSERT(sc);
2049
2050	ifp = sc->ifp;
2051
2052	rxcfg = AE_READ_4(sc, AE_MAC_REG);
2053	rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);
2054
2055	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
2056		rxcfg |= AE_MAC_BCAST_EN;
2057	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
2058		rxcfg |= AE_MAC_PROMISC_EN;
2059	if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
2060		rxcfg |= AE_MAC_MCAST_EN;
2061
2062	/*
2063	 * Wipe old settings.
2064	 */
2065	AE_WRITE_4(sc, AE_REG_MHT0, 0);
2066	AE_WRITE_4(sc, AE_REG_MHT1, 0);
2067	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2068		AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
2069		AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
2070		AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
2071		return;
2072	}
2073
2074	/*
2075	 * Load multicast tables.
2076	 */
2077	bzero(mchash, sizeof(mchash));
2078	if_foreach_llmaddr(ifp, ae_hash_maddr, &mchash);
2079	AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
2080	AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
2081	AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
2082}
2083
/*
 * ioctl handler: MTU changes, up/down and Rx-filter flag changes,
 * multicast list updates, media selection, and VLAN capability toggling.
 * Everything else is delegated to ether_ioctl().
 */
static int
ae_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct ae_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
			/* MTU change requires reinitializing the chip. */
			AE_LOCK(sc);
			if_setmtu(ifp, ifr->ifr_mtu);
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				ae_init_locked(sc);
			}
			AE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		AE_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				/* Only reprogram the Rx filter if the
				 * relevant flags actually changed. */
				if (((if_getflags(ifp) ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ae_rxfilter(sc);
			} else {
				if ((sc->flags & AE_FLAG_DETACH) == 0)
					ae_init_locked(sc);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
				ae_stop(sc);
		}
		/* Remember flags for the next change comparison. */
		sc->if_flags = if_getflags(ifp);
		AE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		AE_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			ae_rxfilter(sc);
		AE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		AE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			ae_rxvlan(sc);
		}
		VLAN_CAPABILITIES(ifp);
		AE_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
2157
/*
 * Stop the interface: cancel the tick callout and watchdog, mask and
 * clear interrupts, halt the MAC and DMA engines, and wait for the chip
 * to report fully idle.
 */
static void
ae_stop(ae_softc_t *sc)
{
	if_t ifp;
	int i;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->flags &= ~AE_FLAG_LINK;
	sc->wd_timer = 0;	/* Cancel watchdog. */
	callout_stop(&sc->tick_ch);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_IMR_REG, 0);
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_txmac(sc);
	ae_stop_rxmac(sc);

	/*
	 * Stop DMA engines.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "could not enter idle state in stop.\n");
}
2201
2202static void
2203ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
2204{
2205
2206	if ((flags & AE_TXS_BCAST) != 0)
2207		stats->tx_bcast++;
2208	if ((flags & AE_TXS_MCAST) != 0)
2209		stats->tx_mcast++;
2210	if ((flags & AE_TXS_PAUSE) != 0)
2211		stats->tx_pause++;
2212	if ((flags & AE_TXS_CTRL) != 0)
2213		stats->tx_ctrl++;
2214	if ((flags & AE_TXS_DEFER) != 0)
2215		stats->tx_defer++;
2216	if ((flags & AE_TXS_EXCDEFER) != 0)
2217		stats->tx_excdefer++;
2218	if ((flags & AE_TXS_SINGLECOL) != 0)
2219		stats->tx_singlecol++;
2220	if ((flags & AE_TXS_MULTICOL) != 0)
2221		stats->tx_multicol++;
2222	if ((flags & AE_TXS_LATECOL) != 0)
2223		stats->tx_latecol++;
2224	if ((flags & AE_TXS_ABORTCOL) != 0)
2225		stats->tx_abortcol++;
2226	if ((flags & AE_TXS_UNDERRUN) != 0)
2227		stats->tx_underrun++;
2228}
2229
2230static void
2231ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
2232{
2233
2234	if ((flags & AE_RXD_BCAST) != 0)
2235		stats->rx_bcast++;
2236	if ((flags & AE_RXD_MCAST) != 0)
2237		stats->rx_mcast++;
2238	if ((flags & AE_RXD_PAUSE) != 0)
2239		stats->rx_pause++;
2240	if ((flags & AE_RXD_CTRL) != 0)
2241		stats->rx_ctrl++;
2242	if ((flags & AE_RXD_CRCERR) != 0)
2243		stats->rx_crcerr++;
2244	if ((flags & AE_RXD_CODEERR) != 0)
2245		stats->rx_codeerr++;
2246	if ((flags & AE_RXD_RUNT) != 0)
2247		stats->rx_runt++;
2248	if ((flags & AE_RXD_FRAG) != 0)
2249		stats->rx_frag++;
2250	if ((flags & AE_RXD_TRUNC) != 0)
2251		stats->rx_trunc++;
2252	if ((flags & AE_RXD_ALIGN) != 0)
2253		stats->rx_align++;
2254}
2255