/* if_lpe.c revision 243882 */
1/*-
2 * Copyright (c) 2011 Jakub Wojciech Klama <jceel@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/arm/lpc/if_lpe.c 243882 2012-12-05 08:04:20Z glebius $");
29
30#include <sys/param.h>
31#include <sys/endian.h>
32#include <sys/systm.h>
33#include <sys/sockio.h>
34#include <sys/mbuf.h>
35#include <sys/malloc.h>
36#include <sys/kernel.h>
37#include <sys/module.h>
38#include <sys/lock.h>
39#include <sys/mutex.h>
40#include <sys/rman.h>
41#include <sys/bus.h>
42#include <sys/socket.h>
43#include <machine/bus.h>
44#include <machine/intr.h>
45
46#include <net/if.h>
47#include <net/if_arp.h>
48#include <net/ethernet.h>
49#include <net/if_dl.h>
50#include <net/if_media.h>
51#include <net/if_types.h>
52
53#include <net/bpf.h>
54
55#include <dev/ofw/ofw_bus.h>
56#include <dev/ofw/ofw_bus_subr.h>
57
58#include <dev/mii/mii.h>
59#include <dev/mii/miivar.h>
60
61#include <arm/lpc/lpcreg.h>
62#include <arm/lpc/lpcvar.h>
63#include <arm/lpc/if_lpereg.h>
64
65#include "miibus_if.h"
66
/*
 * Build-time debug switch: DEBUG is defined and then immediately
 * undefined, so debugf() compiles away by default.  Delete the #undef
 * line to turn the tracing printfs back on.
 */
#define	DEBUG
#undef	DEBUG

#ifdef DEBUG
/* Prefix every debug message with the name of the calling function. */
#define debugf(fmt, args...) do { printf("%s(): ", __func__);   \
    printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif
76
/* Callback context used to carry a bus address out of a DMA map load. */
struct lpe_dmamap_arg {
	bus_addr_t		lpe_dma_busaddr;	/* set by lpe_dmamap_cb() */
};
80
/* Software-side state for one Rx descriptor. */
struct lpe_rxdesc {
	struct mbuf *		lpe_rxdesc_mbuf;	/* cluster the hardware fills */
	bus_dmamap_t		lpe_rxdesc_dmamap;	/* DMA map for that cluster */
};
85
/* Software-side state for one Tx descriptor. */
struct lpe_txdesc {
	int			lpe_txdesc_first;	/* 1 on the first segment of a frame */
	struct mbuf *		lpe_txdesc_mbuf;	/* frame owned until Tx completion */
	bus_dmamap_t		lpe_txdesc_dmamap;	/* DMA map covering the frame */
};
91
/*
 * DMA tags/maps plus per-descriptor software state and the Tx ring
 * bookkeeping indices (producer, last-reaped, in-use count).
 */
struct lpe_chain_data {
	bus_dma_tag_t		lpe_parent_tag;		/* parent of all tags below */
	bus_dma_tag_t		lpe_tx_ring_tag;	/* Tx descriptor ring */
	bus_dmamap_t		lpe_tx_ring_map;
	bus_dma_tag_t		lpe_tx_status_tag;	/* Tx status ring */
	bus_dmamap_t		lpe_tx_status_map;
	bus_dma_tag_t		lpe_tx_buf_tag;		/* Tx mbuf buffers */
	bus_dma_tag_t		lpe_rx_ring_tag;	/* Rx descriptor ring */
	bus_dmamap_t		lpe_rx_ring_map;
	bus_dma_tag_t		lpe_rx_status_tag;	/* Rx status ring */
	bus_dmamap_t		lpe_rx_status_map;
	bus_dma_tag_t		lpe_rx_buf_tag;		/* Rx mbuf clusters */
	struct lpe_rxdesc	lpe_rx_desc[LPE_RXDESC_NUM];
	struct lpe_txdesc	lpe_tx_desc[LPE_TXDESC_NUM];
	int			lpe_tx_prod;		/* next Tx slot to fill */
	int			lpe_tx_last;		/* next Tx slot to reap */
	int			lpe_tx_used;		/* descriptors in flight */
};
110
/* KVA and bus addresses of the descriptor and status rings. */
struct lpe_ring_data {
	struct lpe_hwdesc *	lpe_rx_ring;		/* Rx descriptors (KVA) */
	struct lpe_hwstatus *	lpe_rx_status;		/* Rx status words (KVA) */
	bus_addr_t		lpe_rx_ring_phys;	/* same, as seen by the EMAC */
	bus_addr_t		lpe_rx_status_phys;
	struct lpe_hwdesc *	lpe_tx_ring;		/* Tx descriptors (KVA) */
	struct lpe_hwstatus *	lpe_tx_status;		/* Tx status words (KVA) */
	bus_addr_t		lpe_tx_ring_phys;
	bus_addr_t		lpe_tx_status_phys;
};
121
/* Per-device driver state, protected by lpe_mtx unless noted. */
struct lpe_softc {
	struct ifnet *		lpe_ifp;		/* network interface */
	struct mtx		lpe_mtx;		/* driver lock */
	phandle_t		lpe_ofw;		/* FDT node */
	device_t		lpe_dev;
	device_t		lpe_miibus;		/* attached MII bus */
	uint8_t			lpe_enaddr[6];		/* station MAC address */
	struct resource	*	lpe_mem_res;		/* register window */
	struct resource *	lpe_irq_res;		/* interrupt line */
	void *			lpe_intrhand;		/* interrupt cookie */
	bus_space_tag_t		lpe_bst;
	bus_space_handle_t	lpe_bsh;
#define	LPE_FLAG_LINK		(1 << 0)
	uint32_t		lpe_flags;		/* LPE_FLAG_* bits */
	int			lpe_watchdog_timer;	/* Tx timeout countdown, seconds */
	struct callout		lpe_tick;		/* 1 Hz MII/watchdog callout */
	struct lpe_chain_data	lpe_cdata;
	struct lpe_ring_data	lpe_rdata;
};
141
142static int lpe_probe(device_t);
143static int lpe_attach(device_t);
144static int lpe_detach(device_t);
145static int lpe_miibus_readreg(device_t, int, int);
146static int lpe_miibus_writereg(device_t, int, int, int);
147static void lpe_miibus_statchg(device_t);
148
149static void lpe_reset(struct lpe_softc *);
150static void lpe_init(void *);
151static void lpe_init_locked(struct lpe_softc *);
152static void lpe_start(struct ifnet *);
153static void lpe_start_locked(struct ifnet *);
154static void lpe_stop(struct lpe_softc *);
155static void lpe_stop_locked(struct lpe_softc *);
156static int lpe_ioctl(struct ifnet *, u_long, caddr_t);
157static void lpe_set_rxmode(struct lpe_softc *);
158static void lpe_set_rxfilter(struct lpe_softc *);
159static void lpe_intr(void *);
160static void lpe_rxintr(struct lpe_softc *);
161static void lpe_txintr(struct lpe_softc *);
162static void lpe_tick(void *);
163static void lpe_watchdog(struct lpe_softc *);
164static int lpe_encap(struct lpe_softc *, struct mbuf **);
165static int lpe_dma_alloc(struct lpe_softc *);
166static int lpe_dma_alloc_rx(struct lpe_softc *);
167static int lpe_dma_alloc_tx(struct lpe_softc *);
168static int lpe_init_rx(struct lpe_softc *);
169static int lpe_init_rxbuf(struct lpe_softc *, int);
170static void lpe_discard_rxbuf(struct lpe_softc *, int);
171static void lpe_dmamap_cb(void *, bus_dma_segment_t *, int, int);
172static int lpe_ifmedia_upd(struct ifnet *);
173static void lpe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
174
/* Driver lock helpers. */
#define	lpe_lock(_sc)		mtx_lock(&(_sc)->lpe_mtx)
#define	lpe_unlock(_sc)		mtx_unlock(&(_sc)->lpe_mtx)
/*
 * The assert macro's parameter was previously named 'sc' while the body
 * referenced '_sc'; that only compiled because mtx_assert() expands to
 * nothing in kernels built without INVARIANTS.  Name the parameter
 * '_sc' to match the other two macros.
 */
#define	lpe_lock_assert(_sc)	mtx_assert(&(_sc)->lpe_mtx, MA_OWNED)
178
/* Register accessors for the EMAC memory window. */
#define	lpe_read_4(_sc, _reg)		\
    bus_space_read_4((_sc)->lpe_bst, (_sc)->lpe_bsh, (_reg))
#define	lpe_write_4(_sc, _reg, _val)	\
    bus_space_write_4((_sc)->lpe_bst, (_sc)->lpe_bsh, (_reg), (_val))

/* Any of these Rx status bits marks a received frame as bad. */
#define	LPE_HWDESC_RXERRS	(LPE_HWDESC_CRCERROR | LPE_HWDESC_SYMBOLERROR | \
    LPE_HWDESC_LENGTHERROR | LPE_HWDESC_ALIGNERROR | LPE_HWDESC_OVERRUN | \
    LPE_HWDESC_RXNODESCR)

/* Any of these Tx status bits marks a transmission as failed. */
#define	LPE_HWDESC_TXERRS	(LPE_HWDESC_EXCDEFER | LPE_HWDESC_EXCCOLL | \
    LPE_HWDESC_LATECOLL | LPE_HWDESC_UNDERRUN | LPE_HWDESC_TXNODESCR)
190
191static int
192lpe_probe(device_t dev)
193{
194
195	if (!ofw_bus_is_compatible(dev, "lpc,ethernet"))
196		return (ENXIO);
197
198	device_set_desc(dev, "LPC32x0 10/100 Ethernet");
199	return (BUS_PROBE_DEFAULT);
200}
201
/*
 * Attach: read the MAC address, map registers, hook the interrupt,
 * create the ifnet, enable the MAC clock, reset the chip, attach the
 * PHY and allocate the DMA rings.
 */
static int
lpe_attach(device_t dev)
{
	struct lpe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;
	int rid, i;
	uint32_t val;

	sc->lpe_dev = dev;
	sc->lpe_ofw = ofw_bus_get_node(dev);

	/* Take the station address from the FDT; fall back to a fixed one. */
	i = OF_getprop(sc->lpe_ofw, "local-mac-address", (void *)&sc->lpe_enaddr, 6);
	if (i != 6) {
		sc->lpe_enaddr[0] = 0x00;
		sc->lpe_enaddr[1] = 0x11;
		sc->lpe_enaddr[2] = 0x22;
		sc->lpe_enaddr[3] = 0x33;
		sc->lpe_enaddr[4] = 0x44;
		sc->lpe_enaddr[5] = 0x55;
	}

	mtx_init(&sc->lpe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/* The tick callout runs with lpe_mtx held. */
	callout_init_mtx(&sc->lpe_tick, &sc->lpe_mtx, 0);

	rid = 0;
	sc->lpe_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->lpe_mem_res) {
		device_printf(dev, "cannot allocate memory window\n");
		goto fail;
	}

	sc->lpe_bst = rman_get_bustag(sc->lpe_mem_res);
	sc->lpe_bsh = rman_get_bushandle(sc->lpe_mem_res);

	rid = 0;
	sc->lpe_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->lpe_irq_res) {
		device_printf(dev, "cannot allocate interrupt\n");
		goto fail;
	}

	sc->lpe_ifp = if_alloc(IFT_ETHER);
	if (!sc->lpe_ifp) {
		device_printf(dev, "cannot allocated ifnet\n");
		goto fail;
	}

	ifp = sc->lpe_ifp;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = lpe_start;
	ifp->if_ioctl = lpe_ioctl;
	ifp->if_init = lpe_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	ether_ifattach(ifp, sc->lpe_enaddr);

	/* NOTE(review): handler registered without INTR_MPSAFE -- confirm
	 * whether Giant protection is intended here. */
	if (bus_setup_intr(dev, sc->lpe_irq_res, INTR_TYPE_NET, NULL,
	    lpe_intr, sc, &sc->lpe_intrhand)) {
		device_printf(dev, "cannot establish interrupt handler\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Enable Ethernet clock */
	lpc_pwr_write(dev, LPC_CLKPWR_MACCLK_CTRL,
	    LPC_CLKPWR_MACCLK_CTRL_REG |
	    LPC_CLKPWR_MACCLK_CTRL_SLAVE |
	    LPC_CLKPWR_MACCLK_CTRL_MASTER |
	    LPC_CLKPWR_MACCLK_CTRL_HDWINF(3));

	/* Reset chip */
	lpe_reset(sc);

	/* Initialize MII: select the RMII interface to the PHY. */
	val = lpe_read_4(sc, LPE_COMMAND);
	lpe_write_4(sc, LPE_COMMAND, val | LPE_COMMAND_RMII);

	/* PHY address 0x01 is hard-wired here rather than probed. */
	if (mii_attach(dev, &sc->lpe_miibus, ifp, lpe_ifmedia_upd,
	    lpe_ifmedia_sts, BMSR_DEFCAPMASK, 0x01,
	    MII_OFFSET_ANY, 0)) {
		device_printf(dev, "cannot find PHY\n");
		goto fail;
	}

	/* NOTE(review): return value ignored -- a DMA allocation failure
	 * here leaves the device attached but unusable; confirm. */
	lpe_dma_alloc(sc);

	return (0);

fail:
	if (sc->lpe_ifp)
		if_free(sc->lpe_ifp);
	if (sc->lpe_intrhand)
		bus_teardown_intr(dev, sc->lpe_irq_res, sc->lpe_intrhand);
	if (sc->lpe_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lpe_irq_res);
	if (sc->lpe_mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lpe_mem_res);
	return (ENXIO);
}
310
311static int
312lpe_detach(device_t dev)
313{
314	struct lpe_softc *sc = device_get_softc(dev);
315
316	lpe_stop(sc);
317
318	if_free(sc->lpe_ifp);
319	bus_teardown_intr(dev, sc->lpe_irq_res, sc->lpe_intrhand);
320	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lpe_irq_res);
321	bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lpe_mem_res);
322
323	return (0);
324}
325
326static int
327lpe_miibus_readreg(device_t dev, int phy, int reg)
328{
329	struct lpe_softc *sc = device_get_softc(dev);
330	uint32_t val;
331	int result;
332
333	lpe_write_4(sc, LPE_MCMD, LPE_MCMD_READ);
334	lpe_write_4(sc, LPE_MADR,
335	    (reg & LPE_MADR_REGMASK) << LPE_MADR_REGSHIFT |
336	    (phy & LPE_MADR_PHYMASK) << LPE_MADR_PHYSHIFT);
337
338	val = lpe_read_4(sc, LPE_MIND);
339
340	/* Wait until request is completed */
341	while (val & LPE_MIND_BUSY) {
342		val = lpe_read_4(sc, LPE_MIND);
343		DELAY(10);
344	}
345
346	if (val & LPE_MIND_INVALID)
347		return (0);
348
349	lpe_write_4(sc, LPE_MCMD, 0);
350	result = (lpe_read_4(sc, LPE_MRDD) & LPE_MRDD_DATAMASK);
351	debugf("phy=%d reg=%d result=0x%04x\n", phy, reg, result);
352
353	return (result);
354}
355
356static int
357lpe_miibus_writereg(device_t dev, int phy, int reg, int data)
358{
359	struct lpe_softc *sc = device_get_softc(dev);
360	uint32_t val;
361
362	debugf("phy=%d reg=%d data=0x%04x\n", phy, reg, data);
363
364	lpe_write_4(sc, LPE_MCMD, LPE_MCMD_WRITE);
365	lpe_write_4(sc, LPE_MADR,
366	    (reg & LPE_MADR_REGMASK) << LPE_MADR_REGSHIFT |
367	    (phy & LPE_MADR_PHYMASK) << LPE_MADR_PHYSHIFT);
368
369	lpe_write_4(sc, LPE_MWTD, (data & LPE_MWTD_DATAMASK));
370
371	val = lpe_read_4(sc, LPE_MIND);
372
373	/* Wait until request is completed */
374	while (val & LPE_MIND_BUSY) {
375		val = lpe_read_4(sc, LPE_MIND);
376		DELAY(10);
377	}
378
379	return (0);
380}
381
382static void
383lpe_miibus_statchg(device_t dev)
384{
385	struct lpe_softc *sc = device_get_softc(dev);
386	struct mii_data *mii = device_get_softc(sc->lpe_miibus);
387
388	lpe_lock(sc);
389
390	if ((mii->mii_media_status & IFM_ACTIVE) &&
391	    (mii->mii_media_status & IFM_AVALID))
392		sc->lpe_flags |= LPE_FLAG_LINK;
393	else
394		sc->lpe_flags &= ~LPE_FLAG_LINK;
395
396	lpe_unlock(sc);
397}
398
399static void
400lpe_reset(struct lpe_softc *sc)
401{
402	uint32_t mac1;
403
404	/* Enter soft reset mode */
405	mac1 = lpe_read_4(sc, LPE_MAC1);
406	lpe_write_4(sc, LPE_MAC1, mac1 | LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
407	    LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX);
408
409	/* Reset registers, Tx path and Rx path */
410	lpe_write_4(sc, LPE_COMMAND, LPE_COMMAND_REGRESET |
411	    LPE_COMMAND_TXRESET | LPE_COMMAND_RXRESET);
412
413	/* Set station address */
414	lpe_write_4(sc, LPE_SA2, sc->lpe_enaddr[1] << 8 | sc->lpe_enaddr[0]);
415	lpe_write_4(sc, LPE_SA1, sc->lpe_enaddr[3] << 8 | sc->lpe_enaddr[2]);
416	lpe_write_4(sc, LPE_SA0, sc->lpe_enaddr[5] << 8 | sc->lpe_enaddr[4]);
417
418	/* Leave soft reset mode */
419	mac1 = lpe_read_4(sc, LPE_MAC1);
420	lpe_write_4(sc, LPE_MAC1, mac1 & ~(LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
421	    LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX));
422}
423
/* if_init entry point: take the driver lock and do the real work. */
static void
lpe_init(void *arg)
{
	struct lpe_softc *sc = arg;

	lpe_lock(sc);
	lpe_init_locked(sc);
	lpe_unlock(sc);
}
433
/*
 * Bring the interface up: enable the MAC and datapaths, program the Rx
 * filter, enable interrupts, set up the descriptor rings and start the
 * tick callout.  Caller holds lpe_mtx.
 */
static void
lpe_init_locked(struct lpe_softc *sc)
{
	struct ifnet *ifp = sc->lpe_ifp;
	uint32_t cmd, mac1;

	lpe_lock_assert(sc);

	/* Already running: nothing to do. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	/* Enable Tx and Rx */
	cmd = lpe_read_4(sc, LPE_COMMAND);
	lpe_write_4(sc, LPE_COMMAND, cmd | LPE_COMMAND_RXENABLE |
	    LPE_COMMAND_TXENABLE | LPE_COMMAND_PASSRUNTFRAME);

	/* Enable receive.  NOTE(review): the value read into mac1 is
	 * deliberately not OR-ed back in (see the commented-out term),
	 * so MAC1 is fully rewritten here -- confirm that is intended. */
	mac1 = lpe_read_4(sc, LPE_MAC1);
	lpe_write_4(sc, LPE_MAC1, /*mac1 |*/ LPE_MAC1_RXENABLE | LPE_MAC1_PASSALL);

	/* CRC generation and padding; full duplex operation. */
	lpe_write_4(sc, LPE_MAC2, LPE_MAC2_CRCENABLE | LPE_MAC2_PADCRCENABLE |
	    LPE_MAC2_FULLDUPLEX);

	/* MII management clock divider; divider index 7 -- see if_lpereg.h. */
	lpe_write_4(sc, LPE_MCFG, LPE_MCFG_CLKSEL(7));

	/* Set up Rx filter */
	lpe_set_rxmode(sc);

	/* Enable interrupts */
	lpe_write_4(sc, LPE_INTENABLE, LPE_INT_RXOVERRUN | LPE_INT_RXERROR |
	    LPE_INT_RXFINISH | LPE_INT_RXDONE | LPE_INT_TXUNDERRUN |
	    LPE_INT_TXERROR | LPE_INT_TXFINISH | LPE_INT_TXDONE);

	/* Reset Tx ring bookkeeping. */
	sc->lpe_cdata.lpe_tx_prod = 0;
	sc->lpe_cdata.lpe_tx_last = 0;
	sc->lpe_cdata.lpe_tx_used = 0;

	/* NOTE(review): return value ignored -- if mbuf clusters are
	 * exhausted some Rx slots stay unbuffered; confirm. */
	lpe_init_rx(sc);

	/* Initialize Rx packet and status descriptor heads */
	lpe_write_4(sc, LPE_RXDESC, sc->lpe_rdata.lpe_rx_ring_phys);
	lpe_write_4(sc, LPE_RXSTATUS, sc->lpe_rdata.lpe_rx_status_phys);
	lpe_write_4(sc, LPE_RXDESC_NUMBER, LPE_RXDESC_NUM - 1);
	lpe_write_4(sc, LPE_RXDESC_CONS, 0);

	/* Initialize Tx packet and status descriptor heads */
	lpe_write_4(sc, LPE_TXDESC, sc->lpe_rdata.lpe_tx_ring_phys);
	lpe_write_4(sc, LPE_TXSTATUS, sc->lpe_rdata.lpe_tx_status_phys);
	lpe_write_4(sc, LPE_TXDESC_NUMBER, LPE_TXDESC_NUM - 1);
	lpe_write_4(sc, LPE_TXDESC_PROD, 0);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Kick off the 1 Hz MII/watchdog callout. */
	callout_reset(&sc->lpe_tick, hz, lpe_tick, sc);
}
490
491static void
492lpe_start(struct ifnet *ifp)
493{
494	struct lpe_softc *sc = (struct lpe_softc *)ifp->if_softc;
495
496	lpe_lock(sc);
497	lpe_start_locked(ifp);
498	lpe_unlock(sc);
499}
500
501static void
502lpe_start_locked(struct ifnet *ifp)
503{
504	struct lpe_softc *sc = (struct lpe_softc *)ifp->if_softc;
505	struct mbuf *m_head;
506	int encap = 0;
507
508	lpe_lock_assert(sc);
509
510	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
511		if (lpe_read_4(sc, LPE_TXDESC_PROD) ==
512		    lpe_read_4(sc, LPE_TXDESC_CONS) - 5)
513			break;
514
515		/* Dequeue first packet */
516		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
517		if (!m_head)
518			break;
519
520		lpe_encap(sc, &m_head);
521
522		encap++;
523	}
524
525	/* Submit new descriptor list */
526	if (encap) {
527		lpe_write_4(sc, LPE_TXDESC_PROD, sc->lpe_cdata.lpe_tx_prod);
528		sc->lpe_watchdog_timer = 5;
529	}
530
531}
532
533static int
534lpe_encap(struct lpe_softc *sc, struct mbuf **m_head)
535{
536	struct lpe_txdesc *txd;
537	struct lpe_hwdesc *hwd;
538	bus_dma_segment_t segs[LPE_MAXFRAGS];
539	int i, err, nsegs, prod;
540
541	lpe_lock_assert(sc);
542	M_ASSERTPKTHDR((*m_head));
543
544	prod = sc->lpe_cdata.lpe_tx_prod;
545	txd = &sc->lpe_cdata.lpe_tx_desc[prod];
546
547	debugf("starting with prod=%d\n", prod);
548
549	err = bus_dmamap_load_mbuf_sg(sc->lpe_cdata.lpe_tx_buf_tag,
550	    txd->lpe_txdesc_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
551
552	if (err)
553		return (err);
554
555	if (nsegs == 0) {
556		m_freem(*m_head);
557		*m_head = NULL;
558		return (EIO);
559	}
560
561        bus_dmamap_sync(sc->lpe_cdata.lpe_tx_buf_tag, txd->lpe_txdesc_dmamap,
562          BUS_DMASYNC_PREREAD);
563        bus_dmamap_sync(sc->lpe_cdata.lpe_tx_ring_tag, sc->lpe_cdata.lpe_tx_ring_map,
564            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
565
566	txd->lpe_txdesc_first = 1;
567	txd->lpe_txdesc_mbuf = *m_head;
568
569	for (i = 0; i < nsegs; i++) {
570		hwd = &sc->lpe_rdata.lpe_tx_ring[prod];
571		hwd->lhr_data = segs[i].ds_addr;
572		hwd->lhr_control = segs[i].ds_len - 1;
573
574		if (i == nsegs - 1) {
575			hwd->lhr_control |= LPE_HWDESC_LASTFLAG;
576			hwd->lhr_control |= LPE_HWDESC_INTERRUPT;
577			hwd->lhr_control |= LPE_HWDESC_CRC;
578			hwd->lhr_control |= LPE_HWDESC_PAD;
579		}
580
581		LPE_INC(prod, LPE_TXDESC_NUM);
582	}
583
584	bus_dmamap_sync(sc->lpe_cdata.lpe_tx_ring_tag, sc->lpe_cdata.lpe_tx_ring_map,
585	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
586
587	sc->lpe_cdata.lpe_tx_used += nsegs;
588	sc->lpe_cdata.lpe_tx_prod = prod;
589
590	return (0);
591}
592
/* Stop the interface, taking the driver lock around the real work. */
static void
lpe_stop(struct lpe_softc *sc)
{

	lpe_lock(sc);
	lpe_stop_locked(sc);
	lpe_unlock(sc);
}
600
601static void
602lpe_stop_locked(struct lpe_softc *sc)
603{
604	lpe_lock_assert(sc);
605
606	callout_stop(&sc->lpe_tick);
607
608	/* Disable interrupts */
609	lpe_write_4(sc, LPE_INTCLEAR, 0xffffffff);
610
611	/* Stop EMAC */
612	lpe_write_4(sc, LPE_MAC1, 0);
613	lpe_write_4(sc, LPE_MAC2, 0);
614	lpe_write_4(sc, LPE_COMMAND, 0);
615
616	sc->lpe_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
617	sc->lpe_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
618}
619
620static int
621lpe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
622{
623	struct lpe_softc *sc = ifp->if_softc;
624	struct mii_data *mii = device_get_softc(sc->lpe_miibus);
625	struct ifreq *ifr = (struct ifreq *)data;
626	int err = 0;
627
628	switch (cmd) {
629	case SIOCSIFFLAGS:
630		lpe_lock(sc);
631		if (ifp->if_flags & IFF_UP) {
632			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
633				lpe_set_rxmode(sc);
634				lpe_set_rxfilter(sc);
635			} else
636				lpe_init_locked(sc);
637		} else
638			lpe_stop(sc);
639		lpe_unlock(sc);
640		break;
641	case SIOCADDMULTI:
642	case SIOCDELMULTI:
643		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
644			lpe_lock(sc);
645			lpe_set_rxfilter(sc);
646			lpe_unlock(sc);
647		}
648		break;
649	case SIOCGIFMEDIA:
650	case SIOCSIFMEDIA:
651		err = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
652		break;
653	default:
654		err = ether_ioctl(ifp, cmd, data);
655		break;
656	}
657
658	return (err);
659}
660
661static void lpe_set_rxmode(struct lpe_softc *sc)
662{
663	struct ifnet *ifp = sc->lpe_ifp;
664	uint32_t rxfilt;
665
666	rxfilt = LPE_RXFILTER_UNIHASH | LPE_RXFILTER_MULTIHASH | LPE_RXFILTER_PERFECT;
667
668	if (ifp->if_flags & IFF_BROADCAST)
669		rxfilt |= LPE_RXFILTER_BROADCAST;
670
671	if (ifp->if_flags & IFF_PROMISC)
672		rxfilt |= LPE_RXFILTER_UNICAST | LPE_RXFILTER_MULTICAST;
673
674	if (ifp->if_flags & IFF_ALLMULTI)
675		rxfilt |= LPE_RXFILTER_MULTICAST;
676
677	lpe_write_4(sc, LPE_RXFILTER_CTRL, rxfilt);
678}
679
680static void lpe_set_rxfilter(struct lpe_softc *sc)
681{
682	struct ifnet *ifp = sc->lpe_ifp;
683	struct ifmultiaddr *ifma;
684	int index;
685	uint32_t hashl, hashh;
686
687	hashl = 0;
688	hashh = 0;
689
690	if_maddr_rlock(ifp);
691	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
692		if (ifma->ifma_addr->sa_family != AF_LINK)
693			continue;
694
695		index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
696		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 23 & 0x3f;
697
698		if (index > 31)
699			hashh |= (1 << (index - 32));
700		else
701			hashl |= (1 << index);
702	}
703	if_maddr_runlock(ifp);
704
705	/* Program new hash filter */
706	lpe_write_4(sc, LPE_HASHFILTER_L, hashl);
707	lpe_write_4(sc, LPE_HASHFILTER_H, hashh);
708}
709
/*
 * Interrupt handler: service Rx/Tx completion work until the status
 * register reads back zero, acking handled sources each pass.
 */
static void
lpe_intr(void *arg)
{
	struct lpe_softc *sc = (struct lpe_softc *)arg;
	uint32_t intstatus;

	debugf("status=0x%08x\n", lpe_read_4(sc, LPE_INTSTATUS));

	lpe_lock(sc);

	while ((intstatus = lpe_read_4(sc, LPE_INTSTATUS))) {
		if (intstatus & LPE_INT_RXDONE)
			lpe_rxintr(sc);

		if (intstatus & LPE_INT_TXDONE)
			lpe_txintr(sc);

		/* NOTE(review): only the low 16 bits are acked here while
		 * lpe_stop_locked() writes 0xffffffff -- confirm which
		 * bits LPE_INTCLEAR implements. */
		lpe_write_4(sc, LPE_INTCLEAR, 0xffff);
	}

	lpe_unlock(sc);
}
732
/*
 * Reap received frames: walk the Rx ring from the consumer to the
 * hardware producer index, pass good frames up the stack and recycle
 * the descriptors.  Called with lpe_mtx held; the lock is dropped
 * around if_input() to avoid holding it into the network stack.
 */
static void
lpe_rxintr(struct lpe_softc *sc)
{
	struct ifnet *ifp = sc->lpe_ifp;
	struct lpe_hwdesc *hwd;
	struct lpe_hwstatus *hws;
	struct lpe_rxdesc *rxd;
	struct mbuf *m;
	int prod, cons;

	for (;;) {
		prod = lpe_read_4(sc, LPE_RXDESC_PROD);
		cons = lpe_read_4(sc, LPE_RXDESC_CONS);

		/* Ring empty: hardware has caught up with us. */
		if (prod == cons)
			break;

		rxd = &sc->lpe_cdata.lpe_rx_desc[cons];
		hwd = &sc->lpe_rdata.lpe_rx_ring[cons];
		hws = &sc->lpe_rdata.lpe_rx_status[cons];

		/* Check received frame for errors */
		if (hws->lhs_info & LPE_HWDESC_RXERRS) {
			ifp->if_ierrors++;
			lpe_discard_rxbuf(sc, cons);
			lpe_init_rxbuf(sc, cons);
			goto skip;
		}

		/* NOTE(review): the mbuf keeps the MCLBYTES length set in
		 * lpe_init_rxbuf(); the received frame length from the
		 * status word is never applied (no m_adj/len trim) and the
		 * +2 alignment offset is applied to m_data only -- confirm
		 * against the EMAC status layout. */
		m = rxd->lpe_rxdesc_mbuf;
		m->m_pkthdr.rcvif = ifp;
		m->m_data += 2;

		ifp->if_ipackets++;

		/* Hand the frame to the stack without the driver lock. */
		lpe_unlock(sc);
		(*ifp->if_input)(ifp, m);
		lpe_lock(sc);

		lpe_init_rxbuf(sc, cons);
skip:
		LPE_INC(cons, LPE_RXDESC_NUM);
		lpe_write_4(sc, LPE_RXDESC_CONS, cons);
	}
}
778
/*
 * Reap completed transmissions: walk from the last-reaped slot to the
 * hardware consumer index, account errors/collisions, and free the
 * frame mbuf at its head descriptor.  Called with lpe_mtx held.
 */
static void
lpe_txintr(struct lpe_softc *sc)
{
	struct ifnet *ifp = sc->lpe_ifp;
	struct lpe_hwdesc *hwd;
	struct lpe_hwstatus *hws;
	struct lpe_txdesc *txd;
	int cons, last;

	for (;;) {
		cons = lpe_read_4(sc, LPE_TXDESC_CONS);
		last = sc->lpe_cdata.lpe_tx_last;

		/* Caught up with the hardware: nothing left to reap. */
		if (cons == last)
			break;

		txd = &sc->lpe_cdata.lpe_tx_desc[last];
		hwd = &sc->lpe_rdata.lpe_tx_ring[last];
		hws = &sc->lpe_rdata.lpe_tx_status[last];

		bus_dmamap_sync(sc->lpe_cdata.lpe_tx_buf_tag,
		    txd->lpe_txdesc_dmamap, BUS_DMASYNC_POSTWRITE);

		ifp->if_collisions += LPE_HWDESC_COLLISIONS(hws->lhs_info);

		if (hws->lhs_info & LPE_HWDESC_TXERRS)
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;

		/* The mbuf and DMA map live on the frame's head descriptor. */
		if (txd->lpe_txdesc_first) {
			bus_dmamap_unload(sc->lpe_cdata.lpe_tx_buf_tag,
			    txd->lpe_txdesc_dmamap);

			m_freem(txd->lpe_txdesc_mbuf);
			txd->lpe_txdesc_mbuf = NULL;
			txd->lpe_txdesc_first = 0;
		}

		sc->lpe_cdata.lpe_tx_used--;
		LPE_INC(sc->lpe_cdata.lpe_tx_last, LPE_TXDESC_NUM);
	}

	/* Ring fully drained: disarm the Tx watchdog. */
	if (!sc->lpe_cdata.lpe_tx_used)
		sc->lpe_watchdog_timer = 0;
}
825
/*
 * Once-per-second callout: drive the MII state machine and the Tx
 * watchdog, then reschedule.  Runs with lpe_mtx held because the
 * callout was created with callout_init_mtx().
 */
static void
lpe_tick(void *arg)
{
	struct lpe_softc *sc = (struct lpe_softc *)arg;
	struct mii_data *mii = device_get_softc(sc->lpe_miibus);

	lpe_lock_assert(sc);

	mii_tick(mii);
	lpe_watchdog(sc);

	callout_reset(&sc->lpe_tick, hz, lpe_tick, sc);
}
839
840static void
841lpe_watchdog(struct lpe_softc *sc)
842{
843	struct ifnet *ifp = sc->lpe_ifp;
844
845	lpe_lock_assert(sc);
846
847	if (sc->lpe_watchdog_timer == 0 || sc->lpe_watchdog_timer--)
848		return;
849
850	/* Chip has stopped responding */
851	device_printf(sc->lpe_dev, "WARNING: chip hangup, restarting...\n");
852	lpe_stop_locked(sc);
853	lpe_init_locked(sc);
854
855	/* Try to resend packets */
856	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
857		lpe_start_locked(ifp);
858}
859
/*
 * Create the parent DMA tag and then the Rx and Tx ring/buffer
 * resources.  Returns 0 on success or a bus_dma error.
 */
static int
lpe_dma_alloc(struct lpe_softc *sc)
{
	int err;

	/* Create parent DMA tag: 32-bit addressable, no alignment demands;
	 * child tags narrow this as needed. */
	err = bus_dma_tag_create(
	    bus_get_dma_tag(sc->lpe_dev),
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->lpe_cdata.lpe_parent_tag);

	if (err) {
		device_printf(sc->lpe_dev, "cannot create parent DMA tag\n");
		return (err);
	}

	err = lpe_dma_alloc_rx(sc);
	if (err)
		return (err);

	err = lpe_dma_alloc_tx(sc);
	if (err)
		return (err);

	return (0);
}
892
893static int
894lpe_dma_alloc_rx(struct lpe_softc *sc)
895{
896	struct lpe_rxdesc *rxd;
897	struct lpe_dmamap_arg ctx;
898	int err, i;
899
900	/* Create tag for Rx ring */
901	err = bus_dma_tag_create(
902	    sc->lpe_cdata.lpe_parent_tag,
903	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
904	    BUS_SPACE_MAXADDR,		/* lowaddr */
905	    BUS_SPACE_MAXADDR,		/* highaddr */
906	    NULL, NULL,			/* filter, filterarg */
907	    LPE_RXDESC_SIZE, 1,		/* maxsize, nsegments */
908	    LPE_RXDESC_SIZE, 0,		/* maxsegsize, flags */
909	    NULL, NULL,			/* lockfunc, lockarg */
910	    &sc->lpe_cdata.lpe_rx_ring_tag);
911
912	if (err) {
913		device_printf(sc->lpe_dev, "cannot create Rx ring DMA tag\n");
914		goto fail;
915	}
916
917	/* Create tag for Rx status ring */
918	err = bus_dma_tag_create(
919	    sc->lpe_cdata.lpe_parent_tag,
920	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
921	    BUS_SPACE_MAXADDR,		/* lowaddr */
922	    BUS_SPACE_MAXADDR,		/* highaddr */
923	    NULL, NULL,			/* filter, filterarg */
924	    LPE_RXSTATUS_SIZE, 1,	/* maxsize, nsegments */
925	    LPE_RXSTATUS_SIZE, 0,	/* maxsegsize, flags */
926	    NULL, NULL,			/* lockfunc, lockarg */
927	    &sc->lpe_cdata.lpe_rx_status_tag);
928
929	if (err) {
930		device_printf(sc->lpe_dev, "cannot create Rx status ring DMA tag\n");
931		goto fail;
932	}
933
934	/* Create tag for Rx buffers */
935	err = bus_dma_tag_create(
936	    sc->lpe_cdata.lpe_parent_tag,
937	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
938	    BUS_SPACE_MAXADDR,		/* lowaddr */
939	    BUS_SPACE_MAXADDR,		/* highaddr */
940	    NULL, NULL,			/* filter, filterarg */
941	    MCLBYTES * LPE_RXDESC_NUM,	/* maxsize */
942	    LPE_RXDESC_NUM,		/* segments */
943	    MCLBYTES, 0,		/* maxsegsize, flags */
944	    NULL, NULL,			/* lockfunc, lockarg */
945	    &sc->lpe_cdata.lpe_rx_buf_tag);
946
947	if (err) {
948		device_printf(sc->lpe_dev, "cannot create Rx buffers DMA tag\n");
949		goto fail;
950	}
951
952	/* Allocate Rx DMA ring */
953	err = bus_dmamem_alloc(sc->lpe_cdata.lpe_rx_ring_tag,
954	    (void **)&sc->lpe_rdata.lpe_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
955	    BUS_DMA_ZERO, &sc->lpe_cdata.lpe_rx_ring_map);
956
957	err = bus_dmamap_load(sc->lpe_cdata.lpe_rx_ring_tag,
958	    sc->lpe_cdata.lpe_rx_ring_map, sc->lpe_rdata.lpe_rx_ring,
959	    LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
960
961	sc->lpe_rdata.lpe_rx_ring_phys = ctx.lpe_dma_busaddr;
962
963	/* Allocate Rx status ring */
964	err = bus_dmamem_alloc(sc->lpe_cdata.lpe_rx_status_tag,
965	    (void **)&sc->lpe_rdata.lpe_rx_status, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
966	    BUS_DMA_ZERO, &sc->lpe_cdata.lpe_rx_status_map);
967
968	err = bus_dmamap_load(sc->lpe_cdata.lpe_rx_status_tag,
969	    sc->lpe_cdata.lpe_rx_status_map, sc->lpe_rdata.lpe_rx_status,
970	    LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
971
972	sc->lpe_rdata.lpe_rx_status_phys = ctx.lpe_dma_busaddr;
973
974
975	/* Create Rx buffers DMA map */
976	for (i = 0; i < LPE_RXDESC_NUM; i++) {
977		rxd = &sc->lpe_cdata.lpe_rx_desc[i];
978		rxd->lpe_rxdesc_mbuf = NULL;
979		rxd->lpe_rxdesc_dmamap = NULL;
980
981		err = bus_dmamap_create(sc->lpe_cdata.lpe_rx_buf_tag, 0,
982		    &rxd->lpe_rxdesc_dmamap);
983
984		if (err) {
985			device_printf(sc->lpe_dev, "cannot create Rx DMA map\n");
986			return (err);
987		}
988	}
989
990	return (0);
991fail:
992	return (err);
993}
994
995static int
996lpe_dma_alloc_tx(struct lpe_softc *sc)
997{
998	struct lpe_txdesc *txd;
999	struct lpe_dmamap_arg ctx;
1000	int err, i;
1001
1002	/* Create tag for Tx ring */
1003	err = bus_dma_tag_create(
1004	    sc->lpe_cdata.lpe_parent_tag,
1005	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
1006	    BUS_SPACE_MAXADDR,		/* lowaddr */
1007	    BUS_SPACE_MAXADDR,		/* highaddr */
1008	    NULL, NULL,			/* filter, filterarg */
1009	    LPE_TXDESC_SIZE, 1,		/* maxsize, nsegments */
1010	    LPE_TXDESC_SIZE, 0,		/* maxsegsize, flags */
1011	    NULL, NULL,			/* lockfunc, lockarg */
1012	    &sc->lpe_cdata.lpe_tx_ring_tag);
1013
1014	if (err) {
1015		device_printf(sc->lpe_dev, "cannot create Tx ring DMA tag\n");
1016		goto fail;
1017	}
1018
1019	/* Create tag for Tx status ring */
1020	err = bus_dma_tag_create(
1021	    sc->lpe_cdata.lpe_parent_tag,
1022	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
1023	    BUS_SPACE_MAXADDR,		/* lowaddr */
1024	    BUS_SPACE_MAXADDR,		/* highaddr */
1025	    NULL, NULL,			/* filter, filterarg */
1026	    LPE_TXSTATUS_SIZE, 1,	/* maxsize, nsegments */
1027	    LPE_TXSTATUS_SIZE, 0,	/* maxsegsize, flags */
1028	    NULL, NULL,			/* lockfunc, lockarg */
1029	    &sc->lpe_cdata.lpe_tx_status_tag);
1030
1031	if (err) {
1032		device_printf(sc->lpe_dev, "cannot create Tx status ring DMA tag\n");
1033		goto fail;
1034	}
1035
1036	/* Create tag for Tx buffers */
1037	err = bus_dma_tag_create(
1038	    sc->lpe_cdata.lpe_parent_tag,
1039	    LPE_DESC_ALIGN, 0,		/* alignment, boundary */
1040	    BUS_SPACE_MAXADDR,		/* lowaddr */
1041	    BUS_SPACE_MAXADDR,		/* highaddr */
1042	    NULL, NULL,			/* filter, filterarg */
1043	    MCLBYTES * LPE_TXDESC_NUM,	/* maxsize */
1044	    LPE_TXDESC_NUM,		/* segments */
1045	    MCLBYTES, 0,		/* maxsegsize, flags */
1046	    NULL, NULL,			/* lockfunc, lockarg */
1047	    &sc->lpe_cdata.lpe_tx_buf_tag);
1048
1049	if (err) {
1050		device_printf(sc->lpe_dev, "cannot create Tx buffers DMA tag\n");
1051		goto fail;
1052	}
1053
1054	/* Allocate Tx DMA ring */
1055	err = bus_dmamem_alloc(sc->lpe_cdata.lpe_tx_ring_tag,
1056	    (void **)&sc->lpe_rdata.lpe_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1057	    BUS_DMA_ZERO, &sc->lpe_cdata.lpe_tx_ring_map);
1058
1059	err = bus_dmamap_load(sc->lpe_cdata.lpe_tx_ring_tag,
1060	    sc->lpe_cdata.lpe_tx_ring_map, sc->lpe_rdata.lpe_tx_ring,
1061	    LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
1062
1063	sc->lpe_rdata.lpe_tx_ring_phys = ctx.lpe_dma_busaddr;
1064
1065	/* Allocate Tx status ring */
1066	err = bus_dmamem_alloc(sc->lpe_cdata.lpe_tx_status_tag,
1067	    (void **)&sc->lpe_rdata.lpe_tx_status, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1068	    BUS_DMA_ZERO, &sc->lpe_cdata.lpe_tx_status_map);
1069
1070	err = bus_dmamap_load(sc->lpe_cdata.lpe_tx_status_tag,
1071	    sc->lpe_cdata.lpe_tx_status_map, sc->lpe_rdata.lpe_tx_status,
1072	    LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
1073
1074	sc->lpe_rdata.lpe_tx_status_phys = ctx.lpe_dma_busaddr;
1075
1076
1077	/* Create Tx buffers DMA map */
1078	for (i = 0; i < LPE_TXDESC_NUM; i++) {
1079		txd = &sc->lpe_cdata.lpe_tx_desc[i];
1080		txd->lpe_txdesc_mbuf = NULL;
1081		txd->lpe_txdesc_dmamap = NULL;
1082		txd->lpe_txdesc_first = 0;
1083
1084		err = bus_dmamap_create(sc->lpe_cdata.lpe_tx_buf_tag, 0,
1085		    &txd->lpe_txdesc_dmamap);
1086
1087		if (err) {
1088			device_printf(sc->lpe_dev, "cannot create Tx DMA map\n");
1089			return (err);
1090		}
1091	}
1092
1093	return (0);
1094fail:
1095	return (err);
1096}
1097
1098static int
1099lpe_init_rx(struct lpe_softc *sc)
1100{
1101	int i, err;
1102
1103	for (i = 0; i < LPE_RXDESC_NUM; i++) {
1104		err = lpe_init_rxbuf(sc, i);
1105		if (err)
1106			return (err);
1107	}
1108
1109	return (0);
1110}
1111
/*
 * Attach a new mbuf cluster to Rx descriptor slot n: allocate, map for
 * DMA and point the hardware descriptor at it.  Returns ENOBUFS when
 * clusters are exhausted or the map load fails.
 */
static int
lpe_init_rxbuf(struct lpe_softc *sc, int n)
{
	struct lpe_rxdesc *rxd;
	struct lpe_hwdesc *hwd;
	struct lpe_hwstatus *hws;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int nsegs;

	rxd = &sc->lpe_cdata.lpe_rx_desc[n];
	hwd = &sc->lpe_rdata.lpe_rx_ring[n];
	hws = &sc->lpe_rdata.lpe_rx_status[n];
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

	if (!m) {
		device_printf(sc->lpe_dev, "WARNING: mbufs exhausted!\n");
		return (ENOBUFS);
	}

	/* Offer the full cluster to the hardware. */
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Drop any previous mapping before loading the new cluster. */
	bus_dmamap_unload(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap);

	if (bus_dmamap_load_mbuf_sg(sc->lpe_cdata.lpe_rx_buf_tag,
	    rxd->lpe_rxdesc_dmamap, m, segs, &nsegs, 0)) {
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap,
	    BUS_DMASYNC_PREREAD);

	/* NOTE(review): the DMA address is offset by 2 (IP header
	 * alignment) but the control word still encodes the full segment
	 * length, so the device could write 2 bytes past the cluster --
	 * confirm against the EMAC descriptor format. */
	rxd->lpe_rxdesc_mbuf = m;
	hwd->lhr_data = segs[0].ds_addr + 2;
	hwd->lhr_control = (segs[0].ds_len - 1) | LPE_HWDESC_INTERRUPT;

	return (0);
}
1151
1152static void
1153lpe_discard_rxbuf(struct lpe_softc *sc, int n)
1154{
1155	struct lpe_rxdesc *rxd;
1156	struct lpe_hwdesc *hwd;
1157
1158	rxd = &sc->lpe_cdata.lpe_rx_desc[n];
1159	hwd = &sc->lpe_rdata.lpe_rx_ring[n];
1160
1161	bus_dmamap_unload(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap);
1162
1163	hwd->lhr_data = 0;
1164	hwd->lhr_control = 0;
1165
1166	if (rxd->lpe_rxdesc_mbuf) {
1167		m_freem(rxd->lpe_rxdesc_mbuf);
1168		rxd->lpe_rxdesc_mbuf = NULL;
1169	}
1170}
1171
1172static void
1173lpe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1174{
1175	struct lpe_dmamap_arg *ctx;
1176
1177	if (error)
1178		return;
1179
1180	ctx = (struct lpe_dmamap_arg *)arg;
1181	ctx->lpe_dma_busaddr = segs[0].ds_addr;
1182}
1183
/* Media-change callback: a no-op stub, always reports success. */
static int
lpe_ifmedia_upd(struct ifnet *ifp)
{
	return (0);
}
1189
1190static void
1191lpe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1192{
1193	struct lpe_softc *sc = ifp->if_softc;
1194	struct mii_data *mii = device_get_softc(sc->lpe_miibus);
1195
1196	lpe_lock(sc);
1197	mii_pollstat(mii);
1198	ifmr->ifm_active = mii->mii_media_active;
1199	ifmr->ifm_status = mii->mii_media_status;
1200	lpe_unlock(sc);
1201}
1202
/* newbus method table: device, bus and MII interfaces. */
static device_method_t lpe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		lpe_probe),
	DEVMETHOD(device_attach,	lpe_attach),
	DEVMETHOD(device_detach,	lpe_detach),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	lpe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	lpe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	lpe_miibus_statchg),
	{ 0, 0 }	/* terminator */
};
1218
/* Driver registration: attach under simplebus and host a miibus child. */
static driver_t lpe_driver = {
	"lpe",
	lpe_methods,
	sizeof(struct lpe_softc),
};

static devclass_t lpe_devclass;

DRIVER_MODULE(lpe, simplebus, lpe_driver, lpe_devclass, 0, 0);
DRIVER_MODULE(miibus, lpe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(lpe, obio, 1, 1, 1);
MODULE_DEPEND(lpe, miibus, 1, 1, 1);
MODULE_DEPEND(lpe, ether, 1, 1, 1);
1231MODULE_DEPEND(lpe, ether, 1, 1, 1);
1232