/*-
 * Copyright 2008 Nathan Whitehorn. All rights reserved.
 * Copyright 2003 by Peter Grehan. All rights reserved.
 * Copyright (C) 1998, 1999, 2000 Tsubai Masanari.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From:
 *   NetBSD: if_bm.c,v 1.9.2.1 2000/11/01 15:02:49 tv Exp
 */

/*
 * BMAC/BMAC+ Macio cell 10/100 ethernet driver
 * 	The low-cost, low-feature Apple variant of the Sun HME
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/bm/if_bm.c 271830 2014-09-18 21:05:59Z glebius $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <machine/pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>
#include <machine/dbdma.h>

MODULE_DEPEND(bm, ether, 1, 1, 1);
MODULE_DEPEND(bm, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include "if_bmreg.h"
#include "if_bmvar.h"
static int bm_probe		(device_t);
static int bm_attach		(device_t);
static int bm_detach		(device_t);
static int bm_shutdown		(device_t);

static void bm_start		(struct ifnet *);
static void bm_start_locked	(struct ifnet *);
static int bm_encap 		(struct bm_softc *sc, struct mbuf **m_head);
static int bm_ioctl		(struct ifnet *, u_long, caddr_t);
static void bm_init		(void *);
static void bm_init_locked	(struct bm_softc *sc);
static void bm_chip_setup	(struct bm_softc *sc);
static void bm_stop		(struct bm_softc *sc);
static void bm_setladrf		(struct bm_softc *sc);
static void bm_dummypacket	(struct bm_softc *sc);
static void bm_txintr		(void *xsc);
static void bm_rxintr		(void *xsc);

static int bm_add_rxbuf		(struct bm_softc *sc, int i);
static int bm_add_rxbuf_dma	(struct bm_softc *sc, int i);
static void bm_enable_interrupts (struct bm_softc *sc);
static void bm_disable_interrupts (struct bm_softc *sc);
static void bm_tick		(void *xsc);

static int bm_ifmedia_upd	(struct ifnet *);
static void bm_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static int bm_miibus_readreg	(device_t, int, int);
static int bm_miibus_writereg	(device_t, int, int, int);
static void bm_miibus_statchg	(device_t);

/*
 * MII bit-bang glue
 */
static uint32_t bm_mii_bitbang_read(device_t);
static void bm_mii_bitbang_write(device_t, uint32_t);

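/*
 * Description table consumed by the shared mii_bitbang module, which
 * implements the MDIO frame protocol in software.  The two callbacks
 * below only shuttle the bits named here in and out of BM_MII_CSR.
 */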
static const struct mii_bitbang_ops bm_mii_bitbang_ops = {
	bm_mii_bitbang_read,
	bm_mii_bitbang_write,
	{
		BM_MII_DATAOUT,	/* MII_BIT_MDO */
		BM_MII_DATAIN,	/* MII_BIT_MDI */
		BM_MII_CLK,	/* MII_BIT_MDC */
		BM_MII_OENABLE,	/* MII_BIT_DIR_HOST_PHY */
		0,		/* MII_BIT_DIR_PHY_HOST */
	}
};

static device_method_t bm_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bm_probe),
	DEVMETHOD(device_attach,	bm_attach),
	DEVMETHOD(device_detach,	bm_detach),
	DEVMETHOD(device_shutdown,	bm_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bm_miibus_readreg),
	DEVMETHOD(miibus_writereg,	bm_miibus_writereg),
	DEVMETHOD(miibus_statchg,	bm_miibus_statchg),

	DEVMETHOD_END
};

static driver_t bm_macio_driver = {
	"bm",
	bm_methods,
	sizeof(struct bm_softc)
};

static devclass_t bm_devclass;

DRIVER_MODULE(bm, macio, bm_macio_driver, bm_devclass, 0, 0);
DRIVER_MODULE(miibus, bm, miibus_driver, miibus_devclass, 0, 0);

/*
 * MII internal routines
 */

/*
 * Write the MII serial port for the MII bit-bang module.
 */
static void
bm_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct bm_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, BM_MII_CSR, val);
	CSR_BARRIER(sc, BM_MII_CSR, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

/*
 * Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
bm_mii_bitbang_read(device_t dev)
{
	struct bm_softc *sc;
	uint32_t reg;

	sc = device_get_softc(dev);

	reg = CSR_READ_2(sc, BM_MII_CSR);
	CSR_BARRIER(sc, BM_MII_CSR, 2,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (reg);
}

/*
 * MII bus i/f
 */
static int
bm_miibus_readreg(device_t dev, int phy, int reg)
{

	return (mii_bitbang_readreg(dev, &bm_mii_bitbang_ops, phy, reg));
}

static int
bm_miibus_writereg(device_t dev, int phy, int reg, int data)
{

	mii_bitbang_writereg(dev, &bm_mii_bitbang_ops, phy, reg, data);

	return (0);
}

static void
bm_miibus_statchg(device_t dev)
{
	struct bm_softc *sc = device_get_softc(dev);
	uint16_t reg;
	int new_duplex;

	reg = CSR_READ_2(sc, BM_TX_CONFIG);
	new_duplex = IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX;

	if (new_duplex != sc->sc_duplex) {
		/* Turn off TX MAC while we fiddle its settings */
		reg &= ~BM_ENABLE;

		CSR_WRITE_2(sc, BM_TX_CONFIG, reg);
		while (CSR_READ_2(sc, BM_TX_CONFIG) & BM_ENABLE)
			DELAY(10);
	}

	if (new_duplex && !sc->sc_duplex)
		reg |= BM_TX_IGNORECOLL | BM_TX_FULLDPX;
	else if (!new_duplex && sc->sc_duplex)
		reg &= ~(BM_TX_IGNORECOLL | BM_TX_FULLDPX);

	if (new_duplex != sc->sc_duplex) {
		/* Turn TX MAC back on */
		reg |= BM_ENABLE;

		CSR_WRITE_2(sc, BM_TX_CONFIG, reg);
		sc->sc_duplex = new_duplex;
	}
}

/*
 * ifmedia/mii callbacks
 */
static int
bm_ifmedia_upd(struct ifnet *ifp)
{
	struct bm_softc *sc = ifp->if_softc;
	int error;

	BM_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	BM_UNLOCK(sc);
	return (error);
}

static void
bm_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifm)
{
	struct bm_softc *sc = ifp->if_softc;

	BM_LOCK(sc);
	mii_pollstat(sc->sc_mii);
	ifm->ifm_active = sc->sc_mii->mii_media_active;
	ifm->ifm_status = sc->sc_mii->mii_media_status;
	BM_UNLOCK(sc);
}

/*
 * Macio probe/attach
 */
static int
bm_probe(device_t dev)
{
	const char *dname = ofw_bus_get_name(dev);
	const char *dcompat = ofw_bus_get_compat(dev);

	/*
	 * BMAC+ cells have a name of "ethernet" and
	 * a compatible property of "bmac+"
	 */
	if (strcmp(dname, "bmac") == 0) {
		device_set_desc(dev, "Apple BMAC Ethernet Adaptor");
	} else if (strcmp(dcompat, "bmac+") == 0) {
		device_set_desc(dev, "Apple BMAC+ Ethernet Adaptor");
	} else
		return (ENXIO);

	return (0);
}

static int
bm_attach(device_t dev)
{
	phandle_t node;
	u_char *eaddr;
	struct ifnet *ifp;
	int error, cellid, i;
	struct bm_txsoft *txs;
	struct bm_softc *sc = device_get_softc(dev);

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = sc;
	sc->sc_dev = dev;
	sc->sc_duplex = ~IFM_FDX;

	error = 0;
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);

	/* Check for an improved version of Paddington */
	sc->sc_streaming = 0;
	cellid = -1;
	node = ofw_bus_get_node(dev);

	OF_getprop(node, "cell-id", &cellid, sizeof(cellid));
	if (cellid >= 0xc4)
		sc->sc_streaming = 1;

	sc->sc_memrid = 0;
	sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_memrid, RF_ACTIVE);
	if (sc->sc_memr == NULL) {
		device_printf(dev, "Could not alloc chip registers!\n");
		return (ENXIO);
	}

	sc->sc_txdmarid = BM_TXDMA_REGISTERS;
	sc->sc_rxdmarid = BM_RXDMA_REGISTERS;

	sc->sc_txdmar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_txdmarid, RF_ACTIVE);
	sc->sc_rxdmar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_rxdmarid, RF_ACTIVE);

	if (sc->sc_txdmar == NULL || sc->sc_rxdmar == NULL) {
		device_printf(dev, "Could not map DBDMA registers!\n");
		return (ENXIO);
	}

	error = dbdma_allocate_channel(sc->sc_txdmar, 0, bus_get_dma_tag(dev),
	    BM_MAX_DMA_COMMANDS, &sc->sc_txdma);
	error += dbdma_allocate_channel(sc->sc_rxdmar, 0, bus_get_dma_tag(dev),
	    BM_MAX_DMA_COMMANDS, &sc->sc_rxdma);

	if (error) {
		device_printf(dev,"Could not allocate DBDMA channel!\n");
		return (ENXIO);
	}

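	/*
	 * DMA tag hierarchy: sc_pdma_tag is the parent tag spanning the
	 * 32-bit address space, sc_rdma_tag maps single-segment,
	 * cluster-sized RX buffers, and sc_tdma_tag allows up to
	 * BM_NTXSEGS segments per packet so that an mbuf chain can map
	 * directly onto consecutive DBDMA OUTPUT commands.
	 */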
	/* alloc DMA tags and buffers */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,
	    NULL, &sc->sc_pdma_tag);

	if (error) {
		device_printf(dev,"Could not allocate DMA tag!\n");
		return (ENXIO);
	}

	error = bus_dma_tag_create(sc->sc_pdma_tag, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdma_tag);

	if (error) {
		device_printf(dev,"Could not allocate RX DMA tag!\n");
		return (ENXIO);
	}

	error = bus_dma_tag_create(sc->sc_pdma_tag, 1, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * BM_NTXSEGS, BM_NTXSEGS,
	    MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdma_tag);

	if (error) {
		device_printf(dev,"Could not allocate TX DMA tag!\n");
		return (ENXIO);
	}

	/* init transmit descriptors */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/* create TX DMA maps */
	error = ENOMEM;
	for (i = 0; i < BM_MAX_TX_PACKETS; i++) {
		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		error = bus_dmamap_create(sc->sc_tdma_tag, 0, &txs->txs_dmamap);
		if (error) {
			device_printf(sc->sc_dev,
			    "unable to create TX DMA map %d, error = %d\n",
			    i, error);
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < BM_MAX_RX_PACKETS; i++) {
		error = bus_dmamap_create(sc->sc_rdma_tag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (error) {
			device_printf(sc->sc_dev,
			    "unable to create RX DMA map %d, error = %d\n",
			    i, error);
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* alloc interrupt */
	bm_disable_interrupts(sc);

	sc->sc_txdmairqid = BM_TXDMA_INTERRUPT;
	sc->sc_txdmairq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_txdmairqid, RF_ACTIVE);

	if (sc->sc_txdmairq == NULL) {
		device_printf(dev,"Could not allocate TX interrupt!\n");
		return (ENXIO);
	}

	bus_setup_intr(dev,sc->sc_txdmairq,
	    INTR_TYPE_MISC | INTR_MPSAFE | INTR_ENTROPY, NULL, bm_txintr, sc,
	    &sc->sc_txihtx);

	sc->sc_rxdmairqid = BM_RXDMA_INTERRUPT;
	sc->sc_rxdmairq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_rxdmairqid, RF_ACTIVE);

	if (sc->sc_rxdmairq == NULL) {
		device_printf(dev,"Could not allocate RX interrupt!\n");
		return (ENXIO);
	}

	bus_setup_intr(dev,sc->sc_rxdmairq,
	    INTR_TYPE_MISC | INTR_MPSAFE | INTR_ENTROPY, NULL, bm_rxintr, sc,
	    &sc->sc_rxih);

	/*
	 * Get the ethernet address from OpenFirmware
	 */
	eaddr = sc->sc_enaddr;
	OF_getprop(node, "local-mac-address", eaddr, ETHER_ADDR_LEN);

	/*
	 * Setup MII
	 * On Apple BMAC controllers, we end up in a weird state of
	 * partially-completed autonegotiation on boot.  So we force
	 * autonegotiation to try again.
	 */
	error = mii_attach(dev, &sc->sc_miibus, ifp, bm_ifmedia_upd,
	    bm_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_FORCEANEG);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		return (error);
	}

	/* reset the adapter */
	bm_chip_setup(sc);

	sc->sc_mii = device_get_softc(sc->sc_miibus);

	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = bm_start;
	ifp->if_ioctl = bm_ioctl;
	ifp->if_init = bm_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, BM_MAX_TX_PACKETS);
	ifp->if_snd.ifq_drv_maxlen = BM_MAX_TX_PACKETS;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);
	ifp->if_hwassist = 0;

	return (0);
}

static int
bm_detach(device_t dev)
{
	struct bm_softc *sc = device_get_softc(dev);

	BM_LOCK(sc);
	bm_stop(sc);
	BM_UNLOCK(sc);

	callout_drain(&sc->sc_tick_ch);
	ether_ifdetach(sc->sc_ifp);
	bus_teardown_intr(dev, sc->sc_txdmairq, sc->sc_txihtx);
	bus_teardown_intr(dev, sc->sc_rxdmairq, sc->sc_rxih);

	dbdma_free_channel(sc->sc_txdma);
	dbdma_free_channel(sc->sc_rxdma);

	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_memrid, sc->sc_memr);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_txdmarid,
	    sc->sc_txdmar);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rxdmarid,
	    sc->sc_rxdmar);

	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_txdmairqid,
	    sc->sc_txdmairq);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rxdmairqid,
	    sc->sc_rxdmairq);

	mtx_destroy(&sc->sc_mtx);
	if_free(sc->sc_ifp);

	return (0);
}

static int
bm_shutdown(device_t dev)
{
	struct bm_softc *sc;

	sc = device_get_softc(dev);

	BM_LOCK(sc);
	bm_stop(sc);
	BM_UNLOCK(sc);

	return (0);
}

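/*
 * Send a self-addressed, minimum-length 802.3 frame.  bm_init_locked()
 * queues one of these right after enabling the MACs; the receiver
 * appears to stay wedged after a reset until the chip has handled at
 * least one frame, so this kick-starts it.
 */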
static void
bm_dummypacket(struct bm_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;

	ifp = sc->sc_ifp;

	MGETHDR(m, M_NOWAIT, MT_DATA);

	if (m == NULL)
		return;

	bcopy(sc->sc_enaddr,
	    mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN);
	bcopy(sc->sc_enaddr,
	    mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN);
	mtod(m, struct ether_header *)->ether_type = htons(3);
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IF_ENQUEUE(&ifp->if_snd, m);
	bm_start_locked(ifp);
}

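/*
 * RX interrupt handler.  The receive ring is a circular DBDMA program
 * of INPUT_LAST commands closed by a branch back to slot 0, with one
 * STOP command acting as the terminator.  Each pass harvests completed
 * slots, then moves the STOP forward and re-arms the slot that held it,
 * so the channel can never lap the driver.
 */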
static void
bm_rxintr(void *xsc)
{
	struct bm_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;
	int i, prev_stop, new_stop;
	uint16_t status;

	BM_LOCK(sc);

	status = dbdma_get_chan_status(sc->sc_rxdma);
	if (status & DBDMA_STATUS_DEAD) {
		dbdma_reset(sc->sc_rxdma);
		BM_UNLOCK(sc);
		return;
	}
	if (!(status & DBDMA_STATUS_RUN)) {
		device_printf(sc->sc_dev,"Bad RX Interrupt!\n");
		BM_UNLOCK(sc);
		return;
	}

	prev_stop = sc->next_rxdma_slot - 1;
	if (prev_stop < 0)
		prev_stop = sc->rxdma_loop_slot - 1;

	if (prev_stop < 0) {
		BM_UNLOCK(sc);
		return;
	}

	new_stop = -1;
	dbdma_sync_commands(sc->sc_rxdma, BUS_DMASYNC_POSTREAD);

	for (i = sc->next_rxdma_slot; i < BM_MAX_RX_PACKETS; i++) {
		if (i == sc->rxdma_loop_slot)
			i = 0;

		if (i == prev_stop)
			break;

		status = dbdma_get_cmd_status(sc->sc_rxdma, i);

		/* A zero status means this command has not completed yet. */
		if (status == 0)
			break;

		m = sc->sc_rxsoft[i].rxs_mbuf;

		if (bm_add_rxbuf(sc, i)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			m = NULL;
			continue;
		}

		if (m == NULL)
			continue;

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		m->m_pkthdr.rcvif = ifp;
		m->m_len -= (dbdma_get_residuals(sc->sc_rxdma, i) + 2);
		m->m_pkthdr.len = m->m_len;

		/* Send up the stack */
		BM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		BM_LOCK(sc);

		/* Clear all fields on this command */
		bm_add_rxbuf_dma(sc, i);

		new_stop = i;
	}

	/*
	 * Change the last packet we processed to the ring buffer terminator,
	 * and restore a receive buffer to the old terminator.
	 */
	if (new_stop >= 0) {
		dbdma_insert_stop(sc->sc_rxdma, new_stop);
		bm_add_rxbuf_dma(sc, prev_stop);
		if (i < sc->rxdma_loop_slot)
			sc->next_rxdma_slot = i;
		else
			sc->next_rxdma_slot = 0;
	}
	dbdma_sync_commands(sc->sc_rxdma, BUS_DMASYNC_PREWRITE);

	dbdma_wake(sc->sc_rxdma);

	BM_UNLOCK(sc);
}

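/*
 * TX completion interrupt.  A packet is finished once the DBDMA status
 * of its last OUTPUT command goes non-zero; its map is then unloaded
 * and its descriptor returned to the free list.
 */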
static void
bm_txintr(void *xsc)
{
	struct bm_softc *sc = xsc;
	struct ifnet *ifp = sc->sc_ifp;
	struct bm_txsoft *txs;
	int progress = 0;

	BM_LOCK(sc);

	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		if (!dbdma_get_cmd_status(sc->sc_txdma, txs->txs_lastdesc))
			break;

		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		bus_dmamap_unload(sc->sc_tdma_tag, txs->txs_dmamap);

		if (txs->txs_mbuf != NULL) {
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}

		/*
		 * Set the first used TXDMA slot to the location of the
		 * STOP/NOP command associated with this packet.
		 */
		sc->first_used_txdma_slot = txs->txs_stopdesc;

		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		progress = 1;
	}

	if (progress) {
		/*
		 * We freed some descriptors, so reset IFF_DRV_OACTIVE
		 * and restart.
		 */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;

		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			bm_start_locked(ifp);
	}

	BM_UNLOCK(sc);
}

static void
bm_start(struct ifnet *ifp)
{
	struct bm_softc *sc = ifp->if_softc;

	BM_LOCK(sc);
	bm_start_locked(ifp);
	BM_UNLOCK(sc);
}

static void
bm_start_locked(struct ifnet *ifp)
{
	struct bm_softc *sc = ifp->if_softc;
	struct mbuf *mb_head;
	int prev_stop;
	int txqueued = 0;

	/*
	 * We lay out our DBDMA program in the following manner:
	 *	OUTPUT_MORE
	 *	...
	 *	OUTPUT_LAST (+ Interrupt)
	 *	STOP
	 *
	 * To extend the channel, we append a new program,
	 * then replace STOP with NOP and wake the channel.
	 * If we stalled on the STOP already, the program proceeds,
	 * if not it will sail through the NOP.
	 */

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);

		if (mb_head == NULL)
			break;

		prev_stop = sc->next_txdma_slot - 1;

		if (bm_encap(sc, &mb_head)) {
			/* Put the packet back and stop */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, mb_head);
			break;
		}

		dbdma_insert_nop(sc->sc_txdma, prev_stop);

		txqueued = 1;

		BPF_MTAP(ifp, mb_head);
	}

	dbdma_sync_commands(sc->sc_txdma, BUS_DMASYNC_PREWRITE);

	if (txqueued) {
		dbdma_wake(sc->sc_txdma);
		sc->sc_wdog_timer = 5;
	}
}

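/*
 * Map a packet onto the TX DBDMA ring.  Each packet occupies one DBDMA
 * command per DMA segment plus a trailing STOP; a three-segment chain
 * starting at slot n looks like:
 *
 *	n:	OUTPUT_MORE
 *	n+1:	OUTPUT_MORE
 *	n+2:	OUTPUT_LAST (interrupts on completion)
 *	n+3:	STOP (rewritten to a NOP when the next packet is queued)
 *
 * The free-slot arithmetic below keeps slots in reserve for the
 * terminator and for the wrap-around branch.
 */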
static int
bm_encap(struct bm_softc *sc, struct mbuf **m_head)
{
	bus_dma_segment_t segs[BM_NTXSEGS];
	struct bm_txsoft *txs;
	struct mbuf *m;
	int nsegs = BM_NTXSEGS;
	int error = 0;
	uint8_t branch_type;
	int i;

	/* Limit the command size to the number of free DBDMA slots */
	if (sc->next_txdma_slot >= sc->first_used_txdma_slot)
		nsegs = BM_MAX_DMA_COMMANDS - 2 - sc->next_txdma_slot +
		    sc->first_used_txdma_slot;  /* -2 for branch and indexing */
	else
		nsegs = sc->first_used_txdma_slot - sc->next_txdma_slot;

	/* Remove one slot for the STOP/NOP terminator */
	nsegs--;

	if (nsegs > BM_NTXSEGS)
		nsegs = BM_NTXSEGS;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (ENOBUFS);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_tdma_tag, txs->txs_dmamap,
	    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, nsegs);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;

		error = bus_dmamap_load_mbuf_sg(sc->sc_tdma_tag,
		    txs->txs_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	txs->txs_ndescs = nsegs;
	txs->txs_firstdesc = sc->next_txdma_slot;

	for (i = 0; i < nsegs; i++) {
		/* Loop back to the beginning if this is our last slot */
		if (sc->next_txdma_slot == (BM_MAX_DMA_COMMANDS - 1))
			branch_type = DBDMA_ALWAYS;
		else
			branch_type = DBDMA_NEVER;

		if (i + 1 == nsegs)
			txs->txs_lastdesc = sc->next_txdma_slot;

		dbdma_insert_command(sc->sc_txdma, sc->next_txdma_slot++,
		    (i + 1 < nsegs) ? DBDMA_OUTPUT_MORE : DBDMA_OUTPUT_LAST,
		    0, segs[i].ds_addr, segs[i].ds_len,
		    (i + 1 < nsegs) ? DBDMA_NEVER : DBDMA_ALWAYS,
		    branch_type, DBDMA_NEVER, 0);

		if (branch_type == DBDMA_ALWAYS)
			sc->next_txdma_slot = 0;
	}

	/*
	 * We have a corner case where the STOP command is the last slot,
	 * but you can't branch in STOP commands. So add a NOP branch here
	 * and the STOP in slot 0.
	 */
	if (sc->next_txdma_slot == (BM_MAX_DMA_COMMANDS - 1)) {
		dbdma_insert_branch(sc->sc_txdma, sc->next_txdma_slot, 0);
		sc->next_txdma_slot = 0;
	}

	txs->txs_stopdesc = sc->next_txdma_slot;
	dbdma_insert_stop(sc->sc_txdma, sc->next_txdma_slot++);

	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
	txs->txs_mbuf = *m_head;

	return (0);
}

static int
bm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error;

	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		BM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			   ((ifp->if_flags ^ sc->sc_ifpflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				bm_setladrf(sc);
			else
				bm_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			bm_stop(sc);
		sc->sc_ifpflags = ifp->if_flags;
		BM_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		BM_LOCK(sc);
		bm_setladrf(sc);
		BM_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

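/*
 * Program the 64-bit logical address (multicast hash) filter.  The
 * filter is spread across four 16-bit registers: a group address sets
 * bit (crc & 0xf) of word (crc >> 4), where crc is the upper 6 bits of
 * the CRC-32 of the address.  A CRC of 0x2b, for example, sets bit 11
 * of BM_HASHTAB2.
 */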
static void
bm_setladrf(struct bm_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	uint16_t hash[4];
	uint16_t reg;
	uint32_t crc;

	reg = BM_CRC_ENABLE | BM_REJECT_OWN_PKTS;

	/* Turn off RX MAC while we fiddle its settings */
	CSR_WRITE_2(sc, BM_RX_CONFIG, reg);
	while (CSR_READ_2(sc, BM_RX_CONFIG) & BM_ENABLE)
		DELAY(10);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		reg |= BM_PROMISC;

		CSR_WRITE_2(sc, BM_RX_CONFIG, reg);

		DELAY(15);

		reg = CSR_READ_2(sc, BM_RX_CONFIG);
		reg |= BM_ENABLE;
		CSR_WRITE_2(sc, BM_RX_CONFIG, reg);
		return;
	}

	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
	} else {
		/* Clear the hash table. */
		memset(hash, 0, sizeof(hash));

		if_maddr_rlock(ifp);
		TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
			if (inm->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
			    inm->ifma_addr), ETHER_ADDR_LEN);

			/* We just want the 6 most significant bits */
			crc >>= 26;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (crc & 0xf);
		}
		if_maddr_runlock(ifp);
	}

	/* Write out new hash table */
	CSR_WRITE_2(sc, BM_HASHTAB0, hash[0]);
	CSR_WRITE_2(sc, BM_HASHTAB1, hash[1]);
	CSR_WRITE_2(sc, BM_HASHTAB2, hash[2]);
	CSR_WRITE_2(sc, BM_HASHTAB3, hash[3]);

	/* And turn the RX MAC back on, this time with the hash bit set */
	reg |= BM_HASH_FILTER_ENABLE;
	CSR_WRITE_2(sc, BM_RX_CONFIG, reg);

	while (!(CSR_READ_2(sc, BM_RX_CONFIG) & BM_HASH_FILTER_ENABLE))
		DELAY(10);

	reg = CSR_READ_2(sc, BM_RX_CONFIG);
	reg |= BM_ENABLE;
	CSR_WRITE_2(sc, BM_RX_CONFIG, reg);
}

static void
bm_init(void *xsc)
{
	struct bm_softc *sc = xsc;

	BM_LOCK(sc);
	bm_init_locked(sc);
	BM_UNLOCK(sc);
}

static void
bm_chip_setup(struct bm_softc *sc)
{
	uint16_t reg;
	uint16_t *eaddr_sect;

	eaddr_sect = (uint16_t *)(sc->sc_enaddr);
	dbdma_stop(sc->sc_txdma);
	dbdma_stop(sc->sc_rxdma);

	/* Reset chip */
	CSR_WRITE_2(sc, BM_RX_RESET, 0x0000);
	CSR_WRITE_2(sc, BM_TX_RESET, 0x0001);
	do {
		DELAY(10);
		reg = CSR_READ_2(sc, BM_TX_RESET);
	} while (reg & 0x0001);

	/*
	 * Seed the TX backoff generator with some random junk. OS X uses
	 * the system time; we use the low 16 bits of the MAC address.
	 */
	CSR_WRITE_2(sc, BM_TX_RANDSEED, eaddr_sect[2]);

	/* Enable transmit */
	reg = CSR_READ_2(sc, BM_TX_IFC);
	reg |= BM_ENABLE;
	CSR_WRITE_2(sc, BM_TX_IFC, reg);

	CSR_READ_2(sc, BM_TX_PEAKCNT);
}

static void
bm_stop(struct bm_softc *sc)
{
	struct bm_txsoft *txs;
	uint16_t reg;

	/* Disable TX and RX MACs */
	reg = CSR_READ_2(sc, BM_TX_CONFIG);
	reg &= ~BM_ENABLE;
	CSR_WRITE_2(sc, BM_TX_CONFIG, reg);

	reg = CSR_READ_2(sc, BM_RX_CONFIG);
	reg &= ~BM_ENABLE;
	CSR_WRITE_2(sc, BM_RX_CONFIG, reg);

	DELAY(100);

	/* Stop DMA engine */
	dbdma_stop(sc->sc_rxdma);
	dbdma_stop(sc->sc_txdma);
	sc->next_rxdma_slot = 0;
	sc->rxdma_loop_slot = 0;

	/* Disable interrupts */
	bm_disable_interrupts(sc);

	/* Don't worry about pending transmits anymore */
	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
		if (txs->txs_ndescs != 0) {
			bus_dmamap_sync(sc->sc_tdma_tag, txs->txs_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdma_tag, txs->txs_dmamap);
			if (txs->txs_mbuf != NULL) {
				m_freem(txs->txs_mbuf);
				txs->txs_mbuf = NULL;
			}
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/* And we're down */
	sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_wdog_timer = 0;
	callout_stop(&sc->sc_tick_ch);
}

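/*
 * Bring the interface up.  The RX DBDMA program is rebuilt from
 * scratch: one INPUT command per receive buffer, a STOP terminator in
 * the last buffer slot, and a branch back to slot 0 to close the ring.
 * Slot 0 itself is armed last, after the terminator is in place.
 */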
static void
bm_init_locked(struct bm_softc *sc)
{
	uint16_t reg;
	uint16_t *eaddr_sect;
	struct bm_rxsoft *rxs;
	int i;

	eaddr_sect = (uint16_t *)(sc->sc_enaddr);

	/* Zero RX slot info and stop DMA */
	dbdma_stop(sc->sc_rxdma);
	dbdma_stop(sc->sc_txdma);
	sc->next_rxdma_slot = 0;
	sc->rxdma_loop_slot = 0;

	/* Initialize TX/RX DBDMA programs */
	dbdma_insert_stop(sc->sc_rxdma, 0);
	dbdma_insert_stop(sc->sc_txdma, 0);
	dbdma_set_current_cmd(sc->sc_rxdma, 0);
	dbdma_set_current_cmd(sc->sc_txdma, 0);

	sc->next_rxdma_slot = 0;
	sc->next_txdma_slot = 1;
	sc->first_used_txdma_slot = 0;

	for (i = 0; i < BM_MAX_RX_PACKETS; i++) {
		rxs = &sc->sc_rxsoft[i];
		rxs->dbdma_slot = i;

		if (rxs->rxs_mbuf == NULL) {
			bm_add_rxbuf(sc, i);

			if (rxs->rxs_mbuf == NULL) {
				/* If we can't add any more, mark the problem */
				rxs->dbdma_slot = -1;
				break;
			}
		}

		if (i > 0)
			bm_add_rxbuf_dma(sc, i);
	}

	/*
	 * Now terminate the RX ring buffer, and follow with the loop to
	 * the beginning.
	 */
	dbdma_insert_stop(sc->sc_rxdma, i - 1);
	dbdma_insert_branch(sc->sc_rxdma, i, 0);
	sc->rxdma_loop_slot = i;

	/* Now add in the first element of the RX DMA chain */
	bm_add_rxbuf_dma(sc, 0);

	dbdma_sync_commands(sc->sc_rxdma, BUS_DMASYNC_PREWRITE);
	dbdma_sync_commands(sc->sc_txdma, BUS_DMASYNC_PREWRITE);

	/* Zero collision counters */
	CSR_WRITE_2(sc, BM_TX_NCCNT, 0);
	CSR_WRITE_2(sc, BM_TX_FCCNT, 0);
	CSR_WRITE_2(sc, BM_TX_EXCNT, 0);
	CSR_WRITE_2(sc, BM_TX_LTCNT, 0);

	/* Zero receive counters */
	CSR_WRITE_2(sc, BM_RX_FRCNT, 0);
	CSR_WRITE_2(sc, BM_RX_LECNT, 0);
	CSR_WRITE_2(sc, BM_RX_AECNT, 0);
	CSR_WRITE_2(sc, BM_RX_FECNT, 0);
	CSR_WRITE_2(sc, BM_RXCV, 0);

	/* Prime transmit */
	CSR_WRITE_2(sc, BM_TX_THRESH, 0xff);

	CSR_WRITE_2(sc, BM_TXFIFO_CSR, 0);
	CSR_WRITE_2(sc, BM_TXFIFO_CSR, 0x0001);

	/* Prime receive */
	CSR_WRITE_2(sc, BM_RXFIFO_CSR, 0);
	CSR_WRITE_2(sc, BM_RXFIFO_CSR, 0x0001);

	/* Clear status reg */
	CSR_READ_2(sc, BM_STATUS);

	/* Zero hash filters */
	CSR_WRITE_2(sc, BM_HASHTAB0, 0);
	CSR_WRITE_2(sc, BM_HASHTAB1, 0);
	CSR_WRITE_2(sc, BM_HASHTAB2, 0);
	CSR_WRITE_2(sc, BM_HASHTAB3, 0);

	/* Write MAC address to chip */
	CSR_WRITE_2(sc, BM_MACADDR0, eaddr_sect[0]);
	CSR_WRITE_2(sc, BM_MACADDR1, eaddr_sect[1]);
	CSR_WRITE_2(sc, BM_MACADDR2, eaddr_sect[2]);

	/* Final receive engine setup */
	reg = BM_CRC_ENABLE | BM_REJECT_OWN_PKTS | BM_HASH_FILTER_ENABLE;
	CSR_WRITE_2(sc, BM_RX_CONFIG, reg);

	/* Now turn it all on! */
	dbdma_reset(sc->sc_rxdma);
	dbdma_reset(sc->sc_txdma);

	/*
	 * Enable RX and TX MACs. Setting the address filter has
	 * the side effect of enabling the RX MAC.
	 */
	bm_setladrf(sc);

	reg = CSR_READ_2(sc, BM_TX_CONFIG);
	reg |= BM_ENABLE;
	CSR_WRITE_2(sc, BM_TX_CONFIG, reg);

	/*
	 * Enable interrupts, unwedge the controller with a dummy packet,
	 * and nudge the DMA queue.
	 */
	bm_enable_interrupts(sc);
	bm_dummypacket(sc);
	dbdma_wake(sc->sc_rxdma); /* Nudge RXDMA */

	sc->sc_ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->sc_ifpflags = sc->sc_ifp->if_flags;

	/* Resync PHY and MAC states */
	sc->sc_mii = device_get_softc(sc->sc_miibus);
	sc->sc_duplex = ~IFM_FDX;
	mii_mediachg(sc->sc_mii);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, bm_tick, sc);
}

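/*
 * Once-per-second housekeeping: fold the chip's error counters into
 * the interface statistics, poll the PHY for link changes, and run
 * the transmit watchdog.
 */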
static void
bm_tick(void *arg)
{
	struct bm_softc *sc = arg;

	/* Read error counters */
	if_inc_counter(sc->sc_ifp, IFCOUNTER_COLLISIONS,
	    CSR_READ_2(sc, BM_TX_NCCNT) + CSR_READ_2(sc, BM_TX_FCCNT) +
	    CSR_READ_2(sc, BM_TX_EXCNT) + CSR_READ_2(sc, BM_TX_LTCNT));

	if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS,
	    CSR_READ_2(sc, BM_RX_LECNT) + CSR_READ_2(sc, BM_RX_AECNT) +
	    CSR_READ_2(sc, BM_RX_FECNT));

	/* Zero collision counters */
	CSR_WRITE_2(sc, BM_TX_NCCNT, 0);
	CSR_WRITE_2(sc, BM_TX_FCCNT, 0);
	CSR_WRITE_2(sc, BM_TX_EXCNT, 0);
	CSR_WRITE_2(sc, BM_TX_LTCNT, 0);

	/* Zero receive counters */
	CSR_WRITE_2(sc, BM_RX_FRCNT, 0);
	CSR_WRITE_2(sc, BM_RX_LECNT, 0);
	CSR_WRITE_2(sc, BM_RX_AECNT, 0);
	CSR_WRITE_2(sc, BM_RX_FECNT, 0);
	CSR_WRITE_2(sc, BM_RXCV, 0);

	/* Check for link changes and run watchdog */
	mii_tick(sc->sc_mii);
	bm_miibus_statchg(sc->sc_dev);

	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) {
		callout_reset(&sc->sc_tick_ch, hz, bm_tick, sc);
		return;
	}

	/* Problems */
	device_printf(sc->sc_dev, "device timeout\n");

	bm_init_locked(sc);
}

static int
bm_add_rxbuf(struct bm_softc *sc, int idx)
{
	struct bm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	int error, nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	if (rxs->rxs_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rdma_tag, rxs->rxs_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdma_tag, rxs->rxs_dmamap);
	}

	error = bus_dmamap_load_mbuf_sg(sc->sc_rdma_tag, rxs->rxs_dmamap, m,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "cannot load RX DMA map %d, error = %d\n", idx, error);
		m_freem(m);
		return (error);
	}
	/* segs[] has one entry; if nsegs is not 1 the stack was overrun. */
	KASSERT(nsegs == 1,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	rxs->rxs_mbuf = m;
	rxs->segment = segs[0];

	bus_dmamap_sync(sc->sc_rdma_tag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);

	return (0);
}

static int
bm_add_rxbuf_dma(struct bm_softc *sc, int idx)
{
	struct bm_rxsoft *rxs = &sc->sc_rxsoft[idx];

	dbdma_insert_command(sc->sc_rxdma, idx, DBDMA_INPUT_LAST, 0,
	    rxs->segment.ds_addr, rxs->segment.ds_len, DBDMA_ALWAYS,
	    DBDMA_NEVER, DBDMA_NEVER, 0);

	return (0);
}

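/*
 * On BMAC+ ("streaming") cells, chip-level interrupts are left masked
 * even while the interface is up; the driver runs entirely off the TX
 * and RX DBDMA channel interrupts.  Older BMAC cells get the normal
 * interrupt mask.
 */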
static void
bm_enable_interrupts(struct bm_softc *sc)
{
	CSR_WRITE_2(sc, BM_INTR_DISABLE,
	    (sc->sc_streaming) ? BM_INTR_NONE : BM_INTR_NORMAL);
}

static void
bm_disable_interrupts(struct bm_softc *sc)
{
	CSR_WRITE_2(sc, BM_INTR_DISABLE, BM_INTR_NONE);
}