/* if_tsec.c revision 177110 */
1/*-
2 * Copyright (C) 2006-2008 Semihalf
3 * All rights reserved.
4 *
5 * Written by: Piotr Kruszynski <ppk@semihalf.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 *    derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
21 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
23 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
24 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
25 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
26 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30/*
31 * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
32 */
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/dev/tsec/if_tsec.c 177110 2008-03-12 16:32:08Z raj $");
35
36#include <sys/param.h>
37#include <sys/systm.h>
38#include <sys/endian.h>
39#include <sys/mbuf.h>
40#include <sys/kernel.h>
41#include <sys/module.h>
42#include <sys/socket.h>
43#include <sys/sysctl.h>
44
45#include <net/if.h>
46#include <net/if_dl.h>
47#include <net/if_media.h>
48
49#include <net/bpf.h>
50#include <sys/sockio.h>
51#include <sys/bus.h>
52#include <machine/bus.h>
53#include <sys/rman.h>
54#include <machine/resource.h>
55
56#include <net/ethernet.h>
57#include <net/if_arp.h>
58
59#include <net/if_types.h>
60#include <net/if_vlan_var.h>
61
62#include <dev/mii/mii.h>
63#include <dev/mii/miivar.h>
64
65#include <machine/ocpbus.h>
66
67#include <dev/tsec/if_tsec.h>
68#include <dev/tsec/if_tsecreg.h>
69
70#include "miibus_if.h"
71
72#define TSEC_DEBUG
73
#ifdef TSEC_DEBUG
/*
 * Debug trace macro: prints "function:line: " followed by the printf-style
 * argument list 'a' (which must be parenthesized, e.g. PDEBUG(("x=%d", x))).
 * Wrapped in do { } while (0) so it behaves as a single statement and is
 * safe in unbraced if/else bodies.
 */
#define PDEBUG(a) do {							\
	printf("%s:%d: ", __func__, __LINE__);				\
	printf a;							\
	printf("\n");							\
} while (0)
#else
#define PDEBUG(a) /* nop */
#endif
79
/* Device interface methods and attach/detach helpers. */
static int	tsec_probe(device_t dev);
static int	tsec_attach(device_t dev);
static int	tsec_setup_intr(device_t dev, struct resource **ires,
    void **ihand, int *irid, driver_intr_t handler, const char *iname);
static void	tsec_release_intr(device_t dev, struct resource *ires,
    void *ihand, int irid, const char *iname);
static void	tsec_free_dma(struct tsec_softc *sc);
static int	tsec_detach(device_t dev);
static void	tsec_shutdown(device_t dev);
static int	tsec_suspend(device_t dev); /* XXX */
static int	tsec_resume(device_t dev); /* XXX */

/* Hardware programming helpers (MAC, DMA engine, interrupt masks). */
static void	tsec_init(void *xsc);
static void	tsec_init_locked(struct tsec_softc *sc);
static void	tsec_set_mac_address(struct tsec_softc *sc);
static void	tsec_dma_ctl(struct tsec_softc *sc, int state);
static void	tsec_intrs_ctl(struct tsec_softc *sc, int state);
static void	tsec_reset_mac(struct tsec_softc *sc);

/* ifnet handlers and TX/RX buffer management. */
static void	tsec_watchdog(struct ifnet *ifp);
static void	tsec_start(struct ifnet *ifp);
static void	tsec_start_locked(struct ifnet *ifp);
static int	tsec_encap(struct tsec_softc *sc,
    struct mbuf *m_head);
static void	tsec_setfilter(struct tsec_softc *sc);
static int	tsec_ioctl(struct ifnet *ifp, u_long command,
    caddr_t data);
static int	tsec_ifmedia_upd(struct ifnet *ifp);
static void	tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, uint32_t *paddr);
static void	tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static int	tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
    bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
    const char *dname);
static void	tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap,
    void *vaddr);

static void	tsec_stop(struct tsec_softc *sc);

/* Interrupt handlers (TX, RX, error). */
static void	tsec_receive_intr(void *arg);
static void	tsec_transmit_intr(void *arg);
static void	tsec_error_intr(void *arg);

/* PHY tick callout and MII bus interface. */
static void	tsec_tick(void *arg);
static int	tsec_miibus_readreg(device_t dev, int phy, int reg);
static void	tsec_miibus_writereg(device_t dev, int phy, int reg, int value);
static void	tsec_miibus_statchg(device_t dev);
129
/*
 * Softc of unit 0.  The MII management registers are accessed through this
 * instance (see tsec_init_locked steps 6-8), hence the global.
 */
static struct tsec_softc *tsec0_sc = NULL; /* XXX ugly hack! */

static device_method_t tsec_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		tsec_probe),
	DEVMETHOD(device_attach,	tsec_attach),
	DEVMETHOD(device_detach,	tsec_detach),
	DEVMETHOD(device_shutdown,	tsec_shutdown),
	DEVMETHOD(device_suspend,	tsec_suspend),
	DEVMETHOD(device_resume,	tsec_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	tsec_miibus_readreg),
	DEVMETHOD(miibus_writereg,	tsec_miibus_writereg),
	DEVMETHOD(miibus_statchg,	tsec_miibus_statchg),
	{ 0, 0 }
};

static driver_t tsec_driver = {
	"tsec",
	tsec_methods,
	sizeof(struct tsec_softc),
};

static devclass_t tsec_devclass;

/* Attach to the OCP bus; miibus instances hang off each tsec device. */
DRIVER_MODULE(tsec, ocpbus, tsec_driver, tsec_devclass, 0, 0);
DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(tsec, ether, 1, 1, 1);
MODULE_DEPEND(tsec, miibus, 1, 1, 1);
164
165static void
166tsec_get_hwaddr(struct tsec_softc *sc, uint8_t *addr)
167{
168	union {
169		uint32_t reg[2];
170		uint8_t addr[6];
171	} curmac;
172	uint32_t a[6];
173	device_t parent;
174	uintptr_t macaddr;
175	int i;
176
177	parent = device_get_parent(sc->dev);
178	if (BUS_READ_IVAR(parent, sc->dev, OCPBUS_IVAR_MACADDR,
179	    &macaddr) == 0) {
180		bcopy((uint8_t *)macaddr, addr, 6);
181		return;
182	}
183
184	/*
185	 * Fall back -- use the currently programmed address in the hope that
186	 * it was set be firmware...
187	 */
188	curmac.reg[0] = TSEC_READ(sc, TSEC_REG_MACSTNADDR1);
189	curmac.reg[1] = TSEC_READ(sc, TSEC_REG_MACSTNADDR2);
190	for (i = 0; i < 6; i++)
191		a[5-i] = curmac.addr[i];
192
193	addr[0] = a[0];
194	addr[1] = a[1];
195	addr[2] = a[2];
196	addr[3] = a[3];
197	addr[4] = a[4];
198	addr[5] = a[5];
199}
200
/*
 * ifnet if_init entry point: (re)initialize the controller.
 * Takes both TX and RX locks for the duration of the reset.
 */
static void
tsec_init(void *xsc)
{
	struct tsec_softc *sc;

	sc = xsc;

	TSEC_GLOBAL_LOCK(sc);
	tsec_init_locked(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}
210
/*
 * Bring the controller to a fully operational state: soft-reset the MAC,
 * program the station address, set up the MII management interface,
 * initialize the TX/RX descriptor rings and finally enable the receiver
 * and transmitter.  Follows the init sequence from the MPC8555E reference
 * manual.  Caller must hold the global (TX+RX) lock.
 */
static void
tsec_init_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
	struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t timeout;
	uint32_t val;
	uint32_t i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);
	tsec_stop(sc);

	/*
	 * These steps are according to the MPC8555E PowerQUICCIII RM:
	 * 14.7 Initialization/Application Information
	 */

	/* Step 1: soft reset MAC */
	tsec_reset_mac(sc);

	/* Step 2: Initialize MACCFG2 */
	TSEC_WRITE(sc, TSEC_REG_MACCFG2,
	    TSEC_MACCFG2_FULLDUPLEX |	/* Full Duplex = 1 */
	    TSEC_MACCFG2_PADCRC |	/* PAD/CRC append */
	    TSEC_MACCFG2_GMII |		/* I/F Mode bit */
	    TSEC_MACCFG2_PRECNT		/* Preamble count = 7 */
	);

	/* Step 3: Initialize ECNTRL
	 * While the documentation states that R100M is ignored if RPM is
	 * not set, it does seem to be needed to get the orange boxes to
	 * work (which have a Marvell 88E1111 PHY). Go figure.
	 */

	/*
	 * XXX kludge - use circumstancial evidence to program ECNTRL
	 * correctly. Ideally we need some board information to guide
	 * us here.
	 */
	i = TSEC_READ(sc, TSEC_REG_ID2);
	val = (i & 0xffff)
	    ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)	/* Sumatra */
	    : TSEC_ECNTRL_R100M;			/* Orange + CDS */
	TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);

	/* Step 4: Initialize MAC station address */
	tsec_set_mac_address(sc);

	/*
	 * Step 5: Assign a Physical address to the TBI so as to not conflict
	 * with the external PHY physical address
	 */
	TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);

	/*
	 * Steps 6-8 program the MII management interface, which is shared
	 * and accessed through unit 0 (tsec0_sc) on all instances.
	 */

	/* Step 6: Reset the management interface */
	TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);

	/* Step 7: Setup the MII Mgmt clock speed */
	TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);

	/* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
	timeout = TSEC_READ_RETRY;
	while (--timeout && (TSEC_READ(tsec0_sc, TSEC_REG_MIIMIND) &
	    TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);
	if (timeout == 0) {
		if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
		return;
	}

	/* Step 9: Setup the MII Mgmt */
	mii_mediachg(sc->tsec_mii);

	/* Step 10: Clear IEVENT register */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);

	/* Step 11: Initialize IMASK */
	tsec_intrs_ctl(sc, 1);

	/* Step 12: Initialize IADDRn */
	TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);

	/* Step 13: Initialize GADDRn */
	TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);

	/* Step 14: Initialize RCTRL */
	TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);

	/* Step 15: Initialize DMACTRL */
	tsec_dma_ctl(sc, 1);

	/* Step 16: Initialize FIFO_PAUSE_CTRL */
	TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);

	/*
	 * Step 17: Initialize transmit/receive descriptor rings.
	 * Initialize TBASE and RBASE.
	 */
	TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
	TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);

	/* TX ring starts empty; only the last descriptor gets the wrap bit. */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		tx_desc[i].bufptr = 0;
		tx_desc[i].length = 0;
		tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC-1) ? TSEC_TXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/* RX descriptors start empty (E) with interrupt-on-receive (I) set. */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		rx_desc[i].bufptr = sc->rx_data[i].paddr;
		rx_desc[i].length = 0;
		rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
		    ((i == TSEC_RX_NUM_DESC-1) ? TSEC_RXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/* Step 18: Initialize the maximum and minimum receive buffer length */
	TSEC_WRITE(sc, TSEC_REG_MRBLR, TSEC_DEFAULT_MAX_RX_BUFFER_SIZE);
	TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_DEFAULT_MIN_RX_BUFFER_SIZE);

	/* Step 19: Enable Rx and RxBD sdata snooping */
	TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
	TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);

	/* Step 20: Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	/* Step 21: Mask all CAM interrupts */
	TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
	TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);

	/* Step 22: Enable Rx and Tx */
	val = TSEC_READ(sc, TSEC_REG_MACCFG1);
	val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);

	/* Step 23: Reset TSEC counters for Tx and Rx rings */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Step 24: Activate timer for PHY */
	callout_reset(&sc->tsec_tick_ch, hz, tsec_tick, sc);

	/* Step 25: Activate network interface */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_timer = 0;
	sc->tsec_if_flags = ifp->if_flags;
}
380
381static void
382tsec_set_mac_address(struct tsec_softc *sc)
383{
384	uint32_t macbuf[2] = { 0, 0 };
385	int i;
386	char *macbufp;
387	char *curmac;
388
389	TSEC_GLOBAL_LOCK_ASSERT(sc);
390
391	KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
392	    ("tsec_set_mac_address: (%d <= %d",
393	    ETHER_ADDR_LEN, sizeof(macbuf)));
394
395	macbufp = (char *)macbuf;
396	curmac = (char *)IF_LLADDR(sc->tsec_ifp);
397
398	/* Correct order of MAC address bytes */
399	for (i = 1; i <= ETHER_ADDR_LEN; i++)
400		macbufp[ETHER_ADDR_LEN-i] = curmac[i-1];
401
402	/* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */
403	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]);
404	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]);
405}
406
/*
 * DMA control function, if argument state is:
 * 0 - DMA engine will be disabled
 * 1 - DMA engine will be enabled
 *
 * State 1000 is used internally (from the state-0 path) to clear the
 * graceful-stop bits without waiting for the stop to complete and
 * without restarting transmission.
 */
static void
tsec_dma_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;
	uint32_t dma_flags;
	uint32_t timeout;

	dev = sc->dev;

	dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL);

	switch (state) {
	case 0:
		/* Temporarily clear stop graceful stop bits. */
		tsec_dma_ctl(sc, 1000);

		/* Set it again */
		dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	case 1000:
	case 1:
		/* Set write with response (WWR), wait (WOP) and snoop bits */
		dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN |
		    DMACTRL_WWR | DMACTRL_WOP);

		/* Clear graceful stop bits */
		dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	default:
		device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n",
		    state);
	}

	TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags);

	/* State 1000 deliberately matches neither case below. */
	switch (state) {
	case 0:
		/* Wait for DMA stop (graceful stop completion in IEVENT) */
		timeout = TSEC_READ_RETRY;
		while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) &
		    (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC))))
			DELAY(TSEC_READ_DELAY);

		if (timeout == 0)
			device_printf(dev, "tsec_dma_ctl(): timeout!\n");
		break;
	case 1:
		/* Restart transmission function */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
}
463
464/*
465 * Interrupts control function, if argument state is:
466 * 0 - all TSEC interrupts will be masked
467 * 1 - all TSEC interrupts will be unmasked
468 */
469static void
470tsec_intrs_ctl(struct tsec_softc *sc, int state)
471{
472	device_t dev;
473
474	dev = sc->dev;
475
476	switch (state) {
477	case 0:
478		TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
479		break;
480	case 1:
481		TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN |
482		    TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN |
483		    TSEC_IMASK_EBERREN | TSEC_IMASK_BTEN |
484		    TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN |
485		    TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN |
486		    TSEC_IMASK_RXFEN
487		  );
488		break;
489	default:
490		device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n",
491		    state);
492	}
493}
494
495static void
496tsec_reset_mac(struct tsec_softc *sc)
497{
498	uint32_t maccfg1_flags;
499
500	/* Set soft reset bit */
501	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
502	maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET;
503	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
504
505	/* Clear soft reset bit */
506	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
507	maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET;
508	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
509}
510
511static void
512tsec_watchdog(struct ifnet *ifp)
513{
514	struct tsec_softc *sc = ifp->if_softc;
515
516	TSEC_GLOBAL_LOCK(sc);
517
518	ifp->if_oerrors++;
519	if_printf(ifp, "watchdog timeout\n");
520
521	tsec_stop(sc);
522	tsec_init_locked(sc);
523
524	TSEC_GLOBAL_UNLOCK(sc);
525}
526
527static void
528tsec_start(struct ifnet *ifp)
529{
530	struct tsec_softc *sc = ifp->if_softc;
531
532	TSEC_TRANSMIT_LOCK(sc);
533	tsec_start_locked(ifp);
534	TSEC_TRANSMIT_UNLOCK(sc);
535}
536
537
538static void
539tsec_start_locked(struct ifnet *ifp)
540{
541	struct tsec_softc *sc;
542	struct mbuf *m0;
543	struct mbuf *mtmp;
544	unsigned int queued = 0;
545
546	sc = ifp->if_softc;
547
548	TSEC_TRANSMIT_LOCK_ASSERT(sc);
549
550	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
551	    IFF_DRV_RUNNING)
552		return;
553
554	if (sc->tsec_link == 0)
555		return;
556
557	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
558	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
559
560	for (;;) {
561		/* Get packet from the queue */
562		IF_DEQUEUE(&ifp->if_snd, m0);
563		if (m0 == NULL)
564			break;
565
566		mtmp = m_defrag(m0, M_DONTWAIT);
567		if (mtmp)
568			m0 = mtmp;
569
570		if (tsec_encap(sc, m0)) {
571			IF_PREPEND(&ifp->if_snd, m0);
572			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
573			break;
574		}
575		queued++;
576		BPF_MTAP(ifp, m0);
577	}
578	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
579	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
580
581	if (queued) {
582		/* Enable transmitter and watchdog timer */
583		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
584		ifp->if_timer = 5;
585	}
586}
587
/*
 * Map mbuf chain m0 for DMA and fill TX descriptors for its segments.
 *
 * Returns 0 on success.  Returns -1 (ring exhausted) or a bus_dma error
 * code on failure, in which case the mbuf is left untouched and the
 * caller is expected to requeue it (see tsec_start_locked()).  Called
 * with the transmit lock held.
 */
static int
tsec_encap(struct tsec_softc *sc, struct mbuf *m0)
{
	struct tsec_desc *tx_desc = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[TSEC_TX_NUM_DESC];
	bus_dmamap_t *mapp;
	int error;
	int seg, nsegs;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	if (TSEC_FREE_TX_DESC(sc) == 0) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	mapp = TSEC_ALLOC_TX_MAP(sc);

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
	   *mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT);
	/* Fail also when the chain needs more descriptors than are free. */
	if (error != 0 || nsegs > TSEC_FREE_TX_DESC(sc) || nsegs <= 0) {
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
		TSEC_FREE_TX_MAP(sc, mapp);
		return ((error != 0) ? error : -1);
	}
	bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_PREWRITE);

	if ((ifp->if_flags & IFF_DEBUG) && (nsegs > 1))
		if_printf(ifp, "TX buffer has %d segments\n", nsegs);

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		tx_desc = TSEC_GET_CUR_TX_DESC(sc);

		tx_desc->length = segs[seg].ds_len;
		tx_desc->bufptr = segs[seg].ds_addr;

		/* Preserve only the wrap bit of the recycled descriptor. */
		tx_desc->flags =
		    (tx_desc->flags & TSEC_TXBD_W) | /* wrap */
		    TSEC_TXBD_I |		/* interrupt */
		    TSEC_TXBD_R |		/* ready to send */
		    TSEC_TXBD_TC |		/* transmit the CRC sequence
						 * after the last data byte */
		    ((seg == nsegs-1) ? TSEC_TXBD_L : 0);/* last in frame */
	}

	/* Save mbuf and DMA mapping for release at later stage */
	TSEC_PUT_TX_MBUF(sc, m0);
	TSEC_PUT_TX_MAP(sc, mapp);

	return (0);
}
645
646static void
647tsec_setfilter(struct tsec_softc *sc)
648{
649	struct ifnet *ifp;
650	uint32_t flags;
651
652	ifp = sc->tsec_ifp;
653	flags = TSEC_READ(sc, TSEC_REG_RCTRL);
654
655	/* Promiscuous mode */
656	if (ifp->if_flags & IFF_PROMISC)
657		flags |= TSEC_RCTRL_PROM;
658	else
659		flags &= ~TSEC_RCTRL_PROM;
660
661	TSEC_WRITE(sc, TSEC_REG_RCTRL, flags);
662}
663
664static int
665tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
666{
667	struct tsec_softc *sc = ifp->if_softc;
668	struct ifreq *ifr = (struct ifreq *)data;
669	device_t dev;
670	int error = 0;
671
672	dev = sc->dev;
673
674	switch (command) {
675	case SIOCSIFFLAGS:
676		TSEC_GLOBAL_LOCK(sc);
677		if (ifp->if_flags & IFF_UP) {
678			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
679				if ((sc->tsec_if_flags ^ ifp->if_flags) &
680				    IFF_PROMISC)
681					tsec_setfilter(sc);
682			} else
683				tsec_init_locked(sc);
684		} else {
685			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
686				tsec_stop(sc);
687		}
688		sc->tsec_if_flags = ifp->if_flags;
689		TSEC_GLOBAL_UNLOCK(sc);
690		break;
691	case SIOCGIFMEDIA:
692	case SIOCSIFMEDIA:
693		error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media,
694		    command);
695		break;
696	default:
697		error = ether_ioctl(ifp, command, data);
698	}
699
700	/* Flush buffers if not empty */
701	if (ifp->if_flags & IFF_UP)
702		tsec_start(ifp);
703	return (error);
704}
705
706static int
707tsec_ifmedia_upd(struct ifnet *ifp)
708{
709	struct tsec_softc *sc = ifp->if_softc;
710	struct mii_data *mii;
711
712	TSEC_TRANSMIT_LOCK(sc);
713
714	mii = sc->tsec_mii;
715	mii_mediachg(mii);
716
717	TSEC_TRANSMIT_UNLOCK(sc);
718	return (0);
719}
720
721static void
722tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
723{
724	struct tsec_softc *sc = ifp->if_softc;
725	struct mii_data *mii;
726
727	TSEC_TRANSMIT_LOCK(sc);
728
729	mii = sc->tsec_mii;
730	mii_pollstat(mii);
731
732	ifmr->ifm_active = mii->mii_media_active;
733	ifmr->ifm_status = mii->mii_media_status;
734
735	TSEC_TRANSMIT_UNLOCK(sc);
736}
737
738static int
739tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
740	       uint32_t *paddr)
741{
742	struct mbuf *new_mbuf;
743	bus_dma_segment_t seg[1];
744	int error;
745	int nsegs;
746
747	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
748
749	new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
750	if (new_mbuf == NULL)
751		return (ENOBUFS);
752	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
753
754	if (*mbufp) {
755		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
756		bus_dmamap_unload(tag, map);
757	}
758
759	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
760	    BUS_DMA_NOWAIT);
761	KASSERT(nsegs == 1, ("Too many segments returned!"));
762	if (nsegs != 1 || error)
763		panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);
764
765#if 0
766	if (error) {
767		printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
768			error);
769		m_freem(new_mbuf);
770		return (ENOBUFS);
771	}
772#endif
773
774#if 0
775	KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
776		("Wrong alignment of RX buffer!"));
777#endif
778	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
779
780	(*mbufp) = new_mbuf;
781	(*paddr) = seg->ds_addr;
782	return (0);
783}
784
785static void
786tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
787{
788	u_int32_t *paddr;
789
790	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
791	paddr = arg;
792	*paddr = segs->ds_addr;
793}
794
795static int
796tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
797    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
798{
799	int error;
800
801	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
802	error = bus_dma_tag_create(NULL,	/* parent */
803	    PAGE_SIZE, 0,			/* alignment, boundary */
804	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
805	    BUS_SPACE_MAXADDR,			/* highaddr */
806	    NULL, NULL,				/* filtfunc, filtfuncarg */
807	    dsize, 1,				/* maxsize, nsegments */
808	    dsize, 0,				/* maxsegsz, flags */
809	    NULL, NULL,				/* lockfunc, lockfuncarg */
810	    dtag);				/* dmat */
811
812	if (error) {
813		device_printf(dev, "failed to allocate busdma %s tag\n", dname);
814		(*vaddr) = NULL;
815		return (ENXIO);
816	}
817
818	error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
819	    dmap);
820	if (error) {
821		device_printf(dev, "failed to allocate %s DMA safe memory\n",
822		    dname);
823		bus_dma_tag_destroy(*dtag);
824		(*vaddr) = NULL;
825		return (ENXIO);
826	}
827
828	error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize, tsec_map_dma_addr,
829	    raddr, BUS_DMA_NOWAIT);
830	if (error) {
831		device_printf(dev, "cannot get address of the %s descriptors\n",
832		    dname);
833		bus_dmamem_free(*dtag, *vaddr, *dmap);
834		bus_dma_tag_destroy(*dtag);
835		(*vaddr) = NULL;
836		return (ENXIO);
837	}
838
839	return (0);
840}
841
842static void
843tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
844{
845
846	if (vaddr == NULL)
847		return;
848
849	/* Unmap descriptors from DMA memory */
850	bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
851	bus_dmamap_unload(dtag, dmap);
852
853	/* Free descriptors memory */
854	bus_dmamem_free(dtag, vaddr, dmap);
855
856	/* Destroy descriptors tag */
857	bus_dma_tag_destroy(dtag);
858}
859
860static int
861tsec_probe(device_t dev)
862{
863	struct tsec_softc *sc;
864	device_t parent;
865	uintptr_t devtype;
866	int error;
867	uint32_t id;
868
869	parent = device_get_parent(dev);
870
871	error = BUS_READ_IVAR(parent, dev, OCPBUS_IVAR_DEVTYPE, &devtype);
872	if (error)
873		return (error);
874	if (devtype != OCPBUS_DEVTYPE_TSEC)
875		return (ENXIO);
876
877	sc = device_get_softc(dev);
878
879	sc->sc_rrid = 0;
880	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid,
881	    0ul, ~0ul, TSEC_IO_SIZE, RF_ACTIVE);
882	if (sc->sc_rres == NULL)
883		return (ENXIO);
884
885	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
886	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);
887
888	/* Check that we actually have a TSEC at this address */
889	id = TSEC_READ(sc, TSEC_REG_ID) | TSEC_READ(sc, TSEC_REG_ID2);
890
891	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
892
893	if (id == 0)
894		return (ENXIO);
895
896	device_set_desc(dev, "Three-Speed Ethernet Controller");
897	return (BUS_PROBE_DEFAULT);
898}
899
/*
 * Device attach: set up locks, map registers, quiesce the hardware,
 * allocate all DMA resources (descriptor rings, mbuf tags/maps, initial
 * RX buffers), create the ifnet, probe the PHY and hook up the three
 * interrupt lines.  On any failure, tsec_detach() is called to unwind
 * whatever was already allocated.
 */
static int
tsec_attach(device_t dev)
{
	uint8_t hwaddr[ETHER_ADDR_LEN];
	struct tsec_softc *sc;
	struct ifnet *ifp;
	bus_dmamap_t *map_ptr;
	bus_dmamap_t **map_pptr;
	int error = 0;
	int i;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Remember unit 0: its registers carry the shared MII mgmt i/f. */
	if (device_get_unit(dev) == 0)
		tsec0_sc = sc; /* XXX */

	callout_init(&sc->tsec_tick_ch, 1);
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "TSEC TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "TSEC RX lock",
	    MTX_DEF);

	/* Reset all TSEC counters */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Allocate IO memory for TSEC registers */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    0ul, ~0ul, TSEC_IO_SIZE, RF_ACTIVE);
	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate IO memory range!\n");
		tsec_detach(dev);
		return (ENXIO);
	}
	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	/* Stop DMA engine if enabled by firmware */
	tsec_dma_ctl(sc, 0);

	/* Reset MAC */
	tsec_reset_mac(sc);

	/* Disable interrupts for now */
	tsec_intrs_ctl(sc, 0);

	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
	error = tsec_alloc_dma_desc(dev, &sc->tsec_tx_dtag, &sc->tsec_tx_dmap,
	    sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
	    (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");
	if (error) {
		tsec_detach(dev);
		return (ENXIO);
	}

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = tsec_alloc_dma_desc(dev, &sc->tsec_rx_dtag, &sc->tsec_rx_dmap,
	    sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
	    (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
	if (error) {
		tsec_detach(dev);
		return (ENXIO);
	}

	/* Allocate a busdma tag for TX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_TXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES * (TSEC_TX_NUM_DESC - 1),	/* maxsize */
	    TSEC_TX_NUM_DESC - 1,		/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_tx_mtag);			/* dmat */
	if (error) {
		device_printf(dev, "failed to allocate busdma tag(tx mbufs)\n");
		tsec_detach(dev);
		return (ENXIO);
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_RXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES, 0,				/* maxsegsz, flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &sc->tsec_rx_mtag);			/* dmat */
	if (error) {
		device_printf(dev, "failed to allocate busdma tag(rx mbufs)\n");
		tsec_detach(dev);
		return (ENXIO);
	}

	/* Create TX busdma maps */
	map_ptr = sc->tx_map_data;
	map_pptr = sc->tx_map_unused_data;

	/* Each entry of the unused-map stack points at a map in map_ptr. */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		map_pptr[i] = &map_ptr[i];
		error = bus_dmamap_create(sc->tsec_tx_mtag, 0,
		    map_pptr[i]);
		if (error) {
			device_printf(dev, "failed to init TX ring\n");
			tsec_detach(dev);
			return (ENXIO);
		}
	}

	/* Create RX busdma maps and zero mbuf handlers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_rx_mtag, 0,
		    &sc->rx_data[i].map);
		if (error) {
			device_printf(dev, "failed to init RX ring\n");
			tsec_detach(dev);
			return (ENXIO);
		}
		sc->rx_data[i].mbuf = NULL;
	}

	/* Create mbufs for RX buffers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
		    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
		if (error) {
			device_printf(dev, "can't load rx DMA map %d, error = "
			    "%d\n", i, error);
			tsec_detach(dev);
			return (error);
		}
	}

	/* Create network interface for upper layers */
	ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "if_alloc() failed\n");
		tsec_detach(dev);
		return (ENOMEM);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST;
	ifp->if_init = tsec_init;
	ifp->if_start = tsec_start;
	ifp->if_watchdog = tsec_watchdog;
	ifp->if_ioctl = tsec_ioctl;

	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/* XXX No special features of TSEC are supported currently */
	ifp->if_capabilities = 0;
	ifp->if_capenable = ifp->if_capabilities;

	/* Probe PHY(s) */
	error = mii_phy_probe(dev, &sc->tsec_miibus, tsec_ifmedia_upd,
	    tsec_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII failed to find PHY!\n");
		/* Free the ifnet ourselves: tsec_detach won't know of it. */
		if_free(ifp);
		sc->tsec_ifp = NULL;
		tsec_detach(dev);
		return (error);
	}
	sc->tsec_mii = device_get_softc(sc->tsec_miibus);

	tsec_get_hwaddr(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);

	/* Interrupts configuration (TX/RX/ERR) */
	sc->sc_transmit_irid = OCP_TSEC_RID_TXIRQ;
	error = tsec_setup_intr(dev, &sc->sc_transmit_ires,
	    &sc->sc_transmit_ihand, &sc->sc_transmit_irid,
	    tsec_transmit_intr, "TX");
	if (error) {
		tsec_detach(dev);
		return (error);
	}

	sc->sc_receive_irid = OCP_TSEC_RID_RXIRQ;
	error = tsec_setup_intr(dev, &sc->sc_receive_ires,
	    &sc->sc_receive_ihand, &sc->sc_receive_irid,
	    tsec_receive_intr, "RX");
	if (error) {
		tsec_detach(dev);
		return (error);
	}

	sc->sc_error_irid = OCP_TSEC_RID_ERRIRQ;
	error = tsec_setup_intr(dev, &sc->sc_error_ires,
	    &sc->sc_error_ihand, &sc->sc_error_irid,
	    tsec_error_intr, "ERR");
	if (error) {
		tsec_detach(dev);
		return (error);
	}

	return (0);
}
1108
1109static int
1110tsec_setup_intr(device_t dev, struct resource **ires, void **ihand, int *irid,
1111    driver_intr_t handler, const char *iname)
1112{
1113	struct tsec_softc *sc;
1114	int error;
1115
1116	sc = device_get_softc(dev);
1117
1118	(*ires) = bus_alloc_resource_any(dev, SYS_RES_IRQ, irid, RF_ACTIVE);
1119	if ((*ires) == NULL) {
1120		device_printf(dev, "could not allocate %s IRQ\n", iname);
1121		return (ENXIO);
1122	}
1123	error = bus_setup_intr(dev, *ires, INTR_TYPE_NET | INTR_MPSAFE,
1124	    NULL, handler, sc, ihand);
1125	if (error) {
1126		device_printf(dev, "failed to set up %s IRQ\n", iname);
1127		if (bus_release_resource(dev, SYS_RES_IRQ, *irid, *ires))
1128			device_printf(dev, "could not release %s IRQ\n", iname);
1129		(*ires) = NULL;
1130		return (error);
1131	}
1132	return (0);
1133}
1134
1135static void
1136tsec_release_intr(device_t dev, struct resource *ires, void *ihand, int irid,
1137    const char *iname)
1138{
1139	int error;
1140
1141	if (ires == NULL)
1142		return;
1143
1144	error = bus_teardown_intr(dev, ires, ihand);
1145	if (error)
1146		device_printf(dev, "bus_teardown_intr() failed for %s intr"
1147		    ", error %d\n", iname, error);
1148
1149	error = bus_release_resource(dev, SYS_RES_IRQ, irid, ires);
1150	if (error)
1151		device_printf(dev, "bus_release_resource() failed for %s intr"
1152		    ", error %d\n", iname, error);
1153}
1154
1155static void
1156tsec_free_dma(struct tsec_softc *sc)
1157{
1158	int i;
1159
1160	/* Free TX maps */
1161	for (i = 0; i < TSEC_TX_NUM_DESC; i++)
1162		if (sc->tx_map_data[i] != NULL)
1163			bus_dmamap_destroy(sc->tsec_tx_mtag,
1164			    sc->tx_map_data[i]);
1165	/* Destroy tag for Tx mbufs */
1166	bus_dma_tag_destroy(sc->tsec_tx_mtag);
1167
1168	/* Free RX mbufs and maps */
1169	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
1170		if (sc->rx_data[i].mbuf) {
1171			/* Unload buffer from DMA */
1172			bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
1173			    BUS_DMASYNC_POSTREAD);
1174			bus_dmamap_unload(sc->tsec_rx_mtag, sc->rx_data[i].map);
1175
1176			/* Free buffer */
1177			m_freem(sc->rx_data[i].mbuf);
1178		}
1179		/* Destroy map for this buffer */
1180		if (sc->rx_data[i].map != NULL)
1181			bus_dmamap_destroy(sc->tsec_rx_mtag,
1182			    sc->rx_data[i].map);
1183	}
1184	/* Destroy tag for Rx mbufs */
1185	bus_dma_tag_destroy(sc->tsec_rx_mtag);
1186
1187	/* Unload TX/RX descriptors */
1188	tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
1189	    sc->tsec_tx_vaddr);
1190	tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
1191	    sc->tsec_rx_vaddr);
1192}
1193
1194static int
1195tsec_detach(device_t dev)
1196{
1197	struct tsec_softc *sc;
1198	int error;
1199
1200	sc = device_get_softc(dev);
1201
1202	/* Stop TSEC controller and free TX queue */
1203	if (sc->sc_rres && sc->tsec_ifp)
1204		tsec_shutdown(dev);
1205
1206	/* Wait for stopping TSEC ticks */
1207	callout_drain(&sc->tsec_tick_ch);
1208
1209	/* Stop and release all interrupts */
1210	tsec_release_intr(dev, sc->sc_transmit_ires, sc->sc_transmit_ihand,
1211	    sc->sc_transmit_irid, "TX");
1212	tsec_release_intr(dev, sc->sc_receive_ires, sc->sc_receive_ihand,
1213	    sc->sc_receive_irid, "RX");
1214	tsec_release_intr(dev, sc->sc_error_ires, sc->sc_error_ihand,
1215	    sc->sc_error_irid, "ERR");
1216
1217	/* Detach network interface */
1218	if (sc->tsec_ifp) {
1219		ether_ifdetach(sc->tsec_ifp);
1220		if_free(sc->tsec_ifp);
1221		sc->tsec_ifp = NULL;
1222	}
1223
1224	/* Free DMA resources */
1225	tsec_free_dma(sc);
1226
1227	/* Free IO memory handler */
1228	if (sc->sc_rres) {
1229		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
1230		    sc->sc_rres);
1231		if (error)
1232			device_printf(dev, "bus_release_resource() failed for"
1233			    " IO memory, error %d\n", error);
1234	}
1235
1236	/* Destroy locks */
1237	mtx_destroy(&sc->receive_lock);
1238	mtx_destroy(&sc->transmit_lock);
1239	return (0);
1240}
1241
1242static void
1243tsec_shutdown(device_t dev)
1244{
1245	struct tsec_softc *sc;
1246
1247	sc = device_get_softc(dev);
1248
1249	TSEC_GLOBAL_LOCK(sc);
1250	tsec_stop(sc);
1251	TSEC_GLOBAL_UNLOCK(sc);
1252}
1253
1254static int
1255tsec_suspend(device_t dev)
1256{
1257
1258	/* TODO not implemented! */
1259	return (ENODEV);
1260}
1261
1262static int
1263tsec_resume(device_t dev)
1264{
1265
1266	/* TODO not implemented! */
1267	return (ENODEV);
1268}
1269
1270static void
1271tsec_stop(struct tsec_softc *sc)
1272{
1273	struct ifnet *ifp;
1274	struct mbuf *m0;
1275	bus_dmamap_t *mapp;
1276	uint32_t tmpval;
1277
1278	TSEC_GLOBAL_LOCK_ASSERT(sc);
1279
1280	ifp = sc->tsec_ifp;
1281
1282	/* Stop PHY tick engine */
1283	callout_stop(&sc->tsec_tick_ch);
1284
1285	/* Disable interface and watchdog timer */
1286	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1287	ifp->if_timer = 0;
1288
1289	/* Disable all interrupts and stop DMA */
1290	tsec_intrs_ctl(sc, 0);
1291	tsec_dma_ctl(sc, 0);
1292
1293	/* Remove pending data from TX queue */
1294	while (!TSEC_EMPTYQ_TX_MBUF(sc)) {
1295		m0 = TSEC_GET_TX_MBUF(sc);
1296		mapp = TSEC_GET_TX_MAP(sc);
1297
1298		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE);
1299		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
1300
1301		TSEC_FREE_TX_MAP(sc, mapp);
1302		m_freem(m0);
1303	}
1304
1305	/* Disable Rx and Tx */
1306	tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1);
1307	tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
1308	TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval);
1309	DELAY(10);
1310}
1311
/*
 * RX interrupt handler: harvest completed descriptors from the RX
 * ring, attach fresh buffers to them and hand the received frames to
 * the stack (outside the receive lock, to avoid recursion into it).
 */
static void
tsec_receive_intr(void *arg)
{
	struct mbuf *rcv_mbufs[TSEC_RX_NUM_DESC];
	struct tsec_softc *sc = arg;
	struct tsec_desc *rx_desc;
	struct ifnet *ifp;
	struct rx_data_type *rx_data;
	struct mbuf *m;
	device_t dev;
	uint32_t i;
	int count;
	int c1 = 0;	/* number of frames collected for if_input */
	int c2;
	uint16_t flags;
	uint16_t length;

	ifp = sc->tsec_ifp;
	rx_data = sc->rx_data;
	dev = sc->dev;

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);

	TSEC_RECEIVE_LOCK(sc);

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	/* Walk the ring; the count check below bounds us to one full pass. */
	for (count = 0; /* count < TSEC_RX_NUM_DESC */; count++) {
		rx_desc = TSEC_GET_CUR_RX_DESC(sc);
		flags = rx_desc->flags;

		/* Check if there is anything to receive */
		if ((flags & TSEC_RXBD_E) || (count >= TSEC_RX_NUM_DESC)) {
			/*
			 * Avoid generating another interrupt
			 */
			if (flags & TSEC_RXBD_E)
				TSEC_WRITE(sc, TSEC_REG_IEVENT,
				    TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
			/*
			 * We didn't consume current descriptor and have to
			 * return it to the queue
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		/*
		 * Errored frame (too long/short, alignment, CRC, overrun,
		 * truncated): recycle the descriptor without delivery.
		 */
		if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO |
		    TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) {
			rx_desc->length = 0;
			rx_desc->flags = (rx_desc->flags &
			    ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I;
			continue;
		}

		/* A frame is expected to fit in one buffer here. */
		if ((flags & TSEC_RXBD_L) == 0)
			device_printf(dev, "buf is not the last in frame!\n");

		/* Ok... process frame (strip the trailing CRC) */
		length = rx_desc->length - ETHER_CRC_LEN;
		i = TSEC_GET_CUR_RX_DESC_CNT(sc);

		m = rx_data[i].mbuf;

		/*
		 * Allocate a replacement buffer before giving this one
		 * away; if that fails the frame is dropped.
		 * NOTE(review): on failure the descriptor is not re-armed
		 * here — presumably the old mbuf stays attached; confirm
		 * the recovery path in tsec_new_rxbuf().
		 */
		if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map,
		    &rx_data[i].mbuf, &rx_data[i].paddr)) {
			ifp->if_ierrors++;
			continue;
		}
		/* Attach new buffer to descriptor, and clear flags */
		rx_desc->bufptr = rx_data[i].paddr;
		rx_desc->length = 0;
		rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
		    TSEC_RXBD_E | TSEC_RXBD_I;

		/* Prepare buffer for upper layers */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = length;

		/* Save it for push */
		rcv_mbufs[c1++] = m;
	}

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	TSEC_RECEIVE_UNLOCK(sc);

	/* Push collected frames to the stack, outside the lock. */
	for (c2 = 0; c2 < c1; c2++)
		(*ifp->if_input)(ifp, rcv_mbufs[c2]);
}
1406
/*
 * TX completion interrupt handler: reclaim descriptors the hardware
 * has finished with, free their mbufs and DMA maps, update statistics
 * and restart transmission of any pending packets.
 */
static void
tsec_transmit_intr(void *arg)
{
	struct tsec_softc *sc = arg;
	struct tsec_desc *tx_desc;
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	int send = 0;	/* set when at least one packet completed */

	ifp = sc->tsec_ifp;

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF);

	TSEC_TRANSMIT_LOCK(sc);

	/* Update collision statistics */
	ifp->if_collisions += TSEC_READ(sc, TSEC_REG_MON_TNCL);

	/* Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	/* Reclaim descriptors between the dirty and current positions. */
	while (TSEC_CUR_DIFF_DIRTY_TX_DESC(sc)) {
		tx_desc = TSEC_GET_DIRTY_TX_DESC(sc);
		/* Still owned by hardware (R bit set): revisit later. */
		if (tx_desc->flags & TSEC_TXBD_R) {
			TSEC_BACK_DIRTY_TX_DESC(sc);
			break;
		}

		/* Only the last buffer of a packet has an mbuf/map queued. */
		if ((tx_desc->flags & TSEC_TXBD_L) == 0)
			continue;

		/*
		 * This is the last buf in this packet, so unmap and free it.
		 */
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);

		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);

		ifp->if_opackets++;
		send = 1;
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		tsec_start_locked(ifp);

		/* Stop watchdog if all sent */
		if (TSEC_EMPTYQ_TX_MBUF(sc))
			ifp->if_timer = 0;
	}
	TSEC_TRANSMIT_UNLOCK(sc);
}
1476
/*
 * Error interrupt handler: acknowledge error events, update error
 * statistics and restart the halted transmitter/receiver as needed.
 */
static void
tsec_error_intr(void *arg)
{
	struct tsec_softc *sc = arg;
	struct ifnet *ifp;
	uint32_t eflags;

	ifp = sc->tsec_ifp;

	eflags = TSEC_READ(sc, TSEC_REG_IEVENT);

	if (ifp->if_flags & IFF_DEBUG)
		if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n", eflags);

	/*
	 * Clear events bits in hardware.
	 * NOTE(review): TSEC_IEVENT_BABR is tested below but is not part
	 * of this acknowledge mask — confirm whether babbling-receive
	 * events get cleared elsewhere or would re-trigger.
	 */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY |
	    TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT |
	    TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC |
	    TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN);

	if (eflags & TSEC_IEVENT_EBERR)
		if_printf(ifp, "System bus error occurred during"
		    " a DMA transaction (flags: 0x%x)\n", eflags);

	/* Check transmitter errors */
	if (eflags & TSEC_IEVENT_TXE) {
		ifp->if_oerrors++;

		if (eflags & TSEC_IEVENT_LC)
			ifp->if_collisions++;

		/* Restart the halted transmitter. */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
	if (eflags & TSEC_IEVENT_BABT)
		ifp->if_oerrors++;

	/* Check receiver errors */
	if (eflags & TSEC_IEVENT_BSY) {
		ifp->if_ierrors++;
		ifp->if_iqdrops++;

		/* Get data from RX buffers */
		tsec_receive_intr(arg);

		/* Make receiver again active */
		TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT);
	}
	if (eflags & TSEC_IEVENT_BABR)
		ifp->if_ierrors++;
}
1527
1528static void
1529tsec_tick(void *arg)
1530{
1531	struct tsec_softc *sc = arg;
1532	struct ifnet *ifp;
1533	int link;
1534
1535	TSEC_TRANSMIT_LOCK(sc);
1536
1537	ifp = sc->tsec_ifp;
1538	link = sc->tsec_link;
1539
1540	mii_tick(sc->tsec_mii);
1541
1542	if (link == 0 && sc->tsec_link == 1 && (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)))
1543		tsec_start_locked(ifp);
1544
1545	callout_reset(&sc->tsec_tick_ch, hz, tsec_tick, sc);
1546	TSEC_TRANSMIT_UNLOCK(sc);
1547}
1548
1549static int
1550tsec_miibus_readreg(device_t dev, int phy, int reg)
1551{
1552	struct tsec_softc *sc;
1553	uint32_t timeout;
1554
1555	sc = device_get_softc(dev);
1556
1557	if (device_get_unit(dev) != phy)
1558		return (0);
1559
1560	sc = tsec0_sc;
1561
1562	TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
1563	TSEC_WRITE(sc, TSEC_REG_MIIMCOM, 0);
1564	TSEC_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE);
1565
1566	timeout = TSEC_READ_RETRY;
1567	while (--timeout && TSEC_READ(sc, TSEC_REG_MIIMIND) &
1568	    (TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY))
1569		DELAY(TSEC_READ_DELAY);
1570
1571	if (timeout == 0)
1572		device_printf(dev, "Timeout while reading from PHY!\n");
1573
1574	return (TSEC_READ(sc, TSEC_REG_MIIMSTAT));
1575}
1576
1577static void
1578tsec_miibus_writereg(device_t dev, int phy, int reg, int value)
1579{
1580	struct tsec_softc *sc;
1581	uint32_t timeout;
1582
1583	sc = device_get_softc(dev);
1584
1585	if (device_get_unit(dev) != phy)
1586		device_printf(dev, "Trying to write to an alien PHY(%d)\n", phy);
1587
1588	sc = tsec0_sc;
1589
1590	TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg);
1591	TSEC_WRITE(sc, TSEC_REG_MIIMCON, value);
1592
1593	timeout = TSEC_READ_RETRY;
1594	while (--timeout && (TSEC_READ(sc, TSEC_REG_MIIMIND) & TSEC_MIIMIND_BUSY))
1595		DELAY(TSEC_READ_DELAY);
1596
1597	if (timeout == 0)
1598		device_printf(dev, "Timeout while writing to PHY!\n");
1599}
1600
/*
 * MII status-change callback: program the MAC's duplex and interface
 * speed to match the negotiated media, and track link state in
 * sc->tsec_link (read by tsec_tick()/tsec_start paths).
 */
static void
tsec_miibus_statchg(device_t dev)
{
	struct tsec_softc *sc;
	struct mii_data *mii;
	uint32_t ecntrl, id, tmp;
	int link;

	sc = device_get_softc(dev);
	mii = sc->tsec_mii;
	link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0);

	/* Start from MACCFG2 with the interface-mode bits cleared. */
	tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF;

	/* Duplex from the negotiated media word. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		tmp |= TSEC_MACCFG2_FULLDUPLEX;
	else
		tmp &= ~TSEC_MACCFG2_FULLDUPLEX;

	/* Interface mode (GMII vs MII) from the negotiated speed. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		tmp |= TSEC_MACCFG2_GMII;
		sc->tsec_link = link;
		break;
	case IFM_100_TX:
	case IFM_10_T:
		tmp |= TSEC_MACCFG2_MII;
		sc->tsec_link = link;
		break;
	case IFM_NONE:
		if (link)
			device_printf(dev, "No speed selected but link active!\n");
		sc->tsec_link = 0;
		return;
	default:
		/* Unknown media: mark link down, leave MACCFG2 untouched. */
		sc->tsec_link = 0;
		device_printf(dev, "Unknown speed (%d), link %s!\n",
		    IFM_SUBTYPE(mii->mii_media_active),
		    ((link) ? "up" : "down"));
		return;
	}
	TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp);

	/* XXX kludge - use circumstantial evidence for reduced mode. */
	id = TSEC_READ(sc, TSEC_REG_ID2);
	if (id & 0xffff) {
		/* Mirror the 100/10 Mbit selection into ECNTRL's R100M bit. */
		ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M;
		ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0;
		TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl);
	}
}
1653