/*-
 * Copyright (C) 2007
 *	Oleksandr Tymoshenko <gonzo@freebsd.org>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $Id: $
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * RC32434 Ethernet interface driver
 */
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(kr, ether, 1, 1, 1);
MODULE_DEPEND(kr, miibus, 1, 1, 1);

#include "miibus_if.h"

#include <mips/idt/if_krreg.h>

#define KR_DEBUG

static int kr_attach(device_t);
static int kr_detach(device_t);
static int kr_ifmedia_upd(struct ifnet *);
static void kr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int kr_ioctl(struct ifnet *, u_long, caddr_t);
static void kr_init(void *);
static void kr_init_locked(struct kr_softc *);
static void kr_link_task(void *, int);
static int kr_miibus_readreg(device_t, int, int);
static void kr_miibus_statchg(device_t);
static int kr_miibus_writereg(device_t, int, int, int);
static int kr_probe(device_t);
static void kr_reset(struct kr_softc *);
static int kr_resume(device_t);
static int kr_rx_ring_init(struct kr_softc *);
static int kr_tx_ring_init(struct kr_softc *);
static int kr_shutdown(device_t);
static void kr_start(struct ifnet *);
static void kr_start_locked(struct ifnet *);
static void kr_stop(struct kr_softc *);
static int kr_suspend(device_t);

static void kr_rx(struct kr_softc *);
static void kr_tx(struct kr_softc *);
static void kr_rx_intr(void *);
static void kr_tx_intr(void *);
static void kr_rx_und_intr(void *);
static void kr_tx_ovr_intr(void *);
static void kr_tick(void *);

static void kr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int kr_dma_alloc(struct kr_softc *);
static void kr_dma_free(struct kr_softc *);
static int kr_newbuf(struct kr_softc *, int);
static __inline void kr_fixup_rx(struct mbuf *);

static device_method_t kr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		kr_probe),
	DEVMETHOD(device_attach,	kr_attach),
	DEVMETHOD(device_detach,	kr_detach),
	DEVMETHOD(device_suspend,	kr_suspend),
	DEVMETHOD(device_resume,	kr_resume),
	DEVMETHOD(device_shutdown,	kr_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	kr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	kr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	kr_miibus_statchg),

	DEVMETHOD_END
};

static driver_t kr_driver = {
	"kr",
	kr_methods,
	sizeof(struct kr_softc)
};

static devclass_t kr_devclass;

DRIVER_MODULE(kr, obio, kr_driver, kr_devclass, 0, 0);
DRIVER_MODULE(miibus, kr, miibus_driver, miibus_devclass, 0, 0);

static int
kr_probe(device_t dev)
{

	device_set_desc(dev, "RC32434 Ethernet interface");
	return (0);
}

static int
kr_attach(device_t dev)
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	struct kr_softc		*sc;
	int			error = 0, rid;
	int			unit;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->kr_dev = dev;

	mtx_init(&sc->kr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->kr_stat_callout, &sc->kr_mtx, 0);
	TASK_INIT(&sc->kr_link_task, 0, kr_link_task, sc);
	pci_enable_busmaster(dev);

	/* Map control/status registers. */
	sc->kr_rid = 0;
	sc->kr_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->kr_rid,
	    RF_ACTIVE);

	if (sc->kr_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->kr_btag = rman_get_bustag(sc->kr_res);
	sc->kr_bhandle = rman_get_bushandle(sc->kr_res);

	/* Allocate interrupts */
	rid = 0;
	sc->kr_rx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_RX_IRQ,
	    KR_RX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_irq == NULL) {
		device_printf(dev, "couldn't map rx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_TX_IRQ,
	    KR_TX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_irq == NULL) {
		device_printf(dev, "couldn't map tx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_rx_und_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    KR_RX_UND_IRQ, KR_RX_UND_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_und_irq == NULL) {
		device_printf(dev, "couldn't map rx underrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_ovr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    KR_TX_OVR_IRQ, KR_TX_OVR_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_ovr_irq == NULL) {
		device_printf(dev, "couldn't map tx overrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->kr_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kr_ioctl;
	ifp->if_start = kr_start;
	ifp->if_init = kr_init;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, 9);
	ifp->if_snd.ifq_maxlen = 9;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capenable = ifp->if_capabilities;

	eaddr[0] = 0x00;
	eaddr[1] = 0x0C;
	eaddr[2] = 0x42;
	eaddr[3] = 0x09;
	eaddr[4] = 0x5E;
	eaddr[5] = 0x6B;

	if (kr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* TODO: calculate prescale */
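	/*
	 * Program the MDC clock prescaler.  The magic divisor below
	 * assumes a 165MHz system clock and a management clock target of
	 * roughly 1.25MHz; ETHMCP wants an even value, hence the "& ~1".
	 */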
	CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);

	CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
	DELAY(1000);
	CSR_WRITE_4(sc, KR_MIIMCFG, 0);

	/* Do MII setup. */
	error = mii_attach(dev, &sc->kr_miibus, ifp, kr_ifmedia_upd,
	    kr_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->kr_rx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_rx_intr, sc, &sc->kr_rx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_tx_intr, sc, &sc->kr_tx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_rx_und_irq,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_rx_und_intr, sc,
	    &sc->kr_rx_und_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx underrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_ovr_irq,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_tx_ovr_intr, sc,
	    &sc->kr_tx_ovr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx overrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		kr_detach(dev);

	return (error);
}

static int
kr_detach(device_t dev)
{
	struct kr_softc		*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->kr_ifp;

	KASSERT(mtx_initialized(&sc->kr_mtx), ("kr mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		KR_LOCK(sc);
		sc->kr_detach = 1;
		kr_stop(sc);
		KR_UNLOCK(sc);
		taskqueue_drain(taskqueue_swi, &sc->kr_link_task);
		ether_ifdetach(ifp);
	}
	if (sc->kr_miibus)
		device_delete_child(dev, sc->kr_miibus);
	bus_generic_detach(dev);

	if (sc->kr_rx_intrhand)
		bus_teardown_intr(dev, sc->kr_rx_irq, sc->kr_rx_intrhand);
	if (sc->kr_rx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_irq);
	if (sc->kr_tx_intrhand)
		bus_teardown_intr(dev, sc->kr_tx_irq, sc->kr_tx_intrhand);
	if (sc->kr_tx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_irq);
	if (sc->kr_rx_und_intrhand)
		bus_teardown_intr(dev, sc->kr_rx_und_irq,
		    sc->kr_rx_und_intrhand);
	if (sc->kr_rx_und_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_und_irq);
	if (sc->kr_tx_ovr_intrhand)
		bus_teardown_intr(dev, sc->kr_tx_ovr_irq,
		    sc->kr_tx_ovr_intrhand);
	if (sc->kr_tx_ovr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_ovr_irq);

	if (sc->kr_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->kr_rid,
		    sc->kr_res);

	if (ifp)
		if_free(ifp);

	kr_dma_free(sc);

	mtx_destroy(&sc->kr_mtx);

	return (0);
}

static int
kr_suspend(device_t dev)
{

	panic("%s", __func__);
	return (0);
}

static int
kr_resume(device_t dev)
{

	panic("%s", __func__);
	return (0);
}

static int
kr_shutdown(device_t dev)
{
	struct kr_softc	*sc;

	sc = device_get_softc(dev);

	KR_LOCK(sc);
	kr_stop(sc);
	KR_UNLOCK(sc);

	return (0);
}

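/*
 * MII management interface access.  Every step polls the MIIMIND busy
 * bit first, since the management block runs at MDC speed and completes
 * commands asynchronously with respect to CSR writes.
 */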
static int
kr_miibus_readreg(device_t dev, int phy, int reg)
{
	struct kr_softc *sc = device_get_softc(dev);
	int i, result;

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMADDR, (phy << 8) | reg);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMCMD, KR_MIIMCMD_RD);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii read timed out %d:%d\n", phy,
		    reg);

	if (CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_NV)
		printf("phy mii readreg failed %d:%d: data not valid\n",
		    phy, reg);

	result = CSR_READ_4(sc, KR_MIIMRDD);
	CSR_WRITE_4(sc, KR_MIIMCMD, 0);

	return (result);
}

static int
kr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct kr_softc *sc = device_get_softc(dev);
	int i;

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMADDR, (phy << 8) | reg);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMWTD, data);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	return (0);
}

static void
kr_miibus_statchg(device_t dev)
{
	struct kr_softc		*sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->kr_link_task);
}

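/*
 * Link state handling is deferred from kr_miibus_statchg() to a
 * taskqueue task so it can take KR_LOCK safely: the statchg callback
 * can fire from mii_mediachg(), which this driver already calls with
 * KR_LOCK held.
 */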
static void
kr_link_task(void *arg, int pending)
{
	struct kr_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	/* int			lfdx, mfdx; */

	sc = (struct kr_softc *)arg;

	KR_LOCK(sc);
	mii = device_get_softc(sc->kr_miibus);
	ifp = sc->kr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		KR_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->kr_link_status = 1;
	} else
		sc->kr_link_status = 0;

	KR_UNLOCK(sc);
}

static void
kr_reset(struct kr_softc *sc)
{
	int		i;

	CSR_WRITE_4(sc, KR_ETHINTFC, 0);

	for (i = 0; i < KR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, KR_ETHINTFC) & ETH_INTFC_RIP))
			break;
	}

	if (i == KR_TIMEOUT)
		device_printf(sc->kr_dev, "reset timed out\n");
}

static void
kr_init(void *xsc)
{
	struct kr_softc	*sc = xsc;

	KR_LOCK(sc);
	kr_init_locked(sc);
	KR_UNLOCK(sc);
}

static void
kr_init_locked(struct kr_softc *sc)
{
	struct ifnet		*ifp = sc->kr_ifp;
	struct mii_data		*mii;

	KR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->kr_miibus);

	kr_stop(sc);
	kr_reset(sc);

	CSR_WRITE_4(sc, KR_ETHINTFC, ETH_INTFC_EN);

	/* Init circular RX list. */
	if (kr_rx_ring_init(sc) != 0) {
		device_printf(sc->kr_dev,
		    "initialization failed: no memory for rx buffers\n");
		kr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	kr_tx_ring_init(sc);

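	/*
	 * Point the RX DMA channel at the ring and unmask its done/halt/
	 * error interrupts.  The TX channel is parked with a null
	 * descriptor pointer; kr_encap() hands it work later.
	 */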
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_NDPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR,
	    sc->kr_rdata.kr_rx_ring_paddr);

	KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_H | DMA_SM_E | DMA_SM_D);

	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_DPTR, 0);
	KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	/* Accept only packets destined for THIS Ethernet device address */
	CSR_WRITE_4(sc, KR_ETHARC, 1);

	/*
	 * Set all four Ethernet address registers to the same initial
	 * value, 00:0c:42:09:5e:6b, matching the hardcoded station
	 * address used at attach time.
	 */
	CSR_WRITE_4(sc, KR_ETHSAL0, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH0, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL1, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH1, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL2, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH2, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL3, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH3, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHMAC2,
	    KR_ETH_MAC2_PEN | KR_ETH_MAC2_CEN | KR_ETH_MAC2_FD);

	CSR_WRITE_4(sc, KR_ETHIPGT, KR_ETHIPGT_FULL_DUPLEX);
	CSR_WRITE_4(sc, KR_ETHIPGR, 0x12); /* minimum value */

	CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
	DELAY(1000);
	CSR_WRITE_4(sc, KR_MIIMCFG, 0);

	/* TODO: calculate prescale */
	CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);

	/* FIFO Tx threshold level */
	CSR_WRITE_4(sc, KR_ETHFIFOTT, 0x30);

	CSR_WRITE_4(sc, KR_ETHMAC1, KR_ETH_MAC1_RE);

	sc->kr_link_status = 0;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->kr_stat_callout, hz, kr_tick, sc);
}

static void
kr_start(struct ifnet *ifp)
{
	struct kr_softc	*sc;

	sc = ifp->if_softc;

	KR_LOCK(sc);
	kr_start_locked(ifp);
	KR_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
kr_encap(struct kr_softc *sc, struct mbuf **m_head)
{
	struct kr_txdesc	*txd;
	struct kr_desc		*desc, *prev_desc;
	bus_dma_segment_t	txsegs[KR_MAXFRAGS];
	uint32_t		link_addr;
	int			error, i, nsegs, prod, si, prev_prod;

	KR_LOCK_ASSERT(sc);

	prod = sc->kr_cdata.kr_tx_prod;
	txd = &sc->kr_cdata.kr_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		panic("EFBIG");
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->kr_cdata.kr_tx_cnt + nsegs >= (KR_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	si = prod;

	/*
	 * Make a list of descriptors for this packet.  The DMA controller
	 * will walk through it while kr_link is not zero.  The last one
	 * should have the COF flag set so that the next chain is picked
	 * up from NDPTR.
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->kr_rdata.kr_tx_ring[prod];
		desc->kr_ctl = KR_DMASIZE(txsegs[i].ds_len) | KR_CTL_IOF;
		if (i == 0)
			desc->kr_devcs = KR_DMATX_DEVCS_FD;
		desc->kr_ca = txsegs[i].ds_addr;
		desc->kr_link = 0;
		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->kr_link = KR_TX_RING_ADDR(sc, prod);

		sc->kr_cdata.kr_tx_cnt++;
		prev_desc = desc;
		KR_INC(prod, KR_TX_RING_CNT);
	}

	/*
	 * Set COF for last descriptor and mark last fragment with LD flag
	 */
	if (desc) {
		desc->kr_ctl |= KR_CTL_COF;
		desc->kr_devcs |= KR_DMATX_DEVCS_LD;
	}

	/* Update producer index. */
	sc->kr_cdata.kr_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Start transmitting.  If NDPTR is clear, no new list is queued,
	 * so hand ours to the engine there.  Otherwise link the tail of
	 * the previously queued chain to our first descriptor so the
	 * running chain continues into the new one.
	 */
	if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_NDPTR) == 0) {
		/* NDPTR is not busy - start new list */
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR,
		    KR_TX_RING_ADDR(sc, si));
	} else {
		link_addr = KR_TX_RING_ADDR(sc, si);
		/* Get previous descriptor */
		si = (si + KR_TX_RING_CNT - 1) % KR_TX_RING_CNT;
		desc = &sc->kr_rdata.kr_tx_ring[si];
		desc->kr_link = link_addr;
	}

	return (0);
}

static void
kr_start_locked(struct ifnet *ifp)
{
	struct kr_softc		*sc;
	struct mbuf		*m_head;
	int			enq;

	sc = ifp->if_softc;

	KR_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->kr_link_status == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->kr_cdata.kr_tx_cnt < KR_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (kr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}
}

static void
kr_stop(struct kr_softc *sc)
{
	struct ifnet	    *ifp;

	KR_LOCK_ASSERT(sc);

	ifp = sc->kr_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->kr_stat_callout);

	/* mask out RX interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	/* mask out TX interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	/* Abort RX DMA transactions */
	if (KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_C) & DMA_C_R) {
		/* Set ABORT bit if transaction is in progress */
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_C, DMA_C_ABORT);
		/* XXX: Add timeout */
		while ((KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S) & DMA_S_H) == 0)
			DELAY(10);
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, 0);
	}
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_NDPTR, 0);

	/* Abort TX DMA transactions */
	if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_C) & DMA_C_R) {
		/* Set ABORT bit if transaction is in progress */
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_C, DMA_C_ABORT);
		/* XXX: Add timeout */
		while ((KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_S) & DMA_S_H) == 0)
			DELAY(10);
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, 0);
	}
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_DPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 0);

	CSR_WRITE_4(sc, KR_ETHINTFC, 0);
}

static int
kr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct kr_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	struct mii_data		*mii;
	int			error;

	switch (command) {
	case SIOCSIFFLAGS:
#if 0
		KR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->kr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					kr_set_filter(sc);
			} else {
				if (sc->kr_detach == 0)
					kr_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				kr_stop(sc);
		}
		sc->kr_if_flags = ifp->if_flags;
		KR_UNLOCK(sc);
#endif
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#if 0
		KR_LOCK(sc);
		kr_set_filter(sc);
		KR_UNLOCK(sc);
#endif
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->kr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		error = 0;
#if 0
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) &&
			    (IFCAP_HWCSUM & ifp->if_capabilities))
				ifp->if_hwassist = KR_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (IFCAP_VLAN_HWTAGGING & ifp->if_capenable &&
			    IFCAP_VLAN_HWTAGGING & ifp->if_capabilities &&
			    ifp->if_drv_flags & IFF_DRV_RUNNING) {
				KR_LOCK(sc);
				kr_vlan_setup(sc);
				KR_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set media options.
 */
static int
kr_ifmedia_upd(struct ifnet *ifp)
{
	struct kr_softc		*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	KR_LOCK(sc);
	mii = device_get_softc(sc->kr_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	KR_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
kr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kr_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->kr_miibus);
	KR_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	KR_UNLOCK(sc);
}

struct kr_dmamap_arg {
	bus_addr_t	kr_busaddr;
};

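/* bus_dmamap_load() callback: record the address of the single segment. */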
static void
kr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct kr_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->kr_busaddr = segs[0].ds_addr;
}

static int
kr_dma_alloc(struct kr_softc *sc)
{
	struct kr_dmamap_arg	ctx;
	struct kr_txdesc	*txd;
	struct kr_rxdesc	*rxd;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->kr_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_parent_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    KR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    KR_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    KR_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    KR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    KR_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    KR_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    sizeof(uint32_t), 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * KR_MAXFRAGS,	/* maxsize */
	    KR_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_tx_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    KR_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_rx_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->kr_cdata.kr_tx_ring_tag,
	    (void **)&sc->kr_rdata.kr_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->kr_cdata.kr_tx_ring_map);
	if (error != 0) {
		device_printf(sc->kr_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.kr_busaddr = 0;
	error = bus_dmamap_load(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map, sc->kr_rdata.kr_tx_ring,
	    KR_TX_RING_SIZE, kr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.kr_busaddr == 0) {
		device_printf(sc->kr_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->kr_rdata.kr_tx_ring_paddr = ctx.kr_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->kr_cdata.kr_rx_ring_tag,
	    (void **)&sc->kr_rdata.kr_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->kr_cdata.kr_rx_ring_map);
	if (error != 0) {
		device_printf(sc->kr_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.kr_busaddr = 0;
	error = bus_dmamap_load(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map, sc->kr_rdata.kr_rx_ring,
	    KR_RX_RING_SIZE, kr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.kr_busaddr == 0) {
		device_printf(sc->kr_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->kr_rdata.kr_rx_ring_paddr = ctx.kr_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < KR_TX_RING_CNT; i++) {
		txd = &sc->kr_cdata.kr_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->kr_cdata.kr_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->kr_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->kr_cdata.kr_rx_tag, 0,
	    &sc->kr_cdata.kr_rx_sparemap)) != 0) {
		device_printf(sc->kr_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < KR_RX_RING_CNT; i++) {
		rxd = &sc->kr_cdata.kr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->kr_cdata.kr_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->kr_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
kr_dma_free(struct kr_softc *sc)
{
	struct kr_txdesc	*txd;
	struct kr_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->kr_cdata.kr_tx_ring_tag) {
		if (sc->kr_rdata.kr_tx_ring_paddr)
			bus_dmamap_unload(sc->kr_cdata.kr_tx_ring_tag,
			    sc->kr_cdata.kr_tx_ring_map);
		if (sc->kr_rdata.kr_tx_ring)
			bus_dmamem_free(sc->kr_cdata.kr_tx_ring_tag,
			    sc->kr_rdata.kr_tx_ring,
			    sc->kr_cdata.kr_tx_ring_map);
		sc->kr_rdata.kr_tx_ring = NULL;
		sc->kr_rdata.kr_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->kr_cdata.kr_tx_ring_tag);
		sc->kr_cdata.kr_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->kr_cdata.kr_rx_ring_tag) {
		if (sc->kr_rdata.kr_rx_ring_paddr)
			bus_dmamap_unload(sc->kr_cdata.kr_rx_ring_tag,
			    sc->kr_cdata.kr_rx_ring_map);
		if (sc->kr_rdata.kr_rx_ring)
			bus_dmamem_free(sc->kr_cdata.kr_rx_ring_tag,
			    sc->kr_rdata.kr_rx_ring,
			    sc->kr_cdata.kr_rx_ring_map);
		sc->kr_rdata.kr_rx_ring = NULL;
		sc->kr_rdata.kr_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->kr_cdata.kr_rx_ring_tag);
		sc->kr_cdata.kr_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->kr_cdata.kr_tx_tag) {
		for (i = 0; i < KR_TX_RING_CNT; i++) {
			txd = &sc->kr_cdata.kr_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->kr_cdata.kr_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->kr_cdata.kr_tx_tag);
		sc->kr_cdata.kr_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->kr_cdata.kr_rx_tag) {
		for (i = 0; i < KR_RX_RING_CNT; i++) {
			rxd = &sc->kr_cdata.kr_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->kr_cdata.kr_rx_sparemap) {
			bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
			    sc->kr_cdata.kr_rx_sparemap);
			sc->kr_cdata.kr_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->kr_cdata.kr_rx_tag);
		sc->kr_cdata.kr_rx_tag = NULL;
	}

	if (sc->kr_cdata.kr_parent_tag) {
		bus_dma_tag_destroy(sc->kr_cdata.kr_parent_tag);
		sc->kr_cdata.kr_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
kr_tx_ring_init(struct kr_softc *sc)
{
	struct kr_ring_data	*rd;
	struct kr_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	sc->kr_cdata.kr_tx_prod = 0;
	sc->kr_cdata.kr_tx_cons = 0;
	sc->kr_cdata.kr_tx_cnt = 0;
	sc->kr_cdata.kr_tx_pkts = 0;

	rd = &sc->kr_rdata;
	bzero(rd->kr_tx_ring, KR_TX_RING_SIZE);
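	/*
	 * Descriptors start out unlinked (kr_link is zero); kr_encap()
	 * chains them together when frames are queued.
	 */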
	for (i = 0; i < KR_TX_RING_CNT; i++) {
		if (i == KR_TX_RING_CNT - 1)
			addr = KR_TX_RING_ADDR(sc, 0);
		else
			addr = KR_TX_RING_ADDR(sc, i + 1);
		rd->kr_tx_ring[i].kr_ctl = KR_CTL_IOF;
		rd->kr_tx_ring[i].kr_ca = 0;
		rd->kr_tx_ring[i].kr_devcs = 0;
		rd->kr_tx_ring[i].kr_link = 0;
		txd = &sc->kr_cdata.kr_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
kr_rx_ring_init(struct kr_softc *sc)
{
	struct kr_ring_data	*rd;
	struct kr_rxdesc	*rxd;
	bus_addr_t		addr;
	int			i;

	sc->kr_cdata.kr_rx_cons = 0;

	rd = &sc->kr_rdata;
	bzero(rd->kr_rx_ring, KR_RX_RING_SIZE);
	for (i = 0; i < KR_RX_RING_CNT; i++) {
		rxd = &sc->kr_cdata.kr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->kr_rx_ring[i];
		if (i == KR_RX_RING_CNT - 1)
			addr = KR_RX_RING_ADDR(sc, 0);
		else
			addr = KR_RX_RING_ADDR(sc, i + 1);
		rd->kr_rx_ring[i].kr_ctl = KR_CTL_IOD;
		if (i == KR_RX_RING_CNT - 1)
			rd->kr_rx_ring[i].kr_ctl |= KR_CTL_COD;
		rd->kr_rx_ring[i].kr_devcs = 0;
		rd->kr_rx_ring[i].kr_ca = 0;
		rd->kr_rx_ring[i].kr_link = addr;
		if (kr_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
kr_newbuf(struct kr_softc *sc, int idx)
{
	struct kr_desc		*desc;
	struct kr_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
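	/*
	 * Reserve the head of the cluster so kr_fixup_rx() can later
	 * shift the payload back for alignment; the DMA engine is
	 * assumed to need a buffer aligned beyond 2 bytes, so we cannot
	 * simply start the buffer ETHER_ALIGN bytes in.
	 */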
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_rx_tag,
	    sc->kr_cdata.kr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->kr_cdata.kr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->kr_cdata.kr_rx_sparemap;
	sc->kr_cdata.kr_rx_sparemap = map;
	bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->kr_ca = segs[0].ds_addr;
	desc->kr_ctl |= KR_DMASIZE(segs[0].ds_len);
	rxd->saved_ca = desc->kr_ca;
	rxd->saved_ctl = desc->kr_ctl;

	return (0);
}

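/*
 * The RX DMA engine deposits frames at an 8-byte-aligned offset into the
 * cluster (see kr_newbuf()), which leaves the IP header misaligned.
 * Move the whole frame back by ETHER_ALIGN bytes, one 16-bit word at a
 * time, so the payload ends up properly aligned for the upper layers.
 */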
static __inline void
kr_fixup_rx(struct mbuf *m)
{
	int		i;
	uint16_t	*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}

static void
kr_tx(struct kr_softc *sc)
{
	struct kr_txdesc	*txd;
	struct kr_desc		*cur_tx;
	struct ifnet		*ifp;
	uint32_t		ctl, devcs;
	int			cons, prod;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_tx_cons;
	prod = sc->kr_cdata.kr_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->kr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; KR_INC(cons, KR_TX_RING_CNT)) {
		cur_tx = &sc->kr_rdata.kr_tx_ring[cons];
		ctl = cur_tx->kr_ctl;
		devcs = cur_tx->kr_devcs;
		/* Check if descriptor has "finished" flag */
		if ((ctl & KR_CTL_F) == 0)
			break;

		sc->kr_cdata.kr_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->kr_cdata.kr_txdesc[cons];

		if (devcs & KR_DMATX_DEVCS_TOK) {
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		} else {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			/* collisions: medium busy, late collision */
			if ((devcs & KR_DMATX_DEVCS_EC) ||
			    (devcs & KR_DMATX_DEVCS_LC))
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
		}

		bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);

		/* Free only if it's first descriptor in list */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->kr_ctl = KR_CTL_IOF;
		cur_tx->kr_devcs = 0;
		cur_tx->kr_ca = 0;
		cur_tx->kr_link = 0;
	}

	sc->kr_cdata.kr_tx_cons = cons;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

static void
kr_rx(struct kr_softc *sc)
{
	struct kr_rxdesc	*rxd;
	struct ifnet		*ifp = sc->kr_ifp;
	int			cons, prog, packet_len, count, error;
	struct kr_desc		*cur_rx;
	struct mbuf		*m;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_rx_cons;

	bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < KR_RX_RING_CNT; KR_INC(cons, KR_RX_RING_CNT)) {
		cur_rx = &sc->kr_rdata.kr_rx_ring[cons];
		rxd = &sc->kr_cdata.kr_rxdesc[cons];
		m = rxd->rx_m;

		if ((cur_rx->kr_ctl & KR_CTL_D) == 0)
			break;

		prog++;

		packet_len = KR_PKTSIZE(cur_rx->kr_devcs);
		count = m->m_len - KR_DMASIZE(cur_rx->kr_ctl);
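		/*
		 * The engine appears to rewrite the descriptor size field
		 * to the number of bytes left unused, so "count" is the
		 * number of bytes actually DMA'd.  It must match the
		 * length the MAC reported in devcs, and runts below the
		 * 64-byte Ethernet minimum are counted as errors.
		 */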
		/* Assume it's error */
		error = 1;

		if (packet_len != count)
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		else if (count < 64)
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_LD) == 0)
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_ROK) != 0) {
			error = 0;
			bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_PREREAD);
			m = rxd->rx_m;
			kr_fixup_rx(m);
			m->m_pkthdr.rcvif = ifp;
			/* Skip 4 bytes of CRC */
			m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			KR_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			KR_LOCK(sc);
		}

		if (error) {
			/* Restore CONTROL and CA values, reset DEVCS */
			cur_rx->kr_ctl = rxd->saved_ctl;
			cur_rx->kr_ca = rxd->saved_ca;
			cur_rx->kr_devcs = 0;
		} else {
			/* Reinit descriptor */
			cur_rx->kr_ctl = KR_CTL_IOD;
			if (cons == KR_RX_RING_CNT - 1)
				cur_rx->kr_ctl |= KR_CTL_COD;
			cur_rx->kr_devcs = 0;
			cur_rx->kr_ca = 0;
			if (kr_newbuf(sc, cons) != 0) {
				device_printf(sc->kr_dev,
				    "Failed to allocate buffer\n");
				break;
			}
		}

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	if (prog > 0) {
		sc->kr_cdata.kr_rx_cons = cons;

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

static void
kr_rx_intr(void *arg)
{
	struct kr_softc		*sc = arg;
	uint32_t		status;

	KR_LOCK(sc);

	/* mask out interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);
	if (status & (DMA_S_D | DMA_S_E | DMA_S_H)) {
		kr_rx(sc);

		if (status & DMA_S_E)
			device_printf(sc->kr_dev, "RX DMA error\n");
	}

	/* Reread status */
	status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);

	/* restart DMA RX if it has been halted */
	if (status & DMA_S_H) {
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR,
		    KR_RX_RING_ADDR(sc, sc->kr_cdata.kr_rx_cons));
	}

	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, ~status);

	/* Enable D, H, E interrupts */
	KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	KR_UNLOCK(sc);
}

static void
kr_tx_intr(void *arg)
{
	struct kr_softc		*sc = arg;
	uint32_t		status;

	KR_LOCK(sc);

	/* mask out interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	status = KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_S);
	if (status & (DMA_S_F | DMA_S_E)) {
		kr_tx(sc);
		if (status & DMA_S_E)
			device_printf(sc->kr_dev, "DMA error\n");
	}

	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, ~status);

	/* Enable F, E interrupts */
	KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	KR_UNLOCK(sc);
}

static void
kr_rx_und_intr(void *arg)
{

	panic("interrupt: %s\n", __func__);
}

static void
kr_tx_ovr_intr(void *arg)
{

	panic("interrupt: %s\n", __func__);
}

static void
kr_tick(void *xsc)
{
	struct kr_softc		*sc = xsc;
	struct mii_data		*mii;

	KR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->kr_miibus);
	mii_tick(mii);
	callout_reset(&sc->kr_stat_callout, hz, kr_tick, sc);
}