/*-
 * Copyright (C) 2007
 *	Oleksandr Tymoshenko <gonzo@freebsd.org>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $Id: $
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/idt/if_kr.c 182901 2008-09-10 03:49:08Z gonzo $");

/*
 * RC32434 Ethernet interface driver
 */
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(kr, ether, 1, 1, 1);
MODULE_DEPEND(kr, miibus, 1, 1, 1);

#include "miibus_if.h"

#include <mips/idt/if_krreg.h>

#define KR_DEBUG

static int	kr_attach(device_t);
static int	kr_detach(device_t);
static int	kr_ifmedia_upd(struct ifnet *);
static void	kr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int	kr_ioctl(struct ifnet *, u_long, caddr_t);
static void	kr_init(void *);
static void	kr_init_locked(struct kr_softc *);
static void	kr_link_task(void *, int);
static int	kr_miibus_readreg(device_t, int, int);
static void	kr_miibus_statchg(device_t);
static int	kr_miibus_writereg(device_t, int, int, int);
static int	kr_probe(device_t);
static void	kr_reset(struct kr_softc *);
static int	kr_resume(device_t);
static int	kr_rx_ring_init(struct kr_softc *);
static int	kr_tx_ring_init(struct kr_softc *);
static void	kr_shutdown(device_t);
static void	kr_start(struct ifnet *);
static void	kr_start_locked(struct ifnet *);
static void	kr_stop(struct kr_softc *);
static int	kr_suspend(device_t);

static void	kr_rx(struct kr_softc *);
static void	kr_tx(struct kr_softc *);
static void	kr_rx_intr(void *);
static void	kr_tx_intr(void *);
static void	kr_rx_und_intr(void *);
static void	kr_tx_ovr_intr(void *);
static void	kr_tick(void *);

static void	kr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	kr_dma_alloc(struct kr_softc *);
static void	kr_dma_free(struct kr_softc *);
static int	kr_newbuf(struct kr_softc *, int);
static __inline void kr_fixup_rx(struct mbuf *);

static device_method_t kr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		kr_probe),
	DEVMETHOD(device_attach,	kr_attach),
	DEVMETHOD(device_detach,	kr_detach),
	DEVMETHOD(device_suspend,	kr_suspend),
	DEVMETHOD(device_resume,	kr_resume),
	DEVMETHOD(device_shutdown,	kr_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	kr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	kr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	kr_miibus_statchg),

	{ 0, 0 }
};

static driver_t kr_driver = {
	"kr",
	kr_methods,
	sizeof(struct kr_softc)
};

static devclass_t kr_devclass;

DRIVER_MODULE(kr, obio, kr_driver, kr_devclass, 0, 0);
DRIVER_MODULE(kr, cardbus, kr_driver, kr_devclass, 0, 0);
DRIVER_MODULE(miibus, kr, miibus_driver, miibus_devclass, 0, 0);

static int
kr_probe(device_t dev)
{

	device_set_desc(dev, "RC32434 Ethernet interface");
	return (0);
}

static int
kr_attach(device_t dev)
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	struct kr_softc		*sc;
	int			error = 0, rid;
	int			unit;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->kr_dev = dev;

	mtx_init(&sc->kr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->kr_stat_callout, &sc->kr_mtx, 0);
	TASK_INIT(&sc->kr_link_task, 0, kr_link_task, sc);
	pci_enable_busmaster(dev);

	/* Map control/status registers. */
	sc->kr_rid = 0;
	sc->kr_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->kr_rid,
	    RF_ACTIVE);

	if (sc->kr_res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->kr_btag = rman_get_bustag(sc->kr_res);
	sc->kr_bhandle = rman_get_bushandle(sc->kr_res);

	/* Allocate interrupts */
	rid = 0;
	sc->kr_rx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_RX_IRQ,
	    KR_RX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_irq == NULL) {
		device_printf(dev, "couldn't map rx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_TX_IRQ,
	    KR_TX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_irq == NULL) {
		device_printf(dev, "couldn't map tx interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_rx_und_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    KR_RX_UND_IRQ, KR_RX_UND_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_rx_und_irq == NULL) {
		device_printf(dev, "couldn't map rx underrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	rid = 0;
	sc->kr_tx_ovr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    KR_TX_OVR_IRQ, KR_TX_OVR_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);

	if (sc->kr_tx_ovr_irq == NULL) {
		device_printf(dev, "couldn't map tx overrun interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->kr_ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kr_ioctl;
	ifp->if_start = kr_start;
	ifp->if_init = kr_init;

	/* XXX: add real size */
	IFQ_SET_MAXLEN(&ifp->if_snd, 9);
	ifp->if_snd.ifq_maxlen = 9;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capenable = ifp->if_capabilities;

	eaddr[0] = 0x00;
	eaddr[1] = 0x0C;
	eaddr[2] = 0x42;
	eaddr[3] = 0x09;
	eaddr[4] = 0x5E;
	eaddr[5] = 0x6B;

	if (kr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

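	/*
	 * The ETHMCP divisor below appears to assume a 165MHz system clock
	 * and a target MII management clock of ~1.25MHz; the register wants
	 * an even value, hence the final "& ~1".
	 */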
	/* TODO: calculate prescale */
	CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);

	CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
	DELAY(1000);
	CSR_WRITE_4(sc, KR_MIIMCFG, 0);

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->kr_miibus,
	    kr_ifmedia_upd, kr_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->kr_rx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_rx_intr, sc, &sc->kr_rx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, kr_tx_intr, sc, &sc->kr_tx_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_rx_und_irq,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_rx_und_intr, sc,
	    &sc->kr_rx_und_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up rx underrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	error = bus_setup_intr(dev, sc->kr_tx_ovr_irq,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_tx_ovr_intr, sc,
	    &sc->kr_tx_ovr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up tx overrun irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		kr_detach(dev);

	return (error);
}

static int
kr_detach(device_t dev)
{
	struct kr_softc		*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->kr_ifp;

	KASSERT(mtx_initialized(&sc->kr_mtx), ("kr mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		KR_LOCK(sc);
		sc->kr_detach = 1;
		kr_stop(sc);
		KR_UNLOCK(sc);
		taskqueue_drain(taskqueue_swi, &sc->kr_link_task);
		ether_ifdetach(ifp);
	}
	if (sc->kr_miibus)
		device_delete_child(dev, sc->kr_miibus);
	bus_generic_detach(dev);

	if (sc->kr_rx_intrhand)
		bus_teardown_intr(dev, sc->kr_rx_irq, sc->kr_rx_intrhand);
	if (sc->kr_rx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_irq);
	if (sc->kr_tx_intrhand)
		bus_teardown_intr(dev, sc->kr_tx_irq, sc->kr_tx_intrhand);
	if (sc->kr_tx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_irq);
	if (sc->kr_rx_und_intrhand)
		bus_teardown_intr(dev, sc->kr_rx_und_irq,
		    sc->kr_rx_und_intrhand);
	if (sc->kr_rx_und_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_und_irq);
	if (sc->kr_tx_ovr_intrhand)
		bus_teardown_intr(dev, sc->kr_tx_ovr_irq,
		    sc->kr_tx_ovr_intrhand);
	if (sc->kr_tx_ovr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_ovr_irq);

	if (sc->kr_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->kr_rid,
		    sc->kr_res);

	if (ifp)
		if_free(ifp);

	kr_dma_free(sc);

	mtx_destroy(&sc->kr_mtx);

	return (0);
}

static int
kr_suspend(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

static int
kr_resume(device_t dev)
{

	panic("%s", __func__);
	return 0;
}

static void
kr_shutdown(device_t dev)
{
	struct kr_softc	*sc;

	sc = device_get_softc(dev);

	KR_LOCK(sc);
	kr_stop(sc);
	KR_UNLOCK(sc);
}

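/*
 * MII management access: each step of the read/write sequences below polls
 * the BSY bit in MIIMIND for up to KR_MII_TIMEOUT iterations before
 * touching the next register.
 */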
static int
kr_miibus_readreg(device_t dev, int phy, int reg)
{
	struct kr_softc		*sc = device_get_softc(dev);
	int			i, result;

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMADDR, (phy << 8) | reg);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMCMD, KR_MIIMCMD_RD);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii read timed out %d:%d\n", phy,
		    reg);

	if (CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_NV)
		printf("phy mii readreg failed %d:%d: data not valid\n",
		    phy, reg);

	result = CSR_READ_4(sc, KR_MIIMRDD);
	CSR_WRITE_4(sc, KR_MIIMCMD, 0);

	return (result);
}

static int
kr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct kr_softc		*sc = device_get_softc(dev);
	int			i;

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMADDR, (phy << 8) | reg);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	CSR_WRITE_4(sc, KR_MIIMWTD, data);

	i = KR_MII_TIMEOUT;
	while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
		i--;

	if (i == 0)
		device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);

	return (0);
}

static void
kr_miibus_statchg(device_t dev)
{
	struct kr_softc		*sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->kr_link_task);
}

static void
kr_link_task(void *arg, int pending)
{
	struct kr_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	/* int			lfdx, mfdx; */

	sc = (struct kr_softc *)arg;

	KR_LOCK(sc);
	mii = device_get_softc(sc->kr_miibus);
	ifp = sc->kr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		KR_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->kr_link_status = 1;
	} else
		sc->kr_link_status = 0;

	KR_UNLOCK(sc);
}

static void
kr_reset(struct kr_softc *sc)
{
	int		i;

	CSR_WRITE_4(sc, KR_ETHINTFC, 0);

	for (i = 0; i < KR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, KR_ETHINTFC) & ETH_INTFC_RIP))
			break;
	}

	if (i == KR_TIMEOUT)
		device_printf(sc->kr_dev, "reset timed out\n");
}

static void
kr_init(void *xsc)
{
	struct kr_softc		*sc = xsc;

	KR_LOCK(sc);
	kr_init_locked(sc);
	KR_UNLOCK(sc);
}

static void
kr_init_locked(struct kr_softc *sc)
{
	struct ifnet		*ifp = sc->kr_ifp;
	struct mii_data		*mii;

	KR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->kr_miibus);

	kr_stop(sc);
	kr_reset(sc);

	CSR_WRITE_4(sc, KR_ETHINTFC, ETH_INTFC_EN);

	/* Init circular RX list. */
	if (kr_rx_ring_init(sc) != 0) {
		device_printf(sc->kr_dev,
		    "initialization failed: no memory for rx buffers\n");
		kr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	kr_tx_ring_init(sc);

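	/*
	 * Point the RX DMA channel at the ring and unmask its done/halt/
	 * error interrupts.  The TX channel is left idle; kr_encap() hands
	 * it descriptor chains through NDPTR as packets are queued.
	 */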
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_NDPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR,
	    sc->kr_rdata.kr_rx_ring_paddr);

	KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_H | DMA_SM_E | DMA_SM_D);

	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_DPTR, 0);
	KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	/* Accept only packets destined for THIS Ethernet device address */
	CSR_WRITE_4(sc, KR_ETHARC, 1);

	/*
	 * Set all Ethernet address registers to the same initial values:
	 * all four station addresses are set to 00:0c:42:09:5e:6b.
	 */
	CSR_WRITE_4(sc, KR_ETHSAL0, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH0, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL1, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH1, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL2, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH2, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHSAL3, 0x42095E6B);
	CSR_WRITE_4(sc, KR_ETHSAH3, 0x0000000C);

	CSR_WRITE_4(sc, KR_ETHMAC2,
	    KR_ETH_MAC2_PEN | KR_ETH_MAC2_CEN | KR_ETH_MAC2_FD);

	CSR_WRITE_4(sc, KR_ETHIPGT, KR_ETHIPGT_FULL_DUPLEX);
	CSR_WRITE_4(sc, KR_ETHIPGR, 0x12); /* minimum value */

	CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
	DELAY(1000);
	CSR_WRITE_4(sc, KR_MIIMCFG, 0);

	/* TODO: calculate prescale */
	CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);

	/* FIFO Tx threshold level */
	CSR_WRITE_4(sc, KR_ETHFIFOTT, 0x30);

	CSR_WRITE_4(sc, KR_ETHMAC1, KR_ETH_MAC1_RE);

	sc->kr_link_status = 0;
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->kr_stat_callout, hz, kr_tick, sc);
}

static void
kr_start(struct ifnet *ifp)
{
	struct kr_softc		*sc;

	sc = ifp->if_softc;

	KR_LOCK(sc);
	kr_start_locked(ifp);
	KR_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
kr_encap(struct kr_softc *sc, struct mbuf **m_head)
{
	struct kr_txdesc	*txd;
	struct kr_desc		*desc, *prev_desc;
	bus_dma_segment_t	txsegs[KR_MAXFRAGS];
	uint32_t		link_addr;
	int			error, i, nsegs, prod, si, prev_prod;

	KR_LOCK_ASSERT(sc);

	prod = sc->kr_cdata.kr_tx_prod;
	txd = &sc->kr_cdata.kr_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
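		/*
		 * XXX: a chain with more than KR_MAXFRAGS segments should
		 * arguably be m_defrag()'d and the load retried instead of
		 * panicking.
		 */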
		panic("EFBIG");
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->kr_cdata.kr_tx_cnt + nsegs >= (KR_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	si = prod;

	/*
	 * Make a list of descriptors for this packet. DMA controller will
	 * walk through it while kr_link is not zero. The last one should
	 * have COF flag set, to pickup next chain from NDPTR
	 */
	prev_prod = prod;
	desc = prev_desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->kr_rdata.kr_tx_ring[prod];
		desc->kr_ctl = KR_DMASIZE(txsegs[i].ds_len) | KR_CTL_IOF;
		if (i == 0)
			desc->kr_devcs = KR_DMATX_DEVCS_FD;
		desc->kr_ca = txsegs[i].ds_addr;
		desc->kr_link = 0;
		/* link with previous descriptor */
		if (prev_desc)
			prev_desc->kr_link = KR_TX_RING_ADDR(sc, prod);

		sc->kr_cdata.kr_tx_cnt++;
		prev_desc = desc;
		KR_INC(prod, KR_TX_RING_CNT);
	}

	/*
	 * Set COF for last descriptor and mark last fragment with LD flag
	 */
	if (desc) {
		desc->kr_ctl |= KR_CTL_COF;
		desc->kr_devcs |= KR_DMATX_DEVCS_LD;
	}

	/* Update producer index. */
	sc->kr_cdata.kr_tx_prod = prod;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start transmitting */
	/* Check if new list is queued in NDPTR */
	if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_NDPTR) == 0) {
		/* NDPTR is not busy - start new list */
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR,
		    KR_TX_RING_ADDR(sc, si));
	} else {
		link_addr = KR_TX_RING_ADDR(sc, si);
		/* Get previous descriptor */
		si = (si + KR_TX_RING_CNT - 1) % KR_TX_RING_CNT;
		desc = &sc->kr_rdata.kr_tx_ring[si];
		desc->kr_link = link_addr;
	}

	return (0);
}

static void
kr_start_locked(struct ifnet *ifp)
{
	struct kr_softc		*sc;
	struct mbuf		*m_head;
	int			enq;

	sc = ifp->if_softc;

	KR_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->kr_link_status == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->kr_cdata.kr_tx_cnt < KR_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (kr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}
}

static void
kr_stop(struct kr_softc *sc)
{
	struct ifnet		*ifp;

	KR_LOCK_ASSERT(sc);

	ifp = sc->kr_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->kr_stat_callout);

	/* mask out RX interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	/* mask out TX interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	/* Abort RX DMA transactions */
	if (KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_C) & DMA_C_R) {
		/* Set ABORT bit if transaction is in progress */
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_C, DMA_C_ABORT);
		/* XXX: Add timeout */
		while ((KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S) & DMA_S_H) == 0)
			DELAY(10);
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, 0);
	}
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_NDPTR, 0);

	/* Abort TX DMA transactions */
	if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_C) & DMA_C_R) {
		/* Set ABORT bit if transaction is in progress */
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_C, DMA_C_ABORT);
		/* XXX: Add timeout */
		while ((KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_S) & DMA_S_H) == 0)
			DELAY(10);
		KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, 0);
	}
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_DPTR, 0);
	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 0);

	CSR_WRITE_4(sc, KR_ETHINTFC, 0);
}

static int
kr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct kr_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error;

	switch (command) {
	case SIOCSIFFLAGS:
#if 0
		KR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->kr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					kr_set_filter(sc);
			} else {
				if (sc->kr_detach == 0)
					kr_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				kr_stop(sc);
		}
		sc->kr_if_flags = ifp->if_flags;
		KR_UNLOCK(sc);
#endif
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#if 0
		KR_LOCK(sc);
		kr_set_filter(sc);
		KR_UNLOCK(sc);
#endif
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->kr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		error = 0;
#if 0
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) &&
			    (IFCAP_HWCSUM & ifp->if_capabilities))
				ifp->if_hwassist = KR_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (IFCAP_VLAN_HWTAGGING & ifp->if_capenable &&
			    IFCAP_VLAN_HWTAGGING & ifp->if_capabilities &&
			    ifp->if_drv_flags & IFF_DRV_RUNNING) {
				KR_LOCK(sc);
				kr_vlan_setup(sc);
				KR_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
#endif
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * Set media options.
 */
static int
kr_ifmedia_upd(struct ifnet *ifp)
{
	struct kr_softc		*sc;
	struct mii_data		*mii;
	struct mii_softc	*miisc;
	int			error;

	sc = ifp->if_softc;
	KR_LOCK(sc);
	mii = device_get_softc(sc->kr_miibus);
	if (mii->mii_instance) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	KR_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
kr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct kr_softc		*sc = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc->kr_miibus);
	KR_LOCK(sc);
	mii_pollstat(mii);
	KR_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
struct kr_dmamap_arg {
	bus_addr_t	kr_busaddr;
};

static void
kr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct kr_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->kr_busaddr = segs[0].ds_addr;
}

static int
kr_dma_alloc(struct kr_softc *sc)
{
	struct kr_dmamap_arg	ctx;
	struct kr_txdesc	*txd;
	struct kr_rxdesc	*rxd;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->kr_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_parent_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    KR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    KR_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    KR_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    KR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    KR_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    KR_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    sizeof(uint32_t), 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * KR_MAXFRAGS,	/* maxsize */
	    KR_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_tx_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->kr_cdata.kr_parent_tag,	/* parent */
	    KR_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->kr_cdata.kr_rx_tag);
	if (error != 0) {
		device_printf(sc->kr_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->kr_cdata.kr_tx_ring_tag,
	    (void **)&sc->kr_rdata.kr_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->kr_cdata.kr_tx_ring_map);
	if (error != 0) {
		device_printf(sc->kr_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.kr_busaddr = 0;
	error = bus_dmamap_load(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map, sc->kr_rdata.kr_tx_ring,
	    KR_TX_RING_SIZE, kr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.kr_busaddr == 0) {
		device_printf(sc->kr_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->kr_rdata.kr_tx_ring_paddr = ctx.kr_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->kr_cdata.kr_rx_ring_tag,
	    (void **)&sc->kr_rdata.kr_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->kr_cdata.kr_rx_ring_map);
	if (error != 0) {
		device_printf(sc->kr_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.kr_busaddr = 0;
	error = bus_dmamap_load(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map, sc->kr_rdata.kr_rx_ring,
	    KR_RX_RING_SIZE, kr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.kr_busaddr == 0) {
		device_printf(sc->kr_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->kr_rdata.kr_rx_ring_paddr = ctx.kr_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < KR_TX_RING_CNT; i++) {
		txd = &sc->kr_cdata.kr_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->kr_cdata.kr_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->kr_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->kr_cdata.kr_rx_tag, 0,
	    &sc->kr_cdata.kr_rx_sparemap)) != 0) {
		device_printf(sc->kr_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < KR_RX_RING_CNT; i++) {
		rxd = &sc->kr_cdata.kr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->kr_cdata.kr_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->kr_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
kr_dma_free(struct kr_softc *sc)
{
	struct kr_txdesc	*txd;
	struct kr_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->kr_cdata.kr_tx_ring_tag) {
		if (sc->kr_cdata.kr_tx_ring_map)
			bus_dmamap_unload(sc->kr_cdata.kr_tx_ring_tag,
			    sc->kr_cdata.kr_tx_ring_map);
		if (sc->kr_cdata.kr_tx_ring_map &&
		    sc->kr_rdata.kr_tx_ring)
			bus_dmamem_free(sc->kr_cdata.kr_tx_ring_tag,
			    sc->kr_rdata.kr_tx_ring,
			    sc->kr_cdata.kr_tx_ring_map);
		sc->kr_rdata.kr_tx_ring = NULL;
		sc->kr_cdata.kr_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->kr_cdata.kr_tx_ring_tag);
		sc->kr_cdata.kr_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->kr_cdata.kr_rx_ring_tag) {
		if (sc->kr_cdata.kr_rx_ring_map)
			bus_dmamap_unload(sc->kr_cdata.kr_rx_ring_tag,
			    sc->kr_cdata.kr_rx_ring_map);
		if (sc->kr_cdata.kr_rx_ring_map &&
		    sc->kr_rdata.kr_rx_ring)
			bus_dmamem_free(sc->kr_cdata.kr_rx_ring_tag,
			    sc->kr_rdata.kr_rx_ring,
			    sc->kr_cdata.kr_rx_ring_map);
		sc->kr_rdata.kr_rx_ring = NULL;
		sc->kr_cdata.kr_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->kr_cdata.kr_rx_ring_tag);
		sc->kr_cdata.kr_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->kr_cdata.kr_tx_tag) {
		for (i = 0; i < KR_TX_RING_CNT; i++) {
			txd = &sc->kr_cdata.kr_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->kr_cdata.kr_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->kr_cdata.kr_tx_tag);
		sc->kr_cdata.kr_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->kr_cdata.kr_rx_tag) {
		for (i = 0; i < KR_RX_RING_CNT; i++) {
			rxd = &sc->kr_cdata.kr_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->kr_cdata.kr_rx_sparemap) {
			bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
			    sc->kr_cdata.kr_rx_sparemap);
			sc->kr_cdata.kr_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->kr_cdata.kr_rx_tag);
		sc->kr_cdata.kr_rx_tag = NULL;
	}

	if (sc->kr_cdata.kr_parent_tag) {
		bus_dma_tag_destroy(sc->kr_cdata.kr_parent_tag);
		sc->kr_cdata.kr_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
kr_tx_ring_init(struct kr_softc *sc)
{
	struct kr_ring_data	*rd;
	struct kr_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	sc->kr_cdata.kr_tx_prod = 0;
	sc->kr_cdata.kr_tx_cons = 0;
	sc->kr_cdata.kr_tx_cnt = 0;
	sc->kr_cdata.kr_tx_pkts = 0;

	rd = &sc->kr_rdata;
	bzero(rd->kr_tx_ring, KR_TX_RING_SIZE);
	for (i = 0; i < KR_TX_RING_CNT; i++) {
		if (i == KR_TX_RING_CNT - 1)
			addr = KR_TX_RING_ADDR(sc, 0);
		else
			addr = KR_TX_RING_ADDR(sc, i + 1);
		rd->kr_tx_ring[i].kr_ctl = KR_CTL_IOF;
		rd->kr_tx_ring[i].kr_ca = 0;
		rd->kr_tx_ring[i].kr_devcs = 0;
		rd->kr_tx_ring[i].kr_link = 0;
		txd = &sc->kr_cdata.kr_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
kr_rx_ring_init(struct kr_softc *sc)
{
	struct kr_ring_data	*rd;
	struct kr_rxdesc	*rxd;
	bus_addr_t		addr;
	int			i;

	sc->kr_cdata.kr_rx_cons = 0;

	rd = &sc->kr_rdata;
	bzero(rd->kr_rx_ring, KR_RX_RING_SIZE);
	for (i = 0; i < KR_RX_RING_CNT; i++) {
		rxd = &sc->kr_cdata.kr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->kr_rx_ring[i];
		if (i == KR_RX_RING_CNT - 1)
			addr = KR_RX_RING_ADDR(sc, 0);
		else
			addr = KR_RX_RING_ADDR(sc, i + 1);
		rd->kr_rx_ring[i].kr_ctl = KR_CTL_IOD;
		if (i == KR_RX_RING_CNT - 1)
			rd->kr_rx_ring[i].kr_ctl |= KR_CTL_COD;
		rd->kr_rx_ring[i].kr_devcs = 0;
		rd->kr_rx_ring[i].kr_ca = 0;
		rd->kr_rx_ring[i].kr_link = addr;
		if (kr_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
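/*
 * A spare DMA map is loaded first and only swapped in on success, so a
 * failed load leaves the old mbuf and its mapping intact and the ring
 * keeps working.
 */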
static int
kr_newbuf(struct kr_softc *sc, int idx)
{
	struct kr_desc		*desc;
	struct kr_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_rx_tag,
	    sc->kr_cdata.kr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->kr_cdata.kr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->kr_cdata.kr_rx_sparemap;
	sc->kr_cdata.kr_rx_sparemap = map;
	bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->kr_ca = segs[0].ds_addr;
	desc->kr_ctl |= KR_DMASIZE(segs[0].ds_len);
	rxd->saved_ca = desc->kr_ca;
	rxd->saved_ctl = desc->kr_ctl;

	return (0);
}

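/*
 * The RX buffers are loaded at a DMA-friendly alignment, which leaves the
 * IP header behind the 14-byte Ethernet header misaligned.  Shift the whole
 * frame down by ETHER_ALIGN (two bytes) so the upper layers see an aligned
 * IP header; the loop copies one extra 16-bit word to cover odd lengths.
 */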
static __inline void
kr_fixup_rx(struct mbuf *m)
{
	int		i;
	uint16_t	*src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}

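/*
 * Reclaim transmitted descriptors: walk the ring from the consumer index,
 * stop at the first descriptor the DMA engine has not marked finished
 * (KR_CTL_F), free the associated mbufs and update the error and collision
 * counters from the saved DEVCS word.
 */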
static void
kr_tx(struct kr_softc *sc)
{
	struct kr_txdesc	*txd;
	struct kr_desc		*cur_tx;
	struct ifnet		*ifp;
	uint32_t		ctl, devcs;
	int			cons, prod;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_tx_cons;
	prod = sc->kr_cdata.kr_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->kr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; KR_INC(cons, KR_TX_RING_CNT)) {
		cur_tx = &sc->kr_rdata.kr_tx_ring[cons];
		ctl = cur_tx->kr_ctl;
		devcs = cur_tx->kr_devcs;
		/* Check if descriptor has "finished" flag */
		if ((ctl & KR_CTL_F) == 0)
			break;

		sc->kr_cdata.kr_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		txd = &sc->kr_cdata.kr_txdesc[cons];

		if (devcs & KR_DMATX_DEVCS_TOK)
			ifp->if_opackets++;
		else {
			ifp->if_oerrors++;
			/* collisions: medium busy, late collision */
			if ((devcs & KR_DMATX_DEVCS_EC) ||
			    (devcs & KR_DMATX_DEVCS_LC))
				ifp->if_collisions++;
		}

		bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);

		/* Free only if it's the first descriptor in the list */
		if (txd->tx_m)
			m_freem(txd->tx_m);
		txd->tx_m = NULL;

		/* reset descriptor */
		cur_tx->kr_ctl = KR_CTL_IOF;
		cur_tx->kr_devcs = 0;
		cur_tx->kr_ca = 0;
		cur_tx->kr_link = 0;
	}

	sc->kr_cdata.kr_tx_cons = cons;

	bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
	    sc->kr_cdata.kr_tx_ring_map, BUS_DMASYNC_PREWRITE);
}

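/*
 * RX ring walk: a descriptor whose KR_CTL_D bit is set has been filled by
 * the DMA engine.  Good frames are realigned and passed to if_input() with
 * the driver lock dropped; on any error the saved CONTROL/CA values are
 * restored so the descriptor is reused with its existing mbuf.
 */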
static void
kr_rx(struct kr_softc *sc)
{
	struct kr_rxdesc	*rxd;
	struct ifnet		*ifp = sc->kr_ifp;
	int			cons, prog, packet_len, count, error;
	struct kr_desc		*cur_rx;
	struct mbuf		*m;

	KR_LOCK_ASSERT(sc);

	cons = sc->kr_cdata.kr_rx_cons;

	bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
	    sc->kr_cdata.kr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < KR_RX_RING_CNT; KR_INC(cons, KR_RX_RING_CNT)) {
		cur_rx = &sc->kr_rdata.kr_rx_ring[cons];
		rxd = &sc->kr_cdata.kr_rxdesc[cons];
		m = rxd->rx_m;

		if ((cur_rx->kr_ctl & KR_CTL_D) == 0)
			break;

		prog++;

		packet_len = KR_PKTSIZE(cur_rx->kr_devcs);
		count = m->m_len - KR_DMASIZE(cur_rx->kr_ctl);
		/* Assume it's an error */
		error = 1;

		if (packet_len != count)
			ifp->if_ierrors++;
		else if (count < 64)
			ifp->if_ierrors++;
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_LD) == 0)
			ifp->if_ierrors++;
		else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_ROK) != 0) {
			error = 0;
			bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
			    BUS_DMASYNC_PREREAD);
			m = rxd->rx_m;
			kr_fixup_rx(m);
			m->m_pkthdr.rcvif = ifp;
			/* Skip 4 bytes of CRC */
			m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
			ifp->if_ipackets++;

			KR_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			KR_LOCK(sc);
		}

		if (error) {
			/* Restore CONTROL and CA values, reset DEVCS */
			cur_rx->kr_ctl = rxd->saved_ctl;
			cur_rx->kr_ca = rxd->saved_ca;
			cur_rx->kr_devcs = 0;
		} else {
			/* Reinit descriptor */
			cur_rx->kr_ctl = KR_CTL_IOD;
			if (cons == KR_RX_RING_CNT - 1)
				cur_rx->kr_ctl |= KR_CTL_COD;
			cur_rx->kr_devcs = 0;
			cur_rx->kr_ca = 0;
			if (kr_newbuf(sc, cons) != 0) {
				device_printf(sc->kr_dev,
				    "Failed to allocate buffer\n");
				break;
			}
		}

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}

	if (prog > 0) {
		sc->kr_cdata.kr_rx_cons = cons;

		bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
		    sc->kr_cdata.kr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

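/*
 * RX interrupt: interrupts are masked while the ring is drained, the
 * channel is restarted from the current consumer index if it halted, and
 * handled status bits are cleared by writing ~status back to DMA_S before
 * unmasking again.
 */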
static void
kr_rx_intr(void *arg)
{
	struct kr_softc		*sc = arg;
	uint32_t		status;

	KR_LOCK(sc);

	/* mask out interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);
	if (status & (DMA_S_D | DMA_S_E | DMA_S_H)) {
		kr_rx(sc);

		if (status & DMA_S_E)
			device_printf(sc->kr_dev, "RX DMA error\n");
	}

	/* Reread status */
	status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);

	/* restart DMA RX if it has been halted */
	if (status & DMA_S_H) {
		KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR,
		    KR_RX_RING_ADDR(sc, sc->kr_cdata.kr_rx_cons));
	}

	KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, ~status);

	/* Re-enable D, H, E interrupts */
	KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM,
	    DMA_SM_D | DMA_SM_H | DMA_SM_E);

	KR_UNLOCK(sc);
}

static void
kr_tx_intr(void *arg)
{
	struct kr_softc		*sc = arg;
	uint32_t		status;

	KR_LOCK(sc);

	/* mask out interrupts */
	KR_DMA_SETBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	status = KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_S);
	if (status & (DMA_S_F | DMA_S_E)) {
		kr_tx(sc);
		if (status & DMA_S_E)
			device_printf(sc->kr_dev, "TX DMA error\n");
	}

	KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, ~status);

	/* Re-enable F, E interrupts */
	KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM,
	    DMA_SM_F | DMA_SM_E);

	KR_UNLOCK(sc);
}

static void
kr_rx_und_intr(void *arg)
{

	panic("interrupt: %s\n", __func__);
}

static void
kr_tx_ovr_intr(void *arg)
{

	panic("interrupt: %s\n", __func__);
}

static void
kr_tick(void *xsc)
{
	struct kr_softc		*sc = xsc;
	struct mii_data		*mii;

	KR_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->kr_miibus);
	mii_tick(mii);
	callout_reset(&sc->kr_stat_callout, hz, kr_tick, sc);
}
73
74#define KR_DEBUG
75
76static int kr_attach(device_t);
77static int kr_detach(device_t);
78static int kr_ifmedia_upd(struct ifnet *);
79static void kr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
80static int kr_ioctl(struct ifnet *, u_long, caddr_t);
81static void kr_init(void *);
82static void kr_init_locked(struct kr_softc *);
83static void kr_link_task(void *, int);
84static int kr_miibus_readreg(device_t, int, int);
85static void kr_miibus_statchg(device_t);
86static int kr_miibus_writereg(device_t, int, int, int);
87static int kr_probe(device_t);
88static void kr_reset(struct kr_softc *);
89static int kr_resume(device_t);
90static int kr_rx_ring_init(struct kr_softc *);
91static int kr_tx_ring_init(struct kr_softc *);
92static void kr_shutdown(device_t);
93static void kr_start(struct ifnet *);
94static void kr_start_locked(struct ifnet *);
95static void kr_stop(struct kr_softc *);
96static int kr_suspend(device_t);
97
98static void kr_rx(struct kr_softc *);
99static void kr_tx(struct kr_softc *);
100static void kr_rx_intr(void *);
101static void kr_tx_intr(void *);
102static void kr_rx_und_intr(void *);
103static void kr_tx_ovr_intr(void *);
104static void kr_tick(void *);
105
106static void kr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
107static int kr_dma_alloc(struct kr_softc *);
108static void kr_dma_free(struct kr_softc *);
109static int kr_newbuf(struct kr_softc *, int);
110static __inline void kr_fixup_rx(struct mbuf *);
111
112static device_method_t kr_methods[] = {
113 /* Device interface */
114 DEVMETHOD(device_probe, kr_probe),
115 DEVMETHOD(device_attach, kr_attach),
116 DEVMETHOD(device_detach, kr_detach),
117 DEVMETHOD(device_suspend, kr_suspend),
118 DEVMETHOD(device_resume, kr_resume),
119 DEVMETHOD(device_shutdown, kr_shutdown),
120
121 /* bus interface */
122 DEVMETHOD(bus_print_child, bus_generic_print_child),
123 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
124
125 /* MII interface */
126 DEVMETHOD(miibus_readreg, kr_miibus_readreg),
127 DEVMETHOD(miibus_writereg, kr_miibus_writereg),
128 DEVMETHOD(miibus_statchg, kr_miibus_statchg),
129
130 { 0, 0 }
131};
132
133static driver_t kr_driver = {
134 "kr",
135 kr_methods,
136 sizeof(struct kr_softc)
137};
138
139static devclass_t kr_devclass;
140
141DRIVER_MODULE(kr, obio, kr_driver, kr_devclass, 0, 0);
142DRIVER_MODULE(kr, cardbus, kr_driver, kr_devclass, 0, 0);
143DRIVER_MODULE(miibus, kr, miibus_driver, miibus_devclass, 0, 0);
144
145static int
146kr_probe(device_t dev)
147{
148
149 device_set_desc(dev, "RC32434 Ethernet interface");
150 return (0);
151}
152
153static int
154kr_attach(device_t dev)
155{
156 uint8_t eaddr[ETHER_ADDR_LEN];
157 struct ifnet *ifp;
158 struct kr_softc *sc;
159 int error = 0, rid;
160 int unit;
161
162 sc = device_get_softc(dev);
163 unit = device_get_unit(dev);
164 sc->kr_dev = dev;
165
166 mtx_init(&sc->kr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
167 MTX_DEF);
168 callout_init_mtx(&sc->kr_stat_callout, &sc->kr_mtx, 0);
169 TASK_INIT(&sc->kr_link_task, 0, kr_link_task, sc);
170 pci_enable_busmaster(dev);
171
172 /* Map control/status registers. */
173 sc->kr_rid = 0;
174 sc->kr_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->kr_rid,
175 RF_ACTIVE);
176
177 if (sc->kr_res == NULL) {
178 device_printf(dev, "couldn't map memory\n");
179 error = ENXIO;
180 goto fail;
181 }
182
183 sc->kr_btag = rman_get_bustag(sc->kr_res);
184 sc->kr_bhandle = rman_get_bushandle(sc->kr_res);
185
186 /* Allocate interrupts */
187 rid = 0;
188 sc->kr_rx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_RX_IRQ,
189 KR_RX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);
190
191 if (sc->kr_rx_irq == NULL) {
192 device_printf(dev, "couldn't map rx interrupt\n");
193 error = ENXIO;
194 goto fail;
195 }
196
197 rid = 0;
198 sc->kr_tx_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, KR_TX_IRQ,
199 KR_TX_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);
200
201 if (sc->kr_tx_irq == NULL) {
202 device_printf(dev, "couldn't map tx interrupt\n");
203 error = ENXIO;
204 goto fail;
205 }
206
207 rid = 0;
208 sc->kr_rx_und_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
209 KR_RX_UND_IRQ, KR_RX_UND_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);
210
211 if (sc->kr_rx_und_irq == NULL) {
212 device_printf(dev, "couldn't map rx underrun interrupt\n");
213 error = ENXIO;
214 goto fail;
215 }
216
217 rid = 0;
218 sc->kr_tx_ovr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
219 KR_TX_OVR_IRQ, KR_TX_OVR_IRQ, 1, RF_SHAREABLE | RF_ACTIVE);
220
221 if (sc->kr_tx_ovr_irq == NULL) {
222 device_printf(dev, "couldn't map tx overrun interrupt\n");
223 error = ENXIO;
224 goto fail;
225 }
226
227 /* Allocate ifnet structure. */
228 ifp = sc->kr_ifp = if_alloc(IFT_ETHER);
229
230 if (ifp == NULL) {
231 device_printf(dev, "couldn't allocate ifnet structure\n");
232 error = ENOSPC;
233 goto fail;
234 }
235 ifp->if_softc = sc;
236 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
237 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
238 ifp->if_ioctl = kr_ioctl;
239 ifp->if_start = kr_start;
240 ifp->if_init = kr_init;
241
242 /* XXX: add real size */
243 IFQ_SET_MAXLEN(&ifp->if_snd, 9);
244 ifp->if_snd.ifq_maxlen = 9;
245 IFQ_SET_READY(&ifp->if_snd);
246
247 ifp->if_capenable = ifp->if_capabilities;
248
249 eaddr[0] = 0x00;
250 eaddr[1] = 0x0C;
251 eaddr[2] = 0x42;
252 eaddr[3] = 0x09;
253 eaddr[4] = 0x5E;
254 eaddr[5] = 0x6B;
255
256 if (kr_dma_alloc(sc) != 0) {
257 error = ENXIO;
258 goto fail;
259 }
260
261 /* TODO: calculate prescale */
262 CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);
263
264 CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
265 DELAY(1000);
266 CSR_WRITE_4(sc, KR_MIIMCFG, 0);
267
268 /* Do MII setup. */
269 if (mii_phy_probe(dev, &sc->kr_miibus,
270 kr_ifmedia_upd, kr_ifmedia_sts)) {
271 device_printf(dev, "MII without any phy!\n");
272 error = ENXIO;
273 goto fail;
274 }
275
276 /* Call MI attach routine. */
277 ether_ifattach(ifp, eaddr);
278
279 /* Hook interrupt last to avoid having to lock softc */
280 error = bus_setup_intr(dev, sc->kr_rx_irq, INTR_TYPE_NET | INTR_MPSAFE,
281 NULL, kr_rx_intr, sc, &sc->kr_rx_intrhand);
282
283 if (error) {
284 device_printf(dev, "couldn't set up rx irq\n");
285 ether_ifdetach(ifp);
286 goto fail;
287 }
288
289 error = bus_setup_intr(dev, sc->kr_tx_irq, INTR_TYPE_NET | INTR_MPSAFE,
290 NULL, kr_tx_intr, sc, &sc->kr_tx_intrhand);
291
292 if (error) {
293 device_printf(dev, "couldn't set up tx irq\n");
294 ether_ifdetach(ifp);
295 goto fail;
296 }
297
298 error = bus_setup_intr(dev, sc->kr_rx_und_irq,
299 INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_rx_und_intr, sc,
300 &sc->kr_rx_und_intrhand);
301
302 if (error) {
303 device_printf(dev, "couldn't set up rx underrun irq\n");
304 ether_ifdetach(ifp);
305 goto fail;
306 }
307
308 error = bus_setup_intr(dev, sc->kr_tx_ovr_irq,
309 INTR_TYPE_NET | INTR_MPSAFE, NULL, kr_tx_ovr_intr, sc,
310 &sc->kr_tx_ovr_intrhand);
311
312 if (error) {
313 device_printf(dev, "couldn't set up tx overrun irq\n");
314 ether_ifdetach(ifp);
315 goto fail;
316 }
317
318fail:
319 if (error)
320 kr_detach(dev);
321
322 return (error);
323}
324
325static int
326kr_detach(device_t dev)
327{
328 struct kr_softc *sc = device_get_softc(dev);
329 struct ifnet *ifp = sc->kr_ifp;
330
331 KASSERT(mtx_initialized(&sc->kr_mtx), ("vr mutex not initialized"));
332
333 /* These should only be active if attach succeeded */
334 if (device_is_attached(dev)) {
335 KR_LOCK(sc);
336 sc->kr_detach = 1;
337 kr_stop(sc);
338 KR_UNLOCK(sc);
339 taskqueue_drain(taskqueue_swi, &sc->kr_link_task);
340 ether_ifdetach(ifp);
341 }
342 if (sc->kr_miibus)
343 device_delete_child(dev, sc->kr_miibus);
344 bus_generic_detach(dev);
345
346 if (sc->kr_rx_intrhand)
347 bus_teardown_intr(dev, sc->kr_rx_irq, sc->kr_rx_intrhand);
348 if (sc->kr_rx_irq)
349 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_irq);
350 if (sc->kr_tx_intrhand)
351 bus_teardown_intr(dev, sc->kr_tx_irq, sc->kr_tx_intrhand);
352 if (sc->kr_tx_irq)
353 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_irq);
354 if (sc->kr_rx_und_intrhand)
355 bus_teardown_intr(dev, sc->kr_rx_und_irq,
356 sc->kr_rx_und_intrhand);
357 if (sc->kr_rx_und_irq)
358 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_rx_und_irq);
359 if (sc->kr_tx_ovr_intrhand)
360 bus_teardown_intr(dev, sc->kr_tx_ovr_irq,
361 sc->kr_tx_ovr_intrhand);
362 if (sc->kr_tx_ovr_irq)
363 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->kr_tx_ovr_irq);
364
365 if (sc->kr_res)
366 bus_release_resource(dev, SYS_RES_MEMORY, sc->kr_rid,
367 sc->kr_res);
368
369 if (ifp)
370 if_free(ifp);
371
372 kr_dma_free(sc);
373
374 mtx_destroy(&sc->kr_mtx);
375
376 return (0);
377
378}
379
380static int
381kr_suspend(device_t dev)
382{
383
384 panic("%s", __func__);
385 return 0;
386}
387
388static int
389kr_resume(device_t dev)
390{
391
392 panic("%s", __func__);
393 return 0;
394}
395
396static void
397kr_shutdown(device_t dev)
398{
399 struct kr_softc *sc;
400
401 sc = device_get_softc(dev);
402
403 KR_LOCK(sc);
404 kr_stop(sc);
405 KR_UNLOCK(sc);
406}
407
408static int
409kr_miibus_readreg(device_t dev, int phy, int reg)
410{
411 struct kr_softc * sc = device_get_softc(dev);
412 int i, result;
413
414 i = KR_MII_TIMEOUT;
415 while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
416 i--;
417
418 if (i == 0)
419 device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);
420
421 CSR_WRITE_4(sc, KR_MIIMADDR, (phy << 8) | reg);
422
423 i = KR_MII_TIMEOUT;
424 while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
425 i--;
426
427 if (i == 0)
428 device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);
429
430 CSR_WRITE_4(sc, KR_MIIMCMD, KR_MIIMCMD_RD);
431
432 i = KR_MII_TIMEOUT;
433 while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
434 i--;
435
436 if (i == 0)
437 device_printf(dev, "phy mii read is timed out %d:%d\n", phy,
438 reg);
439
440 if (CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_NV)
441 printf("phy mii readreg failed %d:%d: data not valid\n",
442 phy, reg);
443
444 result = CSR_READ_4(sc , KR_MIIMRDD);
445 CSR_WRITE_4(sc, KR_MIIMCMD, 0);
446
447 return (result);
448}
449
450static int
451kr_miibus_writereg(device_t dev, int phy, int reg, int data)
452{
453 struct kr_softc * sc = device_get_softc(dev);
454 int i;
455
456 i = KR_MII_TIMEOUT;
457 while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
458 i--;
459
460 if (i == 0)
461 device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);
462
463 CSR_WRITE_4(sc, KR_MIIMADDR, (phy << 8) | reg);
464
465 i = KR_MII_TIMEOUT;
466 while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
467 i--;
468
469 if (i == 0)
470 device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);
471
472 CSR_WRITE_4(sc, KR_MIIMWTD, data);
473
474 i = KR_MII_TIMEOUT;
475 while ((CSR_READ_4(sc, KR_MIIMIND) & KR_MIIMIND_BSY) && i)
476 i--;
477
478 if (i == 0)
479 device_printf(dev, "phy mii is busy %d:%d\n", phy, reg);
480
481 return (0);
482}
483
484static void
485kr_miibus_statchg(device_t dev)
486{
487 struct kr_softc *sc;
488
489 sc = device_get_softc(dev);
490 taskqueue_enqueue(taskqueue_swi, &sc->kr_link_task);
491}
492
493static void
494kr_link_task(void *arg, int pending)
495{
496 struct kr_softc *sc;
497 struct mii_data *mii;
498 struct ifnet *ifp;
499 /* int lfdx, mfdx; */
500
501 sc = (struct kr_softc *)arg;
502
503 KR_LOCK(sc);
504 mii = device_get_softc(sc->kr_miibus);
505 ifp = sc->kr_ifp;
506 if (mii == NULL || ifp == NULL ||
507 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
508 KR_UNLOCK(sc);
509 return;
510 }
511
512 if (mii->mii_media_status & IFM_ACTIVE) {
513 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
514 sc->kr_link_status = 1;
515 } else
516 sc->kr_link_status = 0;
517
518 KR_UNLOCK(sc);
519}
520
521static void
522kr_reset(struct kr_softc *sc)
523{
524 int i;
525
526 CSR_WRITE_4(sc, KR_ETHINTFC, 0);
527
528 for (i = 0; i < KR_TIMEOUT; i++) {
529 DELAY(10);
530 if (!(CSR_READ_4(sc, KR_ETHINTFC) & ETH_INTFC_RIP))
531 break;
532 }
533
534 if (i == KR_TIMEOUT)
535		device_printf(sc->kr_dev, "reset timed out\n");
536}
537
538static void
539kr_init(void *xsc)
540{
541 struct kr_softc *sc = xsc;
542
543 KR_LOCK(sc);
544 kr_init_locked(sc);
545 KR_UNLOCK(sc);
546}
547
548static void
549kr_init_locked(struct kr_softc *sc)
550{
551 struct ifnet *ifp = sc->kr_ifp;
552 struct mii_data *mii;
553
554 KR_LOCK_ASSERT(sc);
555
556 mii = device_get_softc(sc->kr_miibus);
557
558 kr_stop(sc);
559 kr_reset(sc);
560
561 CSR_WRITE_4(sc, KR_ETHINTFC, ETH_INTFC_EN);
562
563 /* Init circular RX list. */
564 if (kr_rx_ring_init(sc) != 0) {
565 device_printf(sc->kr_dev,
566 "initialization failed: no memory for rx buffers\n");
567 kr_stop(sc);
568 return;
569 }
570
571 /* Init tx descriptors. */
572 kr_tx_ring_init(sc);
573
574 KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, 0);
575 KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_NDPTR, 0);
576 KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR,
577 sc->kr_rdata.kr_rx_ring_paddr);
578
579
580 KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM,
581	    DMA_SM_H | DMA_SM_E | DMA_SM_D);
582
583 KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, 0);
584 KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 0);
585 KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_DPTR, 0);
586 KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM,
587 DMA_SM_F | DMA_SM_E);
588
589
590 /* Accept only packets destined for THIS Ethernet device address */
591 CSR_WRITE_4(sc, KR_ETHARC, 1);
592
593	/*
594	 * Set all four station address registers to the same initial
595	 * value, 00:0c:42:09:5e:6b.
596	 */
597 CSR_WRITE_4(sc, KR_ETHSAL0, 0x42095E6B);
598 CSR_WRITE_4(sc, KR_ETHSAH0, 0x0000000C);
599
600 CSR_WRITE_4(sc, KR_ETHSAL1, 0x42095E6B);
601 CSR_WRITE_4(sc, KR_ETHSAH1, 0x0000000C);
602
603 CSR_WRITE_4(sc, KR_ETHSAL2, 0x42095E6B);
604 CSR_WRITE_4(sc, KR_ETHSAH2, 0x0000000C);
605
606 CSR_WRITE_4(sc, KR_ETHSAL3, 0x42095E6B);
607 CSR_WRITE_4(sc, KR_ETHSAH3, 0x0000000C);
608
609 CSR_WRITE_4(sc, KR_ETHMAC2,
610 KR_ETH_MAC2_PEN | KR_ETH_MAC2_CEN | KR_ETH_MAC2_FD);
611
612 CSR_WRITE_4(sc, KR_ETHIPGT, KR_ETHIPGT_FULL_DUPLEX);
613 CSR_WRITE_4(sc, KR_ETHIPGR, 0x12); /* minimum value */
614
615 CSR_WRITE_4(sc, KR_MIIMCFG, KR_MIIMCFG_R);
616 DELAY(1000);
617 CSR_WRITE_4(sc, KR_MIIMCFG, 0);
618
619 /* TODO: calculate prescale */
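	/*
	 * A sketch of the intent, assuming a 165 MHz bus clock and a
	 * target management clock of roughly 1.25 MHz: the quotient
	 * below is the divider, forced even by the "& ~1".
	 */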
620 CSR_WRITE_4(sc, KR_ETHMCP, (165000000 / (1250000 + 1)) & ~1);
621
622 /* FIFO Tx threshold level */
623 CSR_WRITE_4(sc, KR_ETHFIFOTT, 0x30);
624
625 CSR_WRITE_4(sc, KR_ETHMAC1, KR_ETH_MAC1_RE);
626
627 sc->kr_link_status = 0;
628 mii_mediachg(mii);
629
630 ifp->if_drv_flags |= IFF_DRV_RUNNING;
631 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
632
633 callout_reset(&sc->kr_stat_callout, hz, kr_tick, sc);
634}
635
636static void
637kr_start(struct ifnet *ifp)
638{
639 struct kr_softc *sc;
640
641 sc = ifp->if_softc;
642
643 KR_LOCK(sc);
644 kr_start_locked(ifp);
645 KR_UNLOCK(sc);
646}
647
648/*
649 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
650 * pointers to the fragment pointers.
651 */
652static int
653kr_encap(struct kr_softc *sc, struct mbuf **m_head)
654{
655 struct kr_txdesc *txd;
656 struct kr_desc *desc, *prev_desc;
657 bus_dma_segment_t txsegs[KR_MAXFRAGS];
658 uint32_t link_addr;
659	int error, i, nsegs, prod, si;
660
661 KR_LOCK_ASSERT(sc);
662
663 prod = sc->kr_cdata.kr_tx_prod;
664 txd = &sc->kr_cdata.kr_txdesc[prod];
665 error = bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
666 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
667	if (error == EFBIG) {
		/* Chain has too many fragments to map; drop the packet. */
		m_freem(*m_head);
		*m_head = NULL;
		return (ENOBUFS);
669	} else if (error != 0)
670		return (error);
671 if (nsegs == 0) {
672 m_freem(*m_head);
673 *m_head = NULL;
674 return (EIO);
675 }
676
677 /* Check number of available descriptors. */
678 if (sc->kr_cdata.kr_tx_cnt + nsegs >= (KR_TX_RING_CNT - 1)) {
679 bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);
680 return (ENOBUFS);
681 }
682
683 txd->tx_m = *m_head;
684 bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
685 BUS_DMASYNC_PREWRITE);
686
687 si = prod;
688
689	/*
690	 * Make a list of descriptors for this packet. The DMA controller
691	 * walks the chain while kr_link is non-zero. The last descriptor
692	 * gets the COF flag so the next chain is picked up from NDPTR.
693	 */
695 desc = prev_desc = NULL;
696 for (i = 0; i < nsegs; i++) {
697 desc = &sc->kr_rdata.kr_tx_ring[prod];
698 desc->kr_ctl = KR_DMASIZE(txsegs[i].ds_len) | KR_CTL_IOF;
699 if (i == 0)
700 desc->kr_devcs = KR_DMATX_DEVCS_FD;
701 desc->kr_ca = txsegs[i].ds_addr;
702 desc->kr_link = 0;
703 /* link with previous descriptor */
704 if (prev_desc)
705 prev_desc->kr_link = KR_TX_RING_ADDR(sc, prod);
706
707 sc->kr_cdata.kr_tx_cnt++;
708 prev_desc = desc;
709 KR_INC(prod, KR_TX_RING_CNT);
710 }
711
712	/*
713	 * Set COF on the last descriptor and mark the last fragment with LD.
714	 */
715 if (desc) {
716 desc->kr_ctl |= KR_CTL_COF;
717 desc->kr_devcs |= KR_DMATX_DEVCS_LD;
718 }
719
720 /* Update producer index. */
721 sc->kr_cdata.kr_tx_prod = prod;
722
723 /* Sync descriptors. */
724 bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
725 sc->kr_cdata.kr_tx_ring_map,
726 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
727
728	/* Start transmitting. */
729	/* If NDPTR is idle, start the new list there; otherwise chain it. */
730 if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_NDPTR) == 0) {
731 /* NDPTR is not busy - start new list */
732 KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR,
733 KR_TX_RING_ADDR(sc, si));
734	} else {
736 link_addr = KR_TX_RING_ADDR(sc, si);
737 /* Get previous descriptor */
738 si = (si + KR_TX_RING_CNT - 1) % KR_TX_RING_CNT;
739 desc = &sc->kr_rdata.kr_tx_ring[si];
740 desc->kr_link = link_addr;
741 }
742
743 return (0);
744}
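/*
 * For reference, a sketch of the chain kr_encap() builds for a
 * two-fragment packet (illustrative, not live driver state):
 *
 *   desc[si]:    kr_ctl   = KR_DMASIZE(len0) | KR_CTL_IOF
 *                kr_devcs = KR_DMATX_DEVCS_FD
 *                kr_link  = KR_TX_RING_ADDR(sc, si + 1)
 *   desc[si+1]:  kr_ctl   = KR_DMASIZE(len1) | KR_CTL_IOF | KR_CTL_COF
 *                kr_devcs = KR_DMATX_DEVCS_LD
 *                kr_link  = 0
 *
 * The controller stops at the zero link; the COF flag makes it resume
 * from NDPTR once a new chain is queued there.
 */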
745
746static void
747kr_start_locked(struct ifnet *ifp)
748{
749 struct kr_softc *sc;
750 struct mbuf *m_head;
751 int enq;
752
753 sc = ifp->if_softc;
754
755 KR_LOCK_ASSERT(sc);
756
757 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
758	    IFF_DRV_RUNNING || sc->kr_link_status == 0)
759 return;
760
761 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
762 sc->kr_cdata.kr_tx_cnt < KR_TX_RING_CNT - 2; ) {
763 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
764 if (m_head == NULL)
765 break;
766 /*
767 * Pack the data into the transmit ring. If we
768 * don't have room, set the OACTIVE flag and wait
769 * for the NIC to drain the ring.
770 */
771 if (kr_encap(sc, &m_head)) {
772 if (m_head == NULL)
773 break;
774 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
775 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
776 break;
777 }
778
779 enq++;
780 /*
781 * If there's a BPF listener, bounce a copy of this frame
782 * to him.
783 */
784 ETHER_BPF_MTAP(ifp, m_head);
785 }
786}
787
788static void
789kr_stop(struct kr_softc *sc)
790{
791 struct ifnet *ifp;
792
793 KR_LOCK_ASSERT(sc);
794
795
796 ifp = sc->kr_ifp;
797 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
798 callout_stop(&sc->kr_stat_callout);
799
800 /* mask out RX interrupts */
801 KR_DMA_SETBITS_REG(KR_DMA_RXCHAN, DMA_SM,
802 DMA_SM_D | DMA_SM_H | DMA_SM_E);
803
804 /* mask out TX interrupts */
805 KR_DMA_SETBITS_REG(KR_DMA_TXCHAN, DMA_SM,
806 DMA_SM_F | DMA_SM_E);
807
808 /* Abort RX DMA transactions */
809 if (KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_C) & DMA_C_R) {
810		/* Set ABORT bit if a transaction is in progress */
811 KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_C, DMA_C_ABORT);
812 /* XXX: Add timeout */
813 while ((KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S) & DMA_S_H) == 0)
814 DELAY(10);
815 KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, 0);
816 }
817 KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR, 0);
818 KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_NDPTR, 0);
819
820 /* Abort TX DMA transactions */
821 if (KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_C) & DMA_C_R) {
822		/* Set ABORT bit if a transaction is in progress */
823 KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_C, DMA_C_ABORT);
824 /* XXX: Add timeout */
825 while ((KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_S) & DMA_S_H) == 0)
826 DELAY(10);
827 KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, 0);
828 }
829 KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_DPTR, 0);
830 KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_NDPTR, 0);
831
832 CSR_WRITE_4(sc, KR_ETHINTFC, 0);
833}
834
835
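/*
 * Note: the #if 0 sections in kr_ioctl() are unfinished interface
 * flag, multicast filter and capability handling; only the media
 * ioctls and the ether_ioctl() fallback are live.
 */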
836static int
837kr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
838{
839 struct kr_softc *sc = ifp->if_softc;
840 struct ifreq *ifr = (struct ifreq *) data;
841 struct mii_data *mii;
842 int error;
843
844 switch (command) {
845 case SIOCSIFFLAGS:
846#if 0
847 KR_LOCK(sc);
848 if (ifp->if_flags & IFF_UP) {
849 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
850 if ((ifp->if_flags ^ sc->kr_if_flags) &
851 (IFF_PROMISC | IFF_ALLMULTI))
852 kr_set_filter(sc);
853 } else {
854 if (sc->kr_detach == 0)
855 kr_init_locked(sc);
856 }
857 } else {
858 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
859 kr_stop(sc);
860 }
861 sc->kr_if_flags = ifp->if_flags;
862 KR_UNLOCK(sc);
863#endif
864 error = 0;
865 break;
866 case SIOCADDMULTI:
867 case SIOCDELMULTI:
868#if 0
869 KR_LOCK(sc);
870 kr_set_filter(sc);
871 KR_UNLOCK(sc);
872#endif
873 error = 0;
874 break;
875 case SIOCGIFMEDIA:
876 case SIOCSIFMEDIA:
877 mii = device_get_softc(sc->kr_miibus);
878 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
879 break;
880 case SIOCSIFCAP:
881 error = 0;
882#if 0
883 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
884 if ((mask & IFCAP_HWCSUM) != 0) {
885 ifp->if_capenable ^= IFCAP_HWCSUM;
886 if ((IFCAP_HWCSUM & ifp->if_capenable) &&
887 (IFCAP_HWCSUM & ifp->if_capabilities))
888 ifp->if_hwassist = KR_CSUM_FEATURES;
889 else
890 ifp->if_hwassist = 0;
891 }
892 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
893 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
894 if (IFCAP_VLAN_HWTAGGING & ifp->if_capenable &&
895 IFCAP_VLAN_HWTAGGING & ifp->if_capabilities &&
896 ifp->if_drv_flags & IFF_DRV_RUNNING) {
897 KR_LOCK(sc);
898 kr_vlan_setup(sc);
899 KR_UNLOCK(sc);
900 }
901 }
902 VLAN_CAPABILITIES(ifp);
903#endif
904 break;
905 default:
906 error = ether_ioctl(ifp, command, data);
907 break;
908 }
909
910 return (error);
911}
912
913/*
914 * Set media options.
915 */
916static int
917kr_ifmedia_upd(struct ifnet *ifp)
918{
919 struct kr_softc *sc;
920 struct mii_data *mii;
921 struct mii_softc *miisc;
922 int error;
923
924 sc = ifp->if_softc;
925 KR_LOCK(sc);
926 mii = device_get_softc(sc->kr_miibus);
927 if (mii->mii_instance) {
928 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
929 mii_phy_reset(miisc);
930 }
931 error = mii_mediachg(mii);
932 KR_UNLOCK(sc);
933
934 return (error);
935}
936
937/*
938 * Report current media status.
939 */
940static void
941kr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
942{
943 struct kr_softc *sc = ifp->if_softc;
944 struct mii_data *mii;
945
946 mii = device_get_softc(sc->kr_miibus);
947 KR_LOCK(sc);
948 mii_pollstat(mii);
949 KR_UNLOCK(sc);
950 ifmr->ifm_active = mii->mii_media_active;
951 ifmr->ifm_status = mii->mii_media_status;
952}
953
954struct kr_dmamap_arg {
955 bus_addr_t kr_busaddr;
956};
957
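/* bus_dmamap_load() callback: record the bus address of the segment. */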
958static void
959kr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
960{
961 struct kr_dmamap_arg *ctx;
962
963 if (error != 0)
964 return;
965 ctx = arg;
966 ctx->kr_busaddr = segs[0].ds_addr;
967}
968
969static int
970kr_dma_alloc(struct kr_softc *sc)
971{
972 struct kr_dmamap_arg ctx;
973 struct kr_txdesc *txd;
974 struct kr_rxdesc *rxd;
975 int error, i;
976
977 /* Create parent DMA tag. */
978 error = bus_dma_tag_create(
979 bus_get_dma_tag(sc->kr_dev), /* parent */
980 1, 0, /* alignment, boundary */
981 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
982 BUS_SPACE_MAXADDR, /* highaddr */
983 NULL, NULL, /* filter, filterarg */
984 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
985 0, /* nsegments */
986 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
987 0, /* flags */
988 NULL, NULL, /* lockfunc, lockarg */
989 &sc->kr_cdata.kr_parent_tag);
990 if (error != 0) {
991 device_printf(sc->kr_dev, "failed to create parent DMA tag\n");
992 goto fail;
993 }
994 /* Create tag for Tx ring. */
995 error = bus_dma_tag_create(
996 sc->kr_cdata.kr_parent_tag, /* parent */
997 KR_RING_ALIGN, 0, /* alignment, boundary */
998 BUS_SPACE_MAXADDR, /* lowaddr */
999 BUS_SPACE_MAXADDR, /* highaddr */
1000 NULL, NULL, /* filter, filterarg */
1001 KR_TX_RING_SIZE, /* maxsize */
1002 1, /* nsegments */
1003 KR_TX_RING_SIZE, /* maxsegsize */
1004 0, /* flags */
1005 NULL, NULL, /* lockfunc, lockarg */
1006 &sc->kr_cdata.kr_tx_ring_tag);
1007 if (error != 0) {
1008 device_printf(sc->kr_dev, "failed to create Tx ring DMA tag\n");
1009 goto fail;
1010 }
1011
1012 /* Create tag for Rx ring. */
1013 error = bus_dma_tag_create(
1014 sc->kr_cdata.kr_parent_tag, /* parent */
1015 KR_RING_ALIGN, 0, /* alignment, boundary */
1016 BUS_SPACE_MAXADDR, /* lowaddr */
1017 BUS_SPACE_MAXADDR, /* highaddr */
1018 NULL, NULL, /* filter, filterarg */
1019 KR_RX_RING_SIZE, /* maxsize */
1020 1, /* nsegments */
1021 KR_RX_RING_SIZE, /* maxsegsize */
1022 0, /* flags */
1023 NULL, NULL, /* lockfunc, lockarg */
1024 &sc->kr_cdata.kr_rx_ring_tag);
1025 if (error != 0) {
1026 device_printf(sc->kr_dev, "failed to create Rx ring DMA tag\n");
1027 goto fail;
1028 }
1029
1030 /* Create tag for Tx buffers. */
1031 error = bus_dma_tag_create(
1032 sc->kr_cdata.kr_parent_tag, /* parent */
1033 sizeof(uint32_t), 0, /* alignment, boundary */
1034 BUS_SPACE_MAXADDR, /* lowaddr */
1035 BUS_SPACE_MAXADDR, /* highaddr */
1036 NULL, NULL, /* filter, filterarg */
1037 MCLBYTES * KR_MAXFRAGS, /* maxsize */
1038 KR_MAXFRAGS, /* nsegments */
1039 MCLBYTES, /* maxsegsize */
1040 0, /* flags */
1041 NULL, NULL, /* lockfunc, lockarg */
1042 &sc->kr_cdata.kr_tx_tag);
1043 if (error != 0) {
1044 device_printf(sc->kr_dev, "failed to create Tx DMA tag\n");
1045 goto fail;
1046 }
1047
1048 /* Create tag for Rx buffers. */
1049 error = bus_dma_tag_create(
1050 sc->kr_cdata.kr_parent_tag, /* parent */
1051 KR_RX_ALIGN, 0, /* alignment, boundary */
1052 BUS_SPACE_MAXADDR, /* lowaddr */
1053 BUS_SPACE_MAXADDR, /* highaddr */
1054 NULL, NULL, /* filter, filterarg */
1055 MCLBYTES, /* maxsize */
1056 1, /* nsegments */
1057 MCLBYTES, /* maxsegsize */
1058 0, /* flags */
1059 NULL, NULL, /* lockfunc, lockarg */
1060 &sc->kr_cdata.kr_rx_tag);
1061 if (error != 0) {
1062 device_printf(sc->kr_dev, "failed to create Rx DMA tag\n");
1063 goto fail;
1064 }
1065
1066 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1067 error = bus_dmamem_alloc(sc->kr_cdata.kr_tx_ring_tag,
1068 (void **)&sc->kr_rdata.kr_tx_ring, BUS_DMA_WAITOK |
1069 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->kr_cdata.kr_tx_ring_map);
1070 if (error != 0) {
1071 device_printf(sc->kr_dev,
1072 "failed to allocate DMA'able memory for Tx ring\n");
1073 goto fail;
1074 }
1075
1076 ctx.kr_busaddr = 0;
1077 error = bus_dmamap_load(sc->kr_cdata.kr_tx_ring_tag,
1078 sc->kr_cdata.kr_tx_ring_map, sc->kr_rdata.kr_tx_ring,
1079 KR_TX_RING_SIZE, kr_dmamap_cb, &ctx, 0);
1080 if (error != 0 || ctx.kr_busaddr == 0) {
1081 device_printf(sc->kr_dev,
1082 "failed to load DMA'able memory for Tx ring\n");
1083 goto fail;
1084 }
1085 sc->kr_rdata.kr_tx_ring_paddr = ctx.kr_busaddr;
1086
1087 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1088 error = bus_dmamem_alloc(sc->kr_cdata.kr_rx_ring_tag,
1089 (void **)&sc->kr_rdata.kr_rx_ring, BUS_DMA_WAITOK |
1090 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->kr_cdata.kr_rx_ring_map);
1091 if (error != 0) {
1092 device_printf(sc->kr_dev,
1093 "failed to allocate DMA'able memory for Rx ring\n");
1094 goto fail;
1095 }
1096
1097 ctx.kr_busaddr = 0;
1098 error = bus_dmamap_load(sc->kr_cdata.kr_rx_ring_tag,
1099 sc->kr_cdata.kr_rx_ring_map, sc->kr_rdata.kr_rx_ring,
1100 KR_RX_RING_SIZE, kr_dmamap_cb, &ctx, 0);
1101 if (error != 0 || ctx.kr_busaddr == 0) {
1102 device_printf(sc->kr_dev,
1103 "failed to load DMA'able memory for Rx ring\n");
1104 goto fail;
1105 }
1106 sc->kr_rdata.kr_rx_ring_paddr = ctx.kr_busaddr;
1107
1108 /* Create DMA maps for Tx buffers. */
1109 for (i = 0; i < KR_TX_RING_CNT; i++) {
1110 txd = &sc->kr_cdata.kr_txdesc[i];
1111 txd->tx_m = NULL;
1112 txd->tx_dmamap = NULL;
1113 error = bus_dmamap_create(sc->kr_cdata.kr_tx_tag, 0,
1114 &txd->tx_dmamap);
1115 if (error != 0) {
1116 device_printf(sc->kr_dev,
1117 "failed to create Tx dmamap\n");
1118 goto fail;
1119 }
1120 }
1121 /* Create DMA maps for Rx buffers. */
1122 if ((error = bus_dmamap_create(sc->kr_cdata.kr_rx_tag, 0,
1123 &sc->kr_cdata.kr_rx_sparemap)) != 0) {
1124 device_printf(sc->kr_dev,
1125 "failed to create spare Rx dmamap\n");
1126 goto fail;
1127 }
1128 for (i = 0; i < KR_RX_RING_CNT; i++) {
1129 rxd = &sc->kr_cdata.kr_rxdesc[i];
1130 rxd->rx_m = NULL;
1131 rxd->rx_dmamap = NULL;
1132 error = bus_dmamap_create(sc->kr_cdata.kr_rx_tag, 0,
1133 &rxd->rx_dmamap);
1134 if (error != 0) {
1135 device_printf(sc->kr_dev,
1136 "failed to create Rx dmamap\n");
1137 goto fail;
1138 }
1139 }
1140
1141fail:
1142 return (error);
1143}
1144
1145static void
1146kr_dma_free(struct kr_softc *sc)
1147{
1148 struct kr_txdesc *txd;
1149 struct kr_rxdesc *rxd;
1150 int i;
1151
1152 /* Tx ring. */
1153 if (sc->kr_cdata.kr_tx_ring_tag) {
1154 if (sc->kr_cdata.kr_tx_ring_map)
1155 bus_dmamap_unload(sc->kr_cdata.kr_tx_ring_tag,
1156 sc->kr_cdata.kr_tx_ring_map);
1157 if (sc->kr_cdata.kr_tx_ring_map &&
1158 sc->kr_rdata.kr_tx_ring)
1159 bus_dmamem_free(sc->kr_cdata.kr_tx_ring_tag,
1160 sc->kr_rdata.kr_tx_ring,
1161 sc->kr_cdata.kr_tx_ring_map);
1162 sc->kr_rdata.kr_tx_ring = NULL;
1163 sc->kr_cdata.kr_tx_ring_map = NULL;
1164 bus_dma_tag_destroy(sc->kr_cdata.kr_tx_ring_tag);
1165 sc->kr_cdata.kr_tx_ring_tag = NULL;
1166 }
1167 /* Rx ring. */
1168 if (sc->kr_cdata.kr_rx_ring_tag) {
1169 if (sc->kr_cdata.kr_rx_ring_map)
1170 bus_dmamap_unload(sc->kr_cdata.kr_rx_ring_tag,
1171 sc->kr_cdata.kr_rx_ring_map);
1172 if (sc->kr_cdata.kr_rx_ring_map &&
1173 sc->kr_rdata.kr_rx_ring)
1174 bus_dmamem_free(sc->kr_cdata.kr_rx_ring_tag,
1175 sc->kr_rdata.kr_rx_ring,
1176 sc->kr_cdata.kr_rx_ring_map);
1177 sc->kr_rdata.kr_rx_ring = NULL;
1178 sc->kr_cdata.kr_rx_ring_map = NULL;
1179 bus_dma_tag_destroy(sc->kr_cdata.kr_rx_ring_tag);
1180 sc->kr_cdata.kr_rx_ring_tag = NULL;
1181 }
1182 /* Tx buffers. */
1183 if (sc->kr_cdata.kr_tx_tag) {
1184 for (i = 0; i < KR_TX_RING_CNT; i++) {
1185 txd = &sc->kr_cdata.kr_txdesc[i];
1186 if (txd->tx_dmamap) {
1187 bus_dmamap_destroy(sc->kr_cdata.kr_tx_tag,
1188 txd->tx_dmamap);
1189 txd->tx_dmamap = NULL;
1190 }
1191 }
1192 bus_dma_tag_destroy(sc->kr_cdata.kr_tx_tag);
1193 sc->kr_cdata.kr_tx_tag = NULL;
1194 }
1195 /* Rx buffers. */
1196 if (sc->kr_cdata.kr_rx_tag) {
1197 for (i = 0; i < KR_RX_RING_CNT; i++) {
1198 rxd = &sc->kr_cdata.kr_rxdesc[i];
1199 if (rxd->rx_dmamap) {
1200 bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
1201 rxd->rx_dmamap);
1202 rxd->rx_dmamap = NULL;
1203 }
1204 }
1205 if (sc->kr_cdata.kr_rx_sparemap) {
1206 bus_dmamap_destroy(sc->kr_cdata.kr_rx_tag,
1207 sc->kr_cdata.kr_rx_sparemap);
1208			sc->kr_cdata.kr_rx_sparemap = NULL;
1209 }
1210 bus_dma_tag_destroy(sc->kr_cdata.kr_rx_tag);
1211 sc->kr_cdata.kr_rx_tag = NULL;
1212 }
1213
1214 if (sc->kr_cdata.kr_parent_tag) {
1215 bus_dma_tag_destroy(sc->kr_cdata.kr_parent_tag);
1216 sc->kr_cdata.kr_parent_tag = NULL;
1217 }
1218}
1219
1220/*
1221 * Initialize the transmit descriptors.
1222 */
1223static int
1224kr_tx_ring_init(struct kr_softc *sc)
1225{
1226 struct kr_ring_data *rd;
1227 struct kr_txdesc *txd;
1228 bus_addr_t addr;
1229 int i;
1230
1231 sc->kr_cdata.kr_tx_prod = 0;
1232 sc->kr_cdata.kr_tx_cons = 0;
1233 sc->kr_cdata.kr_tx_cnt = 0;
1234 sc->kr_cdata.kr_tx_pkts = 0;
1235
1236 rd = &sc->kr_rdata;
1237 bzero(rd->kr_tx_ring, KR_TX_RING_SIZE);
1238 for (i = 0; i < KR_TX_RING_CNT; i++) {
1239 if (i == KR_TX_RING_CNT - 1)
1240 addr = KR_TX_RING_ADDR(sc, 0);
1241 else
1242 addr = KR_TX_RING_ADDR(sc, i + 1);
1243 rd->kr_tx_ring[i].kr_ctl = KR_CTL_IOF;
1244 rd->kr_tx_ring[i].kr_ca = 0;
1245 rd->kr_tx_ring[i].kr_devcs = 0;
1246 rd->kr_tx_ring[i].kr_link = 0;
1247 txd = &sc->kr_cdata.kr_txdesc[i];
1248 txd->tx_m = NULL;
1249 }
1250
1251 bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
1252 sc->kr_cdata.kr_tx_ring_map,
1253 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1254
1255 return (0);
1256}
1257
1258/*
1259 * Initialize the RX descriptors and allocate mbufs for them. Note that
1260 * we arrange the descriptors in a closed ring, so that the last descriptor
1261 * points back to the first.
1262 */
1263static int
1264kr_rx_ring_init(struct kr_softc *sc)
1265{
1266 struct kr_ring_data *rd;
1267 struct kr_rxdesc *rxd;
1268 bus_addr_t addr;
1269 int i;
1270
1271 sc->kr_cdata.kr_rx_cons = 0;
1272
1273 rd = &sc->kr_rdata;
1274 bzero(rd->kr_rx_ring, KR_RX_RING_SIZE);
1275 for (i = 0; i < KR_RX_RING_CNT; i++) {
1276 rxd = &sc->kr_cdata.kr_rxdesc[i];
1277 rxd->rx_m = NULL;
1278 rxd->desc = &rd->kr_rx_ring[i];
1279 if (i == KR_RX_RING_CNT - 1)
1280 addr = KR_RX_RING_ADDR(sc, 0);
1281 else
1282 addr = KR_RX_RING_ADDR(sc, i + 1);
1283 rd->kr_rx_ring[i].kr_ctl = KR_CTL_IOD;
1284 if (i == KR_RX_RING_CNT - 1)
1285 rd->kr_rx_ring[i].kr_ctl |= KR_CTL_COD;
1286 rd->kr_rx_ring[i].kr_devcs = 0;
1287 rd->kr_rx_ring[i].kr_ca = 0;
1288 rd->kr_rx_ring[i].kr_link = addr;
1289 if (kr_newbuf(sc, i) != 0)
1290 return (ENOBUFS);
1291 }
1292
1293 bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
1294 sc->kr_cdata.kr_rx_ring_map,
1295 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1296
1297 return (0);
1298}
1299
1300/*
1301 * Initialize an RX descriptor and attach an MBUF cluster.
1302 */
1303static int
1304kr_newbuf(struct kr_softc *sc, int idx)
1305{
1306 struct kr_desc *desc;
1307 struct kr_rxdesc *rxd;
1308 struct mbuf *m;
1309 bus_dma_segment_t segs[1];
1310 bus_dmamap_t map;
1311 int nsegs;
1312
1313 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1314 if (m == NULL)
1315 return (ENOBUFS);
1316 m->m_len = m->m_pkthdr.len = MCLBYTES;
1317 m_adj(m, sizeof(uint64_t));
1318
1319 if (bus_dmamap_load_mbuf_sg(sc->kr_cdata.kr_rx_tag,
1320 sc->kr_cdata.kr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1321 m_freem(m);
1322 return (ENOBUFS);
1323 }
1324 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1325
1326 rxd = &sc->kr_cdata.kr_rxdesc[idx];
1327 if (rxd->rx_m != NULL) {
1328 bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
1329 BUS_DMASYNC_POSTREAD);
1330 bus_dmamap_unload(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap);
1331 }
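	/* Swap in the freshly loaded spare map; the old map becomes the spare. */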
1332 map = rxd->rx_dmamap;
1333 rxd->rx_dmamap = sc->kr_cdata.kr_rx_sparemap;
1334 sc->kr_cdata.kr_rx_sparemap = map;
1335 bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
1336 BUS_DMASYNC_PREREAD);
1337 rxd->rx_m = m;
1338 desc = rxd->desc;
1339 desc->kr_ca = segs[0].ds_addr;
1340 desc->kr_ctl |= KR_DMASIZE(segs[0].ds_len);
1341	rxd->saved_ca = desc->kr_ca;
1342	rxd->saved_ctl = desc->kr_ctl;
1343
1344 return (0);
1345}
1346
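/*
 * The DMA engine writes frames to 32-bit aligned buffers, which leaves
 * the encapsulated IP header misaligned.  Copy the frame back by
 * ETHER_ALIGN (2) bytes so the payload ends up properly aligned.
 */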
1347static __inline void
1348kr_fixup_rx(struct mbuf *m)
1349{
1350 int i;
1351 uint16_t *src, *dst;
1352
1353 src = mtod(m, uint16_t *);
1354 dst = src - 1;
1355
1356 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1357 *dst++ = *src++;
1358
1359 m->m_data -= ETHER_ALIGN;
1360}
1361
1362
1363static void
1364kr_tx(struct kr_softc *sc)
1365{
1366 struct kr_txdesc *txd;
1367 struct kr_desc *cur_tx;
1368 struct ifnet *ifp;
1369 uint32_t ctl, devcs;
1370 int cons, prod;
1371
1372 KR_LOCK_ASSERT(sc);
1373
1374 cons = sc->kr_cdata.kr_tx_cons;
1375 prod = sc->kr_cdata.kr_tx_prod;
1376 if (cons == prod)
1377 return;
1378
1379 bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
1380 sc->kr_cdata.kr_tx_ring_map,
1381 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1382
1383 ifp = sc->kr_ifp;
1384 /*
1385 * Go through our tx list and free mbufs for those
1386 * frames that have been transmitted.
1387 */
1388 for (; cons != prod; KR_INC(cons, KR_TX_RING_CNT)) {
1389 cur_tx = &sc->kr_rdata.kr_tx_ring[cons];
1390 ctl = cur_tx->kr_ctl;
1391 devcs = cur_tx->kr_devcs;
1392 /* Check if descriptor has "finished" flag */
1393 if ((ctl & KR_CTL_F) == 0)
1394 break;
1395
1396 sc->kr_cdata.kr_tx_cnt--;
1397 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1398
1399 txd = &sc->kr_cdata.kr_txdesc[cons];
1400
1401 if (devcs & KR_DMATX_DEVCS_TOK)
1402 ifp->if_opackets++;
1403 else {
1404 ifp->if_oerrors++;
1405 /* collisions: medium busy, late collision */
1406 if ((devcs & KR_DMATX_DEVCS_EC) ||
1407 (devcs & KR_DMATX_DEVCS_LC))
1408 ifp->if_collisions++;
1409 }
1410
1411 bus_dmamap_sync(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap,
1412 BUS_DMASYNC_POSTWRITE);
1413 bus_dmamap_unload(sc->kr_cdata.kr_tx_tag, txd->tx_dmamap);
1414
1415		/* tx_m is set only on a packet's first descriptor */
1416 if (txd->tx_m)
1417 m_freem(txd->tx_m);
1418 txd->tx_m = NULL;
1419
1420 /* reset descriptor */
1421 cur_tx->kr_ctl = KR_CTL_IOF;
1422 cur_tx->kr_devcs = 0;
1423 cur_tx->kr_ca = 0;
1424 cur_tx->kr_link = 0;
1425 }
1426
1427 sc->kr_cdata.kr_tx_cons = cons;
1428
1429 bus_dmamap_sync(sc->kr_cdata.kr_tx_ring_tag,
1430 sc->kr_cdata.kr_tx_ring_map, BUS_DMASYNC_PREWRITE);
1431}
1432
1433
1434static void
1435kr_rx(struct kr_softc *sc)
1436{
1437 struct kr_rxdesc *rxd;
1438 struct ifnet *ifp = sc->kr_ifp;
1439 int cons, prog, packet_len, count, error;
1440 struct kr_desc *cur_rx;
1441 struct mbuf *m;
1442
1443 KR_LOCK_ASSERT(sc);
1444
1445 cons = sc->kr_cdata.kr_rx_cons;
1446
1447 bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
1448 sc->kr_cdata.kr_rx_ring_map,
1449 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1450
1451 for (prog = 0; prog < KR_RX_RING_CNT; KR_INC(cons, KR_RX_RING_CNT)) {
1452 cur_rx = &sc->kr_rdata.kr_rx_ring[cons];
1453 rxd = &sc->kr_cdata.kr_rxdesc[cons];
1454 m = rxd->rx_m;
1455
1456 if ((cur_rx->kr_ctl & KR_CTL_D) == 0)
1457 break;
1458
1459 prog++;
1460
1461 packet_len = KR_PKTSIZE(cur_rx->kr_devcs);
1462 count = m->m_len - KR_DMASIZE(cur_rx->kr_ctl);
1463		/* Assume an error until the checks below pass */
1464 error = 1;
1465
1466 if (packet_len != count)
1467 ifp->if_ierrors++;
1468 else if (count < 64)
1469 ifp->if_ierrors++;
1470 else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_LD) == 0)
1471 ifp->if_ierrors++;
1472 else if ((cur_rx->kr_devcs & KR_DMARX_DEVCS_ROK) != 0) {
1473 error = 0;
1474 bus_dmamap_sync(sc->kr_cdata.kr_rx_tag, rxd->rx_dmamap,
1475 BUS_DMASYNC_PREREAD);
1476 m = rxd->rx_m;
1477 kr_fixup_rx(m);
1478 m->m_pkthdr.rcvif = ifp;
1479 /* Skip 4 bytes of CRC */
1480 m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
1481 ifp->if_ipackets++;
1482
1483 KR_UNLOCK(sc);
1484 (*ifp->if_input)(ifp, m);
1485 KR_LOCK(sc);
1486 }
1487
1488 if (error) {
1489 /* Restore CONTROL and CA values, reset DEVCS */
1490 cur_rx->kr_ctl = rxd->saved_ctl;
1491 cur_rx->kr_ca = rxd->saved_ca;
1492 cur_rx->kr_devcs = 0;
1493		} else {
1495 /* Reinit descriptor */
1496 cur_rx->kr_ctl = KR_CTL_IOD;
1497 if (cons == KR_RX_RING_CNT - 1)
1498 cur_rx->kr_ctl |= KR_CTL_COD;
1499 cur_rx->kr_devcs = 0;
1500 cur_rx->kr_ca = 0;
1501 if (kr_newbuf(sc, cons) != 0) {
1502 device_printf(sc->kr_dev,
1503 "Failed to allocate buffer\n");
1504 break;
1505 }
1506 }
1507
1508 bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
1509 sc->kr_cdata.kr_rx_ring_map,
1510 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1511
1512 }
1513
1514 if (prog > 0) {
1515 sc->kr_cdata.kr_rx_cons = cons;
1516
1517 bus_dmamap_sync(sc->kr_cdata.kr_rx_ring_tag,
1518 sc->kr_cdata.kr_rx_ring_map,
1519 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1520 }
1521}
1522
1523static void
1524kr_rx_intr(void *arg)
1525{
1526 struct kr_softc *sc = arg;
1527 uint32_t status;
1528
1529 KR_LOCK(sc);
1530
1531 /* mask out interrupts */
1532 KR_DMA_SETBITS_REG(KR_DMA_RXCHAN, DMA_SM,
1533 DMA_SM_D | DMA_SM_H | DMA_SM_E);
1534
1535 status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);
1536 if (status & (DMA_S_D | DMA_S_E | DMA_S_H)) {
1537 kr_rx(sc);
1538
1539 if (status & DMA_S_E)
1540 device_printf(sc->kr_dev, "RX DMA error\n");
1541 }
1542
1543 /* Reread status */
1544 status = KR_DMA_READ_REG(KR_DMA_RXCHAN, DMA_S);
1545
1546 /* restart DMA RX if it has been halted */
1547 if (status & DMA_S_H) {
1548 KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_DPTR,
1549 KR_RX_RING_ADDR(sc, sc->kr_cdata.kr_rx_cons));
1550 }
1551
1552 KR_DMA_WRITE_REG(KR_DMA_RXCHAN, DMA_S, ~status);
1553
1554	/* Re-enable D, H, E interrupts */
1555 KR_DMA_CLEARBITS_REG(KR_DMA_RXCHAN, DMA_SM,
1556 DMA_SM_D | DMA_SM_H | DMA_SM_E);
1557
1558 KR_UNLOCK(sc);
1559}
1560
1561static void
1562kr_tx_intr(void *arg)
1563{
1564 struct kr_softc *sc = arg;
1565 uint32_t status;
1566
1567 KR_LOCK(sc);
1568
1569 /* mask out interrupts */
1570 KR_DMA_SETBITS_REG(KR_DMA_TXCHAN, DMA_SM,
1571 DMA_SM_F | DMA_SM_E);
1572
1573 status = KR_DMA_READ_REG(KR_DMA_TXCHAN, DMA_S);
1574 if (status & (DMA_S_F | DMA_S_E)) {
1575 kr_tx(sc);
1576 if (status & DMA_S_E)
1577			device_printf(sc->kr_dev, "TX DMA error\n");
1578 }
1579
1580 KR_DMA_WRITE_REG(KR_DMA_TXCHAN, DMA_S, ~status);
1581
1582 /* Enable F, E interrupts */
1583 KR_DMA_CLEARBITS_REG(KR_DMA_TXCHAN, DMA_SM,
1584 DMA_SM_F | DMA_SM_E);
1585
1586 KR_UNLOCK(sc);
1587
1588}
1589
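/*
 * RX underrun and TX overrun interrupts are not handled yet; panic so
 * they are noticed if they ever fire.
 */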
1590static void
1591kr_rx_und_intr(void *arg)
1592{
1593
1594 panic("interrupt: %s\n", __func__);
1595}
1596
1597static void
1598kr_tx_ovr_intr(void *arg)
1599{
1600
1601 panic("interrupt: %s\n", __func__);
1602}
1603
1604static void
1605kr_tick(void *xsc)
1606{
1607 struct kr_softc *sc = xsc;
1608 struct mii_data *mii;
1609
1610 KR_LOCK_ASSERT(sc);
1611
1612 mii = device_get_softc(sc->kr_miibus);
1613 mii_tick(mii);
1614 callout_reset(&sc->kr_stat_callout, hz, kr_tick, sc);
1615}