if_hme.c: diff between revision 106937 (deleted lines) and revision 108834 (added lines)
1/*-
2 * Copyright (c) 1999 The NetBSD Foundation, Inc.
3 * Copyright (c) 2001 Thomas Moestl <tmm@FreeBSD.org>.
3 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
4 * All rights reserved.
5 *
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Paul Kranenburg.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:

--- 19 unchanged lines hidden ---

31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 *
37 * from: NetBSD: hme.c,v 1.20 2000/12/14 06:27:25 thorpej Exp
38 *
39 * $FreeBSD: head/sys/dev/hme/if_hme.c 106937 2002-11-14 23:54:55Z sam $
39 * $FreeBSD: head/sys/dev/hme/if_hme.c 108834 2003-01-06 22:12:57Z tmm $
40 */
41
42/*
43 * HME Ethernet module driver.
44 *
45 * The HME is e.g. part of the PCIO PCI multi function device.
46 * It supports TX gathering and TX and RX checksum offloading.
47 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2

--- 52 unchanged lines hidden ---
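A minimal user-space sketch of the alignment rule the comment above describes: RX buffer starts must fall at offset 2 modulo 16 (the modulus and offset come from the comment; the helper and buffer names below are illustrative, not driver code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Place a buffer start congruent to 2 modulo 16; with a 14-byte
 * Ethernet header this presumably lands the following IP header on
 * an aligned boundary. */
static char *
rx_align(char *raw)
{
    uintptr_t b = (uintptr_t)raw;

    /* Next 16-byte boundary, then step to the chosen offset 2. */
    return ((char *)(((b + 15) & ~(uintptr_t)15) + 2));
}

int
main(void)
{
    static char pool[128];
    char *p = rx_align(pool + 5);

    assert(((uintptr_t)p & 15) == 2);
    printf("RX buffer offset mod 16 = %lu\n",
        (unsigned long)((uintptr_t)p & 15));
    return (0);
}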

100 u_int32_t, u_int32_t);
101static void hme_mifinit(struct hme_softc *);
102static void hme_reset(struct hme_softc *);
103static void hme_setladrf(struct hme_softc *, int);
104
105static int hme_mediachange(struct ifnet *);
106static void hme_mediastatus(struct ifnet *, struct ifmediareq *);
107
108static int hme_load_mbuf(struct hme_softc *, struct mbuf *);
108static int hme_load_txmbuf(struct hme_softc *, struct mbuf *);
109static void hme_read(struct hme_softc *, int, int);
110static void hme_eint(struct hme_softc *, u_int);
111static void hme_rint(struct hme_softc *);
112static void hme_tint(struct hme_softc *);
113
114static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
115static void hme_rxdma_callback(void *, bus_dma_segment_t *, int, int);
116static void hme_txdma_callback(void *, bus_dma_segment_t *, int, int);
115static void hme_rxdma_callback(void *, bus_dma_segment_t *, int,
116 bus_size_t, int);
117static void hme_txdma_callback(void *, bus_dma_segment_t *, int,
118 bus_size_t, int);
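The two callbacks above are moved to the bus_dmamap_load_mbuf()-style shape, which passes the total mapped length as well. A self-contained sketch of that shape (the typedefs are stand-ins for the kernel's <machine/bus.h>; only the signature and the running-length test mirror the driver):

#include <stdio.h>

typedef unsigned long bus_addr_t;   /* stand-in types, not the real ones */
typedef unsigned long bus_size_t;
typedef struct {
    bus_addr_t ds_addr;             /* DMA address of this segment */
    bus_size_t ds_len;              /* length of this segment */
} bus_dma_segment_t;

/* New-style callback: totsize lets the callback recognize the final
 * segment by accumulating ds_len, as hme_txdma_callback() now does. */
static void
example_callback(void *xarg, bus_dma_segment_t *segs, int nsegs,
    bus_size_t totsize, int error)
{
    bus_size_t len = 0;
    int i;

    (void)xarg;
    if (error != 0)
        return;
    for (i = 0; i < nsegs; i++) {
        len += segs[i].ds_len;
        printf("seg %d: addr %#lx, len %lu%s\n", i, segs[i].ds_addr,
            segs[i].ds_len, len == totsize ? " (EOP)" : "");
    }
}

int
main(void)
{
    bus_dma_segment_t segs[2] = { { 0x1000, 512 }, { 0x2000, 128 } };

    example_callback(NULL, segs, 2, 640, 0);
    return (0);
}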
117
118devclass_t hme_devclass;
119
120static int hme_nerr;
121
122DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
123MODULE_DEPEND(hme, miibus, 1, 1, 1);
124

--- 122 unchanged lines hidden ---

247 if (error != 0)
248 goto fail_rxdesc;
249 }
250 error = bus_dmamap_create(sc->sc_rdmatag, 0,
251 &sc->sc_rb.rb_spare_dmamap);
252 if (error != 0)
253 goto fail_rxdesc;
254 /* Same for the TX descs. */
255 for (tdesc = 0; tdesc < HME_NTXDESC; tdesc++) {
257 for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
256 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
257 sc->sc_rb.rb_txdesc[tdesc].htx_flags = 0;
258 error = bus_dmamap_create(sc->sc_tdmatag, 0,
259 &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
260 if (error != 0)
261 goto fail_txdesc;
262 }
263
264 device_printf(sc->sc_dev, "Ethernet address:");
265 for (i = 0; i < 6; i++)

--- 6 unchanged lines hidden ---

272 ifp->if_name = "hme";
273 ifp->if_mtu = ETHERMTU;
274 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
275 ifp->if_start = hme_start;
276 ifp->if_ioctl = hme_ioctl;
277 ifp->if_init = hme_init;
278 ifp->if_output = ether_output;
279 ifp->if_watchdog = hme_watchdog;
280 ifp->if_snd.ifq_maxlen = HME_NTXDESC;
281 ifp->if_snd.ifq_maxlen = HME_NTXQ;
281
282 hme_mifinit(sc);
283
284 if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
285 hme_mediastatus)) != 0) {
286 device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
287 goto fail_rxdesc;
288 }

--- 107 unchanged lines hidden ---

396 return;
397 DELAY(20);
398 }
399
400 device_printf(sc->sc_dev, "hme_stop: reset failed\n");
401}
402
403static void
404hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
405hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
406 bus_size_t totsize, int error)
405{
406 bus_addr_t *a = xsc;
407
408 /* XXX: A cluster should not contain more than one segment, correct? */
409 if (error != 0 || nsegs != 1)
410 KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
411 if (error != 0)
410 return;
411 *a = segs[0].ds_addr;
412}
413
414/*
415 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
416 * ring for subsequent use.
417 */
418static void
419hme_discard_rxbuf(struct hme_softc *sc, int ix, int sync)
420{
421
422 /*
423 * Dropped a packet, reinitialize the descriptor and turn the
424 * ownership back to the hardware.
425 */
426 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
427 HME_XD_ENCODE_RSIZE(ulmin(HME_BUFSZ,
428 sc->sc_rb.rb_rxdesc[ix].hrx_len)));
429 HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
429 if (sync) {
430 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
431 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
432 }
433}
434
435static int
436hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
437{
438 struct hme_rxdesc *rd;
439 struct mbuf *m;
440 bus_addr_t ba;
441 bus_size_t len, offs;
442 bus_dmamap_t map;
443 uintptr_t b;
443 int a, unmap;
444 char *b;
445
446 rd = &sc->sc_rb.rb_rxdesc[ri];
447 unmap = rd->hrx_m != NULL;
448 if (unmap && keepold) {
449 /*
450 * Reinitialize the descriptor flags, as they may have been
451 * altered by the hardware.
452 */
453 hme_discard_rxbuf(sc, ri, 0);
454 return (0);
455 }
456 if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
456 if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
457 return (ENOBUFS);
458 m_clget(m, M_DONTWAIT);
459 if ((m->m_flags & M_EXT) == 0)
460 goto fail_mcl;
461 len = m->m_ext.ext_size;
462 b = mtod(m, char *);
458 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
459 b = mtod(m, uintptr_t);
463 /*
464 * Required alignment boundary. At least 16 is needed, but since
465 * the mapping must be done in a way that a burst can start on a
466 * natural boundary we might need to extend this.
467 */
468 a = max(0x10, sc->sc_burst);
465 a = max(HME_MINRXALIGN, sc->sc_burst);
469 /*
470 * Make sure the buffer is suitably aligned: we need an offset of
471 * 2 modulo a. XXX: this ensures at least 16 byte alignment of the
472 * header adjacent to the ethernet header, which should be sufficient
473 * in all cases. Nevertheless, this second-guesses ALIGN().
474 */
466 /*
467 * Make sure the buffer is suitably aligned. The 2 byte offset is removed
468 * when the mbuf is handed up. XXX: this ensures at least 16 byte
469 * alignment of the header adjacent to the ethernet header, which
470 * should be sufficient in all cases. Nevertheless, this second-guesses
471 * ALIGN().
472 */
475 offs = (a - (((uintptr_t)b - 2) & (a - 1))) % a;
476 len -= offs;
477 /* Align the buffer on the boundary for mapping. */
478 b += offs - 2;
479 ba = 0;
480 if (bus_dmamap_load(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
481 b, len + 2, hme_rxdma_callback, &ba, 0) != 0 || ba == 0)
482 goto fail_mcl;
473 m_adj(m, roundup2(b, a) - b);
474 if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
475 m, hme_rxdma_callback, &ba, 0) != 0) {
476 m_freem(m);
477 return (ENOBUFS);
478 }
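The hand-rolled offset arithmetic above is replaced by a single m_adj() using roundup2(). A self-contained check of that arithmetic (the alignment value 64 merely stands in for max(HME_MINRXALIGN, sc->sc_burst)):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* roundup2() as in <sys/param.h>; valid for power-of-two alignments. */
#define roundup2(x, y)  (((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
    uintptr_t a = 64;   /* example: max(HME_MINRXALIGN, burst) */
    uintptr_t b;

    for (b = 0x1000; b < 0x1000 + 2 * a; b++) {
        uintptr_t trim = roundup2(b, a) - b;    /* the m_adj() amount */

        assert((b + trim) % a == 0);    /* start lands on the boundary */
        assert(trim < a);               /* never trims a full unit */
    }
    printf("all starts aligned to %lu bytes\n", (unsigned long)a);
    return (0);
}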
483 if (unmap) {
484 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
485 BUS_DMASYNC_POSTREAD);
486 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
487 }
488 map = rd->hrx_dmamap;
489 rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
490 sc->sc_rb.rb_spare_dmamap = map;
491 rd->hrx_offs = offs;
492 rd->hrx_len = len - sc->sc_burst;
493 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
494 HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
495 /* Lazily leave at least one burst size grace space. */
496 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
497 HME_XD_ENCODE_RSIZE(ulmin(HME_BUFSZ, rd->hrx_len)));
498 rd->hrx_m = m;
489 rd->hrx_m = m;
490 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
491 HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
499 return (0);
500
501fail_mcl:
502 m_freem(m);
503 return (ENOBUFS);
504}
505
506static int
507hme_meminit(struct hme_softc *sc)
508{
509 struct hme_ring *hr = &sc->sc_rb;
510 struct hme_txdesc *td;
511 bus_addr_t dma;

--- 25 unchanged lines hidden ---

537 /* Again move forward to the next 2048 byte boundary.*/
538 dma = (bus_addr_t)roundup((u_long)dma, 2048);
539 p = (caddr_t)roundup((u_long)p, 2048);
540
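roundup() here is the general-purpose macro from <sys/param.h> (any modulus, not just powers of two). A short check of the 2048-byte case used above, with an arbitrary example address:

#include <assert.h>
#include <stdio.h>

#define roundup(x, y)   ((((x) + ((y) - 1)) / (y)) * (y))   /* <sys/param.h> */

int
main(void)
{
    unsigned long dma = 0x12345;    /* example ring address */

    dma = roundup(dma, 2048);       /* next 2048 byte boundary */
    assert(dma % 2048 == 0 && dma >= 0x12345);
    printf("ring base moved to %#lx\n", dma);
    return (0);
}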
541 /*
542 * Initialize transmit buffer descriptors
543 */
544 for (i = 0; i < HME_NTXDESC; i++) {
545 td = &sc->sc_rb.rb_txdesc[i];
546 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
547 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
548 if (td->htx_m != NULL) {
549 m_freem(td->htx_m);
550 td->htx_m = NULL;
551 }
552 if ((td->htx_flags & HTXF_MAPPED) != 0)
553 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
554 td->htx_flags = 0;
555 }
534 HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
535 HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
536 }
537
538 STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
539 STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
540 for (i = 0; i < HME_NTXQ; i++) {
541 td = &sc->sc_rb.rb_txdesc[i];
542 if (td->htx_m != NULL) {
543 m_freem(td->htx_m);
544 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
545 td->htx_m = NULL;
546 }
547 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
548 }
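The TX bookkeeping introduced here keeps descriptors on two singly-linked tail queues: free entries on rb_txfreeq, in-flight ones on rb_txbusyq. A user-space sketch of that life cycle (the struct is simplified; only the queue discipline mirrors the driver):

#include <stdio.h>
#include <sys/queue.h>

struct txd {
    int                 id;
    STAILQ_ENTRY(txd)   htx_q;  /* linkage, as in struct hme_txdesc */
};
STAILQ_HEAD(txdq, txd);

int
main(void)
{
    struct txdq freeq = STAILQ_HEAD_INITIALIZER(freeq);
    struct txdq busyq = STAILQ_HEAD_INITIALIZER(busyq);
    struct txd pool[4], *td;
    int i;

    for (i = 0; i < 4; i++) {
        pool[i].id = i;
        STAILQ_INSERT_TAIL(&freeq, &pool[i], htx_q);
    }
    /* hme_load_txmbuf(): claim a free descriptor for a packet. */
    td = STAILQ_FIRST(&freeq);
    STAILQ_REMOVE_HEAD(&freeq, htx_q);
    STAILQ_INSERT_TAIL(&busyq, td, htx_q);
    /* hme_tint(): completion hands it back to the free queue. */
    td = STAILQ_FIRST(&busyq);
    STAILQ_REMOVE_HEAD(&busyq, htx_q);
    STAILQ_INSERT_TAIL(&freeq, td, htx_q);
    printf("descriptor %d cycled free -> busy -> free\n", td->id);
    return (0);
}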
556
557 /*
558 * Initialize receive buffer descriptors
559 */
560 for (i = 0; i < HME_NRXDESC; i++) {
561 error = hme_add_rxbuf(sc, i, 1);
562 if (error != 0)

--- 62 unchanged lines hidden ---

625 */
626
627 /* step 1 & 2. Reset the Ethernet Channel */
628 hme_stop(sc);
629
630 /* Re-initialize the MIF */
631 hme_mifinit(sc);
632
633 /* Call MI reset function if any */
634 if (sc->sc_hwreset)
635 (*sc->sc_hwreset)(sc);
636
637#if 0
638 /* Mask all MIF interrupts, just in case */
639 HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
640#endif
641
642 /* step 3. Setup data structures in host memory */
643 if (hme_meminit(sc) != 0) {
644 device_printf(sc->sc_dev, "out of buffers; init aborted.");

--- 85 unchanged lines hidden ---

730 case 256:
731 v |= HME_ERX_CFG_RINGSIZE256;
732 break;
733 default:
734 printf("hme: invalid Receive Descriptor ring size\n");
735 break;
736 }
737
738 /* Enable DMA, fix RX first byte offset to 2. */
727 /* Enable DMA, fix RX first byte offset. */
739 v &= ~HME_ERX_CFG_FBO_MASK;
740 v |= HME_ERX_CFG_DMAENABLE | (2 << HME_ERX_CFG_FBO_SHIFT);
729 v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
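The first-byte-offset update is a routine read-modify-write of a register field. A sketch with invented field positions (the real HME_ERX_CFG_* values live in the register headers and are not reproduced here):

#include <stdint.h>
#include <stdio.h>

#define ERX_CFG_FBO_SHIFT   3                           /* invented */
#define ERX_CFG_FBO_MASK    (0x7 << ERX_CFG_FBO_SHIFT)  /* invented */
#define ERX_CFG_DMAENABLE   0x1                         /* invented */
#define RXOFFS              2   /* the driver's HME_RXOFFS value */

int
main(void)
{
    uint32_t v = 0x38;          /* pretend readback: FBO field all ones */

    v &= ~ERX_CFG_FBO_MASK;     /* clear the old offset */
    v |= ERX_CFG_DMAENABLE | (RXOFFS << ERX_CFG_FBO_SHIFT);
    printf("ERX_CFG = %#x\n", v);   /* 0x11 */
    return (0);
}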
741 CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
742 HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
743
744 /* step 11. XIF Configuration */
745 v = HME_MAC_READ_4(sc, HME_MACI_XIF);
746 v |= HME_MAC_XIF_OE;
747 /* If an external transceiver is connected, enable its MII drivers */
748 if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)

--- 11 unchanged lines hidden ---

760 /* step 13. TX_MAC Configuration Register */
761 v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
762 v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
763 CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
764 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
765
766 /* step 14. Issue Transmit Pending command */
767
768 /* Call MI initialization function if any */
769 if (sc->sc_hwinit)
770 (*sc->sc_hwinit)(sc);
771
772#ifdef HMEDEBUG
773 /* Debug: double-check. */
774 CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
775 "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
776 HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
777 HME_ERX_READ_4(sc, HME_ERXI_RING),
778 HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
779 CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",

--- 10 unchanged lines hidden ---

790
791 ifp->if_flags |= IFF_RUNNING;
792 ifp->if_flags &= ~IFF_OACTIVE;
793 ifp->if_timer = 0;
794 hme_start(ifp);
795}
796
797struct hme_txdma_arg {
798 struct hme_softc *hta_sc;
799 struct mbuf *hta_m;
800 int hta_err;
801 int hta_flags;
802 int hta_offs;
803 int hta_pad;
783 struct hme_softc *hta_sc;
784 struct hme_txdesc *hta_htx;
785 int hta_ndescs;
804};
805
786};
787
806/* Values for hta_flags */
807#define HTAF_SOP 1 /* Start of packet (first mbuf in chain) */
808#define HTAF_EOP 2 /* Start of packet (last mbuf in chain) */
809
788/*
789 * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf()
790 * are readable from the nearest burst boundary on (i.e. potentially before
791 * ds_addr) to the first boundary beyond the end. This is usually a safe
792 * assumption to make, but is not documented.
793 */
810static void
811hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
795hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
796 bus_size_t totsz, int error)
812{
813 struct hme_txdma_arg *ta = xsc;
814 struct hme_txdesc *td;
815 bus_addr_t addr;
816 bus_size_t sz;
799 struct hme_txdesc *htx;
800 bus_size_t len = 0;
817 caddr_t txd;
818 u_int32_t flags;
819 int i, *tdhead, pci;
802 u_int32_t flags = 0;
803 int i, tdhead, pci;
820
821 ta->hta_err = error;
822 if (error != 0)
823 return;
824
805 if (error != 0)
806 return;
807
825 tdhead = &ta->hta_sc->sc_rb.rb_tdhead;
808 tdhead = ta->hta_sc->sc_rb.rb_tdhead;
826 pci = ta->hta_sc->sc_pci;
827 txd = ta->hta_sc->sc_rb.rb_txd;
811 htx = ta->hta_htx;
812
813 if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
814 ta->hta_ndescs = -1;
815 return;
816 }
817 ta->hta_ndescs = nsegs;
818
828 for (i = 0; i < nsegs; i++) {
829 if (ta->hta_sc->sc_rb.rb_td_nbusy == HME_NTXDESC) {
830 ta->hta_err = -1;
831 return;
832 }
833 td = &ta->hta_sc->sc_rb.rb_txdesc[*tdhead];
834 addr = segs[i].ds_addr;
835 sz = segs[i].ds_len;
836 if (i == 0) {
837 /* Adjust the offsets. */
838 addr += ta->hta_offs;
839 sz -= ta->hta_offs;
840 td->htx_flags = HTXF_MAPPED;
841 } else
842 td->htx_flags = 0;
843 if (i == nsegs - 1) {
844 /* Subtract the pad. */
845 if (sz < ta->hta_pad) {
846 /*
847 * Ooops. This should not have happened; it
848 * means that we got a zero-size segment or
849 * segment sizes were unnatural.
850 */
851 device_printf(ta->hta_sc->sc_dev,
852 "hme_txdma_callback: alignment glitch\n");
853 ta->hta_err = EINVAL;
854 return;
855 }
856 sz -= ta->hta_pad;
857 /* If sz is 0 now, this does not matter. */
858 }
820 if (segs[i].ds_len == 0)
821 continue;
822
859 /* Fill the ring entry. */
860 flags = HME_XD_ENCODE_TSIZE(sz);
861 if ((ta->hta_flags & HTAF_SOP) != 0 && i == 0)
824 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
825 if (len == 0)
862 flags |= HME_XD_SOP;
863 if ((ta->hta_flags & HTAF_EOP) != 0 && i == nsegs - 1) {
827 if (len + segs[i].ds_len == totsz)
864 flags |= HME_XD_EOP;
865 td->htx_m = ta->hta_m;
866 } else
867 td->htx_m = NULL;
868 CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
829 CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
869 "flags %#x, addr %#x", i + 1, nsegs, *tdhead, (u_int)flags,
870 (u_int)addr);
871 HME_XD_SETFLAGS(pci, txd, *tdhead, flags);
872 HME_XD_SETADDR(pci, txd, *tdhead, addr);
830 "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
831 (u_int)segs[i].ds_addr);
832 HME_XD_SETFLAGS(pci, txd, tdhead, flags);
833 HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);
873
874 ta->hta_sc->sc_rb.rb_td_nbusy++;
875 *tdhead = ((*tdhead) + 1) % HME_NTXDESC;
836 htx->htx_lastdesc = tdhead;
837 tdhead = (tdhead + 1) % HME_NTXDESC;
838 len += segs[i].ds_len;
876 }
840 ta->hta_sc->sc_rb.rb_tdhead = tdhead;
841 KASSERT((flags & HME_XD_EOP) != 0,
842 ("hme_txdma_callback: missed end of packet!"));
877}
878
879/*
880 * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and
881 * start the transmission.
882 * Returns 0 on success, -1 if there were not enough free descriptors to map
883 * the packet, or an errno otherwise.
884 */
885static int
886hme_load_mbuf(struct hme_softc *sc, struct mbuf *m0)
852hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
887{
888 struct hme_txdma_arg cba;
889 struct mbuf *m = m0, *n;
890 struct hme_txdesc *td;
891 char *start;
892 int error, len, si, ri, totlen, sum;
856 int error, si, ri;
893 u_int32_t flags;
894
895 if ((m->m_flags & M_PKTHDR) == 0)
896 panic("hme_dmamap_load_mbuf: no packet header");
897 totlen = m->m_pkthdr.len;
898 sum = 0;
899 si = sc->sc_rb.rb_tdhead;
860 if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
861 return (-1);
862 td->htx_m = m0;
900 cba.hta_sc = sc;
901 cba.hta_err = 0;
902 cba.hta_flags = HTAF_SOP;
903 cba.hta_m = m0;
904 for (; m != NULL && sum < totlen; m = n) {
905 if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC) {
906 error = -1;
907 goto fail;
908 }
909 len = m->m_len;
910 n = m->m_next;
911 if (len == 0)
912 continue;
913 sum += len;
914 td = &sc->sc_rb.rb_txdesc[sc->sc_rb.rb_tdhead];
915 if (n == NULL || sum >= totlen)
916 cba.hta_flags |= HTAF_EOP;
917 /*
918 * This is slightly evil: we must map the buffer in a way that
919 * allows dma transfers to start on a natural burst boundary.
920 * This is done by rounding down the mapping address, and
921 * recording the required offset for the callback. With this,
922 * we cannot cross a page boundary because the burst size
923 * is a small power of two.
924 */
925 cba.hta_offs = (sc->sc_burst -
926 (mtod(m, uintptr_t) & (sc->sc_burst - 1))) % sc->sc_burst;
927 start = mtod(m, char *) - cba.hta_offs;
928 len += cba.hta_offs;
929 /*
930 * Similarly, the end of the mapping should be on a natural
931 * burst boundary. XXX: Let's hope that any segment ends
932 * generated by the busdma code are also on such boundaries.
933 */
934 cba.hta_pad = (sc->sc_burst - (((uintptr_t)start + len) &
935 (sc->sc_burst - 1))) % sc->sc_burst;
936 len += cba.hta_pad;
937 /* Most of the work is done in the callback. */
938 if ((error = bus_dmamap_load(sc->sc_tdmatag, td->htx_dmamap,
939 start, len, hme_txdma_callback, &cba, 0)) != 0 ||
940 cba.hta_err != 0)
941 goto fail;
942 bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
943 BUS_DMASYNC_PREWRITE);
944
945 cba.hta_flags = 0;
864 cba.hta_htx = td;
865 if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
866 m0, hme_txdma_callback, &cba, 0)) != 0)
867 goto fail;
868 if (cba.hta_ndescs == -1) {
869 error = -1;
870 goto fail;
946 }
872 bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
873 BUS_DMASYNC_PREWRITE);
874
875 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
876 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);
877
947 /* Turn descriptor ownership over to the hme, back to front. */
948 ri = sc->sc_rb.rb_tdhead;
949 CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
950 ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
951 do {
952 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
953 flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
954 HME_XD_OWN;

--- 4 unchanged lines hidden ---
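Note the backwards walk when granting ownership above: descriptors are handed to the chip last-to-first, so the OWN bit of the first descriptor, which the hardware polls, is set only once the rest of the chain is in place. A small model of that ordering (ring size and flag value are examples only):

#include <stdio.h>

#define NTXDESC 8       /* example; HME_NTXDESC in the driver */
#define XD_OWN  0x80    /* example "owned by hardware" flag */

int
main(void)
{
    unsigned flags[NTXDESC] = { 0 };
    int si = 2, nsegs = 3, ri, n;

    ri = (si + nsegs) % NTXDESC;        /* tdhead after loading */
    for (n = 0; n < nsegs; n++) {
        ri = (ri + NTXDESC - 1) % NTXDESC;
        flags[ri] |= XD_OWN;
        printf("grant descriptor %d\n", ri);    /* 4, 3, then 2 */
    }
    return (0);
}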

959
960 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
961 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
962
963 /* start the transmission. */
964 HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
965 return (0);
966fail:
967 for (ri = si; ri != sc->sc_rb.rb_tdhead; ri = (ri + 1) % HME_NTXDESC) {
968 td = &sc->sc_rb.rb_txdesc[ri];
969 if ((td->htx_flags & HTXF_MAPPED) != 0)
970 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
971 td->htx_flags = 0;
972 td->htx_m = NULL;
973 sc->sc_rb.rb_td_nbusy--;
974 HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, 0);
975 }
976 sc->sc_rb.rb_tdhead = si;
977 error = cba.hta_err != 0 ? cba.hta_err : error;
978 if (error != -1)
979 device_printf(sc->sc_dev, "could not load mbuf: %d\n", error);
898 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
980 return (error);
981}
982
983/*
984 * Pass a packet to the higher levels.
985 */
986static void
987hme_read(struct hme_softc *sc, int ix, int len)
988{
989 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
990 struct mbuf *m;
991 int offs;
992
993 if (len <= sizeof(struct ether_header) ||
994 len > ETHERMTU + sizeof(struct ether_header)) {
995#ifdef HMEDEBUG
996 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
997 len);
998#endif
999 ifp->if_ierrors++;
1000 hme_discard_rxbuf(sc, ix, 1);
1001 return;
1002 }
1003
1004 m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
1005 offs = sc->sc_rb.rb_rxdesc[ix].hrx_offs;
1006 CTR2(KTR_HME, "hme_read: offs %d, len %d", offs, len);
923 CTR1(KTR_HME, "hme_read: len %d", len);
1007
1008 if (hme_add_rxbuf(sc, ix, 0) != 0) {
1009 /*
1010 * hme_add_rxbuf will leave the old buffer in the ring until
1011 * it is sure that a new buffer can be mapped. If it can not,
1012 * drop the packet, but leave the interface up.
1013 */
1014 ifp->if_iqdrops++;
1015 hme_discard_rxbuf(sc, ix, 1);
1016 return;
1017 }
1018
1019 ifp->if_ipackets++;
1020
1021 /* Changed the rings; sync. */
1022 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1023 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1024
1025 m->m_pkthdr.rcvif = ifp;
1026 m->m_pkthdr.len = m->m_len = len + offs;
1027 m_adj(m, offs);
943 m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
944 m_adj(m, HME_RXOFFS);
1028 /* Pass the packet up. */
1029 (*ifp->if_input)(ifp, m);
1030}
1031
1032static void
1033hme_start(struct ifnet *ifp)
1034{
1035 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;

--- 4 unchanged lines hidden ---

1040 return;
1041
1042 error = 0;
1043 for (;;) {
1044 IF_DEQUEUE(&ifp->if_snd, m);
1045 if (m == NULL)
1046 break;
1047
1048 error = hme_load_mbuf(sc, m);
1049 if (error != 0) {
965 error = hme_load_txmbuf(sc, m);
966 if (error == -1) {
1050 ifp->if_flags |= IFF_OACTIVE;
1051 IF_PREPEND(&ifp->if_snd, m);
1052 break;
970 } else if (error > 0) {
971 printf("hme_start: error %d while loading mbuf\n",
972 error);
1053 } else {
1054 enq = 1;
1055 BPF_MTAP(ifp, m);
1056 }
1057 }
1058
1059 if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
1060 ifp->if_flags |= IFF_OACTIVE;
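The start loop now distinguishes the two failure modes of hme_load_txmbuf(): -1 means the ring is full (requeue the packet and throttle), while a positive errno is a real failure that is only reported. A toy model of that protocol (queue and loader are deliberately simplified):

#include <stdio.h>

/* Toy loader: -1 when the ring is full, 0 on success. */
static int
load_txmbuf(int *free_descs)
{
    if (*free_descs == 0)
        return (-1);
    (*free_descs)--;
    return (0);
}

int
main(void)
{
    int free_descs = 2, queued = 4, sent = 0, requeued = 0;

    while (queued > 0) {
        int error = load_txmbuf(&free_descs);

        if (error == -1) {      /* ring full: keep the packet queued */
            requeued = queued;
            break;
        }
        queued--;
        sent++;
    }
    printf("sent %d, still queued %d\n", sent, requeued);
    return (0);
}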

--- 4 unchanged lines hidden ---

1065
1066/*
1067 * Transmit interrupt.
1068 */
1069static void
1070hme_tint(struct hme_softc *sc)
1071{
1072 struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1073 struct hme_txdesc *td;
993 struct hme_txdesc *htx;
1074 unsigned int ri, txflags;
1075
1076 /*
1077 * Unload collision counters
1078 */
1079 ifp->if_collisions +=
1080 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
1081 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
1082 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
1083 HME_MAC_READ_4(sc, HME_MACI_LTCNT);
1084
1085 /*
1086 * then clear the hardware counters.
1087 */
1088 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
1089 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
1090 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
1091 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
1092
1013 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1093 /* Fetch current position in the transmit ring */
1094 for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
1095 if (sc->sc_rb.rb_td_nbusy <= 0) {
1096 CTR0(KTR_HME, "hme_tint: not busy!");
1097 break;
1098 }
1099
1100 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
1101 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
1102
1103 if ((txflags & HME_XD_OWN) != 0)
1104 break;
1105
1106 td = &sc->sc_rb.rb_txdesc[ri];
1107 CTR1(KTR_HME, "hme_tint: not owned, dflags %#x", td->htx_flags);
1108 if ((td->htx_flags & HTXF_MAPPED) != 0) {
1109 bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
1110 BUS_DMASYNC_POSTWRITE);
1111 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
1112 }
1113 td->htx_flags = 0;
1027 CTR0(KTR_HME, "hme_tint: not owned");
1114 --sc->sc_rb.rb_td_nbusy;
1115 ifp->if_flags &= ~IFF_OACTIVE;
1116
1117 /* Complete packet transmitted? */
1118 if ((txflags & HME_XD_EOP) == 0)
1119 continue;
1120
1035 KASSERT(htx->htx_lastdesc == ri,
1036 ("hme_tint: ring indices skewed: %d != %d!",
1037 htx->htx_lastdesc, ri));
1038 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
1039 BUS_DMASYNC_POSTWRITE);
1040 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1041
1121 ifp->if_opackets++;
1122 m_freem(td->htx_m);
1123 td->htx_m = NULL;
1043 m_freem(htx->htx_m);
1044 htx->htx_m = NULL;
1045 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
1046 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
1047 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1124 }
1125 /* Turn off watchdog */
1126 if (sc->sc_rb.rb_td_nbusy == 0)
1127 ifp->if_timer = 0;
1128
1129 /* Update ring */
1130 sc->sc_rb.rb_tdtail = ri;
1131

--- 256 unchanged lines hidden ---

1388 * start it.
1389 */
1390 hme_init(sc);
1391 } else if ((ifp->if_flags & IFF_UP) != 0) {
1392 /*
1393 * Reset the interface to pick up changes in any other
1394 * flags that affect hardware registers.
1395 */
1396 /*hme_stop(sc);*/
1397 hme_init(sc);
1398 }
1399#ifdef HMEDEBUG
1400 sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
1401#endif
1402 break;
1403
1404 case SIOCADDMULTI:

--- 124 unchanged lines hidden ---