3c3
< * Copyright (c) 2001 Thomas Moestl <tmm@FreeBSD.org>.
---
> * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
39c39
< * $FreeBSD: head/sys/dev/hme/if_hme.c 106937 2002-11-14 23:54:55Z sam $
---
> * $FreeBSD: head/sys/dev/hme/if_hme.c 108834 2003-01-06 22:12:57Z tmm $
108c108
< static int hme_load_mbuf(struct hme_softc *, struct mbuf *);
---
> static int hme_load_txmbuf(struct hme_softc *, struct mbuf *);
115,116c115,118
< static void hme_rxdma_callback(void *, bus_dma_segment_t *, int, int);
< static void hme_txdma_callback(void *, bus_dma_segment_t *, int, int);
---
> static void hme_rxdma_callback(void *, bus_dma_segment_t *, int,
> bus_size_t, int);
> static void hme_txdma_callback(void *, bus_dma_segment_t *, int,
> bus_size_t, int);
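
The widened prototypes match bus_dmamap_callback2_t, the callback type that bus_dmamap_load_mbuf() and bus_dmamap_load_uio() take; unlike the plain bus_dmamap_callback_t it also receives the total mapped size. For reference, roughly as declared in <machine/bus.h> of that era:

    typedef void bus_dmamap_callback2_t(void *arg, bus_dma_segment_t *segs,
        int nseg, bus_size_t mapsize, int error);

    int bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
        struct mbuf *mbuf, bus_dmamap_callback2_t *callback,
        void *callback_arg, int flags);
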
255c257
< for (tdesc = 0; tdesc < HME_NTXDESC; tdesc++) {
---
> for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
257d258
< sc->sc_rb.rb_txdesc[tdesc].htx_flags = 0;
280c281
< ifp->if_snd.ifq_maxlen = HME_NTXDESC;
---
> ifp->if_snd.ifq_maxlen = HME_NTXQ;
404c405,406
< hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
---
> hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
> bus_size_t totsize, int error)
408,409c410,411
< /* XXX: A cluster should not contain more than one segment, correct? */
< if (error != 0 || nsegs != 1)
---
> KASSERT(nsegs == 1, ("hme_rxdma_callback: multiple segments!"));
> if (error != 0)
427,428c429
< HME_XD_ENCODE_RSIZE(ulmin(HME_BUFSZ,
< sc->sc_rb.rb_rxdesc[ix].hrx_len)));
---
> HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
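
HME_DESC_RXLEN() lives in if_hmevar.h and is not part of this diff. Judging from the ulmin() expression it replaces and the hrx_len bookkeeping removed below, a plausible (unverified) reconstruction is:

    /* Hypothetical reconstruction; the real macro is in if_hmevar.h. */
    #define HME_DESC_RXLEN(sc, d) \
            ulmin(HME_BUFSZ, (d)->hrx_m->m_len - (sc)->sc_burst)

i.e. the usable receive length of the loaded cluster, capped at HME_BUFSZ and keeping the one-burst grace space the old code tracked in hrx_len.
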
441d441
< bus_size_t len, offs;
442a443
> uintptr_t b;
444d444
< char *b;
456c456
< if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
---
> if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
458,462c458,459
< m_clget(m, M_DONTWAIT);
< if ((m->m_flags & M_EXT) == 0)
< goto fail_mcl;
< len = m->m_ext.ext_size;
< b = mtod(m, char *);
---
> m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
> b = mtod(m, uintptr_t);
468c465
< a = max(0x10, sc->sc_burst);
---
> a = max(HME_MINRXALIGN, sc->sc_burst);
470,473c467,471
< * Make sure the buffer is suitably aligned: we need an offset of
< * 2 modulo a. XXX: this ensures at least 16 byte alignment of the
< * header adjacent to the ethernet header, which should be sufficient
< * in all cases. Nevertheless, this second-guesses ALIGN().
---
> * Make sure the buffer is suitably aligned. The 2 byte offset is removed
> * when the mbuf is handed up. XXX: this ensures at least 16 byte
> * alignment of the header adjacent to the ethernet header, which
> * should be sufficient in all cases. Nevertheless, this second-guesses
> * ALIGN().
475,482c473,478
< offs = (a - (((uintptr_t)b - 2) & (a - 1))) % a;
< len -= offs;
< /* Align the buffer on the boundary for mapping. */
< b += offs - 2;
< ba = 0;
< if (bus_dmamap_load(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
< b, len + 2, hme_rxdma_callback, &ba, 0) != 0 || ba == 0)
< goto fail_mcl;
---
> m_adj(m, roundup2(b, a) - b);
> if (bus_dmamap_load_mbuf(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
> m, hme_rxdma_callback, &ba, 0) != 0) {
> m_freem(m);
> return (ENOBUFS);
> }
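
The hand-rolled offset arithmetic gives way to a single m_adj(): roundup2(b, a) - b is how many leading bytes must be trimmed for the cluster's data pointer to land on an a-byte boundary, a being a power of two. A minimal userland sketch of just this arithmetic, with made-up example values:

    #include <stdint.h>
    #include <stdio.h>

    /* As defined in <sys/param.h>; y must be a power of two. */
    #define roundup2(x, y)  (((x) + ((y) - 1)) & ~((y) - 1))

    int
    main(void)
    {
            uintptr_t b = 0x1234;   /* example cluster address */
            uintptr_t a = 64;       /* max(HME_MINRXALIGN, sc_burst) */

            /* m_adj(m, roundup2(b, a) - b) trims this many bytes. */
            printf("trim %ju bytes, buffer then at 0x%jx\n",
                (uintmax_t)(roundup2(b, a) - b), (uintmax_t)roundup2(b, a));
            return (0);
    }

For b = 0x1234 and a = 64 this trims 12 bytes, moving the start to 0x1240.
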
491,492d486
< rd->hrx_offs = offs;
< rd->hrx_len = len - sc->sc_burst;
495,497d488
< /* Lazily leave at least one burst size grace space. */
< HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
< HME_XD_ENCODE_RSIZE(ulmin(HME_BUFSZ, rd->hrx_len)));
498a490,491
> HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
> HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
500,503d492
<
< fail_mcl:
< m_freem(m);
< return (ENOBUFS);
545d533
< td = &sc->sc_rb.rb_txdesc[i];
547a536,541
> }
>
> STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
> STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
> for (i = 0; i < HME_NTXQ; i++) {
> td = &sc->sc_rb.rb_txdesc[i];
549a544
> bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
552,554c547
< if ((td->htx_flags & HTXF_MAPPED) != 0)
< bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
< td->htx_flags = 0;
---
> STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
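
Where the old code tracked mapping state in a per-slot htx_flags word, software TX descriptors now circulate between two tail queues: rb_txfreeq holds slots ready for use, rb_txbusyq those with an in-flight chain. The supporting declarations sit in if_hmevar.h; an assumed sketch:

    /* Assumed shape; the real declarations are in if_hmevar.h. */
    struct hme_txdesc {
            struct mbuf              *htx_m;        /* mbuf chain, if busy */
            bus_dmamap_t             htx_dmamap;    /* DMA map for the chain */
            int                      htx_lastdesc;  /* last hw descriptor used */
            STAILQ_ENTRY(hme_txdesc) htx_q;         /* free/busy queue linkage */
    };
    STAILQ_HEAD(hme_txdq, hme_txdesc);
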
633,636d625
< /* Call MI reset function if any */
< if (sc->sc_hwreset)
< (*sc->sc_hwreset)(sc);
<
738c727
< /* Enable DMA, fix RX first byte offset to 2. */
---
> /* Enable DMA, fix RX first byte offset. */
740c729
< v |= HME_ERX_CFG_DMAENABLE | (2 << HME_ERX_CFG_FBO_SHIFT);
---
> v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
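
HME_RXOFFS replaces the bare 2. Given the old literal here and the m_adj(m, HME_RXOFFS) in hme_read() below, the definition is presumably just:

    /* Presumed definition (driver header, not shown in this diff). */
    #define HME_RXOFFS      2       /* skew so the IP header behind the
                                       14-byte ethernet header is 4-byte
                                       aligned */

HME_MINRXALIGN above is presumably the old 0x10 literal given a name in the same fashion.
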
768,771d756
< /* Call MI initialization function if any */
< if (sc->sc_hwinit)
< (*sc->sc_hwinit)(sc);
<
798,803c783,785
< struct hme_softc *hta_sc;
< struct mbuf *hta_m;
< int hta_err;
< int hta_flags;
< int hta_offs;
< int hta_pad;
---
> struct hme_softc *hta_sc;
> struct hme_txdesc *hta_htx;
> int hta_ndescs;
806,809c788,793
< /* Values for hta_flags */
< #define HTAF_SOP 1 /* Start of packet (first mbuf in chain) */
< #define HTAF_EOP 2 /* End of packet (last mbuf in chain) */
<
---
> /*
> * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf()
> * are readable from the nearest burst boundary on (i.e. potentially before
> * ds_addr) to the first boundary beyond the end. This is usually a safe
> * assumption to make, but is not documented.
> */
811c795,796
< hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
---
> hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs,
> bus_size_t totsz, int error)
814,816c799,800
< struct hme_txdesc *td;
< bus_addr_t addr;
< bus_size_t sz;
---
> struct hme_txdesc *htx;
> bus_size_t len = 0;
818,819c802,803
< u_int32_t flags;
< int i, *tdhead, pci;
---
> u_int32_t flags = 0;
> int i, tdhead, pci;
821d804
< ta->hta_err = error;
825c808
< tdhead = &ta->hta_sc->sc_rb.rb_tdhead;
---
> tdhead = ta->hta_sc->sc_rb.rb_tdhead;
827a811,818
> htx = ta->hta_htx;
>
> if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
> ta->hta_ndescs = -1;
> return;
> }
> ta->hta_ndescs = nsegs;
>
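
Because busdma callbacks return void, a shortage of hardware descriptors has to be reported back through the argument structure: hta_ndescs carries the segment count on success and -1 on failure, which hme_load_txmbuf() inspects once bus_dmamap_load_mbuf() returns. The pattern in miniature, with entirely hypothetical foo_* names:

    /* Hypothetical sketch of the void-callback error convention. */
    #define FOO_MAXSEGS     16      /* hypothetical descriptor budget */

    struct foo_dma_arg {
            int     fda_ndescs;     /* out: segment count, or -1 */
    };

    static void
    foo_dma_callback(void *xarg, bus_dma_segment_t *segs, int nsegs,
        bus_size_t totsz, int error)
    {
            struct foo_dma_arg *fda = xarg;

            if (error != 0)
                    return;         /* the load function reports this */
            if (nsegs > FOO_MAXSEGS) {
                    fda->fda_ndescs = -1;   /* signal the shortage */
                    return;
            }
            fda->fda_ndescs = nsegs;
            /* ... program hardware descriptors from segs[] ... */
    }
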
829,858c820,822
< if (ta->hta_sc->sc_rb.rb_td_nbusy == HME_NTXDESC) {
< ta->hta_err = -1;
< return;
< }
< td = &ta->hta_sc->sc_rb.rb_txdesc[*tdhead];
< addr = segs[i].ds_addr;
< sz = segs[i].ds_len;
< if (i == 0) {
< /* Adjust the offsets. */
< addr += ta->hta_offs;
< sz -= ta->hta_offs;
< td->htx_flags = HTXF_MAPPED;
< } else
< td->htx_flags = 0;
< if (i == nsegs - 1) {
< /* Subtract the pad. */
< if (sz < ta->hta_pad) {
< /*
< * Ooops. This should not have happened; it
< * means that we got a zero-size segment or
< * segment sizes were unnatural.
< */
< device_printf(ta->hta_sc->sc_dev,
< "hme_txdma_callback: alignment glitch\n");
< ta->hta_err = EINVAL;
< return;
< }
< sz -= ta->hta_pad;
< /* If sz is 0 now, this does not matter. */
< }
---
> if (segs[i].ds_len == 0)
> continue;
>
860,861c824,825
< flags = HME_XD_ENCODE_TSIZE(sz);
< if ((ta->hta_flags & HTAF_SOP) != 0 && i == 0)
---
> flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
> if (len == 0)
863c827
< if ((ta->hta_flags & HTAF_EOP) != 0 && i == nsegs - 1) {
---
> if (len + segs[i].ds_len == totsz)
865,867d828
< td->htx_m = ta->hta_m;
< } else
< td->htx_m = NULL;
869,872c830,833
< "flags %#x, addr %#x", i + 1, nsegs, *tdhead, (u_int)flags,
< (u_int)addr);
< HME_XD_SETFLAGS(pci, txd, *tdhead, flags);
< HME_XD_SETADDR(pci, txd, *tdhead, addr);
---
> "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags,
> (u_int)segs[i].ds_addr);
> HME_XD_SETFLAGS(pci, txd, tdhead, flags);
> HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr);
875c836,838
< *tdhead = ((*tdhead) + 1) % HME_NTXDESC;
---
> htx->htx_lastdesc = tdhead;
> tdhead = (tdhead + 1) % HME_NTXDESC;
> len += segs[i].ds_len;
876a840,842
> ta->hta_sc->sc_rb.rb_tdhead = tdhead;
> KASSERT((flags & HME_XD_EOP) != 0,
> ("hme_txdma_callback: missed end of packet!"));
886c852
< hme_load_mbuf(struct hme_softc *sc, struct mbuf *m0)
---
> hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0)
889d854
< struct mbuf *m = m0, *n;
891,892c856
< char *start;
< int error, len, si, ri, totlen, sum;
---
> int error, si, ri;
895,898d858
< if ((m->m_flags & M_PKTHDR) == 0)
< panic("hme_dmamap_load_mbuf: no packet header");
< totlen = m->m_pkthdr.len;
< sum = 0;
899a860,862
> if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
> return (-1);
> td->htx_m = m0;
901,945c864,870
< cba.hta_err = 0;
< cba.hta_flags = HTAF_SOP;
< cba.hta_m = m0;
< for (; m != NULL && sum < totlen; m = n) {
< if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC) {
< error = -1;
< goto fail;
< }
< len = m->m_len;
< n = m->m_next;
< if (len == 0)
< continue;
< sum += len;
< td = &sc->sc_rb.rb_txdesc[sc->sc_rb.rb_tdhead];
< if (n == NULL || sum >= totlen)
< cba.hta_flags |= HTAF_EOP;
< /*
< * This is slightly evil: we must map the buffer in a way that
< * allows dma transfers to start on a natural burst boundary.
< * This is done by rounding down the mapping address, and
< * recording the required offset for the callback. With this,
< * we cannot cross a page boundary because the burst size
< * is a small power of two.
< */
< cba.hta_offs = (sc->sc_burst -
< (mtod(m, uintptr_t) & (sc->sc_burst - 1))) % sc->sc_burst;
< start = mtod(m, char *) - cba.hta_offs;
< len += cba.hta_offs;
< /*
< * Similarly, the end of the mapping should be on a natural
< * burst boundary. XXX: Let's hope that any segment ends
< * generated by the busdma code are also on such boundaries.
< */
< cba.hta_pad = (sc->sc_burst - (((uintptr_t)start + len) &
< (sc->sc_burst - 1))) % sc->sc_burst;
< len += cba.hta_pad;
< /* Most of the work is done in the callback. */
< if ((error = bus_dmamap_load(sc->sc_tdmatag, td->htx_dmamap,
< start, len, hme_txdma_callback, &cba, 0)) != 0 ||
< cba.hta_err != 0)
< goto fail;
< bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
< BUS_DMASYNC_PREWRITE);
<
< cba.hta_flags = 0;
---
> cba.hta_htx = td;
> if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap,
> m0, hme_txdma_callback, &cba, 0)) != 0)
> goto fail;
> if (cba.hta_ndescs == -1) {
> error = -1;
> goto fail;
946a872,877
> bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
> BUS_DMASYNC_PREWRITE);
>
> STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
> STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q);
>
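
Together with the hme_tint() changes below, this gives each software descriptor a simple, easily audited life cycle:

    /*
     * TX software-descriptor life cycle after this change (sketch):
     *
     *   rb_txfreeq --(hme_load_txmbuf: dmamap load, PREWRITE sync)--> rb_txbusyq
     *   rb_txbusyq --(hme_tint: POSTWRITE sync, unload, m_freem)--> rb_txfreeq
     */
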
967,979c898
< for (ri = si; ri != sc->sc_rb.rb_tdhead; ri = (ri + 1) % HME_NTXDESC) {
< td = &sc->sc_rb.rb_txdesc[ri];
< if ((td->htx_flags & HTXF_MAPPED) != 0)
< bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
< td->htx_flags = 0;
< td->htx_m = NULL;
< sc->sc_rb.rb_td_nbusy--;
< HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, 0);
< }
< sc->sc_rb.rb_tdhead = si;
< error = cba.hta_err != 0 ? cba.hta_err : error;
< if (error != -1)
< device_printf(sc->sc_dev, "could not load mbuf: %d\n", error);
---
> bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
991d909
< int offs;
1005,1006c923
< offs = sc->sc_rb.rb_rxdesc[ix].hrx_offs;
< CTR2(KTR_HME, "hme_read: offs %d, len %d", offs, len);
---
> CTR1(KTR_HME, "hme_read: len %d", len);
1026,1027c943,944
< m->m_pkthdr.len = m->m_len = len + offs;
< m_adj(m, offs);
---
> m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
> m_adj(m, HME_RXOFFS);
1048,1049c965,966
< error = hme_load_mbuf(sc, m);
< if (error != 0) {
---
> error = hme_load_txmbuf(sc, m);
> if (error == -1) {
1052a970,972
> } else if (error > 0) {
> printf("hme_start: error %d while loading mbuf\n",
> error);
1073c993
< struct hme_txdesc *td;
---
> struct hme_txdesc *htx;
1092a1013
> htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1106,1113c1027
< td = &sc->sc_rb.rb_txdesc[ri];
< CTR1(KTR_HME, "hme_tint: not owned, dflags %#x", td->htx_flags);
< if ((td->htx_flags & HTXF_MAPPED) != 0) {
< bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
< BUS_DMASYNC_POSTWRITE);
< bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
< }
< td->htx_flags = 0;
---
> CTR0(KTR_HME, "hme_tint: not owned");
1120a1035,1041
> KASSERT(htx->htx_lastdesc == ri,
> ("hme_tint: ring indices skewed: %d != %d!",
> htx->htx_lastdesc, ri));
> bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
> BUS_DMASYNC_POSTWRITE);
> bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
>
1122,1123c1043,1047
< m_freem(td->htx_m);
< td->htx_m = NULL;
---
> m_freem(htx->htx_m);
> htx->htx_m = NULL;
> STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
> STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
> htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
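
The recycle path leans on the busy queue being strictly FIFO; stated as an invariant:

    /*
     * Invariant (enforced by the htx_lastdesc KASSERT above): chains
     * complete in ring order, so when the EOP descriptor at index ri is
     * reaped, the head of rb_txbusyq is the chain that owned it. Only
     * then is its map synced and unloaded and the slot returned to
     * rb_txfreeq.
     */
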
1396d1319
< /*hme_stop(sc);*/