Deleted Added
sdiff udiff text old ( 206109 ) new ( 207688 )
full compact
1/**************************************************************************
2
3Copyright (c) 2007-2009, Chelsio Inc.
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8

--- 14 unchanged lines hidden (view full) ---

23INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26POSSIBILITY OF SUCH DAMAGE.
27
28***************************************************************************/
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_sge.c 207688 2010-05-05 22:52:06Z np $");
32
33#include "opt_inet.h"
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/kernel.h>
38#include <sys/module.h>
39#include <sys/bus.h>

--- 651 unchanged lines hidden (view full) ---

691static void
692refill_fl(adapter_t *sc, struct sge_fl *q, int n)
693{
694 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
695 struct rx_desc *d = &q->desc[q->pidx];
696 struct refill_fl_cb_arg cb_arg;
697 struct mbuf *m;
698 caddr_t cl;
699 int err;
700
701 cb_arg.error = 0;
702 while (n--) {
703 /*
704 * We only allocate a cluster, mbuf allocation happens after rx
705 */
706 if (q->zone == zone_pack) {
707 if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)

--- 41 unchanged lines hidden (view full) ---

749
750 if (++q->pidx == q->size) {
751 q->pidx = 0;
752 q->gen ^= 1;
753 sd = q->sdesc;
754 d = q->desc;
755 }
756 q->credits++;
757 q->db_pending++;
758 }
759
760done:
761 if (q->db_pending >= 32) {
762 q->db_pending = 0;
763 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
764 }
765}
766
767
768/**
769 * free_rx_bufs - free the Rx buffers on an SGE free list
770 * @sc: the control softc
771 * @q: the SGE free list to clean up
772 *

--- 34 unchanged lines hidden (view full) ---

807__refill_fl(adapter_t *adap, struct sge_fl *fl)
808{
809 refill_fl(adap, fl, min(16U, fl->size - fl->credits));
810}
811
812static __inline void
813__refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
814{
815 uint32_t reclaimable = fl->size - fl->credits;
816
817 if (reclaimable > 0)
818 refill_fl(adap, fl, min(max, reclaimable));
819}
820
821/**
822 * recycle_rx_buf - recycle a receive buffer
823 * @adapter: the adapter
824 * @q: the SGE free list
825 * @idx: index of buffer to recycle
826 *

--- 433 unchanged lines hidden (view full) ---

1260 * Ring the doorbell if a Tx queue is asleep. There is a natural race,
1261 * where the HW is going to sleep just after we checked, however,
1262 * then the interrupt handler will detect the outstanding TX packet
1263 * and ring the doorbell for us.
1264 *
1265 * When GTS is disabled we unconditionally ring the doorbell.
1266 */
1267static __inline void
1268check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
1269{
1270#if USE_GTS
1271 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1272 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1273 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1274#ifdef T3_TRACE
1275 T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
1276 q->cntxt_id);
1277#endif
1278 t3_write_reg(adap, A_SG_KDOORBELL,
1279 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1280 }
1281#else
1282 if (mustring || ++q->db_pending >= 32) {
1283 wmb(); /* write descriptors before telling HW */
1284 t3_write_reg(adap, A_SG_KDOORBELL,
1285 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1286 q->db_pending = 0;
1287 }
1288#endif
1289}
1290
1291static __inline void
1292wr_gen2(struct tx_desc *d, unsigned int gen)
1293{
1294#if SGE_NUM_GENBITS == 2
1295 d->flit[TX_DESC_FLITS - 1] = htobe64(gen);

--- 186 unchanged lines hidden (view full) ---

1482 V_WR_SGLSFLT(flits)) |
1483 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1484 wr_lo = htonl(V_WR_LEN(flits) |
1485 V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1486 set_wr_hdr(wrp, wr_hi, wr_lo);
1487 wmb();
1488 ETHER_BPF_MTAP(pi->ifp, m0);
1489 wr_gen2(txd, txqs.gen);
1490 check_ring_tx_db(sc, txq, 0);
1491 return (0);
1492 } else if (tso_info) {
1493 int eth_type;
1494 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1495 struct ether_header *eh;
1496 struct ip *ip;
1497 struct tcphdr *tcp;
1498

--- 46 unchanged lines hidden (view full) ---

1545 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1546 F_WR_SOP | F_WR_EOP | txqs.compl);
1547 wr_lo = htonl(V_WR_LEN(flits) |
1548 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1549 set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
1550 wmb();
1551 ETHER_BPF_MTAP(pi->ifp, m0);
1552 wr_gen2(txd, txqs.gen);
1553 check_ring_tx_db(sc, txq, 0);
1554 m_freem(m0);
1555 return (0);
1556 }
1557 flits = 3;
1558 } else {
1559 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1560
1561 GET_VTAG(cntrl, m0);

--- 14 unchanged lines hidden (view full) ---

1576 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1577 F_WR_SOP | F_WR_EOP | txqs.compl);
1578 wr_lo = htonl(V_WR_LEN(flits) |
1579 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1580 set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
1581 wmb();
1582 ETHER_BPF_MTAP(pi->ifp, m0);
1583 wr_gen2(txd, txqs.gen);
1584 check_ring_tx_db(sc, txq, 0);
1585 m_freem(m0);
1586 return (0);
1587 }
1588 flits = 2;
1589 }
1590 wrp = (struct work_request_hdr *)txd;
1591 sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1592 make_sgl(sgp, segs, nsegs);
1593
1594 sgl_flits = sgl_len(nsegs);
1595
1596 ETHER_BPF_MTAP(pi->ifp, m0);
1597
1598 KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
1599 wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1600 wr_lo = htonl(V_WR_TID(txq->token));
1601 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1602 sgl_flits, wr_hi, wr_lo);
1603 check_ring_tx_db(sc, txq, 0);
1604
1605 return (0);
1606}
1607
1608void
1609cxgb_tx_watchdog(void *arg)
1610{
1611 struct sge_qset *qs = arg;

--- 33 unchanged lines hidden (view full) ---

1645 }
1646}
1647
1648static void
1649cxgb_start_locked(struct sge_qset *qs)
1650{
1651 struct mbuf *m_head = NULL;
1652 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1653 struct port_info *pi = qs->port;
1654 struct ifnet *ifp = pi->ifp;
1655
1656 if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1657 reclaim_completed_tx(qs, 0, TXQ_ETH);
1658
1659 if (!pi->link_config.link_ok) {
1660 TXQ_RING_FLUSH(qs);
1661 return;
1662 }
1663 TXQ_LOCK_ASSERT(qs);
1664 while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1665 pi->link_config.link_ok) {
1666 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1667
1668 if (txq->size - txq->in_use <= TX_MAX_DESC)
1669 break;
1670
1671 if ((m_head = cxgb_dequeue(qs)) == NULL)
1672 break;
1673 /*
1674 * Encapsulation can modify our pointer, and or make it
1675 * NULL on failure. In that event, we can't requeue.
1676 */
1677 if (t3_encap(qs, &m_head) || m_head == NULL)
1678 break;
1679
1680 m_head = NULL;
1681 }
1682
1683 if (txq->db_pending)
1684 check_ring_tx_db(pi->adapter, txq, 1);
1685
1686 if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1687 pi->link_config.link_ok)
1688 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1689 qs, txq->txq_timer.c_cpu);
1690 if (m_head != NULL)
1691 m_freem(m_head);
1692}
1693

--- 17 unchanged lines hidden (view full) ---

1711 */
1712 if (check_pkt_coalesce(qs) == 0 &&
1713 !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1714 if (t3_encap(qs, &m)) {
1715 if (m != NULL &&
1716 (error = drbr_enqueue(ifp, br, m)) != 0)
1717 return (error);
1718 } else {
1719 if (txq->db_pending)
1720 check_ring_tx_db(pi->adapter, txq, 1);
1721
1722 /*
1723 * We've bypassed the buf ring so we need to update
1724 * the stats directly
1725 */
1726 txq->txq_direct_packets++;
1727 txq->txq_direct_bytes += m->m_pkthdr.len;
1728 }
1729 } else if ((error = drbr_enqueue(ifp, br, m)) != 0)

--- 631 unchanged lines hidden (view full) ---

2361 T3_TRACE5(adap->tb[q->cntxt_id & 7],
2362 "ofld_xmit: ndesc %u, pidx %u, len %u, main %u, frags %u",
2363 ndesc, pidx, skb->len, skb->len - skb->data_len,
2364 skb_shinfo(skb)->nr_frags);
2365#endif
2366 TXQ_UNLOCK(qs);
2367
2368 write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs);
2369 check_ring_tx_db(adap, q, 1);
2370 return (0);
2371}
2372
2373/**
2374 * restart_offloadq - restart a suspended offload queue
2375 * @qs: the queue set containing the offload queue
2376 *
2377 * Resumes transmission on a suspended Tx offload queue.

--- 662 unchanged lines hidden (view full) ---

3040
3041 r++;
3042 if (__predict_false(++rspq->cidx == rspq->size)) {
3043 rspq->cidx = 0;
3044 rspq->gen ^= 1;
3045 r = rspq->desc;
3046 }
3047
3048 if (++rspq->credits >= 64) {
3049 refill_rspq(adap, rspq, rspq->credits);
3050 rspq->credits = 0;
3051 }
3052 if (!eth && eop) {
3053 rspq->rspq_mh.mh_head->m_pkthdr.csum_data = rss_csum;
3054 /*
3055 * XXX size mismatch
3056 */

--- 761 unchanged lines hidden ---