Lines Matching defs:qs (only in /freebsd-13-stable/sys/dev/cxgb/)

230 #define	TXQ_LOCK_ASSERT(qs)	mtx_assert(&(qs)->lock, MA_OWNED)
231 #define TXQ_TRYLOCK(qs) mtx_trylock(&(qs)->lock)
232 #define TXQ_LOCK(qs) mtx_lock(&(qs)->lock)
233 #define TXQ_UNLOCK(qs) mtx_unlock(&(qs)->lock)
234 #define TXQ_RING_EMPTY(qs) drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
235 #define TXQ_RING_NEEDS_ENQUEUE(qs) \
236 drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
237 #define TXQ_RING_FLUSH(qs) drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
238 #define TXQ_RING_DEQUEUE_COND(qs, func, arg) \
239 drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
240 #define TXQ_RING_DEQUEUE(qs) \
241 drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
248 static void cxgb_start_locked(struct sge_qset *qs);
256 check_pkt_coalesce(struct sge_qset *qs)
264 txq = &qs->txq[TXQ_ETH];
265 sc = qs->port->adapter;
266 fill = &sc->tunq_fill[qs->idx];
279 TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
337 cxgb_dequeue(struct sge_qset *qs)
343 if (check_pkt_coalesce(qs) == 0)
344 return TXQ_RING_DEQUEUE(qs);
349 m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
372 reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
374 struct sge_txq *q = &qs->txq[queue];
384 mtx_assert(&qs->lock, MA_OWNED);
386 t3_free_tx_desc(qs, reclaim, queue);
390 if (isset(&qs->txq_stopped, TXQ_ETH))
391 clrbit(&qs->txq_stopped, TXQ_ETH);
398 cxgb_debugnet_poll_tx(struct sge_qset *qs)
401 return (reclaim_completed_tx(qs, TX_RECLAIM_MAX, TXQ_ETH));
679 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
682 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
683 qs->rspq.polling = 0 /* p->polling */;
968 struct sge_qset *qs;
979 qs = &sc->sge.qs[pi->first_qset + j];
980 txq = &qs->txq[0];
982 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
983 (qs->fl[1].credits < qs->fl[1].size));
1057 struct sge_qset *qs = arg;
1061 reclaim_completed_tx(qs, 16, i);
1070 struct sge_qset *qs;
1077 qs = &sc->sge.qs[pi->first_qset + i];
1079 reclaim_completed_tx(qs, 16, TXQ_OFLD);
1080 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
1081 &sc->sge.qs[0].rspq.lock;
1087 if (qs->fl[0].credits < qs->fl[0].size - 16)
1088 __refill_fl(sc, &qs->fl[0]);
1089 if (qs->fl[1].credits < qs->fl[1].size - 16)
1090 __refill_fl(sc, &qs->fl[1]);
1092 if (status & (1 << qs->rspq.cntxt_id)) {
1093 if (qs->rspq.credits) {
1094 refill_rspq(sc, &qs->rspq, 1);
1095 qs->rspq.credits--;
1097 1 << qs->rspq.cntxt_id);
1107 * @qs: the queue set
1113 init_qset_cntxt(struct sge_qset *qs, u_int id)
1116 qs->rspq.cntxt_id = id;
1117 qs->fl[0].cntxt_id = 2 * id;
1118 qs->fl[1].cntxt_id = 2 * id + 1;
1119 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
1120 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
1121 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
1122 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
1123 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
1126 mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX);
1127 mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX);
1128 mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX);
1360 t3_encap(struct sge_qset *qs, struct mbuf **m)
1378 pi = qs->port;
1380 txq = &qs->txq[TXQ_ETH];
1388 mtx_assert(&qs->lock, MA_OWNED);
1598 cxgb_debugnet_encap(struct sge_qset *qs, struct mbuf **m)
1602 error = t3_encap(qs, m);
1604 check_ring_tx_db(qs->port->adapter, &qs->txq[TXQ_ETH], 1);
1616 struct sge_qset *qs = arg;
1617 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1619 if (qs->coalescing != 0 &&
1621 TXQ_RING_EMPTY(qs))
1622 qs->coalescing = 0;
1623 else if (qs->coalescing == 0 &&
1625 qs->coalescing = 1;
1626 if (TXQ_TRYLOCK(qs)) {
1627 qs->qs_flags |= QS_FLUSHING;
1628 cxgb_start_locked(qs);
1629 qs->qs_flags &= ~QS_FLUSHING;
1630 TXQ_UNLOCK(qs);
1632 if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
1634 qs, txq->txq_watchdog.c_cpu);
1640 struct sge_qset *qs = arg;
1641 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1643 if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
1644 qs->coalescing = 1;
1645 if (TXQ_TRYLOCK(qs)) {
1646 qs->qs_flags |= QS_TIMEOUT;
1647 cxgb_start_locked(qs);
1648 qs->qs_flags &= ~QS_TIMEOUT;
1649 TXQ_UNLOCK(qs);
1654 cxgb_start_locked(struct sge_qset *qs)
1657 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1658 struct port_info *pi = qs->port;
1661 if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1662 reclaim_completed_tx(qs, 0, TXQ_ETH);
1665 TXQ_RING_FLUSH(qs);
1668 TXQ_LOCK_ASSERT(qs);
1669 while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1671 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1676 if ((m_head = cxgb_dequeue(qs)) == NULL)
1682 if (t3_encap(qs, &m_head) || m_head == NULL)
1691 if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1694 qs, txq->txq_timer.c_cpu);
1700 cxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
1702 struct port_info *pi = qs->port;
1703 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1708 TXQ_LOCK_ASSERT(qs);
1717 if (check_pkt_coalesce(qs) == 0 &&
1718 !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1719 if (t3_encap(qs, &m)) {
1737 reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1738 if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
1739 (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
1740 cxgb_start_locked(qs);
1741 else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1743 qs, txq->txq_timer.c_cpu);
1750 struct sge_qset *qs;
1764 qs = &pi->adapter->sge.qs[qidx];
1766 if (TXQ_TRYLOCK(qs)) {
1768 error = cxgb_transmit_locked(ifp, qs, m);
1769 TXQ_UNLOCK(qs);
1771 error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1852 struct sge_qset *qs = txq_to_qset(q, qid);
1854 setbit(&qs->txq_stopped, qid);
1856 test_and_clear_bit(qid, &qs->txq_stopped))
1894 ctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1898 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1905 TXQ_LOCK(qs);
1911 TXQ_UNLOCK(qs);
1923 TXQ_UNLOCK(qs);
1935 * @qs: the queue set containing the control queue
1943 struct sge_qset *qs = (struct sge_qset *)data;
1944 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1945 adapter_t *adap = qs->port->adapter;
1947 TXQ_LOCK(qs);
1963 setbit(&qs->txq_stopped, TXQ_CTRL);
1966 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1970 TXQ_UNLOCK(qs);
1982 return ctrl_xmit(adap, &adap->sge.qs[0], m);
2074 TXQ_LOCK(&sc->sge.qs[i]);
2075 t3_free_qset(sc, &sc->sge.qs[i]);
2123 struct sge_qset *qs = &sc->sge.qs[i];
2125 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2126 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2145 t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
2149 struct sge_txq *q = &qs->txq[queue];
2159 mtx_assert(&qs->lock, MA_OWNED);
2290 ofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2295 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2300 TXQ_LOCK(qs);
2301 again: reclaim_completed_tx(qs, 16, TXQ_OFLD);
2305 TXQ_UNLOCK(qs);
2322 TXQ_UNLOCK(qs);
2329 * @qs: the queue set containing the offload queue
2337 struct sge_qset *qs = data;
2338 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2339 adapter_t *adap = qs->port->adapter;
2342 TXQ_LOCK(qs);
2343 again: cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
2351 setbit(&qs->txq_stopped, TXQ_OFLD);
2353 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2369 TXQ_UNLOCK(qs);
2371 TXQ_LOCK(qs);
2377 TXQ_UNLOCK(qs);
2395 struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
2399 return (ctrl_xmit(sc, qs, m));
2401 return (ofld_xmit(sc, qs, m));
2406 restart_tx(struct sge_qset *qs)
2408 struct adapter *sc = qs->port->adapter;
2410 if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2411 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2412 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2413 qs->txq[TXQ_OFLD].restarts++;
2414 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2417 if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2418 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2419 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2420 qs->txq[TXQ_CTRL].restarts++;
2421 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2444 struct sge_qset *q = &sc->sge.qs[id];
2692 * @qs: the qset that the SGE free list holding the packet belongs to
2705 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2710 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2795 * @qs: the queue set corresponding to the response
2803 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2809 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2813 qs->txq[TXQ_ETH].processed += credits;
2817 qs->txq[TXQ_CTRL].processed += credits;
2821 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2825 qs->txq[TXQ_OFLD].processed += credits;
2830 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2839 * @qs: the queue set to which the response queue belongs
2852 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2854 struct sge_rspq *rspq = &qs->rspq;
2859 int lro_enabled = qs->lro.enabled;
2861 struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
2925 eop = get_packet(adap, drop_thresh, qs, mh, r);
2941 handle_rsp_cntrl_info(qs, flags);
2947 adap->cpl_handler[opcode](qs, r, mh->mh_head);
2966 skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
2998 __refill_fl_lt(adap, &qs->fl[0], 32);
2999 __refill_fl_lt(adap, &qs->fl[1], 32);
3009 check_ring_db(adap, qs, sleeping);
3012 if (__predict_false(qs->txq_stopped > 1))
3013 restart_tx(qs);
3015 __refill_fl_lt(adap, &qs->fl[0], 512);
3016 __refill_fl_lt(adap, &qs->fl[1], 512);
3044 cxgb_debugnet_poll_rx(adapter_t *adap, struct sge_qset *qs)
3047 return (process_responses_gts(adap, &qs->rspq));
3063 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3080 process_responses_gts(adap, &adap->sge.qs[i].rspq);
3094 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3100 if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3113 struct sge_qset *qs = data;
3114 adapter_t *adap = qs->port->adapter;
3115 struct sge_rspq *rspq = &qs->rspq;
3126 struct sge_qset *qs;
3133 qs = rspq_to_qset(rspq);
3149 err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
3188 struct sge_qset *qs;
3196 qs = txq_to_qset(txq, TXQ_ETH);
3213 err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3227 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3255 struct sge_qset *qs;
3262 qs = txq_to_qset(txq, TXQ_CTRL);
3284 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3314 struct sge_qset *qs;
3337 qs = &sc->sge.qs[i];
3341 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3342 &sc->sge.qs[0].rspq.lock;
3345 t3_update_qset_coalesce(qs, qsp);
3346 t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3347 V_NEWTIMER(qs->rspq.holdoff_tmr));
3479 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3485 struct sge_txq *txq = &qs->txq[TXQ_ETH];
3487 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3490 qs->namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
3495 CTLFLAG_RD, &qs->fl[0].empty, 0,
3498 CTLFLAG_RD, &qs->fl[1].empty, 0,
3522 CTLFLAG_RD, &qs->rspq.size,
3525 CTLFLAG_RD, &qs->rspq.cidx,
3528 CTLFLAG_RD, &qs->rspq.credits,
3531 CTLFLAG_RD, &qs->rspq.starved,
3534 CTLFLAG_RD, &qs->rspq.phys_addr,
3537 CTLFLAG_RW, &qs->rspq.rspq_dump_start,
3540 CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3544 &qs->rspq, 0, t3_dump_rspq, "A",
3548 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
3551 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len,
3555 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,
3558 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_cons,
3562 CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3583 CTLFLAG_RD, &qs->txq_stopped,
3589 CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3598 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3601 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3605 &qs->txq[TXQ_ETH], 0, t3_dump_txq_eth, "A",
3609 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3612 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3616 &qs->txq[TXQ_CTRL], 0, t3_dump_txq_ctrl, "A",
3620 CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
3622 CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
3624 CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
3626 CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
3715 * @qs: the queue set
3724 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3731 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3733 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3738 if (!qs->rspq.desc || idx >= qs->rspq.size)
3740 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3745 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3747 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));