Lines Matching defs:qs

443 t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
446 qs->rspq.holdoff_tmr = uimax(p->coalesce_nsecs/100, 1U);
447 qs->rspq.polling = 0 /* p->polling */;
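
The two matches above (443-447) set a qset's interrupt-coalescing parameters: the nanosecond target from the qset parameters is converted into holdoff-timer ticks, which line 446 implies the hardware counts in 100 ns units, clamped to at least one tick; polling is forced off. A minimal sketch of that conversion, with an illustrative helper name that is not driver API:

/* Hypothetical helper mirroring line 446: one SGE holdoff tick is
 * assumed to be 100 ns, and a zero holdoff is never programmed. */
static unsigned int
coalesce_nsecs_to_holdoff(unsigned int nsecs)
{
        unsigned int ticks = nsecs / 100;       /* 100 ns per tick */

        return (ticks >= 1U) ? ticks : 1U;      /* clamp, like uimax(..., 1U) */
}
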
682 struct sge_qset *qs;
689 qs = &sc->sge.qs[i + j];
690 txq = &qs->txq[0];
693 refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
694 (qs->fl[1].credits < qs->fl[1].size));
814 struct sge_qset *qs;
819 qs = &sc->sge.qs[i];
820 txq = &qs->txq[TXQ_ETH];
823 txq = &qs->txq[TXQ_OFLD];
826 lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
827 &sc->sge.qs[0].rspq.lock;
833 if (qs->fl[0].credits < qs->fl[0].size - 16)
834 __refill_fl(sc, &qs->fl[0]);
835 if (qs->fl[1].credits < qs->fl[1].size - 16)
836 __refill_fl(sc, &qs->fl[1]);
838 if (status & (1 << qs->rspq.cntxt_id)) {
839 if (qs->rspq.credits) {
840 refill_rspq(sc, &qs->rspq, 1);
841 qs->rspq.credits--;
843 1 << qs->rspq.cntxt_id);
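
Lines 814-843 are per-qset slow-path maintenance: each free list is topped up via __refill_fl() once it falls more than 16 credits short of capacity (833-836), and a starved response queue gets one credit returned (838-841). A sketch of the headroom test, where FL_REFILL_HEADROOM is an illustrative name rather than a driver macro:

#define FL_REFILL_HEADROOM      16      /* illustrative, per lines 833/835 */

/* True once the free list has drained at least 16 credits below full. */
static inline int
fl_needs_refill(const struct sge_fl *fl)
{
        return fl->credits < fl->size - FL_REFILL_HEADROOM;
}
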
853 * @qs: the queue set
859 init_qset_cntxt(struct sge_qset *qs, u_int id)
862 qs->rspq.cntxt_id = id;
863 qs->fl[0].cntxt_id = 2 * id;
864 qs->fl[1].cntxt_id = 2 * id + 1;
865 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
866 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
867 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
868 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
869 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
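
init_qset_cntxt() (859-869) derives every hardware context ID for queue set `id` from that single index, so the full layout follows directly from the assignments above:

/*
 * Context-ID layout per qset `id` (from lines 862-869):
 *
 *   response queue : id
 *   free list 0    : 2 * id
 *   free list 1    : 2 * id + 1
 *   TXQ_ETH        : FW_TUNNEL_SGEEC_START + id  (token FW_TUNNEL_TID_START + id)
 *   TXQ_OFLD       : FW_OFLD_SGEEC_START + id
 *   TXQ_CTRL       : FW_CTRL_SGEEC_START + id    (token FW_CTRL_TID_START + id)
 *
 * Doubling `id` for the free lists keeps any two qsets' free-list IDs
 * distinct; the FW_*_START bases come from the firmware interface headers.
 */
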
1158 struct sge_qset *qs;
1181 qs = &sc->sge.qs[p->first_qset];
1183 txq = &qs->txq[TXQ_ETH];
1365 struct sge_qset *qs = txq_to_qset(q, qid);
1367 setbit(&qs->txq_stopped, qid);
1371 test_and_clear_bit(qid, &qs->txq_stopped))
1459 * @qs: the queue set containing the control queue
1467 struct sge_qset *qs = (struct sge_qset *)data;
1468 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1469 adapter_t *adap = qs->port->adapter;
1486 setbit(&qs->txq_stopped, TXQ_CTRL);
1490 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1506 return ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], m);
1585 t3_free_qset(sc, &sc->sge.qs[i]);
1839 * @qs: the queue set containing the offload queue
1848 struct sge_qset *qs = data;
1849 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1850 adapter_t *adap = qs->port->adapter;
1864 setbit(&qs->txq_stopped, TXQ_OFLD);
1868 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1942 struct sge_qset *qs = &adap->sge.qs[queue_set(m)];
1945 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], m);
1947 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], m);
1951 restart_tx(struct sge_qset *qs)
1953 if (isset(&qs->txq_stopped, TXQ_OFLD) &&
1954 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1955 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1956 qs->txq[TXQ_OFLD].restarts++;
1957 workqueue_enqueue(qs->txq[TXQ_OFLD].qresume_task.wq, &qs->txq[TXQ_OFLD].qresume_task.w, NULL);
1959 if (isset(&qs->txq_stopped, TXQ_CTRL) &&
1960 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1961 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1962 qs->txq[TXQ_CTRL].restarts++;
1963 workqueue_enqueue(qs->txq[TXQ_CTRL].qresume_task.wq, &qs->txq[TXQ_CTRL].qresume_task.w, NULL);
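
restart_tx() (1951-1963), together with the stop sites at 1367, 1486, and 1864, shows the driver's lost-wakeup-safe stop/restart protocol: the stopping side sets the per-qset stopped bit with setbit(), and the restarting side re-checks capacity and then claims the bit atomically with test_and_clear_bit() before queueing the resume task, so at most one restart runs per stop. A condensed sketch of the pattern (the helpers and fields are the driver's own; the wrapper name is illustrative):

/* Sketch of the stop/restart handshake used for each TX queue. */
static void
maybe_restart_txq(struct sge_qset *qs, int qid)
{
        if (isset(&qs->txq_stopped, qid) &&
            should_restart_tx(&qs->txq[qid]) &&
            test_and_clear_bit(qid, &qs->txq_stopped)) {
                /* Only the caller that clears the bit gets here. */
                qs->txq[qid].restarts++;
                workqueue_enqueue(qs->txq[qid].qresume_task.wq,
                    &qs->txq[qid].qresume_task.w, NULL);
        }
}
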
1986 struct sge_qset *q = &sc->sge.qs[id];
2185 * @qs: the qset that the SGE free list holding the packet belongs to
2200 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2205 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2267 get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2272 struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
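
Both get_packet() variants (2200 and 2267) pick the free list the received buffer came from by testing the F_RSPD_FLQ bit in the response descriptor's len_cq word. A hedged decoding sketch; the `r->len_cq` field name and the G_RSPD_LEN length extractor are assumptions about the hardware header, not visible in the matches above:

/* Sketch: decode a response's len_cq word, as both get_packet()s do. */
uint32_t len_cq = ntohl(r->len_cq);
struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
uint32_t len = G_RSPD_LEN(len_cq);      /* assumed length extractor */
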
2327 * @qs: the queue set corresponding to the response
2335 handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2341 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2345 qs->txq[TXQ_ETH].processed += credits;
2346 if (desc_reclaimable(&qs->txq[TXQ_ETH]) > TX_START_MAX_DESC)
2347 workqueue_enqueue(qs->port->timer_reclaim_task.wq,
2348 &qs->port->timer_reclaim_task.w, NULL);
2353 qs->txq[TXQ_CTRL].processed += credits;
2357 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2361 qs->txq[TXQ_OFLD].processed += credits;
2365 check_ring_db(adapter_t *adap, struct sge_qset *qs,
2374 * @qs: the queue set to which the response queue belongs
2387 process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2389 struct sge_rspq *rspq = &qs->rspq;
2393 int lro = qs->lro.enabled;
2458 eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mh, r, m);
2466 eop = get_packet(adap, drop_thresh, qs, rspq->rspq_mbuf, r);
2476 handle_rsp_cntrl_info(qs, flags);
2510 __refill_fl(adap, &qs->fl[0]);
2511 __refill_fl(adap, &qs->fl[1]);
2517 t3_lro_flush(adap, qs, &qs->lro);
2520 check_ring_db(adap, qs, sleeping);
2523 if (__predict_false(qs->txq_stopped != 0))
2524 restart_tx(qs);
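
process_responses() (2387-2524) is the qset's main RX loop, and the matches above trace its whole shape. An outline, with the source line of each step:

/*
 * Shape of process_responses(adap, qs, budget), per the lines above:
 *
 *   1. loop, at most `budget` times, over new entries in qs->rspq:
 *        - get_packet() pulls payloads from fl[0]/fl[1]   (2458/2466)
 *        - handle_rsp_cntrl_info() credits the TX queues  (2476)
 *   2. __refill_fl() on both free lists                   (2510-2511)
 *   3. t3_lro_flush() if LRO is enabled                   (2517)
 *   4. check_ring_db() rings the doorbell if one slept    (2520)
 *   5. restart_tx() if any TX queue stopped meanwhile     (2523-2524)
 */
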
2564 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2578 process_responses_gts(adap, &adap->sge.qs[i].rspq);
2594 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2600 if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
2612 struct sge_qset *qs = data;
2613 adapter_t *adap = qs->port->adapter;
2614 struct sge_rspq *rspq = &qs->rspq;
2626 * @qs: the queue set
2635 t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
2642 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
2644 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
2649 if (!qs->rspq.desc || idx >= qs->rspq.size)
2651 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
2656 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
2658 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
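
t3_get_desc() (2635-2658) is a read-only debugging accessor: given a qset, a queue selector, and an index, it bounds-checks and then memcpy()s one raw descriptor out. A hedged usage sketch, assuming a positive return signals success (the return convention is not visible in the matched lines):

/* Sketch: snapshot TX descriptor 0 of a qset's Ethernet queue. */
struct tx_desc d;

if (t3_get_desc(qs, TXQ_ETH, 0, (unsigned char *)&d) > 0) {
        /* d is a private copy; safe to inspect without holding locks. */
}
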