Matches restricted to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/cxgb3/

Lines Matching defs:qs

718  *	@qs: the queue set
723 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
725 qs->rspq.cntxt_id = id;
726 qs->fl[0].cntxt_id = 2 * id;
727 qs->fl[1].cntxt_id = 2 * id + 1;
728 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
729 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
730 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
731 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
732 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
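
The init_qset_cntxt cluster above encodes a fixed arithmetic layout: the response queue's context ID is the qset index itself, the two free lists interleave at 2*id and 2*id+1, and each Tx queue sits at a firmware base offset plus the index. A minimal user-space sketch of that mapping follows; the FW_* values here are placeholders, not the driver's real constants.

    #include <stdio.h>

    /* Placeholder bases; the driver takes the real values from
     * firmware headers. */
    enum {
        FW_TUNNEL_SGEEC_START = 0x200,
        FW_OFLD_SGEEC_START   = 0x300,
        FW_CTRL_SGEEC_START   = 0x3c0,
    };

    int main(void)
    {
        for (unsigned int id = 0; id < 3; id++)
            printf("qset %u: rspq=%u fl0=%u fl1=%u eth=%u ofld=%u ctrl=%u\n",
                   id, id, 2 * id, 2 * id + 1,
                   FW_TUNNEL_SGEEC_START + id,
                   FW_OFLD_SGEEC_START + id,
                   FW_CTRL_SGEEC_START + id);
        return 0;
    }
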
1204 struct sge_qset *qs, struct sge_txq *q)
1207 set_bit(TXQ_ETH, &qs->txq_stopped);
1225 struct sge_qset *qs;
1238 qs = &pi->qs[qidx];
1239 q = &qs->txq[TXQ_ETH];
1248 t3_stop_tx_queue(txq, qs, q);
1257 t3_stop_tx_queue(txq, qs, q);
1260 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1279 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1281 qs->port_stats[SGE_PSTAT_TSO]++;
1283 qs->port_stats[SGE_PSTAT_VLANINS]++;
1376 struct sge_qset *qs = txq_to_qset(q, qid);
1378 set_bit(qid, &qs->txq_stopped);
1382 test_and_clear_bit(qid, &qs->txq_stopped))
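
Lines 1204-1260 and 1376-1382 both use the same stop/recheck idiom: stop the queue, set its stopped bit, then re-test for space and test_and_clear_bit to reopen if the reclaim path freed descriptors in the meantime. A self-contained sketch of the idiom with C11 atomics; the struct and function names are illustrative, not the driver's.

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TXQ_ETH 0

    struct txq_model {
        atomic_uint in_use;   /* descriptors currently owned by hardware */
        unsigned int size;    /* ring size */
        atomic_ulong stopped; /* models qs->txq_stopped */
    };

    /* Returns true if the caller must really stop; false if space
     * reappeared and the queue was atomically reopened. */
    static bool stop_then_recheck(struct txq_model *q, unsigned int needed)
    {
        atomic_fetch_or(&q->stopped, 1UL << TXQ_ETH);   /* stop the queue */

        /* The reclaim path may have freed descriptors between the
         * caller's space check and the set_bit above; test-and-clear
         * guarantees exactly one side restarts the queue. */
        if (q->size - atomic_load(&q->in_use) >= needed &&
            (atomic_fetch_and(&q->stopped, ~(1UL << TXQ_ETH)) &
             (1UL << TXQ_ETH)))
            return false;
        return true;
    }
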
1465 * @qs: the queue set containing the control queue
1472 struct sge_qset *qs = (struct sge_qset *)data;
1473 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1491 set_bit(TXQ_CTRL, &qs->txq_stopped);
1495 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1502 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1513 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1676 * @qs: the queue set containing the offload queue
1683 struct sge_qset *qs = (struct sge_qset *)data;
1684 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1685 const struct port_info *pi = netdev_priv(qs->netdev);
1696 set_bit(TXQ_OFLD, &qs->txq_stopped);
1700 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1767 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1770 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1772 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1791 struct sge_qset *qs = rspq_to_qset(q);
1793 napi_schedule(&qs->napi);
1829 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1830 struct sge_rspq *q = &qs->rspq;
1831 struct adapter *adapter = qs->adap;
1911 * @qs: the queue set to resume
1916 static void restart_tx(struct sge_qset *qs)
1918 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1919 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1920 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1921 qs->txq[TXQ_ETH].restarts++;
1922 if (netif_running(qs->netdev))
1923 netif_tx_wake_queue(qs->tx_q);
1926 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1927 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1928 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
1929 qs->txq[TXQ_OFLD].restarts++;
1930 tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
1932 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
1933 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
1934 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
1935 qs->txq[TXQ_CTRL].restarts++;
1936 tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
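
restart_tx (lines 1916-1936) is the resume side of that idiom: the Ethernet queue is woken through netif_tx_wake_queue, while the offload and control queues are resumed by scheduling their qresume tasklets. A sketch of the exactly-once wake, again with hypothetical names:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TXQ_ETH 0

    /* Only the caller whose test-and-clear observed the bit still set
     * calls wake(), so a stopped queue is restarted exactly once even
     * if several completions race here. */
    static void restart_if_stopped(atomic_ulong *stopped, bool have_room,
                                   void (*wake)(void))
    {
        if ((atomic_load(stopped) & (1UL << TXQ_ETH)) && have_room &&
            (atomic_fetch_and(stopped, ~(1UL << TXQ_ETH)) &
             (1UL << TXQ_ETH)))
            wake();
    }
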
2014 struct sge_qset *qs = rspq_to_qset(rq);
2022 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2026 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2031 qs->port_stats[SGE_PSTAT_VLANEX]++;
2034 vlan_gro_receive(&qs->napi, grp,
2051 napi_gro_receive(&qs->napi, skb);
2069 * @qs: the associated queue set
2077 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2081 struct port_info *pi = netdev_priv(qs->netdev);
2088 if (!qs->nomem) {
2089 skb = napi_get_frags(&qs->napi);
2090 qs->nomem = !skb;
2110 qs->nomem = 0;
2119 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2124 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2128 cpl = qs->lro_va;
2145 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2151 vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan));
2155 napi_gro_frags(&qs->napi);
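
The qs->nomem handling in lro_add_page (lines 2088-2110) is a small allocation-failure latch: if napi_get_frags() fails once, later fragments are dropped without retrying the allocation for every packet, and the latch clears when the partial packet is finally discarded (qs->nomem = 0 at line 2110). A user-space model, with malloc standing in for napi_get_frags():

    #include <stdbool.h>
    #include <stdlib.h>

    struct lro_state {
        bool nomem;  /* models qs->nomem */
        void *skb;   /* in-progress aggregation buffer */
    };

    /* Try to obtain the aggregation buffer; on failure, latch nomem so
     * subsequent fragments are dropped cheaply until the latch is
     * cleared again. */
    static bool lro_get_buf(struct lro_state *s)
    {
        if (!s->nomem) {
            s->skb = malloc(2048);  /* stands in for napi_get_frags() */
            s->nomem = (s->skb == NULL);
        }
        return !s->nomem;
    }
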
2160 * @qs: the queue set corresponding to the response
2167 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2173 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2178 qs->txq[TXQ_ETH].processed += credits;
2182 qs->txq[TXQ_CTRL].processed += credits;
2186 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2190 qs->txq[TXQ_OFLD].processed += credits;
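
handle_rsp_cntrl_info (lines 2167-2190) decodes the flags word of a response descriptor into per-queue events: a "sleeping" flag clears TXQ_RUNNING, and any returned credits are added to the queue's processed count. A sketch under an invented bit layout; the driver extracts these fields with its G_/F_ register macros instead.

    #include <stdbool.h>
    #include <stdint.h>

    /* Invented field layout, purely for illustration: credits in bits
     * 0-6, a "queue went to sleep" flag in bit 7. */
    #define RSP_CREDITS(f)  ((f) & 0x7fu)
    #define RSP_SLEEPING(f) ((f) & 0x80u)

    struct txq_state {
        unsigned int processed; /* completed descriptors, running total */
        bool running;           /* models the TXQ_RUNNING flag bit */
    };

    static void handle_cntrl_info(struct txq_state *q, uint32_t flags)
    {
        if (RSP_SLEEPING(flags))
            q->running = false;             /* clear_bit(TXQ_RUNNING, ...) */
        q->processed += RSP_CREDITS(flags); /* credits returned by HW */
    }
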
2196 * @qs: the queue set whose Tx queues are to be examined
2203 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2207 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2218 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2261 * @qs: the queue set to which the response queue belongs
2273 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2276 struct sge_rspq *q = &qs->rspq;
2286 int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
2323 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2333 lro_add_page(adap, qs, fl,
2361 handle_rsp_cntrl_info(qs, flags);
2403 check_ring_db(adap, qs, sleeping);
2406 if (unlikely(qs->txq_stopped != 0))
2407 restart_tx(qs);
2429 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2430 struct adapter *adap = qs->adap;
2431 int work_done = process_responses(adap, qs, budget);
2450 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2451 V_NEWTIMER(qs->rspq.next_holdoff) |
2452 V_NEWINDEX(qs->rspq.cidx));
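
napi_rx_handler (lines 2429-2452) follows the standard NAPI contract: consume at most budget responses, and only when fewer than budget were found re-arm the interrupt, here via the A_SG_GTS write that also programs the next hold-off timer and consumer index. A schematic of that control flow, using model types rather than the driver's:

    struct rspq_model { int pending; /* responses waiting in the ring */ };

    static int process_responses_model(struct rspq_model *q, int budget)
    {
        int done = 0;

        while (done < budget && q->pending) {
            q->pending--;   /* consume one response descriptor */
            done++;
        }
        return done;
    }

    static int napi_poll_model(struct rspq_model *q, int budget,
                               void (*rearm_irq)(void))
    {
        int work = process_responses_model(q, budget);

        if (work < budget)  /* ring drained: leave polling, unmask IRQ */
            rearm_irq();    /* the driver's A_SG_GTS doorbell write */
        return work;
    }
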
2468 * @qs: the queue set owning the response queue
2479 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2482 struct sge_rspq *q = &qs->rspq;
2498 handle_rsp_cntrl_info(qs, flags);
2512 check_ring_db(adap, qs, sleeping);
2515 if (unlikely(qs->txq_stopped != 0))
2516 restart_tx(qs);
2538 struct sge_qset *qs = rspq_to_qset(q);
2544 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2549 napi_schedule(&qs->napi);
2559 struct sge_qset *qs = cookie;
2560 struct adapter *adap = qs->adap;
2561 struct sge_rspq *q = &qs->rspq;
2564 if (process_responses(adap, qs, -1) == 0)
2578 struct sge_qset *qs = cookie;
2579 struct sge_rspq *q = &qs->rspq;
2583 if (handle_responses(qs->adap, q) < 0)
2599 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2603 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2610 process_responses(adap, &adap->sge.qs[1], -1)) {
2611 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2626 static int rspq_check_napi(struct sge_qset *qs)
2628 struct sge_rspq *q = &qs->rspq;
2630 if (!napi_is_scheduled(&qs->napi) &&
2632 napi_schedule(&qs->napi);
2649 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2653 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2655 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2687 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2688 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2725 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2742 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2759 struct sge_qset *qs0 = &adap->sge.qs[0];
2777 napi_schedule(&adap->sge.qs[1].napi);
2875 struct sge_qset *qs = (struct sge_qset *)data;
2876 struct port_info *pi = netdev_priv(qs->netdev);
2881 if (__netif_tx_trylock(qs->tx_q)) {
2882 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2884 __netif_tx_unlock(qs->tx_q);
2887 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2888 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2890 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2896 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
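
sge_timer_tx (lines 2875-2896) deliberately uses __netif_tx_trylock and spin_trylock: a busy lock means the normal Tx path is already reclaiming, so the timer skips this round instead of blocking in timer context and simply runs again next period. A pthread sketch of the same opportunistic pattern:

    #include <pthread.h>

    /* Try-lock the queue; if contended, skip the reclaim entirely and
     * rely on the next timer period (or the lock owner) to do it. */
    static unsigned int
    timer_reclaim(pthread_mutex_t *lock, unsigned int (*reclaim)(void))
    {
        unsigned int freed = 0;

        if (pthread_mutex_trylock(lock) == 0) {
            freed = reclaim();
            pthread_mutex_unlock(lock);
        }
        return freed;   /* 0 if skipped this round */
    }
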
2916 struct sge_qset *qs = (struct sge_qset *)data;
2917 struct port_info *pi = netdev_priv(qs->netdev);
2922 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2927 if (napi_is_scheduled(&qs->napi))
2933 if (status & (1 << qs->rspq.cntxt_id)) {
2934 qs->rspq.starved++;
2935 if (qs->rspq.credits) {
2936 qs->rspq.credits--;
2937 refill_rspq(adap, &qs->rspq, 1);
2938 qs->rspq.restarted++;
2940 1 << qs->rspq.cntxt_id);
2945 if (qs->fl[0].credits < qs->fl[0].size)
2946 __refill_fl(adap, &qs->fl[0]);
2947 if (qs->fl[1].credits < qs->fl[1].size)
2948 __refill_fl(adap, &qs->fl[1]);
2953 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
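
sge_timer_rx (lines 2916-2953) handles response-queue starvation: if the hardware flags the queue starved and driver-side credits remain, one credit is handed back via refill_rspq and the restart is counted; under-filled free lists are topped up in the same pass. A minimal model of the recovery step, with illustrative field names:

    #include <stdbool.h>

    struct rspq_state {
        unsigned int credits;   /* driver-held spare credits */
        unsigned int starved;   /* stats counters */
        unsigned int restarted;
    };

    static void unstarve(struct rspq_state *q, bool hw_starved,
                         void (*refill_one)(struct rspq_state *))
    {
        if (!hw_starved)
            return;
        q->starved++;
        if (q->credits) {
            q->credits--;
            refill_one(q);  /* as refill_rspq(adap, &qs->rspq, 1) */
            q->restarted++;
        }
    }
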
2958 * @qs: the SGE queue set
2964 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2966 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
2967 qs->rspq.polling = p->polling;
2968 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
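
The conversion in t3_update_qset_coalesce (line 2966) multiplies the user's microsecond setting by 10 and clamps it to at least 1, which suggests the hold-off timer counts in 0.1 us units and must never be programmed to zero; that unit interpretation is an inference from the x10 factor, not confirmed by this excerpt. A worked example:

    /* Hold-off ticks from a microsecond setting, assuming 0.1 us units. */
    static unsigned int holdoff_ticks(unsigned int coalesce_usecs)
    {
        unsigned int t = coalesce_usecs * 10;
        return t ? t : 1;   /* the hardware timer can't be 0 */
    }
    /* holdoff_ticks(5) == 50, holdoff_ticks(0) == 1 */
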
2993 struct sge_qset *q = &adapter->sge.qs[id];
3167 struct sge_qset *q = &adap->sge.qs[i];
3188 struct sge_qset *q = &adap->sge.qs[i];
3208 t3_free_qset(adap, &adap->sge.qs[i]);
3243 struct sge_qset *qs = &adap->sge.qs[i];
3245 tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3246 tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3326 * @qs: the queue set
3334 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3341 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3343 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3348 if (!qs->rspq.desc || idx >= qs->rspq.size)
3350 memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3355 if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3357 memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
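
t3_get_desc (lines 3334-3357) is a bounds-checked copy-out: every branch verifies both that the ring's descriptor array was allocated and that idx is within the ring before the memcpy. A generic sketch of the same guard; ring_model and the -1 error code are stand-ins (the driver returns -EINVAL).

    #include <string.h>

    struct ring_model {
        void *desc;         /* descriptor array, NULL if not allocated */
        unsigned int size;  /* number of descriptors in the ring */
        size_t elem;        /* size of one descriptor */
    };

    static int get_desc(const struct ring_model *r, unsigned int idx,
                        void *data)
    {
        if (!r->desc || idx >= r->size)
            return -1;  /* the driver returns -EINVAL here */
        memcpy(data, (const char *)r->desc + (size_t)idx * r->elem,
               r->elem);
        return (int)r->elem;    /* bytes copied out */
    }
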