Searched refs:txq (Results 51 - 75 of 340) sorted by relevance

/linux-master/net/sched/
sch_teql.c
148 struct netdev_queue *txq; local
151 txq = netdev_get_tx_queue(master->dev, 0);
154 root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc));
156 qdisc_reset(rtnl_dereference(txq->qdisc));
219 struct net_device *dev, struct netdev_queue *txq,
259 struct netdev_queue *txq)
264 if (rcu_access_pointer(txq->qdisc) == &noop_qdisc)
271 res = __teql_resolve(skb, skb_res, dev, txq, dst);
218 __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev, struct netdev_queue *txq, struct dst_entry *dst) argument
256 teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev, struct netdev_queue *txq) argument
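Note: the sch_teql.c fragments above use two different accessors for txq->qdisc. A minimal sketch of the distinction, assuming only the APIs visible in the snippet (txq_is_noop/txq_reset_qdisc are hypothetical names):

#include <linux/rtnetlink.h>
#include <net/sch_generic.h>

/* Pure pointer-identity test: rcu_access_pointer() never dereferences,
 * so no RCU read-side section or RTNL is required (cf. line 264). */
static bool txq_is_noop(struct netdev_queue *txq)
{
	return rcu_access_pointer(txq->qdisc) == &noop_qdisc;
}

/* A real dereference: rtnl_dereference() documents (and lockdep-checks)
 * that the RTNL protects the pointer (cf. lines 154 and 156). */
static void txq_reset_qdisc(struct netdev_queue *txq)
{
	ASSERT_RTNL();
	qdisc_reset(rtnl_dereference(txq->qdisc));
}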
/linux-master/drivers/net/ethernet/microsoft/mana/
mana_en.c
46 /* Ensure port state updated before txq state */
233 struct mana_txq *txq; local
243 txq = &apc->tx_qp[txq_idx].txq;
244 gdma_sq = txq->gdma_sq;
246 tx_stats = &txq->stats;
249 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
251 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
252 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
255 pkg.tx_oob.s_oob.short_vp_offset = txq
482 int txq; local
496 int txq; local
1354 struct mana_txq *txq = cq->txq; local
1832 mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq) argument
1876 struct mana_txq *txq; local
2585 struct mana_txq *txq; local
[all...]
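Note: the truncated line 255 pairs with lines 251-252 to form the usual short/long vport-offset split. A hedged reconstruction of that branch, for illustration only (field names follow the snippet above; the else-body is an assumption based on the visible fragment, not a quote of the file):

	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
		/* offset too large for the short OOB field */
		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
	} else {
		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
	}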
/linux-master/drivers/net/ethernet/atheros/alx/
alx.h
97 struct alx_tx_queue *txq; member in struct:alx_napi
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
qos.h
26 void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq);
/linux-master/drivers/net/wireless/realtek/rtw88/
tx.c
565 struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq); local
584 if (!txq->sta)
587 si = (struct rtw_sta_info *)txq->sta->drv_priv;
588 set_bit(txq->tid, si->tid_ba);
597 struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq); local
603 rtw_tx_pkt_info_update(rtwdev, &pkt_info, txq->sta, skb);
615 struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq); local
618 skb = ieee80211_tx_dequeue(rtwdev->hw, txq);
657 struct ieee80211_txq *txq = rtwtxq_to_txq(rtwtxq); local
660 ieee80211_txq_get_depth(txq,
678 rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq) argument
689 rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq) argument
[all...]
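Note: the tx.c fragments above follow mac80211's pull-mode TX model: the driver owns an ieee80211_txq and drains it with ieee80211_tx_dequeue() (line 618). A condensed sketch of that loop, assuming rtw88's struct rtw_dev with its hw member; rtw_txq_push_skb is a hypothetical per-frame handler, not a real symbol:

static void rtw_txq_drain_sketch(struct rtw_dev *rtwdev,
				 struct ieee80211_txq *txq)
{
	struct sk_buff *skb;

	/* pull frames until mac80211 has nothing queued for this txq */
	while ((skb = ieee80211_tx_dequeue(rtwdev->hw, txq)) != NULL)
		rtw_txq_push_skb(rtwdev, txq, skb);	/* hypothetical */
}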
tx.h
88 void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
89 void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
/linux-master/drivers/net/ethernet/intel/ice/
ice_base.h
24 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
/linux-master/drivers/bluetooth/
hci_mrvl.c
40 struct sk_buff_head txq; member in struct:mrvl_data
71 skb_queue_head_init(&mrvl->txq);
100 skb_queue_purge(&mrvl->txq);
115 skb_queue_purge(&mrvl->txq);
126 skb = skb_dequeue(&mrvl->txq);
142 skb_queue_tail(&mrvl->txq, skb);
159 skb_queue_tail(&mrvl->txq, skb);
dtl1_cs.c
75 struct sk_buff_head txq; member in struct:dtl1_info
156 skb = skb_dequeue(&(info->txq));
168 skb_queue_head(&(info->txq), skb);
370 skb_queue_purge(&(info->txq));
421 skb_queue_tail(&(info->txq), s);
443 skb_queue_head_init(&(info->txq));
bluecard_cs.c
72 struct sk_buff_head txq; member in struct:bluecard_info
259 skb = skb_dequeue(&(info->txq));
323 skb_queue_head(&(info->txq), skb);
595 skb_queue_tail(&(info->txq), skb);
612 skb_queue_purge(&(info->txq));
668 skb_queue_tail(&(info->txq), skb);
690 skb_queue_head_init(&(info->txq));
773 skb_queue_purge(&(info->txq));
hci_ll.c
75 struct sk_buff_head txq; member in struct:ll_struct
105 skb_queue_tail(&ll->txq, skb);
121 skb_queue_head_init(&ll->txq);
147 skb_queue_purge(&ll->txq);
160 skb_queue_purge(&ll->txq);
181 * 1. places all pending packets (waiting in tx_wait_q list) in txq list.
191 skb_queue_tail(&ll->txq, skb);
327 skb_queue_tail(&ll->txq, skb);
448 return skb_dequeue(&ll->txq);
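Note: the four Bluetooth drivers above (hci_mrvl.c, dtl1_cs.c, bluecard_cs.c, hci_ll.c) share one txq idiom: a struct sk_buff_head embedded in the driver state, initialised once, fed from the HCI send path, drained from the wakeup/interrupt path, and purged on close. A condensed, self-contained sketch; the hci_sketch_* names are hypothetical:

#include <linux/skbuff.h>

struct hci_sketch_data {
	struct sk_buff_head txq;
};

static void hci_sketch_open(struct hci_sketch_data *d)
{
	skb_queue_head_init(&d->txq);	/* cf. hci_ll.c:121, dtl1_cs.c:443 */
}

static void hci_sketch_send(struct hci_sketch_data *d, struct sk_buff *skb)
{
	skb_queue_tail(&d->txq, skb);	/* enqueue from the HCI send hook */
}

static struct sk_buff *hci_sketch_dequeue(struct hci_sketch_data *d)
{
	return skb_dequeue(&d->txq);	/* drain from the TX wakeup path */
}

static void hci_sketch_close(struct hci_sketch_data *d)
{
	skb_queue_purge(&d->txq);	/* drop anything still queued */
}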
/linux-master/drivers/vdpa/vdpa_sim/
vdpa_sim_net.c
198 struct vdpasim_virtqueue *txq = &vdpasim->vqs[1]; local
216 if (!txq->ready || !rxq->ready)
220 err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
221 &txq->head, GFP_ATOMIC);
229 read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
236 vdpasim_net_complete(txq, 0);
244 vdpasim_net_complete(txq, 0);
258 vdpasim_net_complete(txq,
[all...]
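Note: the fragments above form a classic vringh TX loop: fetch a descriptor chain from the txq vring, pull its payload, then complete the descriptor with zero bytes written (the simulator consumes TX buffers without producing data). A hedged sketch of the loop shape around lines 220-244, with buf and error handling as assumed placeholders:

	for (;;) {
		err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
					   &txq->head, GFP_ATOMIC);
		if (err <= 0)
			break;		/* 0: ring empty, <0: error */

		read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
					     buf, sizeof(buf));
		/* payload consumed (or dropped); complete with 0 written */
		vdpasim_net_complete(txq, 0);
	}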
/linux-master/drivers/net/ethernet/qlogic/qede/
qede.h
414 #define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
420 #define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq) ((QEDE_TSS_COUNT(edev) * \
421 (txq)->cos) + (txq)->index)
423 (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
425 #define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0]))
471 struct qede_tx_queue *txq; member in struct:qede_fastpath
537 struct qede_tx_queue *txq, int *len);
575 int qede_txq_has_work(struct qede_tx_queue *txq);
[all...]
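Note: the qede.h macros flatten a (traffic class, queue index) pair into one netdev txq id. A stand-alone rendering of the lines 420-421 arithmetic for illustration; tss_count stands in for QEDE_TSS_COUNT(edev), and 8 is only an assumed value:

static inline int qede_ndev_txq_id_sketch(int tss_count, int cos, int index)
{
	/* e.g. with 8 queues per class, cos == 2, index == 3:
	 * 8 * 2 + 3 == 19; the inverse is id / 8 -> cos, id % 8 -> index */
	return tss_count * cos + index;
}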
qede_ethtool.c
271 struct qede_tx_queue *txq, u8 **buf)
276 if (txq->is_xdp)
278 QEDE_TXQ_XDP_TO_IDX(edev, txq),
281 sprintf(*buf, "%d_%d: %s", txq->index, txq->cos,
326 &fp->txq[cos], &buf);
361 static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf) argument
366 **buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset));
406 qede_get_ethtool_stats_txq(&fp->txq[cos], &buf);
789 struct qede_tx_queue *txq; local
270 qede_get_strings_stats_txq(struct qede_dev *edev, struct qede_tx_queue *txq, u8 **buf) argument
874 struct qede_tx_queue *txq; local
1472 struct qede_tx_queue *txq = NULL; local
[all...]
/linux-master/drivers/net/ethernet/chelsio/cxgb4/
cudbg_lib.h
241 static inline void cudbg_fill_qdesc_txq(const struct sge_txq *txq, argument
246 entry->qid = txq->cntxt_id;
248 entry->num_desc = txq->size;
249 entry->data_size = txq->size * sizeof(struct tx_desc);
250 memcpy(entry->data, txq->desc, entry->data_size);
sched.c
175 struct sge_eth_txq *txq; local
180 txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
181 qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
189 struct sge_eth_txq *txq; local
196 txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
199 qe = t4_sched_entry_lookup(pi, SCHED_QUEUE, txq->q.cntxt_id);
220 struct sge_eth_txq *txq; local
232 txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
233 qid = txq->q.cntxt_id;
cxgb4_uld.c
407 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; local
409 if (txq && txq->q.desc) {
410 tasklet_kill(&txq->qresume_tsk);
412 txq->q.cntxt_id);
413 free_tx_desc(adap, &txq->q, txq->q.in_use, false);
414 kfree(txq->q.sdesc);
415 __skb_queue_purge(&txq->sendq);
416 free_txq(adap, &txq
431 struct sge_uld_txq *txq = &txq_info->uldtxq[i]; local
[all...]
/linux-master/include/linux/
netdevice.h
782 /* HW offloaded queuing disciplines txq count and offset maps */
2439 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
3291 void netif_schedule_queue(struct netdev_queue *txq);
3322 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); local
3323 netif_tx_start_queue(txq);
3346 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); local
3347 netif_tx_wake_queue(txq);
3422 static inline int netdev_queue_dql_avail(const struct netdev_queue *txq) argument
3426 return dql_avail(&txq->dql);
3653 struct netdev_queue *txq local
3667 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); local
3681 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); local
3708 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); local
4369 __netif_tx_lock(struct netdev_queue *txq, int cpu) argument
4376 __netif_tx_acquire(struct netdev_queue *txq) argument
4382 __netif_tx_release(struct netdev_queue *txq) argument
4387 __netif_tx_lock_bh(struct netdev_queue *txq) argument
4394 __netif_tx_trylock(struct netdev_queue *txq) argument
4405 __netif_tx_unlock(struct netdev_queue *txq) argument
4412 __netif_tx_unlock_bh(struct netdev_queue *txq) argument
4422 txq_trans_update(struct netdev_queue *txq) argument
4428 txq_trans_cond_update(struct netdev_queue *txq) argument
4439 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); local
4496 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); local
4911 netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, bool more) argument
[all...]
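Note: the lock helpers declared at lines 4369-4412 serialise per-queue TX state against other CPUs. A minimal sketch of the canonical pattern, assuming process context (hence the _bh variants); the wrapper name is hypothetical:

#include <linux/netdevice.h>

static void txq_locked_update_sketch(struct net_device *dev, unsigned int i)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

	__netif_tx_lock_bh(txq);		/* cf. line 4387 */
	if (!netif_tx_queue_stopped(txq))
		txq_trans_update(txq);		/* refresh trans_start, cf. line 4422 */
	__netif_tx_unlock_bh(txq);		/* cf. line 4412 */
}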
/linux-master/drivers/net/ethernet/ti/
cpts.c
89 skb_queue_walk_safe(&cpts->txq, skb, tmp) {
92 __skb_unlink(skb, &cpts->txq);
99 dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
329 spin_lock_irqsave(&cpts->txq.lock, flags);
330 skb_queue_splice_init(&cpts->txq, &txq_list);
331 spin_unlock_irqrestore(&cpts->txq.lock, flags);
352 dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
358 spin_lock_irqsave(&cpts->txq.lock, flags);
359 skb_queue_splice(&txq_list, &cpts->txq);
360 spin_unlock_irqrestore(&cpts->txq
[all...]
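Note: the cpts.c fragments use the standard "splice out under the queue lock, work unlocked, splice back what survives" idiom (lines 329-331 and 358-360). A self-contained sketch of the first half; the splice-back of unmatched skbs is elided and kfree_skb stands in for per-skb matching/expiry:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void txq_expire_sketch(struct sk_buff_head *txq)
{
	struct sk_buff *skb, *tmp;
	struct sk_buff_head list;
	unsigned long flags;

	__skb_queue_head_init(&list);

	/* move everything to a private list under the queue lock */
	spin_lock_irqsave(&txq->lock, flags);
	skb_queue_splice_init(txq, &list);
	spin_unlock_irqrestore(&txq->lock, flags);

	/* now walk the private list without holding the lock */
	skb_queue_walk_safe(&list, skb, tmp) {
		__skb_unlink(skb, &list);
		kfree_skb(skb);		/* stand-in for real processing */
	}
}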
/linux-master/drivers/net/wireless/microchip/wilc1000/
wlan.c
34 wilc->txq[q_num].count--;
45 if (!list_empty(&wilc->txq[q_num].txq_head.list)) {
46 tqe = list_first_entry(&wilc->txq[q_num].txq_head.list,
50 wilc->txq[q_num].count--;
65 list_add_tail(&tqe->list, &wilc->txq[q_num].txq_head.list);
67 wilc->txq[q_num].count++;
84 list_add(&tqe->list, &wilc->txq[q_num].txq_head.list);
86 wilc->txq[q_num].count++;
320 if (wl->txq[q_num].count <= q_limit)
375 if (wl->txq[
[all...]
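Note: the wilc1000 fragments implement each txq as a plain doubly-linked list plus an element count, rather than an sk_buff_head. A condensed sketch of the dequeue path at lines 45-50; the _sketch struct names are illustrative:

#include <linux/list.h>

struct txq_entry_sketch {
	struct list_head list;
	/* payload fields elided */
};

struct txq_sketch {
	struct list_head txq_head;
	int count;
};

/* pop the oldest entry, keeping the counter in sync */
static struct txq_entry_sketch *txq_remove_head_sketch(struct txq_sketch *q)
{
	struct txq_entry_sketch *tqe;

	if (list_empty(&q->txq_head))
		return NULL;
	tqe = list_first_entry(&q->txq_head, struct txq_entry_sketch, list);
	list_del(&tqe->list);
	q->count--;
	return tqe;
}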
/linux-master/net/mac80211/
agg-tx.c
192 struct ieee80211_txq *txq = sta->sta.txq[tid]; local
197 if (!txq)
200 txqi = to_txq_info(txq);
201 sdata = vif_to_sdata(txq->vif);
213 struct ieee80211_txq *txq = sta->sta.txq[tid]; local
218 if (!txq)
221 txqi = to_txq_info(txq);
974 struct ieee80211_txq *txq; local
[all...]
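Note: mac80211 keeps one ieee80211_txq pointer per station TID (sta->sta.txq[tid], lines 192 and 213) and wraps the driver-visible struct in a larger txq_info, recovered via to_txq_info() (lines 200 and 221). A sketch of that container_of() relationship, with internal fields trimmed and _sketch names substituted:

#include <linux/container_of.h>

struct txq_info_sketch {
	/* internal queueing state elided */
	struct ieee80211_txq txq;	/* driver-visible part, placed last */
};

#define to_txq_info_sketch(_txq) \
	container_of(_txq, struct txq_info_sketch, txq)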
/linux-master/drivers/net/ethernet/cortina/
gemini.c
124 struct gmac_txq txq[TX_QUEUE_NUM]; member in struct:gemini_ethernet_port
550 struct gmac_txq *txq = port->txq; local
584 txq->ring = desc_ring;
585 txq->skb = skb_tab;
586 txq->noirq_packets = 0;
592 txq->cptr = r;
594 txq++;
602 static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq, argument
608 unsigned int c = txq
1105 gmac_tx_irq_enable(struct net_device *netdev, unsigned int txq, int en) argument
1132 gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, struct gmac_txq *txq, unsigned short *desc) argument
1243 struct gmac_txq *txq; local
[all...]
/linux-master/drivers/net/wwan/t7xx/
t7xx_hif_cldma.c
578 queue_work(md_ctrl->txq[i].worker,
579 &md_ctrl->txq[i].cldma_work);
581 t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
654 flush_work(&md_ctrl->txq[i].cldma_work);
698 cancel_work_sync(&md_ctrl->txq[i].cldma_work);
701 md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
735 if (md_ctrl->txq[i].tr_done)
737 md_ctrl->txq[i].tr_done->gpd_addr,
760 struct cldma_queue *txq = &md_ctrl->txq[qnu local
[all...]
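Note: the t7xx fragments drive each CLDMA txq from its own work item, with the usual lifecycle: queue_work() from the IRQ path (lines 578-579), flush_work() to quiesce (line 654), and cancel_work_sync() before queue reset (line 698). A condensed sketch with hypothetical _sketch names:

#include <linux/workqueue.h>

struct cldma_queue_sketch {
	struct workqueue_struct *worker;	/* per-queue workqueue */
	struct work_struct cldma_work;
};

/* IRQ path: defer TX completion handling to the work item */
static void cldma_txq_irq_sketch(struct cldma_queue_sketch *q)
{
	queue_work(q->worker, &q->cldma_work);
}

/* stop path: wait for any in-flight work to finish */
static void cldma_txq_quiesce_sketch(struct cldma_queue_sketch *q)
{
	flush_work(&q->cldma_work);
}

/* reset path: also forbid re-queueing races before tearing down */
static void cldma_txq_teardown_sketch(struct cldma_queue_sketch *q)
{
	cancel_work_sync(&q->cldma_work);
}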
/linux-master/drivers/net/ethernet/pensando/ionic/
ionic_txrx.c
361 desc = &q->txq[q->head_idx];
426 struct ionic_queue *txq; local
447 txq = &lif->txqcqs[qi]->q;
448 nq = netdev_get_tx_queue(netdev, txq->index);
453 !netif_txq_maybe_stop(q_to_ndq(netdev, txq),
454 ionic_q_space_avail(txq),
460 space = min_t(int, n, ionic_q_space_avail(txq));
462 if (ionic_xdp_post_frame(txq, xdp_frames[nxmit],
472 ionic_dbell_ring(lif->kern_dbpage, txq->hw_type,
473 txq
492 struct ionic_queue *txq; local
[all...]
/linux-master/drivers/net/wireless/intel/iwlegacy/
4965-mac.c
218 if (!il->txq) {
1650 struct il_tx_queue *txq; local
1751 txq = &il->txq[txq_id];
1752 q = &txq->q;
1767 txq->skbs[q->write_ptr] = skb;
1770 out_cmd = txq->cmd[q->write_ptr];
1771 out_meta = &txq->meta[q->write_ptr];
1836 il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
1840 il->ops->txq_attach_buf_to_tfd(il, txq, phys_add
2454 struct il_tx_queue *txq = &il->txq[txq_id]; local
2761 struct il_tx_queue *txq = &il->txq[txq_id]; local
2907 struct il_tx_queue *txq = NULL; local
3909 il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq) argument
3954 il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq, dma_addr_t addr, u16 len, u8 reset, u8 pad) argument
3994 il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq) argument
6281 il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq, int tx_fifo_id, int scd_retry) argument
[all...]
