Searched refs:queue (Results 76 - 100 of 1368) sorted by relevance


/linux-master/drivers/net/wireless/silabs/wfx/
queue.c 11 #include "queue.h"
76 bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue) argument
78 return skb_queue_empty_lockless(&queue->normal) &&
79 skb_queue_empty_lockless(&queue->cab) &&
80 skb_queue_empty_lockless(&queue->offchan);
106 void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue, argument
109 __wfx_tx_queue_drop(wvif, &queue->normal, dropped);
110 __wfx_tx_queue_drop(wvif, &queue->cab, dropped);
111 __wfx_tx_queue_drop(wvif, &queue->offchan, dropped);
117 struct wfx_queue *queue local
130 struct wfx_queue *queue; local
149 struct wfx_queue *queue; local
226 wfx_tx_queue_get_weight(struct wfx_queue *queue) argument
[all...]
/linux-master/drivers/net/wireless/ath/ath5k/
dma.c 27 * handle queue setup for 5210 chipset (rest are handled on qcu.c).
116 * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
118 * @queue: The hw queue number
120 * Start DMA transmit for a specific queue and since 5210 doesn't have
121 * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
122 * queue for normal data and one queue for beacons). For queue setu
130 ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) argument
188 ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) argument
328 ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) argument
353 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) argument
396 ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) argument
[all...]
/linux-master/drivers/net/ethernet/cadence/
macb_main.c 187 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, argument
190 index = macb_tx_ring_wrap(queue->bp, index);
191 index = macb_adj_dma_desc_idx(queue->bp, index);
192 return &queue->tx_ring[index];
195 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, argument
198 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
201 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) argument
205 offset = macb_tx_ring_wrap(queue->bp, index) *
206 macb_dma_desc_get_size(queue
216 macb_rx_desc(struct macb_queue *queue, unsigned int index) argument
223 macb_rx_buffer(struct macb_queue *queue, unsigned int index) argument
498 struct macb_queue *queue; local
706 struct macb_queue *queue; local
730 struct macb_queue *queue; local
1086 struct macb_queue *queue = container_of(work, struct macb_queue, local
1232 macb_tx_complete(struct macb_queue *queue, int budget) argument
1304 gem_rx_refill(struct macb_queue *queue) argument
1368 discard_partial_frame(struct macb_queue *queue, unsigned int begin, unsigned int end) argument
1388 gem_rx(struct macb_queue *queue, struct napi_struct *napi, int budget) argument
1479 macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi, unsigned int first_frag, unsigned int last_frag) argument
1561 macb_init_rx_ring(struct macb_queue *queue) argument
1579 macb_rx(struct macb_queue *queue, struct napi_struct *napi, int budget) argument
1658 macb_rx_pending(struct macb_queue *queue) argument
1675 struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx); local
1711 macb_tx_restart(struct macb_queue *queue) argument
1736 macb_tx_complete_pending(struct macb_queue *queue) argument
1754 struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx); local
1799 struct macb_queue *queue; local
1836 struct macb_queue *queue = dev_id; local
1865 struct macb_queue *queue = dev_id; local
1894 struct macb_queue *queue = dev_id; local
2018 struct macb_queue *queue; local
2029 macb_tx_map(struct macb *bp, struct macb_queue *queue, struct sk_buff *skb, unsigned int hdrlen) argument
2311 struct macb_queue *queue = &bp->queues[queue_index]; local
2433 struct macb_queue *queue; local
2464 struct macb_queue *queue = &bp->queues[0]; local
2476 struct macb_queue *queue; local
2502 struct macb_queue *queue; local
2521 struct macb_queue *queue = &bp->queues[0]; local
2538 struct macb_queue *queue; local
2580 struct macb_queue *queue; local
2622 struct macb_queue *queue; local
2727 struct macb_queue *queue; local
2929 struct macb_queue *queue; local
2989 struct macb_queue *queue; local
3044 struct macb_queue *queue; local
3139 struct macb_queue *queue; local
4078 struct macb_queue *queue; local
5214 struct macb_queue *queue; local
5306 struct macb_queue *queue; local
[all...]
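
A note on the pattern above: macb_tx_desc(), macb_tx_skb() and macb_tx_dma() (lines 187-206) all funnel a free-running ring index through macb_tx_ring_wrap(), the usual power-of-two ring mask. A minimal standalone sketch of that idiom, with illustrative names (ring_wrap, RING_SIZE) rather than the driver's own:

#include <assert.h>

#define RING_SIZE 512u                        /* assumed power of two */

/* Wrap a free-running producer/consumer index into a ring slot. With a
 * power-of-two size, the modulo reduces to a single AND. */
static inline unsigned int ring_wrap(unsigned int index)
{
        return index & (RING_SIZE - 1u);
}

int main(void)
{
        assert(ring_wrap(RING_SIZE + 3u) == 3u);   /* 515 wraps to slot 3 */
        return 0;
}
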
/linux-master/drivers/iio/buffer/
industrialio-buffer-dmaengine.c 33 struct iio_dma_buffer_queue queue; member in struct:dmaengine_buffer
45 return container_of(buffer, struct dmaengine_buffer, queue.buffer);
54 spin_lock_irqsave(&block->queue->list_lock, flags);
56 spin_unlock_irqrestore(&block->queue->list_lock, flags);
61 static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue, argument
65 iio_buffer_to_dmaengine_buffer(&queue->buffer);
86 spin_lock_irq(&dmaengine_buffer->queue.list_lock);
88 spin_unlock_irq(&dmaengine_buffer->queue.list_lock);
95 static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue) argument
98 iio_buffer_to_dmaengine_buffer(&queue
[all...]
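
The helper at line 45 above recovers the enclosing dmaengine_buffer from a pointer to its embedded queue.buffer member via container_of(). A userspace sketch of the same pattern; every type name here is an illustrative stand-in:

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct buffer { int id; };
struct dma_queue { struct buffer buffer; /* ... */ };
struct dmaengine_buffer { struct dma_queue queue; /* ... */ };

/* Walk back from the embedded member to the enclosing structure; this is
 * how a buffer callback finds its driver-private state. */
static struct dmaengine_buffer *to_dmaengine_buffer(struct buffer *b)
{
        return container_of(b, struct dmaengine_buffer, queue.buffer);
}
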
/linux-master/drivers/net/wwan/t7xx/
t7xx_hif_cldma.c 60 static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, argument
63 queue->dir = tx_rx;
64 queue->index = index;
65 queue->md_ctrl = md_ctrl;
66 queue->tr_ring = NULL;
67 queue->tr_done = NULL;
68 queue->tx_next = NULL;
71 static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl, argument
74 md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
75 init_waitqueue_head(&queue
110 t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget) argument
196 t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget) argument
240 struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); local
257 t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue) argument
298 t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue) argument
336 struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work); local
506 t7xx_cldma_q_reset(struct cldma_queue *queue) argument
520 t7xx_cldma_rxq_init(struct cldma_queue *queue) argument
529 t7xx_cldma_txq_init(struct cldma_queue *queue) argument
843 t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req, struct sk_buff *skb) argument
902 t7xx_cldma_set_recv_skb(struct cldma_queue *queue, int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb)) argument
924 struct cldma_queue *queue; local
1101 t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb) argument
[all...]
/linux-master/drivers/net/ethernet/hisilicon/hns/
hns_dsaf_rcb.c 58 "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
134 *@q: hnae queue struct pointer
814 void hns_rcb_update_stats(struct hnae_queue *queue) argument
817 container_of(queue, struct ring_pair_cb, q);
823 hw_stats->rx_pkts += dsaf_read_dev(queue,
825 dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);
832 hw_stats->tx_pkts += dsaf_read_dev(queue,
834 dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);
844 *@queue: rcb ring
847 void hns_rcb_get_stats(struct hnae_queue *queue, u6 argument
1039 hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data) argument
[all...]
/linux-master/drivers/net/ethernet/engleder/
tsnep_main.c 10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
98 /* handle TX/RX queue 0 interrupt */
99 if ((active & adapter->queue[0].irq_mask) != 0) {
100 if (napi_schedule_prep(&adapter->queue[0].napi)) {
101 tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
103 __napi_schedule(&adapter->queue[0].napi);
112 struct tsnep_queue *queue = arg; local
114 /* handle TX/RX queue interrupt */
115 if (napi_schedule_prep(&queue
124 tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs) argument
140 tsnep_get_irq_coalesce(struct tsnep_queue *queue) argument
1789 tsnep_pending(struct tsnep_queue *queue) argument
1802 struct tsnep_queue *queue = container_of(napi, struct tsnep_queue, local
1842 tsnep_request_irq(struct tsnep_queue *queue, bool first) argument
1876 tsnep_free_irq(struct tsnep_queue *queue, bool first) argument
1892 tsnep_queue_close(struct tsnep_queue *queue, bool first) argument
1908 tsnep_queue_open(struct tsnep_adapter *adapter, struct tsnep_queue *queue, bool first) argument
1967 tsnep_queue_enable(struct tsnep_queue *queue) argument
1979 tsnep_queue_disable(struct tsnep_queue *queue) argument
2071 tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool) argument
2111 tsnep_disable_xsk(struct tsnep_queue *queue) argument
2344 struct tsnep_queue *queue; local
[all...]
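
The interrupt path at lines 99-103 above is the standard NAPI hand-off: mask the queue's interrupt, then defer the real work to the poll context. A hedged kernel-style sketch of that shape; napi_schedule_prep() and __napi_schedule() are the real helpers, while the handler framing and the queue->adapter back-pointer are assumptions based on the snippet:

static irqreturn_t queue_irq_sketch(int irq, void *arg)
{
        struct tsnep_queue *queue = arg;

        if (napi_schedule_prep(&queue->napi)) {
                /* Mask this queue's interrupt; the NAPI poll routine
                 * re-enables it once the ring has been drained. */
                tsnep_disable_irq(queue->adapter, queue->irq_mask);
                __napi_schedule(&queue->napi);
        }
        return IRQ_HANDLED;
}
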
/linux-master/drivers/net/ethernet/microsoft/mana/
gdma_main.c 207 struct gdma_queue *queue)
213 if (queue->type != GDMA_EQ)
219 req.hdr.dev_id = queue->gdma_dev->dev_id;
220 req.type = queue->type;
221 req.pdid = queue->gdma_dev->pdid;
222 req.doolbell_id = queue->gdma_dev->doorbell;
223 req.gdma_region = queue->mem_info.dma_region_handle;
224 req.queue_size = queue->queue_size;
225 req.log2_throttle_limit = queue->eq.log2_throttle_limit;
226 req.eq_pci_msix_index = queue
206 mana_gd_create_hw_eq(struct gdma_context *gc, struct gdma_queue *queue) argument
241 mana_gd_disable_queue(struct gdma_queue *queue) argument
323 mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue) argument
449 mana_gd_register_irq(struct gdma_queue *queue, const struct gdma_queue_spec *spec) argument
482 mana_gd_deregiser_irq(struct gdma_queue *queue) argument
560 mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets, struct gdma_queue *queue) argument
577 mana_gd_create_eq(struct gdma_dev *gd, const struct gdma_queue_spec *spec, bool create_hwq, struct gdma_queue *queue) argument
625 mana_gd_create_cq(const struct gdma_queue_spec *spec, struct gdma_queue *queue) argument
636 mana_gd_destroy_cq(struct gdma_context *gc, struct gdma_queue *queue) argument
656 struct gdma_queue *queue; local
780 struct gdma_queue *queue; local
827 struct gdma_queue *queue; local
867 mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue) argument
1148 mana_gd_post_and_ring(struct gdma_queue *queue, const struct gdma_wqe_request *wqe_req, struct gdma_posted_wqe_info *wqe_info) argument
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
vi.h 30 u32 me, u32 pipe, u32 queue, u32 vmid);
/linux-master/drivers/net/ethernet/stmicro/stmmac/
stmmac_xdp.h 11 u16 queue);
stmmac_main.c 134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
208 u32 queue; local
210 for (queue = 0; queue < maxq; queue++) {
211 struct stmmac_channel *ch = &priv->channel[queue];
214 test_bit(queue, pri
234 u32 queue; local
257 u32 queue; local
362 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) argument
380 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) argument
412 u32 queue; local
1256 u32 queue; local
1284 u32 queue; local
1344 stmmac_clear_rx_descriptors(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) argument
1373 stmmac_clear_tx_descriptors(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) argument
1408 u32 queue; local
1430 stmmac_init_rx_buffers(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, struct dma_desc *p, int i, gfp_t flags, u32 queue) argument
1498 stmmac_free_tx_buffer(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue, int i) argument
1544 dma_free_rx_skbufs(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) argument
1555 stmmac_alloc_rx_buffers(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue, gfp_t flags) argument
1588 dma_free_rx_xskbufs(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) argument
1606 stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) argument
1643 stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) argument
1661 __init_dma_rx_desc_rings(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue, gfp_t flags) argument
1727 int queue; local
1769 __init_dma_tx_desc_rings(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) argument
1821 u32 queue; local
1867 dma_free_tx_skbufs(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) argument
1893 u32 queue; local
1905 __free_dma_rx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) argument
1942 u32 queue; local
1955 __free_dma_tx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) argument
1989 u32 queue; local
2006 __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) argument
2084 u32 queue; local
2112 __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf, u32 queue) argument
2163 u32 queue; local
2228 int queue; local
2459 stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) argument
2602 stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, bool *pending_packets) argument
3060 stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) argument
3167 u32 queue; local
3184 u32 queue; local
3209 u32 queue; local
3226 u32 queue; local
3246 u32 queue; local
3266 u32 queue; local
3470 u32 queue; local
4144 stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, int total_len, bool last_segment, u32 queue) argument
4184 stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) argument
4238 u32 queue = skb_get_queue_mapping(skb); local
4505 u32 queue = skb_get_queue_mapping(skb); local
4786 stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) argument
4899 stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, struct xdp_frame *xdpf, bool dma_map) argument
5001 int queue; local
5079 int queue; local
5111 stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, struct dma_desc *p, struct dma_desc *np, struct xdp_buff *xdp) argument
5155 stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) argument
5223 stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) argument
5415 stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) argument
6020 u32 queue; local
6336 u32 queue; local
6738 int queue; local
6773 stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) argument
6786 stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) argument
6836 stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) argument
6849 stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) argument
7031 stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) argument
7279 u32 queue, maxq; local
7308 u32 queue, maxq; local
7920 stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) argument
7928 stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) argument
7947 u32 queue; local
[all...]
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_queue.c 33 pr_debug("Printing queue properties:\n");
46 void print_queue(struct queue *q)
50 pr_debug("Printing queue:\n");
67 int init_queue(struct queue **q, const struct queue_properties *properties)
69 struct queue *tmp_q;
81 void uninit_queue(struct queue *q)
/linux-master/drivers/gpu/drm/i915/gt/
intel_reset_types.h 56 wait_queue_head_t queue; member in struct:intel_reset
/linux-master/drivers/gpu/drm/imagination/
pvr_queue.h 38 /** @base: Base queue fence context. */
64 /** @queue: Queue that created this fence. */
65 struct pvr_queue *queue; member in struct:pvr_queue_fence
69 * struct pvr_queue - Job queue
71 * Used to queue and track execution of pvr_job objects.
74 /** @scheduler: Single entity scheduler use to push jobs to this queue. */
77 /** @entity: Scheduling entity backing this queue. */
80 /** @type: Type of jobs queued to this queue. */
83 /** @ctx: Context object this queue is bound to. */
86 /** @node: Used to add the queue t
[all...]
/linux-master/net/sctp/
inqueue.c 11 * An SCTP inqueue is a queue into which you push SCTP packets
32 void sctp_inq_init(struct sctp_inq *queue) argument
34 INIT_LIST_HEAD(&queue->in_chunk_list);
35 queue->in_progress = NULL;
38 INIT_WORK(&queue->immediate, NULL);
50 void sctp_inq_free(struct sctp_inq *queue) argument
54 /* Empty the queue. */
55 list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
63 if (queue->in_progress) {
64 sctp_inq_chunk_free(queue
92 sctp_inq_peek(struct sctp_inq *queue) argument
115 sctp_inq_pop(struct sctp_inq *queue) argument
[all...]
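
sctp_inq_free() above drains the list with list_for_each_entry_safe() because each entry is freed inside the loop; the plain iterator would dereference a freed node to advance. A userspace sketch of the same safe-unlink pattern on a singly linked list:

#include <stdlib.h>

struct chunk { struct chunk *next; };

/* Free every chunk, saving ->next before each free() -- exactly what the
 * kernel's _safe list iterators do behind the scenes. */
static void inq_free(struct chunk *head)
{
        struct chunk *c = head;

        while (c) {
                struct chunk *tmp = c->next;   /* saved before free() */
                free(c);
                c = tmp;
        }
}
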
/linux-master/drivers/md/dm-vdo/
priority-table.c 25 * The head of a queue of table entries, all having the same priority
27 struct list_head queue; member in struct:bucket
34 * of the queue in the appropriate bucket. The dequeue operation finds the highest-priority
72 INIT_LIST_HEAD(&bucket->queue);
116 list_del_init(&table->buckets[priority].queue);
120 * vdo_priority_table_enqueue() - Add a new entry to the priority table, appending it to the queue
133 /* Append the entry to the queue in the specified bucket. */
134 list_move_tail(entry, &table->buckets[priority].queue);
174 entry = bucket->queue.next;
178 if (list_empty(&bucket->queue))
[all...]
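
The comments above describe the whole scheme: one FIFO bucket per priority, enqueue appends to the bucket's tail, dequeue takes the head of the highest-priority non-empty bucket. A self-contained userspace sketch under those assumptions; the real table also keeps a search bit-vector so dequeue need not scan empty buckets, which this sketch skips:

#include <stddef.h>

#define MAX_PRIORITY 31

struct node { struct node *next, *prev; };        /* intrusive list link */

static void list_init(struct node *h)           { h->next = h->prev = h; }
static int  list_is_empty(const struct node *h) { return h->next == h; }

static void list_add_tail(struct node *n, struct node *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->next = n->prev = n;
}

struct priority_table { struct node buckets[MAX_PRIORITY + 1]; };

static void table_init(struct priority_table *t)
{
        for (int p = 0; p <= MAX_PRIORITY; p++)
                list_init(&t->buckets[p]);
}

/* FIFO within a priority level, as in vdo_priority_table_enqueue(). */
static void table_enqueue(struct priority_table *t, int prio, struct node *e)
{
        list_add_tail(e, &t->buckets[prio]);
}

/* Highest priority first, oldest entry first within that priority. */
static struct node *table_dequeue(struct priority_table *t)
{
        for (int p = MAX_PRIORITY; p >= 0; p--) {
                if (!list_is_empty(&t->buckets[p])) {
                        struct node *e = t->buckets[p].next;
                        list_del(e);
                        return e;
                }
        }
        return NULL;   /* table empty */
}
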
/linux-master/drivers/net/wireless/ralink/rt2x00/
rt2x00mac.c 20 struct data_queue *queue,
80 retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
96 struct data_queue *queue = NULL; local
108 * Use the ATIM queue if appropriate and present.
114 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
115 if (unlikely(!queue)) {
117 "Attempt to send packet over invalid queue %d\n"
123 * If CTS/RTS is required. create and queue that frame first.
134 if (rt2x00queue_available(queue) <= 1) {
136 * Recheck for full queue unde
19 rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, struct data_queue *queue, struct sk_buff *frag_skb) argument
208 struct data_queue *queue = rt2x00dev->bcn; local
684 struct data_queue *queue; local
729 struct data_queue *queue; local
814 struct data_queue *queue; local
829 struct data_queue *queue; local
[all...]
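
The check at line 134 above (rt2x00queue_available(queue) <= 1) is stop-early flow control: the driver halts the queue while one slot still remains, so a frame already being written can still land. A hedged sketch using the underlying mac80211 call; ieee80211_stop_queue() is the real API, while the driver's own pause helper and locking are omitted here:

if (rt2x00queue_available(queue) <= 1) {
        /* Stop the mac80211 queue before the hardware ring overflows;
         * it is woken again once TX completions free up entries. */
        ieee80211_stop_queue(rt2x00dev->hw, qid);
}
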
/linux-master/drivers/ptp/
ptp_chardev.c 109 struct timestamp_event_queue *queue; local
113 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
114 if (!queue)
116 queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
117 if (!queue->mask) {
118 kfree(queue);
121 bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
122 spin_lock_init(&queue->lock);
124 list_add_tail(&queue
143 struct timestamp_event_queue *queue = pccontext->private_clkdata; local
510 struct timestamp_event_queue *queue; local
528 struct timestamp_event_queue *queue; local
[all...]
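
Lines 113-118 above show the allocate-then-unwind idiom: if the second allocation (the channel mask) fails, the queue object allocated first is released before returning. The same shape as a plain C sketch, with simplified names and a 64-bit-word bitmap assumption:

#include <stdlib.h>

struct tsq { unsigned long *mask; };

static struct tsq *tsq_alloc(size_t nbits)
{
        struct tsq *q = calloc(1, sizeof(*q));

        if (!q)
                return NULL;
        q->mask = calloc((nbits + 63) / 64, sizeof(unsigned long));
        if (!q->mask) {
                free(q);        /* unwind the earlier allocation */
                return NULL;
        }
        return q;
}
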
/linux-master/drivers/net/ethernet/netronome/nfp/
ccm_mbox.c 14 * enqueue that skb onto the request queue. If threads skb is first
15 * in queue this thread will handle the mailbox operation. It copies
21 * it), or becomes the first in queue.
118 return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
133 skb = skb_peek(&nn->mbox_cmsg.queue);
159 skb = __skb_peek(&nn->mbox_cmsg.queue);
192 skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
203 skb = __skb_peek(&nn->mbox_cmsg.queue);
210 skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
330 spin_lock_bh(&nn->mbox_cmsg.queue
[all...]
/linux-master/net/core/
net-sysfs.c 804 struct netdev_rx_queue *queue = to_rx_queue(kobj); local
809 return attribute->show(queue, buf);
816 struct netdev_rx_queue *queue = to_rx_queue(kobj); local
821 return attribute->store(queue, buf, count);
830 static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf) argument
840 map = rcu_dereference(queue->rps_map);
852 static int netdev_rx_queue_set_rps_mask(struct netdev_rx_queue *queue, argument
877 old_map = rcu_dereference_protected(queue->rps_map,
879 rcu_assign_pointer(queue->rps_map, map);
904 static ssize_t store_rps_map(struct netdev_rx_queue *queue, argument
931 show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, char *buf) argument
953 store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, const char *buf, size_t len) argument
1031 struct netdev_rx_queue *queue = to_rx_queue(kobj); local
1055 struct netdev_rx_queue *queue = to_rx_queue(kobj); local
1081 rx_queue_default_mask(struct net_device *dev, struct netdev_rx_queue *queue) argument
1095 struct netdev_rx_queue *queue = dev->_rx + index; local
1132 struct netdev_rx_queue *queue = dev->_rx + index; local
1226 struct netdev_queue *queue = to_netdev_queue(kobj); local
1240 struct netdev_queue *queue = to_netdev_queue(kobj); local
1253 tx_timeout_show(struct netdev_queue *queue, char *buf) argument
1260 get_netdev_queue_index(struct netdev_queue *queue) argument
1271 traffic_class_show(struct netdev_queue *queue, char *buf) argument
1309 tx_maxrate_show(struct netdev_queue *queue, char *buf) argument
1315 tx_maxrate_store(struct netdev_queue *queue, const char *buf, size_t len) argument
1390 bql_show_hold_time(struct netdev_queue *queue, char *buf) argument
1398 bql_set_hold_time(struct netdev_queue *queue, const char *buf, size_t len) argument
1418 bql_show_stall_thrs(struct netdev_queue *queue, char *buf) argument
1425 bql_set_stall_thrs(struct netdev_queue *queue, const char *buf, size_t len) argument
1452 bql_show_stall_max(struct netdev_queue *queue, char *buf) argument
1457 bql_set_stall_max(struct netdev_queue *queue, const char *buf, size_t len) argument
1467 bql_show_stall_cnt(struct netdev_queue *queue, char *buf) argument
1477 bql_show_inflight(struct netdev_queue *queue, char *buf) argument
1581 xps_cpus_show(struct netdev_queue *queue, char *buf) argument
1614 xps_cpus_store(struct netdev_queue *queue, const char *buf, size_t len) argument
1655 xps_rxqs_show(struct netdev_queue *queue, char *buf) argument
1674 xps_rxqs_store(struct netdev_queue *queue, const char *buf, size_t len) argument
1731 struct netdev_queue *queue = to_netdev_queue(kobj); local
1739 struct netdev_queue *queue = to_netdev_queue(kobj); local
1776 struct netdev_queue *queue = dev->_tx + index; local
1808 struct netdev_queue *queue = ndev->_tx + index; local
1846 struct netdev_queue *queue = dev->_tx + i; local
[all...]
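
netdev_rx_queue_set_rps_mask() at lines 877-879 above swaps the per-queue RPS map with the classic RCU publish pattern: build the new map, publish it with rcu_assign_pointer(), and free the old one only after a grace period. A hedged kernel-style fragment of that idiom; the primitives are real RCU API, while the lock name is an illustrative stand-in for the file's update-side mutex:

static DEFINE_MUTEX(map_mutex);            /* illustrative update-side lock */

old_map = rcu_dereference_protected(queue->rps_map,
                                    mutex_is_locked(&map_mutex));
rcu_assign_pointer(queue->rps_map, new_map);   /* publish to RCU readers */
if (old_map)
        kfree_rcu(old_map, rcu);   /* freed only after a grace period */
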
/linux-master/net/netfilter/
xt_NFQUEUE.c 2 /* iptables module for using new netfilter netlink queue
40 u32 queue = info->queuenum; local
43 queue = nfqueue_hash(skb, queue, info->queues_total,
46 return NF_QUEUE_NR(queue);
89 u32 queue = info->queuenum; local
96 queue = info->queuenum + cpu % info->queues_total;
98 queue = nfqueue_hash(skb, queue, info->queues_total,
103 ret = NF_QUEUE_NR(queue);
[all...]
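
The two target variants above differ only in how they spread packets across the queue range: by flow hash (nfqueue_hash()) or by CPU id, each reduced modulo queues_total so one flow or one CPU always maps to the same queue. A standalone sketch of that arithmetic; pick_queue() is an illustrative name, the netfilter helpers are the real ones:

/* Steer a packet to one of `total` queues starting at `base`, either
 * per-CPU or per-flow. Mirrors the two branches in the snippet above. */
static unsigned int pick_queue(unsigned int base, unsigned int total,
                               unsigned int cpu, unsigned int flow_hash,
                               int per_cpu)
{
        if (total <= 1)
                return base;
        return base + ((per_cpu ? cpu : flow_hash) % total);
}
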
/linux-master/drivers/watchdog/
mtx-1_wdt.c 53 int queue; member in struct:__anon2919
70 if (mtx1_wdt_device.queue && ticks)
88 if (!mtx1_wdt_device.queue) {
89 mtx1_wdt_device.queue = 1;
103 if (mtx1_wdt_device.queue) {
104 mtx1_wdt_device.queue = 0;
209 mtx1_wdt_device.queue = 0;
227 if (mtx1_wdt_device.queue) {
228 mtx1_wdt_device.queue = 0;
/linux-master/drivers/staging/rtl8723bs/os_dep/
xmit_linux.c 70 u16 queue; local
73 queue = skb_get_queue_mapping(pkt);
75 if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
76 (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
77 netif_wake_subqueue(padapter->pnetdev, queue);
79 if (__netif_subqueue_stopped(padapter->pnetdev, queue))
80 netif_wake_subqueue(padapter->pnetdev, queue);
101 if (!list_empty(&padapter->xmitpriv.pending_xmitbuf_queue.queue))
108 u16 queue; local
110 queue
[all...]
/linux-master/drivers/net/xen-netback/
xenbus.c 14 struct xenvif_queue *queue);
28 struct xenvif_queue *queue = m->private; local
29 struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
30 struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
36 seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
52 queue->pending_prod,
53 queue->pending_cons,
54 nr_pending_reqs(queue));
56 queue->dealloc_prod,
57 queue
109 struct xenvif_queue *queue = local
143 void *queue = NULL; local
534 struct xenvif_queue *queue = &vif->queues[queue_index]; local
731 struct xenvif_queue *queue; local
849 connect_data_rings(struct backend_info *be, struct xenvif_queue *queue) argument
[all...]
/linux-master/arch/mips/include/asm/octeon/
cvmx-pko.h 46 * maintaining PKO queue pointers. These are now stored in a
50 * queue locking correctly applies across all operating
62 #include <asm/octeon/cvmx-cmd-queue.h>
100 * the same queue at the same time
105 * to the output queue. This will maintain packet ordering on
110 * PKO uses the common command queue locks to insure exclusive
111 * access to the output queue. This is a memory based
144 * addition to the output queue,
148 * The output queue to send the packet to (0-127 are
151 uint64_t queue member in struct:__anon21::__anon22
324 cvmx_pko_doorbell(uint64_t port, uint64_t queue, uint64_t len) argument
376 cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue, cvmx_pko_lock_t use_locking) argument
417 cvmx_pko_send_packet_finish( uint64_t port, uint64_t queue, union cvmx_pko_command_word0 pko_command, union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking) argument
460 cvmx_pko_send_packet_finish3( uint64_t port, uint64_t queue, union cvmx_pko_command_word0 pko_command, union cvmx_buf_ptr packet, uint64_t addr, cvmx_pko_lock_t use_locking) argument
[all...]
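
The prototypes above imply a two-phase send protocol: cvmx_pko_send_packet_prepare() claims ordering/locking for the target queue before the command is built, and cvmx_pko_send_packet_finish() posts the command words and rings the doorbell. A hedged usage sketch under that reading; CVMX_PKO_LOCK_CMD_QUEUE is one of the header's lock modes, all values are illustrative, and error handling is omitted:

union cvmx_pko_command_word0 pko_command;
union cvmx_buf_ptr packet;
uint64_t port = 0, queue = 0;

cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);
/* ... fill in pko_command (segments, total bytes) and the packet
 * buffer pointer here ... */
cvmx_pko_send_packet_finish(port, queue, pko_command, packet,
                            CVMX_PKO_LOCK_CMD_QUEUE);
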

Completed in 510 milliseconds
