Searched refs:tx_queue (Results 1 - 25 of 168) sorted by relevance

/linux-master/drivers/net/ethernet/sfc/siena/
tx_common.h
14 int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue);
15 void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue);
16 void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue);
17 void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue);
24 void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
25 void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
27 void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
30 struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
32 int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
36 int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
[all...]
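
The four declarations above (probe, init, fini, remove) are the queue lifecycle of this driver family. Below is a minimal userspace sketch of that ordering; all names and types are hypothetical toys, not the driver's own: probe allocates, init resets counters (and can run again after a NIC reset), fini drops anything still in flight, remove frees what probe allocated.

/* Hypothetical toy model of the probe/init/fini/remove ordering above. */
#include <stdio.h>
#include <stdlib.h>

struct toy_tx_queue {
	unsigned int entries;
	unsigned int insert_count, read_count;
	void **buffer;			/* per-descriptor software state */
};

static int toy_probe(struct toy_tx_queue *q, unsigned int entries)
{
	q->entries = entries;		/* probe: allocate resources once */
	q->buffer = calloc(entries, sizeof(*q->buffer));
	return q->buffer ? 0 : -1;
}

static void toy_init(struct toy_tx_queue *q)
{
	q->insert_count = q->read_count = 0;	/* init: reset; may rerun after reset */
}

static void toy_fini(struct toy_tx_queue *q)
{
	while (q->read_count != q->insert_count)	/* fini: drop in-flight buffers */
		q->buffer[q->read_count++ % q->entries] = NULL;
}

static void toy_remove(struct toy_tx_queue *q)
{
	free(q->buffer);		/* remove: undo probe */
	q->buffer = NULL;
}

int main(void)
{
	struct toy_tx_queue q = { 0 };

	if (toy_probe(&q, 512))
		return 1;
	toy_init(&q);
	/* ... xmit / xmit_done traffic would run here ... */
	toy_fini(&q);
	toy_remove(&q);
	puts("lifecycle ok");
	return 0;
}
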
tx_common.c
17 static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) argument
19 return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
23 int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue) argument
25 struct efx_nic *efx = tx_queue->efx;
32 tx_queue->ptr_mask = entries - 1;
36 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
39 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
41 if (!tx_queue->buffer)
68 efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue) argument
98 efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue) argument
122 efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer, unsigned int *pkts_compl, unsigned int *bytes_compl) argument
170 efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue) argument
198 efx_dequeue_buffers(struct efx_tx_queue *tx_queue, unsigned int index, unsigned int *pkts_compl, unsigned int *bytes_compl) argument
227 efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue) argument
240 efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) argument
273 efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue, unsigned int insert_count) argument
288 efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, size_t len) argument
329 efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, unsigned int segment_count) argument
431 efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb) argument
[all...]
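
The probe path above rounds the requested ring size up to a power of two and stores entries - 1 as ptr_mask, so ring indices reduce with a single AND; efx_tx_cb_page_count then spreads the ring across copy-buffer pages with DIV_ROUND_UP. A small standalone sketch of that arithmetic (roundup_pow2 and the per-page count are stand-ins, not the kernel helpers):

#include <assert.h>
#include <stdio.h>

/* Stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int e = 1;

	while (e < v)
		e <<= 1;
	return e;
}

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int txq_entries = 1000;	/* hypothetical requested size */
	unsigned int entries = roundup_pow2(txq_entries);
	unsigned int ptr_mask = entries - 1;
	unsigned int per_page = 32;		/* hypothetical copy buffers per page */

	assert((entries & ptr_mask) == 0);	/* power of two */
	printf("entries=%u ptr_mask=%#x cb_pages=%u\n",
	       entries, ptr_mask, DIV_ROUND_UP(ptr_mask + 1, per_page));
	/* Index 1025 wraps to 1 with a single AND: */
	printf("1025 & mask = %u\n", 1025u & ptr_mask);
	return 0;
}
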
tx.c
26 static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, argument
29 unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
31 &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
36 efx_siena_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
83 static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue, argument
93 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
95 copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
106 ++tx_queue->insert_count;
139 netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, argument
142 unsigned int old_insert_count = tx_queue->insert_count;
212 struct efx_tx_queue *tx_queue; local
303 struct efx_tx_queue *tx_queue; local
346 efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) argument
[all...]
nic_common.h
60 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) argument
62 return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
68 static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count) argument
70 unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
86 static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, argument
89 bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);
91 tx_queue->empty_read_count = 0;
92 return was_empty && tx_queue->write_count - write_count == 1;
118 static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) argument
120 return tx_queue->efx->type->tx_probe(tx_queue);
122 efx_nic_init_tx(struct efx_tx_queue *tx_queue) argument
126 efx_nic_remove_tx(struct efx_tx_queue *tx_queue) argument
131 efx_nic_push_buffers(struct efx_tx_queue *tx_queue) argument
[all...]
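
efx_nic_may_push_tx_desc above gates the descriptor-push fast path: push inline only if the queue was empty at the last completion snapshot and exactly one new descriptor has been written since; unsigned subtraction keeps the test correct across counter wraparound. A toy model of the test, with a hypothetical validity flag modeled loosely on the driver's EFX_EMPTY_COUNT_VALID:

#include <stdbool.h>
#include <stdio.h>

struct toy_txq {
	unsigned int write_count;
	unsigned int empty_read_count;	/* snapshot taken at last completion */
};

#define EMPTY_VALID 0x80000000u		/* hypothetical validity flag */

static bool toy_tx_is_empty(const struct toy_txq *q, unsigned int write_count)
{
	unsigned int e = q->empty_read_count;

	/* Empty iff a valid snapshot exists and it matches write_count. */
	return (e & EMPTY_VALID) && (e & ~EMPTY_VALID) == write_count;
}

static bool toy_may_push(struct toy_txq *q, unsigned int old_write_count)
{
	bool was_empty = toy_tx_is_empty(q, old_write_count);

	q->empty_read_count = 0;	/* consume the snapshot */
	/* Unsigned difference == 1 even if write_count wrapped past 0. */
	return was_empty && q->write_count - old_write_count == 1;
}

int main(void)
{
	struct toy_txq q = { .write_count = 5,
			     .empty_read_count = EMPTY_VALID | 4 };

	/* One descriptor added (4 -> 5) to a queue last seen empty at 4: push. */
	printf("push? %d\n", toy_may_push(&q, 4));
	return 0;
}
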
efx_channels.c
182 "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
535 struct efx_tx_queue *tx_queue; local
548 tx_queue = &channel->tx_queue[j];
549 tx_queue->efx = efx;
550 tx_queue->queue = -1;
551 tx_queue->label = j;
552 tx_queue->channel = channel;
606 struct efx_tx_queue *tx_queue; local
623 tx_queue
644 struct efx_tx_queue *tx_queue; local
747 struct efx_tx_queue *tx_queue; local
771 efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, struct efx_tx_queue *tx_queue) argument
787 struct efx_tx_queue *tx_queue; local
872 struct efx_tx_queue *tx_queue; local
1123 struct efx_tx_queue *tx_queue; local
1147 struct efx_tx_queue *tx_queue; local
1204 struct efx_tx_queue *tx_queue; local
[all...]
nic.h
124 int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
125 void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
126 void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
127 void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
128 void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
129 unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
farch.c
283 static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue) argument
288 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
290 efx_writed_page(tx_queue->efx, &reg,
291 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
295 static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue, argument
304 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
308 efx_writeo_page(tx_queue->efx, &reg,
309 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
317 efx_farch_tx_write(struct efx_tx_queue *tx_queue) argument
358 efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned int len) argument
371 efx_farch_tx_probe(struct efx_tx_queue *tx_queue) argument
383 efx_farch_tx_init(struct efx_tx_queue *tx_queue) argument
423 efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue) argument
437 efx_farch_tx_fini(struct efx_tx_queue *tx_queue) argument
452 efx_farch_tx_remove(struct efx_tx_queue *tx_queue) argument
607 struct efx_tx_queue *tx_queue; local
651 struct efx_tx_queue *tx_queue; local
719 struct efx_tx_queue *tx_queue; local
830 struct efx_tx_queue *tx_queue; local
1085 struct efx_tx_queue *tx_queue; local
[all...]
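
efx_farch_tx_write above drains software buffers into hardware descriptors until write_count catches up with insert_count, then publishes the masked write pointer as the doorbell. A self-contained sketch of that loop; the ring size, descriptor layout, and doorbell variable are all illustrative:

#include <stdio.h>

#define RING 8u
#define MASK (RING - 1)

struct toy_desc { unsigned int len; };

static unsigned int doorbell;	/* stands in for the TX_DESC_UPD register */

static void toy_tx_write(const unsigned int *buf_len,
			 unsigned int *write_count, unsigned int insert_count,
			 struct toy_desc *ring)
{
	while (*write_count != insert_count) {
		unsigned int write_ptr = (*write_count)++ & MASK;

		ring[write_ptr].len = buf_len[write_ptr];	/* build descriptor */
	}
	doorbell = *write_count & MASK;		/* notify hardware of the new tail */
}

int main(void)
{
	unsigned int buf_len[RING] = { 60, 1514, 128 };
	struct toy_desc ring[RING] = { { 0 } };
	unsigned int write_count = 0;

	toy_tx_write(buf_len, &write_count, 3, ring);
	printf("write_count=%u doorbell=%u\n", write_count, doorbell);
	return 0;
}
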
/linux-master/drivers/net/ethernet/sfc/
ef100_tx.h
18 int ef100_tx_probe(struct efx_tx_queue *tx_queue);
19 void ef100_tx_init(struct efx_tx_queue *tx_queue);
20 void ef100_tx_write(struct efx_tx_queue *tx_queue);
25 netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
26 int __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
tx_common.c
17 static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) argument
19 return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
23 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) argument
25 struct efx_nic *efx = tx_queue->efx;
32 tx_queue->ptr_mask = entries - 1;
36 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
39 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
41 if (!tx_queue->buffer)
68 efx_init_tx_queue(struct efx_tx_queue *tx_queue) argument
98 efx_fini_tx_queue(struct efx_tx_queue *tx_queue) argument
125 efx_remove_tx_queue(struct efx_tx_queue *tx_queue) argument
149 efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer, unsigned int *pkts_compl, unsigned int *bytes_compl, unsigned int *efv_pkts_compl) argument
209 efx_dequeue_buffers(struct efx_tx_queue *tx_queue, unsigned int index, unsigned int *pkts_compl, unsigned int *bytes_compl, unsigned int *efv_pkts_compl) argument
240 efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue) argument
253 efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) argument
290 efx_enqueue_unwind(struct efx_tx_queue *tx_queue, unsigned int insert_count) argument
307 efx_tx_map_chunk(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, size_t len) argument
348 efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, unsigned int segment_count) argument
451 efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb) argument
[all...]
tx_common.h
14 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
15 void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
16 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
17 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
19 void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
30 void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
31 int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
33 void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
36 struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
39 int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
[all...]
ef100_tx.c
23 int ef100_tx_probe(struct efx_tx_queue *tx_queue) argument
26 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd,
27 (tx_queue->ptr_mask + 2) *
32 void ef100_tx_init(struct efx_tx_queue *tx_queue) argument
35 tx_queue->core_txq =
36 netdev_get_tx_queue(tx_queue->efx->net_dev,
37 tx_queue->channel->channel -
38 tx_queue->efx->tx_channel_offset);
46 tx_queue
52 ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) argument
102 ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) argument
110 ef100_notify_tx_desc(struct efx_tx_queue *tx_queue) argument
128 ef100_tx_push_buffers(struct efx_tx_queue *tx_queue) argument
255 ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, unsigned int segment_count, struct efx_rep *efv) argument
343 ef100_tx_write(struct efx_tx_queue *tx_queue) argument
355 struct efx_tx_queue *tx_queue = local
370 ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) argument
376 __ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb, struct efx_rep *efv) argument
[all...]
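
ef100_tx_init above resolves a channel to its core netdev TX queue by subtracting tx_channel_offset from the channel number. The mapping is trivial but easy to get backwards; a sketch with made-up numbers, where plain integers stand in for the kernel's netdev_get_tx_queue() lookup:

#include <stdio.h>

int main(void)
{
	int tx_channel_offset = 2;	/* hypothetical: first two channels are RX-only */

	for (int channel = 2; channel < 6; channel++)
		printf("channel %d -> core txq %d\n",
		       channel, channel - tx_channel_offset);
	return 0;
}
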
tx.c
34 static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, argument
37 unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
39 &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
44 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
52 u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, argument
57 return efx_tx_get_copy_buffer(tx_queue, buffer);
99 static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue, argument
109 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
111 copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
122 ++tx_queue->insert_count;
220 efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb) argument
276 efx_tx_may_pio(struct efx_tx_queue *tx_queue) argument
322 __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) argument
425 struct efx_tx_queue *tx_queue; local
516 struct efx_tx_queue *tx_queue; local
554 efx_xmit_done_single(struct efx_tx_queue *tx_queue) argument
594 efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) argument
[all...]
tx_tso.c
79 static inline void prefetch_ptr(struct efx_tx_queue *tx_queue) argument
81 unsigned int insert_ptr = efx_tx_queue_get_insert_index(tx_queue);
84 ptr = (char *) (tx_queue->buffer + insert_ptr);
88 ptr = (char *)(((efx_qword_t *)tx_queue->txd.addr) + insert_ptr);
95 * @tx_queue: Efx TX queue
102 static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue, argument
112 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
113 ++tx_queue->insert_count;
115 EFX_WARN_ON_ONCE_PARANOID(tx_queue->insert_count -
116 tx_queue->read_count >= tx_queue->efx->txq_entries);
169 tso_start(struct tso_state *st, struct efx_nic *efx, struct efx_tx_queue *tx_queue, const struct sk_buff *skb) argument
232 tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, struct tso_state *st) argument
285 tso_start_new_packet(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, struct tso_state *st) argument
362 efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb, bool *data_mapped) argument
[all...]
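
tx_tso.c drives a small state machine (tso_state) over the skb; the arithmetic underneath is splitting the payload into MSS-sized output packets, each repeating the headers and advancing the TCP sequence number by the bytes it consumed. A sketch of just that arithmetic, with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned int payload = 4300, mss = 1448;	/* hypothetical sizes */
	unsigned int seq = 1000;			/* hypothetical starting seq */

	for (unsigned int off = 0; off < payload; off += mss) {
		unsigned int len = payload - off < mss ? payload - off : mss;

		printf("segment: seq=%u len=%u\n", seq, len);
		seq += len;		/* next packet continues the byte stream */
	}
	return 0;
}
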
nic_common.h
59 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) argument
61 return ((efx_qword_t *)(tx_queue->txd.addr)) + index;
67 static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count) argument
69 unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
77 int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
86 static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, argument
89 bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);
91 tx_queue->empty_read_count = 0;
92 return was_empty && tx_queue->write_count - write_count == 1;
118 static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) argument
122 efx_nic_init_tx(struct efx_tx_queue *tx_queue) argument
126 efx_nic_remove_tx(struct efx_tx_queue *tx_queue) argument
131 efx_nic_push_buffers(struct efx_tx_queue *tx_queue) argument
[all...]
mcdi_functions.h
22 int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue);
23 void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue);
24 void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue);
tx.h
15 unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
18 u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
efx_channels.c
181 "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
533 struct efx_tx_queue *tx_queue; local
546 tx_queue = &channel->tx_queue[j];
547 tx_queue->efx = efx;
548 tx_queue->queue = -1;
549 tx_queue->label = j;
550 tx_queue->channel = channel;
603 struct efx_tx_queue *tx_queue; local
620 tx_queue
641 struct efx_tx_queue *tx_queue; local
741 struct efx_tx_queue *tx_queue; local
765 efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number, struct efx_tx_queue *tx_queue) argument
781 struct efx_tx_queue *tx_queue; local
1091 struct efx_tx_queue *tx_queue; local
1117 struct efx_tx_queue *tx_queue; local
1179 struct efx_tx_queue *tx_queue; local
[all...]
mcdi_functions.c
163 int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue) argument
167 bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
168 bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
169 size_t entries = tx_queue->txd.len / EFX_BUF_SIZE;
170 struct efx_channel *channel = tx_queue->channel;
171 struct efx_nic *efx = tx_queue->efx;
178 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
180 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label);
181 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
185 dma_addr = tx_queue->txd.dma_addr;
241 efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue) argument
246 efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue) argument
357 struct efx_tx_queue *tx_queue; local
[all...]
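
efx_mcdi_tx_init above describes the descriptor ring to firmware as txd.len / EFX_BUF_SIZE buffer-table entries, one DMA address per entry. A sketch of that sizing, assuming EFX_BUF_SIZE is the 4 KiB buffer-table unit and using made-up ring dimensions:

#include <stdint.h>
#include <stdio.h>

#define EFX_BUF_SIZE 4096u	/* assumed 4 KiB buffer-table unit */

int main(void)
{
	uint64_t txd_dma = 0x10000000;	/* hypothetical DMA base of the ring */
	size_t txd_len = 1024 * 16;	/* hypothetical: 1024 x 16-byte descriptors */
	size_t entries = txd_len / EFX_BUF_SIZE;	/* -> 4 buffer-table entries */

	for (size_t i = 0; i < entries; i++)
		printf("buffer[%zu] dma=%#llx\n", i,
		       (unsigned long long)(txd_dma + i * EFX_BUF_SIZE));
	return 0;
}
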
efx.h
22 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
25 netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
26 static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) argument
28 return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
30 tx_queue, skb);
32 void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
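
efx_enqueue_skb above dispatches through INDIRECT_CALL_2, which compares the function pointer against the expected implementations and calls them directly on a match, so the hot path avoids a (retpoline-penalised) indirect branch. A standalone sketch of the same pattern; the handlers are toys standing in for the driver's enqueue functions:

#include <stdio.h>

typedef int (*enqueue_fn)(int skb_len);

static int enqueue_a(int skb_len) { return skb_len; }	/* toy path A */
static int enqueue_b(int skb_len) { return -skb_len; }	/* toy path B */

static int dispatch(enqueue_fn f, int skb_len)
{
	if (f == enqueue_a)		/* direct call: no indirect branch */
		return enqueue_a(skb_len);
	if (f == enqueue_b)
		return enqueue_b(skb_len);
	return f(skb_len);		/* fallback: true indirect call */
}

int main(void)
{
	printf("%d %d\n", dispatch(enqueue_a, 64), dispatch(enqueue_b, 64));
	return 0;
}
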
/linux-master/drivers/net/ethernet/sfc/falcon/
tx.c
25 static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue, argument
28 unsigned int index = ef4_tx_queue_get_insert_index(tx_queue);
30 &tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)];
35 ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
43 u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue, argument
48 return ef4_tx_get_copy_buffer(tx_queue, buffer);
51 static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue, argument
57 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
72 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
148 ef4_enqueue_skb_copy(struct ef4_tx_queue *tx_queue, struct sk_buff *skb) argument
181 ef4_tx_map_chunk(struct ef4_tx_queue *tx_queue, dma_addr_t dma_addr, size_t len) argument
207 ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb) argument
269 ef4_enqueue_unwind(struct ef4_tx_queue *tx_queue) argument
297 ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb) argument
355 ef4_dequeue_buffers(struct ef4_tx_queue *tx_queue, unsigned int index, unsigned int *pkts_compl, unsigned int *bytes_compl) argument
398 struct ef4_tx_queue *tx_queue; local
414 ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue) argument
432 struct ef4_tx_queue *tx_queue; local
492 ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index) argument
534 ef4_tx_cb_page_count(struct ef4_tx_queue *tx_queue) argument
539 ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue) argument
583 ef4_init_tx_queue(struct ef4_tx_queue *tx_queue) argument
607 ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue) argument
629 ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue) argument
[all...]
tx.h
15 unsigned int ef4_tx_limit_len(struct ef4_tx_queue *tx_queue,
18 u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
21 int ef4_enqueue_skb_tso(struct ef4_tx_queue *tx_queue, struct sk_buff *skb,
nic.h
63 ef4_tx_desc(struct ef4_tx_queue *tx_queue, unsigned int index) argument
65 return ((ef4_qword_t *) (tx_queue->txd.buf.addr)) + index;
69 static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_queue) argument
71 if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
72 return tx_queue - EF4_TXQ_TYPE_OFFLOAD;
74 return tx_queue + EF4_TXQ_TYPE_OFFLOAD;
80 static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue, argument
83 unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
99 static inline bool ef4_nic_may_push_tx_desc(struct ef4_tx_queue *tx_queue, argument
102 bool was_empty = __ef4_nic_tx_is_empty(tx_queue, write_count);
315 ef4_nic_probe_tx(struct ef4_tx_queue *tx_queue) argument
319 ef4_nic_init_tx(struct ef4_tx_queue *tx_queue) argument
323 ef4_nic_remove_tx(struct ef4_tx_queue *tx_queue) argument
327 ef4_nic_push_buffers(struct ef4_tx_queue *tx_queue) argument
[all...]
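
ef4_tx_queue_partner above pairs each checksum-offload queue with its plain sibling by stepping the queue pointer by EF4_TXQ_TYPE_OFFLOAD, which works because the queues sit contiguously and the offload flag is a single bit of the queue index. A toy demonstration; the flag value 1 matches falcon's definition, but the array and queue numbers are hypothetical:

#include <stdio.h>

#define TXQ_TYPE_OFFLOAD 1u	/* assumed single-bit flag, like EF4_TXQ_TYPE_OFFLOAD */

struct toy_txq { unsigned int queue; };

static struct toy_txq *partner(struct toy_txq *q)
{
	if (q->queue & TXQ_TYPE_OFFLOAD)
		return q - TXQ_TYPE_OFFLOAD;	/* offload queue -> plain sibling */
	return q + TXQ_TYPE_OFFLOAD;		/* plain queue -> offload sibling */
}

int main(void)
{
	struct toy_txq queues[4] = { {0}, {1}, {2}, {3} };

	for (int i = 0; i < 4; i++)
		printf("queue %u <-> partner %u\n",
		       queues[i].queue, partner(&queues[i])->queue);
	return 0;
}
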
net_driver.h
402 * @tx_queue: TX queues for this channel
445 struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES]; member in struct:ef4_channel
664 * @tx_queue: TX DMA queues
1082 int (*tx_probe)(struct ef4_tx_queue *tx_queue);
1083 void (*tx_init)(struct ef4_tx_queue *tx_queue);
1084 void (*tx_remove)(struct ef4_tx_queue *tx_queue);
1085 void (*tx_write)(struct ef4_tx_queue *tx_queue);
1086 unsigned int (*tx_limit_len)(struct ef4_tx_queue *tx_queue,
1192 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
1206 return &channel->tx_queue[type];
1209 ef4_tx_queue_used(struct ef4_tx_queue *tx_queue) argument
1310 ef4_tx_queue_get_insert_index(const struct ef4_tx_queue *tx_queue) argument
1317 __ef4_tx_queue_get_insert_buffer(const struct ef4_tx_queue *tx_queue) argument
1324 ef4_tx_queue_get_insert_buffer(const struct ef4_tx_queue *tx_queue) argument
[all...]
farch.c
272 static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue) argument
277 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
279 ef4_writed_page(tx_queue->efx, &reg,
280 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
284 static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue, argument
293 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
297 ef4_writeo_page(tx_queue->efx, &reg,
298 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
306 ef4_farch_tx_write(struct ef4_tx_queue *tx_queue) argument
347 ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned int len) argument
363 ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue) argument
373 ef4_farch_tx_init(struct ef4_tx_queue *tx_queue) argument
429 ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue) argument
443 ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue) argument
458 ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue) argument
619 struct ef4_tx_queue *tx_queue; local
663 struct ef4_tx_queue *tx_queue; local
720 struct ef4_tx_queue *tx_queue; local
830 struct ef4_tx_queue *tx_queue; local
1092 struct ef4_tx_queue *tx_queue; local
[all...]
/linux-master/drivers/net/wireless/silabs/wfx/
queue.c
69 skb_queue_head_init(&wvif->tx_queue[i].normal);
70 skb_queue_head_init(&wvif->tx_queue[i].cab);
71 skb_queue_head_init(&wvif->tx_queue[i].offchan);
72 wvif->tx_queue[i].priority = priorities[i];
88 WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));
89 WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i]));
117 struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
138 queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
164 queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
221 if (!skb_queue_empty_lockless(&wvif->tx_queue[
[all...]
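
The wfx queue setup above gives each per-AC TX queue three skb lists (normal, cab, i.e. content-after-beacon, and offchan) plus a priority. A toy mirror of that layout; the queue count and priority values are invented, and simple counters stand in for struct sk_buff_head:

#include <stdio.h>

#define NUM_QUEUES 4	/* one per 802.11 access category */

struct toy_list { unsigned int qlen; };

struct toy_queue {
	struct toy_list normal, cab, offchan;	/* three sub-queues per AC */
	int priority;
};

int main(void)
{
	static const int priorities[NUM_QUEUES] = { 1, 2, 4, 8 };	/* hypothetical */
	struct toy_queue q[NUM_QUEUES];

	for (int i = 0; i < NUM_QUEUES; i++) {
		q[i].normal.qlen = q[i].cab.qlen = q[i].offchan.qlen = 0;
		q[i].priority = priorities[i];
		printf("queue %d priority %d\n", i, q[i].priority);
	}
	return 0;
}
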

Completed in 186 milliseconds
