Lines Matching refs:tx_queue

17 static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
19 return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
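Note: this listing appears to come from the sfc "siena" driver's TX common code; only the line of each statement that mentions tx_queue is shown, so multi-line statements look truncated. The two matched lines above size the per-queue copy buffers: ptr_mask + 1 is the (power-of-two) number of ring entries, and the unmatched second argument to DIV_ROUND_UP is the number of copy buffers that fit in one page. A minimal, self-contained sketch of the same arithmetic (userspace C; the EFX_TX_CB_ORDER value of 7, i.e. 128-byte copy buffers, is an assumption, not taken from the driver headers):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define PAGE_SIZE           4096u
    #define EFX_TX_CB_ORDER     7          /* assumed: 128-byte copy buffers */

    /* One copy buffer per ring entry, packed into whole pages. */
    static unsigned int tx_cb_page_count(unsigned int ptr_mask)
    {
            return DIV_ROUND_UP(ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER);
    }

    int main(void)
    {
            /* A 1024-entry ring: 1024 / (4096 >> 7) = 32 pages. */
            printf("%u\n", tx_cb_page_count(1023));
            return 0;
    }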
23 int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue)
25 struct efx_nic *efx = tx_queue->efx;
32 tx_queue->ptr_mask = entries - 1;
36 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
39 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
41 if (!tx_queue->buffer)
44 tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
45 sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
46 if (!tx_queue->cb_page) {
52 rc = efx_nic_probe_tx(tx_queue);
56 tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
60 kfree(tx_queue->cb_page);
61 tx_queue->cb_page = NULL;
63 kfree(tx_queue->buffer);
64 tx_queue->buffer = NULL;
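The probe lines follow the usual two-stage allocate/unwind shape: the software descriptor array and the copy-buffer page array are kcalloc'd, the hardware ring is probed, and each failure point frees (and NULLs) everything allocated before it. A stripped-down sketch of that goto-unwind idiom (illustrative names and sizes, not the driver's):

    #include <stdlib.h>
    #include <errno.h>

    struct toy_txq {
            void *buffer;               /* software descriptor ring */
            void *cb_page;              /* copy-buffer page array */
    };

    static int toy_txq_probe(struct toy_txq *q, size_t entries, size_t pages)
    {
            int rc;

            q->buffer = calloc(entries, 64);
            if (!q->buffer)
                    return -ENOMEM;

            q->cb_page = calloc(pages, 16);
            if (!q->cb_page) {
                    rc = -ENOMEM;
                    goto fail1;
            }

            return 0;                   /* hardware-ring probe would go here */

    fail1:
            free(q->buffer);
            q->buffer = NULL;
            return rc;
    }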
68 void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue)
70 struct efx_nic *efx = tx_queue->efx;
73 "initialising TX queue %d\n", tx_queue->queue);
75 tx_queue->insert_count = 0;
76 tx_queue->notify_count = 0;
77 tx_queue->write_count = 0;
78 tx_queue->packet_write_count = 0;
79 tx_queue->old_write_count = 0;
80 tx_queue->read_count = 0;
81 tx_queue->old_read_count = 0;
82 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
83 tx_queue->xmit_pending = false;
84 tx_queue->timestamping = (efx_siena_ptp_use_mac_tx_timestamps(efx) &&
85 tx_queue->channel == efx_siena_ptp_channel(efx));
86 tx_queue->completed_timestamp_major = 0;
87 tx_queue->completed_timestamp_minor = 0;
89 tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
90 tx_queue->tso_version = 0;
93 efx_nic_init_tx(tx_queue);
95 tx_queue->initialised = true;
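The init lines reset a set of free-running counters (insert/write/read and their "old" shadows) and re-derive per-queue flags before the hardware ring is initialised. The counters are only reduced modulo the ring size when used as an index, which is why a plain reset to zero is enough; a toy model of the relationship between them (illustrative names):

    /* Free-running counters: insert_count >= write_count >= read_count,
     * all unsigned, so differences stay correct across wrap-around. */
    struct toy_txq_counters {
            unsigned int insert_count;   /* buffers filled by the xmit path */
            unsigned int write_count;    /* descriptors handed to the NIC */
            unsigned int read_count;     /* completions processed so far */
    };

    static unsigned int toy_fill_level(const struct toy_txq_counters *c)
    {
            return c->insert_count - c->read_count;   /* wrap-safe */
    }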
98 void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue)
102 if (!tx_queue->buffer)
105 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
106 "destroying TX queue %d\n", tx_queue->queue);
107 efx_nic_remove_tx(tx_queue);
109 if (tx_queue->cb_page) {
110 for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
111 efx_siena_free_buffer(tx_queue->efx,
112 &tx_queue->cb_page[i]);
113 kfree(tx_queue->cb_page);
114 tx_queue->cb_page = NULL;
117 kfree(tx_queue->buffer);
118 tx_queue->buffer = NULL;
119 tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
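The remove lines pair each allocation from probe with a free, NULL the pointers afterwards, and clear the channel's tx_queue_by_type slot; together with the early "if (!tx_queue->buffer) return" guard this makes teardown safe to call on a queue that was never (or already) probed. A compact sketch of that free-and-NULL idiom (illustrative types):

    #include <stdlib.h>

    struct toy_txq_bufs {
            void  *buffer;
            void **cb_page;
            unsigned int n_pages;
    };

    static void toy_txq_remove(struct toy_txq_bufs *q)
    {
            unsigned int i;

            if (!q->buffer)             /* never probed, or already removed */
                    return;

            if (q->cb_page) {
                    for (i = 0; i < q->n_pages; i++)
                            free(q->cb_page[i]);
                    free(q->cb_page);
                    q->cb_page = NULL;
            }

            free(q->buffer);
            q->buffer = NULL;
    }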
122 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
128 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
146 if (tx_queue->timestamping &&
147 (tx_queue->completed_timestamp_major ||
148 tx_queue->completed_timestamp_minor)) {
152 efx_siena_ptp_nic_to_kernel_time(tx_queue);
155 tx_queue->completed_timestamp_major = 0;
156 tx_queue->completed_timestamp_minor = 0;
159 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
161 tx_queue->queue, tx_queue->read_count);
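efx_dequeue_buffer() unmaps the buffer against the PCI device's DMA device and, for a timestamping queue with a latched completion timestamp, converts the NIC timestamp and hands it to the stack before clearing the latch. A hedged reconstruction of that branch, built from the matched lines (the struct skb_shared_hwtstamps / skb_tstamp_tx plumbing is assumed from the usual kernel pattern and is not visible in this listing):

    	if (tx_queue->timestamping &&
    	    (tx_queue->completed_timestamp_major ||
    	     tx_queue->completed_timestamp_minor)) {
    		struct skb_shared_hwtstamps hwtstamp;

    		hwtstamp.hwtstamp =
    			efx_siena_ptp_nic_to_kernel_time(tx_queue);
    		skb_tstamp_tx(skb, &hwtstamp);

    		tx_queue->completed_timestamp_major = 0;
    		tx_queue->completed_timestamp_minor = 0;
    	}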
170 void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue)
174 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
175 "shutting down TX queue %d\n", tx_queue->queue);
177 if (!tx_queue->buffer)
181 while (tx_queue->read_count != tx_queue->write_count) {
184 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
185 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
187 ++tx_queue->read_count;
189 tx_queue->xmit_pending = false;
190 netdev_tx_reset_queue(tx_queue->core_txq);
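The fini path drains every outstanding entry between read_count and write_count, masking the free-running counter with ptr_mask to index the power-of-two ring, then resets BQL state with netdev_tx_reset_queue(). A self-contained toy version of that drain loop (ring size and types are illustrative; the sketches below reuse them):

    #define TOY_RING_ENTRIES  512u                 /* must be a power of two */
    #define TOY_PTR_MASK      (TOY_RING_ENTRIES - 1)

    struct toy_buf { int in_use; };

    static void toy_ring_drain(struct toy_buf *ring, unsigned int *read_count,
                               unsigned int write_count)
    {
            while (*read_count != write_count) {
                    struct toy_buf *b = &ring[*read_count & TOY_PTR_MASK];

                    b->in_use = 0;      /* stands in for efx_dequeue_buffer() */
                    ++*read_count;
            }
    }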
198 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
203 struct efx_nic *efx = tx_queue->efx;
206 stop_index = (index + 1) & tx_queue->ptr_mask;
207 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
210 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
215 tx_queue->queue, read_ptr);
220 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
222 ++tx_queue->read_count;
223 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
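efx_dequeue_buffers() walks completions up to and including "index": stop_index is one past the last completed entry, and the masked read pointer chases it, with wrap-around handled by ptr_mask. Continuing the toy ring from the previous sketch:

    static void toy_complete_up_to(struct toy_buf *ring, unsigned int *read_count,
                                   unsigned int index)
    {
            unsigned int stop_index = (index + 1) & TOY_PTR_MASK;
            unsigned int read_ptr   = *read_count & TOY_PTR_MASK;

            while (read_ptr != stop_index) {
                    ring[read_ptr].in_use = 0;
                    ++*read_count;
                    read_ptr = *read_count & TOY_PTR_MASK;
            }
    }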
227 void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
229 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
230 tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
231 if (tx_queue->read_count == tx_queue->old_write_count) {
234 tx_queue->empty_read_count =
235 tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
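Two idioms show up in efx_siena_xmit_done_check_empty(): the (int) cast turns the unsigned difference into a wrap-safe "has the reader caught up with the last observed write?" test, and the empty snapshot ORs a validity flag into the recorded read_count. A small sketch of both (the flag value is an assumption, not taken from the driver headers):

    #include <stdint.h>
    #include <stdbool.h>

    #define TOY_EMPTY_COUNT_VALID  (1u << 31)   /* assumed top-bit flag */

    static bool toy_caught_up(uint32_t read_count, uint32_t old_write_count)
    {
            /* Signed interpretation of the unsigned difference is wrap-safe. */
            return (int32_t)(read_count - old_write_count) >= 0;
    }

    static uint32_t toy_record_empty(uint32_t read_count)
    {
            return read_count | TOY_EMPTY_COUNT_VALID;
    }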
240 void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
243 struct efx_nic *efx = tx_queue->efx;
245 EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
247 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
248 tx_queue->pkts_compl += pkts_compl;
249 tx_queue->bytes_compl += bytes_compl;
252 ++tx_queue->merge_events;
259 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
262 fill_level = efx_channel_tx_fill_level(tx_queue->channel);
264 netif_tx_wake_queue(tx_queue->core_txq);
267 efx_siena_xmit_done_check_empty(tx_queue);
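efx_siena_xmit_done() accumulates packet/byte completion counts, counts merged completion events, and wakes the stopped core netdev queue once the channel's fill level has dropped far enough. A minimal sketch of that wake decision (the threshold is illustrative; the driver's own threshold is not visible in this listing):

    #include <stdbool.h>

    static bool toy_should_wake(unsigned int insert_count, unsigned int read_count,
                                unsigned int wake_threshold, bool queue_stopped)
    {
            unsigned int fill_level = insert_count - read_count;   /* wrap-safe */

            return queue_stopped && fill_level <= wake_threshold;
    }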
270 /* Remove buffers put into a tx_queue for the current packet.
273 void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
281 while (tx_queue->insert_count != insert_count) {
282 --tx_queue->insert_count;
283 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
284 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
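efx_siena_enqueue_unwind() rolls insert_count back to the value saved before mapping started, releasing each partially built buffer on the way; this is the error path for a failed enqueue. Toy version, reusing the ring types from the fini sketch above:

    static void toy_unwind_to(struct toy_buf *ring, unsigned int *insert_count,
                              unsigned int saved_insert_count)
    {
            while (*insert_count != saved_insert_count) {
                    --*insert_count;
                    ring[*insert_count & TOY_PTR_MASK].in_use = 0;
            }
    }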
288 struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
291 const struct efx_nic_type *nic_type = tx_queue->efx->type;
297 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
300 dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
309 ++tx_queue->insert_count;
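efx_siena_tx_map_chunk() carves one DMA-contiguous region into as many descriptors as needed, with each descriptor's length capped by the NIC-type hook tx_limit_len(); insert_count advances once per descriptor. A self-contained sketch of that splitting loop (the per-descriptor limit here is an assumed constant):

    #define TOY_TX_DESC_LEN_MAX  16384u      /* assumed per-descriptor limit */

    static unsigned int toy_map_chunk(unsigned long long dma_addr, unsigned int len,
                                      unsigned int *insert_count)
    {
            unsigned int ndesc = 0;

            do {
                    unsigned int dma_len =
                            len < TOY_TX_DESC_LEN_MAX ? len : TOY_TX_DESC_LEN_MAX;

                    /* A real driver would fill ring[*insert_count & mask]
                     * with dma_addr/dma_len here. */
                    dma_addr += dma_len;
                    len -= dma_len;
                    ++*insert_count;
                    ndesc++;
            } while (len);

            return ndesc;
    }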
329 int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
332 struct efx_nic *efx = tx_queue->efx;
359 tx_queue->tso_long_headers++;
360 efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len);
371 buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
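efx_siena_tx_map_data() maps the packet for DMA: the linear header first (bumping tso_long_headers when a long TSO header needs special handling) and then each page fragment, with every mapping fed through efx_siena_tx_map_chunk(). The matched lines only show fragments of that flow; a simplified, hedged outline of the general header-then-fragments mapping pattern (not the driver's exact code; error unwinding and the TSO header-copy case are omitted):

    	dma_addr = dma_map_single(dma_dev, skb->data, header_len, DMA_TO_DEVICE);
    	if (dma_mapping_error(dma_dev, dma_addr))
    		return -EIO;
    	efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len);

    	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
    		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
    		unsigned int len = skb_frag_size(frag);

    		dma_addr = skb_frag_dma_map(dma_dev, frag, 0, len, DMA_TO_DEVICE);
    		if (dma_mapping_error(dma_dev, dma_addr))
    			return -EIO;
    		efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
    	}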
431 int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue,
444 efx_enqueue_skb(tx_queue, skb);
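efx_siena_tx_tso_fallback() handles packets the hardware TSO path cannot take by segmenting them in software and enqueueing each resulting skb individually. Based on the matched lines and the usual shape of this helper elsewhere in the sfc driver, the body is likely along these lines (a hedged reconstruction, not a verified copy):

    	struct sk_buff *segments, *next;

    	segments = skb_gso_segment(skb, 0);
    	if (IS_ERR(segments))
    		return PTR_ERR(segments);

    	dev_consume_skb_any(skb);

    	skb_list_walk_safe(segments, skb, next) {
    		skb_mark_not_on_list(skb);
    		efx_enqueue_skb(tx_queue, skb);
    	}

    	return 0;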