Lines Matching defs:tx_queue

25 static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
28 unsigned int index = ef4_tx_queue_get_insert_index(tx_queue);
30 &tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)];
35 ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
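
The lookup on lines 28-30 turns a descriptor-ring index into a copy-buffer page: each DMA page holds PAGE_SIZE >> EF4_TX_CB_ORDER copy buffers, so the high bits of the index select the page and the low bits select the buffer within it. A minimal userspace sketch of that mapping, assuming the usual PAGE_SHIFT of 12 and an EF4_TX_CB_ORDER of 7 (128-byte copy buffers) - both values assumed, not taken from this listing:

    #include <stdio.h>

    #define PAGE_SHIFT        12    /* assumed: 4 KiB pages */
    #define TX_CB_ORDER       7     /* assumed: 128-byte copy buffers */
    #define CB_PER_PAGE_SHIFT (PAGE_SHIFT - TX_CB_ORDER)

    int main(void)
    {
            unsigned int index = 100;       /* hypothetical insert index */

            /* high bits: which DMA page; low bits: offset inside it */
            unsigned int page = index >> CB_PER_PAGE_SHIFT;
            unsigned int offset = (index & ((1u << CB_PER_PAGE_SHIFT) - 1))
                                  << TX_CB_ORDER;

            /* with these constants: index 100 -> page 3, offset 512 */
            printf("index %u -> page %u, offset %u\n", index, page, offset);
            return 0;
    }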
43 u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
48 return ef4_tx_get_copy_buffer(tx_queue, buffer);
51 static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
57 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
72 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
74 tx_queue->queue, tx_queue->read_count);
148 static int ef4_enqueue_skb_copy(struct ef4_tx_queue *tx_queue,
151 unsigned int min_len = tx_queue->tx_min_size;
159 buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
161 copy_buffer = ef4_tx_get_copy_buffer(tx_queue, buffer);
177 ++tx_queue->insert_count;
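
Line 151 caches tx_queue->tx_min_size up front because short frames are padded inside the copy buffer before the descriptor is filled in (line 599 shows workaround 15592 setting that minimum to 33 bytes). A simplified sketch of the copy-and-pad step; copy_and_pad is an illustrative name, not a driver helper:

    #include <string.h>

    /* Copy a packet into a copy buffer and zero-pad it up to min_len.
     * Returns the length to program into the descriptor, or 0 if the
     * frame cannot fit. */
    static size_t copy_and_pad(unsigned char *copy_buffer, size_t cb_size,
                               const void *pkt, size_t len, size_t min_len)
    {
            if (len > cb_size || min_len > cb_size)
                    return 0;
            memcpy(copy_buffer, pkt, len);
            if (len < min_len) {
                    memset(copy_buffer + len, 0, min_len - len);
                    len = min_len;
            }
            return len;
    }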
181 static struct ef4_tx_buffer *ef4_tx_map_chunk(struct ef4_tx_queue *tx_queue,
185 const struct ef4_nic_type *nic_type = tx_queue->efx->type;
191 buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
192 dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
199 ++tx_queue->insert_count;
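
ef4_tx_map_chunk loops over lines 191-199: tx_limit_len lets the NIC-specific code cap how much of a DMA span a single descriptor may carry, and the function keeps emitting descriptors until the span is exhausted. A generic userspace sketch of that splitting loop, with MAX_DESC_LEN standing in for whatever limit tx_limit_len enforces:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_DESC_LEN 4096u      /* assumed per-descriptor limit */

    /* Emit (addr, len) descriptors for one span; assumes len > 0,
     * as in the driver's do/while. */
    static void map_chunk(uint64_t dma_addr, size_t len)
    {
            do {
                    /* one descriptor of at most MAX_DESC_LEN bytes */
                    size_t dma_len = len < MAX_DESC_LEN ? len : MAX_DESC_LEN;

                    printf("desc: addr=%#llx len=%zu\n",
                           (unsigned long long)dma_addr, dma_len);
                    dma_addr += dma_len;
                    len -= dma_len;
            } while (len);
    }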
207 static int ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
209 struct ef4_nic *efx = tx_queue->efx;
234 buffer = ef4_tx_map_chunk(tx_queue, dma_addr, len);
266 /* Remove buffers put into a tx_queue. None of the buffers must have
269 static void ef4_enqueue_unwind(struct ef4_tx_queue *tx_queue)
274 while (tx_queue->insert_count != tx_queue->write_count) {
275 --tx_queue->insert_count;
276 buffer = __ef4_tx_queue_get_insert_buffer(tx_queue);
277 ef4_dequeue_buffer(tx_queue, buffer, NULL, NULL);
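
Lines 274-277 roll back a failed enqueue: insert_count is walked backwards to write_count, releasing every buffer that was inserted but never pushed to hardware. The same pattern in a standalone sketch (the ring type and names are illustrative):

    /* Undo insertions that were never written to hardware. */
    struct ring {
            unsigned int insert_count;  /* next slot software fills */
            unsigned int write_count;   /* first slot not yet pushed to HW */
            unsigned int mask;          /* entries - 1; entries a power of two */
    };

    static void release_slot(struct ring *r, unsigned int slot)
    {
            (void)r;
            (void)slot;     /* would unmap DMA and free the buffer here */
    }

    static void enqueue_unwind(struct ring *r)
    {
            while (r->insert_count != r->write_count) {
                    --r->insert_count;
                    release_slot(r, r->insert_count & r->mask);
            }
    }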
297 netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
305 if (skb_len < tx_queue->tx_min_size ||
308 if (ef4_enqueue_skb_copy(tx_queue, skb))
310 tx_queue->cb_packets++;
315 if (!data_mapped && (ef4_tx_map_data(tx_queue, skb)))
319 netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
322 if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) {
323 struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);
332 ef4_nic_push_buffers(tx_queue);
334 tx_queue->xmit_more_available = netdev_xmit_more();
337 tx_queue->tx_packets++;
339 ef4_tx_maybe_stop_queue(tx_queue);
345 ef4_enqueue_unwind(tx_queue);
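
Lines 322-334 implement doorbell coalescing: buffers are pushed to the NIC only when the stack signals that no further packets follow (netdev_xmit_more() is false) or the queue has been stopped; otherwise the push is deferred and remembered in xmit_more_available. A sketch of that decision, with push_doorbell standing in for ef4_nic_push_buffers:

    #include <stdbool.h>

    static bool push_pending;   /* plays the role of xmit_more_available */

    static void push_doorbell(void)
    {
            /* would write the ring's write pointer to the NIC here */
            push_pending = false;
    }

    static void maybe_push(bool xmit_more, bool queue_stopped)
    {
            if (!xmit_more || queue_stopped)
                    push_doorbell();        /* end of burst: flush now */
            else
                    push_pending = true;    /* batch into a later doorbell */
    }

Deferring the doorbell lets a burst of packets queued back-to-back reach the hardware with a single MMIO write instead of one per packet.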
355 static void ef4_dequeue_buffers(struct ef4_tx_queue *tx_queue,
360 struct ef4_nic *efx = tx_queue->efx;
363 stop_index = (index + 1) & tx_queue->ptr_mask;
364 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
367 struct ef4_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
373 tx_queue->queue, read_ptr);
378 ef4_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
380 ++tx_queue->read_count;
381 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
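
Lines 363-381 are the completion walk: the event carries the index of the last completed descriptor, stop_index is one slot past it, and the free-running read_count advances until its masked value reaches stop_index. The same walk over a power-of-two ring in isolation:

    /* Consume ring entries up to and including `index`. Counters run
     * freely; only masked values address the ring. */
    struct cring {
            unsigned int read_count;    /* free-running consumer counter */
            unsigned int mask;          /* entries - 1 */
    };

    static void complete_slot(unsigned int slot)
    {
            (void)slot;     /* would release the buffer here */
    }

    static void dequeue_completed(struct cring *r, unsigned int index)
    {
            unsigned int stop = (index + 1) & r->mask;
            unsigned int ptr = r->read_count & r->mask;

            while (ptr != stop) {
                    complete_slot(ptr);
                    ++r->read_count;
                    ptr = r->read_count & r->mask;
            }
    }

Because the counters are free-running and the ring size is a power of two, wraparound needs no special casing: unsigned subtraction of two counters, as in the fill_level computation on line 517, always yields the number of outstanding entries.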
398 struct ef4_tx_queue *tx_queue;
409 tx_queue = ef4_get_tx_queue(efx, index, type);
411 return ef4_enqueue_skb(tx_queue, skb);
414 void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue)
416 struct ef4_nic *efx = tx_queue->efx;
419 tx_queue->core_txq =
421 tx_queue->queue / EF4_TXQ_TYPES +
422 ((tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
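
Lines 419-422 map a hardware queue number onto a core netdev TX queue: each channel owns EF4_TXQ_TYPES hardware queues, so integer division recovers the channel index, and high-priority queues are offset past the normal ones by the channel count. A sketch of the arithmetic, assuming EF4_TXQ_TYPES is 4 and EF4_TXQ_TYPE_HIGHPRI is bit 1 (both values assumed, not taken from this listing):

    #include <stdio.h>

    #define TXQ_TYPES        4  /* assumed: HW queues per channel */
    #define TXQ_TYPE_HIGHPRI 2  /* assumed: high-priority flag bit */

    int main(void)
    {
            unsigned int n_tx_channels = 8;     /* hypothetical */
            unsigned int queue;

            for (queue = 0; queue < 2 * TXQ_TYPES; queue++) {
                    unsigned int core = queue / TXQ_TYPES +
                            ((queue & TXQ_TYPE_HIGHPRI) ? n_tx_channels : 0);
                    printf("hw queue %u -> core txq %u\n", queue, core);
            }
            return 0;
    }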
432 struct ef4_tx_queue *tx_queue;
457 ef4_for_each_possible_channel_tx_queue(tx_queue,
459 if (!(tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI))
461 if (!tx_queue->buffer) {
462 rc = ef4_probe_tx_queue(tx_queue);
466 if (!tx_queue->initialised)
467 ef4_init_tx_queue(tx_queue);
468 ef4_init_tx_queue_core_txq(tx_queue);
492 void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
495 struct ef4_nic *efx = tx_queue->efx;
499 EF4_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
501 ef4_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
502 tx_queue->pkts_compl += pkts_compl;
503 tx_queue->bytes_compl += bytes_compl;
506 ++tx_queue->merge_events;
513 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
516 txq2 = ef4_tx_queue_partner(tx_queue);
517 fill_level = max(tx_queue->insert_count - tx_queue->read_count,
520 netif_tx_wake_queue(tx_queue->core_txq);
524 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
525 tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
526 if (tx_queue->read_count == tx_queue->old_write_count) {
528 tx_queue->empty_read_count =
529 tx_queue->read_count | EF4_EMPTY_COUNT_VALID;
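
Lines 524-529 record the point at which the queue was last seen empty: once read_count catches up with the writer, the count is stored with EF4_EMPTY_COUNT_VALID set, packing "was empty" and "at which count" into one word so the transmit path can detect an empty-to-busy transition with a single read. A sketch of that encoding, assuming the valid flag occupies the counter's top bit:

    #include <stdint.h>
    #include <stdbool.h>

    #define EMPTY_COUNT_VALID 0x80000000u   /* assumed: top bit marks validity */

    /* Record that the queue was empty at this read_count. */
    static uint32_t mark_empty(uint32_t read_count)
    {
            return read_count | EMPTY_COUNT_VALID;
    }

    /* Was the queue seen empty, and if so at which count? */
    static bool was_empty(uint32_t empty_read_count, uint32_t *at)
    {
            if (!(empty_read_count & EMPTY_COUNT_VALID))
                    return false;
            *at = empty_read_count & ~EMPTY_COUNT_VALID;
            return true;
    }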
534 static unsigned int ef4_tx_cb_page_count(struct ef4_tx_queue *tx_queue)
536 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EF4_TX_CB_ORDER);
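
Line 536 sizes the copy-buffer page array: PAGE_SIZE >> EF4_TX_CB_ORDER is the number of copy buffers per DMA page (32, under the 4 KiB page / 128-byte buffer assumption above), so a 1024-entry ring needs DIV_ROUND_UP(1024, 32) = 32 pages. The rounding idiom itself, as the kernel defines it:

    /* Ceiling division for positive integers. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* e.g. DIV_ROUND_UP(1024, 32) == 32, DIV_ROUND_UP(1000, 32) == 32,
     *      DIV_ROUND_UP(1025, 32) == 33 */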
539 int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue)
541 struct ef4_nic *efx = tx_queue->efx;
548 tx_queue->ptr_mask = entries - 1;
552 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
555 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
557 if (!tx_queue->buffer)
560 tx_queue->cb_page = kcalloc(ef4_tx_cb_page_count(tx_queue),
561 sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
562 if (!tx_queue->cb_page) {
568 rc = ef4_nic_probe_tx(tx_queue);
575 kfree(tx_queue->cb_page);
576 tx_queue->cb_page = NULL;
578 kfree(tx_queue->buffer);
579 tx_queue->buffer = NULL;
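
Lines 555-579 follow the usual allocate/probe/unwind shape: each resource is released in reverse order through cascading labels if a later step fails. A condensed userspace sketch of the pattern (names, sizes and the hw_probe stub are illustrative):

    #include <stdlib.h>
    #include <errno.h>

    static int hw_probe(void) { return 0; }   /* stand-in for ef4_nic_probe_tx */

    static int probe(void **buffer, void **cb_page)
    {
            int rc;

            *buffer = calloc(1024, sizeof(void *));
            if (!*buffer)
                    return -ENOMEM;

            *cb_page = calloc(32, sizeof(void *));
            if (!*cb_page) {
                    rc = -ENOMEM;
                    goto fail1;
            }

            rc = hw_probe();
            if (rc)
                    goto fail2;
            return 0;

    fail2:
            free(*cb_page);
            *cb_page = NULL;
    fail1:          /* note: fail2 falls through to fail1 */
            free(*buffer);
            *buffer = NULL;
            return rc;
    }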
583 void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue)
585 struct ef4_nic *efx = tx_queue->efx;
588 "initialising TX queue %d\n", tx_queue->queue);
590 tx_queue->insert_count = 0;
591 tx_queue->write_count = 0;
592 tx_queue->old_write_count = 0;
593 tx_queue->read_count = 0;
594 tx_queue->old_read_count = 0;
595 tx_queue->empty_read_count = 0 | EF4_EMPTY_COUNT_VALID;
596 tx_queue->xmit_more_available = false;
599 tx_queue->tx_min_size = EF4_WORKAROUND_15592(efx) ? 33 : 0;
602 ef4_nic_init_tx(tx_queue);
604 tx_queue->initialised = true;
607 void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue)
611 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
612 "shutting down TX queue %d\n", tx_queue->queue);
614 if (!tx_queue->buffer)
618 while (tx_queue->read_count != tx_queue->write_count) {
620 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
621 ef4_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
623 ++tx_queue->read_count;
625 tx_queue->xmit_more_available = false;
626 netdev_tx_reset_queue(tx_queue->core_txq);
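
Lines 319 and 626 bracket the stack's byte accounting for this queue: bytes are reported when queued (netdev_tx_sent_queue), credited back as completions arrive, and forgotten entirely when the queue is drained (netdev_tx_reset_queue) so stale counts cannot wedge it. A toy model of that sent/completed/reset bookkeeping:

    #include <stdio.h>

    static unsigned long bytes_in_flight;   /* what the stack thinks is queued */

    static void tx_sent(unsigned long bytes)      { bytes_in_flight += bytes; }
    static void tx_completed(unsigned long bytes) { bytes_in_flight -= bytes; }
    static void tx_reset(void)                    { bytes_in_flight = 0; }

    int main(void)
    {
            tx_sent(1500);
            tx_sent(60);
            tx_completed(1500);
            tx_reset();     /* queue torn down: drop the in-flight count */
            printf("in flight: %lu\n", bytes_in_flight);
            return 0;
    }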
629 void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue)
633 if (!tx_queue->buffer)
636 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
637 "destroying TX queue %d\n", tx_queue->queue);
638 ef4_nic_remove_tx(tx_queue);
640 if (tx_queue->cb_page) {
641 for (i = 0; i < ef4_tx_cb_page_count(tx_queue); i++)
642 ef4_nic_free_buffer(tx_queue->efx,
643 &tx_queue->cb_page[i]);
644 kfree(tx_queue->cb_page);
645 tx_queue->cb_page = NULL;
648 kfree(tx_queue->buffer);
649 tx_queue->buffer = NULL;