Search scope: /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/sfc/

Lines Matching defs:tx_queue

28  * The tx_queue descriptor ring fill-level must fall below this value
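The comment at line 28 refers to the driver's fill-level bookkeeping: both ends of the ring are tracked with free-running counters, and occupancy is their difference. A minimal stand-alone sketch of that scheme (RING_SIZE, RING_MASK and the field names are hypothetical stand-ins for EFX_TXQ_SIZE, EFX_TXQ_MASK and the efx_tx_queue counters):

/* Hypothetical sketch: occupancy and slot index of a power-of-two
 * descriptor ring tracked with free-running (wrapping) counters. */
#define RING_SIZE 1024u              /* stand-in for EFX_TXQ_SIZE */
#define RING_MASK (RING_SIZE - 1u)   /* stand-in for EFX_TXQ_MASK */

struct ring {
        unsigned insert_count;       /* bumped once per descriptor queued */
        unsigned read_count;         /* bumped once per completion        */
};

/* Unsigned subtraction stays correct across wrap-around as long as
 * the counters never drift more than RING_SIZE apart. */
static unsigned ring_fill_level(const struct ring *r)
{
        return r->insert_count - r->read_count;
}

/* Masking turns the free-running counter into an array index. */
static unsigned ring_slot(unsigned count)
{
        return count & RING_MASK;
}

Because the counters are never reset, an empty ring (equal counters) and a full one (difference of RING_SIZE) stay distinguishable without a separate flag.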
41 if (!channel->tx_queue)
51 channel->tx_queue->queue / EFX_TXQ_TYPES));
61 if (!channel->tx_queue)
71 channel->tx_queue->queue / EFX_TXQ_TYPES));
77 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
81 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
97 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
99 tx_queue->queue, tx_queue->read_count);
123 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
125 static void efx_fini_tso(struct efx_tx_queue *tx_queue);
126 static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
129 static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
134 buffer->tsoh->next = tx_queue->tso_headers_free;
135 tx_queue->tso_headers_free = buffer->tsoh;
137 efx_tsoh_heap_free(tx_queue, buffer->tsoh);
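Lines 134-137 show how TSO headers are recycled: a header that came from the preallocated pool is pushed back onto the tso_headers_free list, while an oversized heap-allocated one goes through efx_tsoh_heap_free(). A sketch of the underlying free-list discipline, with hypothetical names:

/* Hypothetical sketch of a singly-linked header free list. */
struct tso_header {
        struct tso_header *next;
        /* header bytes and DMA address would follow */
};

static void tsoh_push(struct tso_header **free_list, struct tso_header *h)
{
        h->next = *free_list;                /* cf. lines 134-135 */
        *free_list = h;
}

static struct tso_header *tsoh_pop(struct tso_header **free_list)
{
        struct tso_header *h = *free_list;   /* cf. lines 1001-1002 */
        if (h)
                *free_list = h->next;
        return h;
}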
177 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
179 struct efx_nic *efx = tx_queue->efx;
192 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
195 return efx_enqueue_skb_tso(tx_queue, skb);
208 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
236 ++tx_queue->stopped;
241 tx_queue->old_read_count =
243 &tx_queue->read_count;
244 fill_level = (tx_queue->insert_count
245 - tx_queue->old_read_count);
250 --tx_queue->stopped;
253 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
254 buffer = &tx_queue->buffer[insert_ptr];
255 efx_tsoh_free(tx_queue, buffer);
271 ++tx_queue->insert_count;
298 efx_nic_push_buffers(tx_queue);
305 "fragments for DMA\n", tx_queue->queue, skb->len,
315 if (tx_queue->stopped == 1)
316 efx_stop_queue(tx_queue->channel);
320 while (tx_queue->insert_count != tx_queue->write_count) {
321 --tx_queue->insert_count;
322 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
323 buffer = &tx_queue->buffer[insert_ptr];
324 efx_dequeue_buffer(tx_queue, buffer);
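Lines 320-324 are efx_enqueue_skb's error path: every descriptor inserted since the last doorbell is unwound by walking insert_count back down to write_count and releasing each slot. The idiom in isolation (undo_buffer() stands in for efx_dequeue_buffer(); RING_MASK as in the earlier sketch):

/* Hypothetical sketch: rewind a ring to its last-pushed position. */
struct txq {
        unsigned insert_count;   /* next slot to fill          */
        unsigned write_count;    /* last position pushed to hw */
};

static void undo_buffer(unsigned slot) { /* unmap + free, elided */ (void)slot; }

static void txq_unwind(struct txq *q)
{
        while (q->insert_count != q->write_count) {
                --q->insert_count;
                undo_buffer(q->insert_count & RING_MASK);
        }
}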
346 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
349 struct efx_nic *efx = tx_queue->efx;
353 read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
356 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
360 tx_queue->queue, read_ptr);
365 efx_dequeue_buffer(tx_queue, buffer);
369 ++tx_queue->read_count;
370 read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
387 struct efx_tx_queue *tx_queue;
392 tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
394 tx_queue += EFX_TXQ_TYPE_OFFLOAD;
396 return efx_enqueue_skb(tx_queue, skb);
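Lines 392-394 select the hardware queue: the kernel's queue mapping picks a group of EFX_TXQ_TYPES queues in a flat array, and checksum-offload traffic takes the member at offset EFX_TXQ_TYPE_OFFLOAD. Schematically (the constant values are assumptions, not read from this tree):

/* Hypothetical sketch of the flat (core queue, type) indexing. */
#define TXQ_TYPES        2u   /* assumed value of EFX_TXQ_TYPES        */
#define TXQ_TYPE_OFFLOAD 1u   /* assumed value of EFX_TXQ_TYPE_OFFLOAD */

static unsigned txq_index(unsigned core_queue, int csum_offload)
{
        return core_queue * TXQ_TYPES +
               (csum_offload ? TXQ_TYPE_OFFLOAD : 0u);
}

The divisions at lines 51 and 71 are the inverse mapping: queue / EFX_TXQ_TYPES recovers the core queue number, presumably for the stop/wake helpers those fragments belong to.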
399 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
402 struct efx_nic *efx = tx_queue->efx;
406 efx_dequeue_buffers(tx_queue, index);
412 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
413 fill_level = tx_queue->insert_count - tx_queue->read_count;
420 if (tx_queue->stopped) {
421 tx_queue->stopped = 0;
422 efx_wake_queue(tx_queue->channel);
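efx_xmit_done (lines 399-422) is the completion half of the flow-control pair: it retires buffers up to the reported index and restarts a stopped queue once the fill level has dropped. A hedged sketch of the wake test (WAKE_THRESHOLD is hypothetical; the driver derives its own threshold):

/* Hypothetical sketch: restart the queue when room is back. */
#define WAKE_THRESHOLD 256u          /* made-up resume threshold */

struct txq_flow {
        unsigned insert_count, read_count;
        int stopped;
};

static void maybe_wake(struct txq_flow *q)
{
        unsigned fill_level = q->insert_count - q->read_count;

        if (q->stopped && fill_level < WAKE_THRESHOLD) {
                q->stopped = 0;      /* cf. lines 420-422 */
                /* efx_wake_queue(tx_queue->channel) in the driver */
        }
}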
429 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
431 struct efx_nic *efx = tx_queue->efx;
436 tx_queue->queue);
439 txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
440 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
441 if (!tx_queue->buffer)
444 tx_queue->buffer[i].continuation = true;
447 rc = efx_nic_probe_tx(tx_queue);
454 kfree(tx_queue->buffer);
455 tx_queue->buffer = NULL;
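efx_probe_tx_queue (lines 429-455) follows the standard probe shape: allocate the software ring, hand the queue to the NIC layer, and undo the allocation if that fails. In outline, with userspace calloc() standing in for the driver's kzalloc(..., GFP_KERNEL) and a stub standing in for efx_nic_probe_tx():

/* Hypothetical sketch of allocate-then-undo-on-failure. */
#include <stdlib.h>

struct txbuf { int continuation; /* other per-slot state elided */ };

static int hw_probe_tx(void) { return 0; }   /* stub for efx_nic_probe_tx() */

static int probe_txq(struct txbuf **ring, size_t entries)
{
        *ring = calloc(entries, sizeof(**ring));
        if (!*ring)
                return -1;                   /* -ENOMEM in the driver */
        if (hw_probe_tx() != 0) {
                free(*ring);                 /* cf. lines 454-455 */
                *ring = NULL;
                return -1;
        }
        return 0;
}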
459 void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
461 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
462 "initialising TX queue %d\n", tx_queue->queue);
464 tx_queue->insert_count = 0;
465 tx_queue->write_count = 0;
466 tx_queue->read_count = 0;
467 tx_queue->old_read_count = 0;
468 BUG_ON(tx_queue->stopped);
471 efx_nic_init_tx(tx_queue);
474 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
478 if (!tx_queue->buffer)
482 while (tx_queue->read_count != tx_queue->write_count) {
483 buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
484 efx_dequeue_buffer(tx_queue, buffer);
488 ++tx_queue->read_count;
492 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
494 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
495 "shutting down TX queue %d\n", tx_queue->queue);
498 efx_nic_fini_tx(tx_queue);
500 efx_release_tx_buffers(tx_queue);
503 efx_fini_tso(tx_queue);
506 if (tx_queue->stopped) {
507 tx_queue->stopped = 0;
508 efx_wake_queue(tx_queue->channel);
512 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
514 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
515 "destroying TX queue %d\n", tx_queue->queue);
516 efx_nic_remove_tx(tx_queue);
518 kfree(tx_queue->buffer);
519 tx_queue->buffer = NULL;
635 * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
637 static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
640 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
647 netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
658 tsoh->next = tx_queue->tso_headers_free;
659 tx_queue->tso_headers_free = tsoh;
667 static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
678 p = &tx_queue->tso_headers_free;
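efx_tsoh_block_free (lines 667-678) walks the free list through a pointer-to-pointer (p = &tx_queue->tso_headers_free), which lets it unlink nodes belonging to the dead block without special-casing the list head. The idiom, reusing the hypothetical tso_header type from the earlier sketch:

/* Hypothetical sketch: remove every node matching a predicate by
 * advancing a struct tso_header ** rather than a node pointer. */
static void unlink_matching(struct tso_header **p,
                            int (*match)(const struct tso_header *))
{
        while (*p) {
                if (match(*p))
                        *p = (*p)->next;     /* unlink: head and middle alike */
                else
                        p = &(*p)->next;     /* keep: step to the next link  */
        }
}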
690 efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
698 tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
701 if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
712 efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
714 pci_unmap_single(tx_queue->efx->pci_dev,
722 * @tx_queue: Efx TX queue
728 * @tx_queue full.
730 static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
735 struct efx_nic *efx = tx_queue->efx;
741 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
751 ++tx_queue->stopped;
755 tx_queue->old_read_count =
756 *(volatile unsigned *)&tx_queue->read_count;
757 fill_level = (tx_queue->insert_count
758 - tx_queue->old_read_count);
765 --tx_queue->stopped;
768 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
769 buffer = &tx_queue->buffer[insert_ptr];
770 ++tx_queue->insert_count;
772 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
773 tx_queue->read_count >
776 efx_tsoh_free(tx_queue, buffer);
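Lines 741-765 repeat the backpressure sequence already seen in efx_enqueue_skb: when the cached old_read_count makes the ring look full, stop the queue, re-read read_count through a volatile cast so the compiler cannot reuse a stale value, and resume immediately if the completion path has in fact caught up. A hedged sketch (the driver also involves a memory barrier, omitted here):

/* Hypothetical sketch of the stop / re-read / resume sequence. */
struct txq_bp {
        unsigned insert_count, read_count, old_read_count;
        int stopped;
};

static int txq_has_room(struct txq_bp *q, unsigned max_fill)
{
        if (q->insert_count - q->old_read_count < max_fill)
                return 1;                    /* fast path: cached view ok */

        ++q->stopped;                        /* pause the netdev queue */
        /* Fresh load: read_count advances from completion context. */
        q->old_read_count = *(volatile unsigned *)&q->read_count;
        if (q->insert_count - q->old_read_count < max_fill) {
                --q->stopped;                /* consumer caught up: resume */
                return 1;
        }
        return 0;                            /* genuinely full */
}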
810 static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
815 buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
816 efx_tsoh_free(tx_queue, buffer);
826 ++tx_queue->insert_count;
830 /* Remove descriptors put into a tx_queue. */
831 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
837 while (tx_queue->insert_count != tx_queue->write_count) {
838 --tx_queue->insert_count;
839 buffer = &tx_queue->buffer[tx_queue->insert_count &
841 efx_tsoh_free(tx_queue, buffer);
847 pci_unmap_single(tx_queue->efx->pci_dev,
851 pci_unmap_page(tx_queue->efx->pci_dev,
925 * @tx_queue: Efx TX queue
931 * space in @tx_queue.
933 static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
954 rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
978 * @tx_queue: Efx TX queue
985 static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
996 if (tx_queue->tso_headers_free == NULL) {
997 if (efx_tsoh_block_alloc(tx_queue))
1000 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
1001 tsoh = tx_queue->tso_headers_free;
1002 tx_queue->tso_headers_free = tsoh->next;
1005 tx_queue->tso_long_headers++;
1006 tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
1048 ++tx_queue->tso_packets;
1051 efx_tso_put_header(tx_queue, tsoh, st->header_len);
1059 * @tx_queue: Efx TX queue
1064 * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
1068 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1071 struct efx_nic *efx = tx_queue->efx;
1078 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
1100 if (tso_start_new_packet(tx_queue, skb, &state) < 0)
1104 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
1121 tso_start_new_packet(tx_queue, skb, &state) < 0)
1126 efx_nic_push_buffers(tx_queue);
1128 tx_queue->tso_bursts++;
1141 if (tx_queue->stopped == 1)
1142 efx_stop_queue(tx_queue->channel);
1155 efx_enqueue_unwind(tx_queue);
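efx_enqueue_skb_tso (lines 1068-1155) is the outer TSO loop: start a segment, fill it from the current fragment, start the next segment whenever one fills up, and ring the doorbell once at the end (line 1126). Reduced to a compilable toy model (TOY_MSS and the byte accounting are invented for illustration; the real helpers also map DMA and can fail):

/* Hypothetical toy model of the segmentation loop. */
#define TOY_MSS 1460u                        /* made-up segment payload */

struct tso_toy {
        unsigned bytes_left;                 /* payload still to send */
        unsigned room;                       /* space left in segment */
};

static int toy_start_packet(struct tso_toy *st)
{
        st->room = TOY_MSS;                  /* cf. tso_start_new_packet() */
        return 0;                            /* would be -ENOMEM on failure */
}

static void toy_fill(struct tso_toy *st)     /* cf. tso_fill_packet_with_fragment() */
{
        unsigned n = st->bytes_left < st->room ? st->bytes_left : st->room;
        st->bytes_left -= n;
        st->room -= n;
}

static int toy_segment(struct tso_toy *st)
{
        if (toy_start_packet(st) < 0)
                return -1;
        while (st->bytes_left) {
                toy_fill(st);
                if (st->bytes_left && st->room == 0 &&
                    toy_start_packet(st) < 0)
                        return -1;
        }
        /* one efx_nic_push_buffers() for the whole burst follows */
        return 0;
}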
1161 * Free up all TSO datastructures associated with tx_queue. This
1162 * routine should be called only once the tx_queue is both empty and
1165 static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1169 if (tx_queue->buffer) {
1171 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1174 while (tx_queue->tso_headers_free != NULL)
1175 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
1176 tx_queue->efx->pci_dev);