Lines matching refs:tq
(every reference to the tq transmit-queue pointer, struct vmxnet3_tx_queue *, in the Linux vmxnet3 network driver; each match below is prefixed with its line number in the source file)

116 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
118 return tq->stopped;
123 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
125 tq->stopped = false;
126 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
131 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
133 tq->stopped = false;
134 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
139 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
141 tq->stopped = true;
142 tq->num_stop++;
143 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
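
The four matches above cover the queue-state helpers almost completely. A minimal reconstruction; the static inline linkage and return types are assumptions, while the bodies come straight from the matched lines. The subqueue index is recovered by pointer arithmetic on the adapter's tx_queue array:

static inline bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        return tq->stopped;
}

static inline void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = false;
        netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}

static inline void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = false;
        netif_wake_subqueue(adapter->netdev, tq - adapter->tx_queue);
}

static inline void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = true;
        tq->num_stop++;         /* stat: how often the stack was throttled */
        netif_stop_subqueue(adapter->netdev, tq - adapter->tx_queue);
}
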
223 "%s: tq[%d] error 0x%x\n",
358 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
367 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
368 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
370 tbi = &tq->buf_info[eop_idx];
373 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
375 while (tq->tx_ring.next2comp != eop_idx) {
376 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
384 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
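
Lines 358-384 are the per-packet reclaim path: completions arrive in order, so everything from next2comp up to the EOP index belongs to one packet. A sketch; the skb release at the end falls in lines the listing elides and is an assumption:

static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
                  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
        struct vmxnet3_tx_buf_info *tbi;
        int entries = 0;

        /* no out-of-order completion: the EOP being reclaimed must point
         * back at the current SOP, and the descriptor must really be an EOP
         */
        BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
        BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

        tbi = &tq->buf_info[eop_idx];

        /* step one past EOP so the loop reclaims [next2comp, eop_idx] */
        VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

        while (tq->tx_ring.next2comp != eop_idx) {
                vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
                                     pdev);
                vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
                entries++;
        }

        dev_kfree_skb_any(tbi->skb);    /* assumed: free the completed skb */
        tbi->skb = NULL;

        return entries;
}
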
401 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
411 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
412 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
419 &gdesc->tcd), tq, adapter->pdev,
422 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
423 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
429 spin_lock(&tq->tx_lock);
430 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
431 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
432 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
434 vmxnet3_tq_wake(tq, adapter);
436 spin_unlock(&tq->tx_lock);
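
Lines 401-436 show the completion loop plus the wake-up policy: a completion entry is valid while its generation bit matches the ring's current gen, and a stopped queue is only woken once free descriptors exceed VMXNET3_WAKE_QUEUE_THRESHOLD, which avoids stop/wake ping-pong. A sketch; the dma_rmb() and the netif_carrier_ok() test sit in elided lines and are assumptions:

static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
                       struct vmxnet3_adapter *adapter)
{
        int completed = 0;
        union Vmxnet3_GenericDesc *gdesc;

        gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
        while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
                /* don't read the rest of the descriptor before its gen bit */
                dma_rmb();

                completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
                                               &gdesc->tcd), tq, adapter->pdev,
                                               adapter);
                vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
                gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
        }

        if (completed) {
                spin_lock(&tq->tx_lock);
                if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
                             vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
                             VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
                             netif_carrier_ok(adapter->netdev))) {
                        vmxnet3_tq_wake(tq, adapter);
                }
                spin_unlock(&tq->tx_lock);
        }
        return completed;
}
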
443 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
453 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
456 tbi = tq->buf_info + tq->tx_ring.next2comp;
467 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
474 for (i = 0; i < tq->tx_ring.size; i++)
475 BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
477 tq->tx_ring.gen = VMXNET3_INIT_GEN;
478 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
480 tq->comp_ring.gen = VMXNET3_INIT_GEN;
481 tq->comp_ring.next2proc = 0;
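
Lines 443-481 drain any in-flight buffers and return both rings to their power-on state (gen = VMXNET3_INIT_GEN, all cursors at 0). A sketch; the unmap and skb release inside the drain loop sit in elided lines and are assumptions:

static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
                   struct vmxnet3_adapter *adapter)
{
        int i;

        while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
                struct vmxnet3_tx_buf_info *tbi;

                tbi = tq->buf_info + tq->tx_ring.next2comp;

                vmxnet3_unmap_tx_buf(tbi, adapter->pdev);       /* assumed */
                if (tbi->skb) {                                 /* assumed */
                        dev_kfree_skb_any(tbi->skb);
                        tbi->skb = NULL;
                }
                vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
        }

        /* sanity check: everything must be unmapped by now */
        for (i = 0; i < tq->tx_ring.size; i++)
                BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE);

        tq->tx_ring.gen = VMXNET3_INIT_GEN;
        tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

        tq->comp_ring.gen = VMXNET3_INIT_GEN;
        tq->comp_ring.next2proc = 0;
}
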
486 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
489 if (tq->tx_ring.base) {
490 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
492 tq->tx_ring.base, tq->tx_ring.basePA);
493 tq->tx_ring.base = NULL;
495 if (tq->data_ring.base) {
497 tq->data_ring.size * tq->txdata_desc_size,
498 tq->data_ring.base, tq->data_ring.basePA);
499 tq->data_ring.base = NULL;
501 if (tq->comp_ring.base) {
502 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
504 tq->comp_ring.base, tq->comp_ring.basePA);
505 tq->comp_ring.base = NULL;
507 kfree(tq->buf_info);
508 tq->buf_info = NULL;
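
Lines 486-508 free the three DMA rings and the bookkeeping array. Each base pointer is NULLed after freeing, which is what lets the create path (line 593 below) reuse this function as its error-unwind. Reconstructed, with element sizes matching the allocations at lines 560-578:

static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
                   struct vmxnet3_adapter *adapter)
{
        if (tq->tx_ring.base) {
                dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
                                  sizeof(struct Vmxnet3_TxDesc),
                                  tq->tx_ring.base, tq->tx_ring.basePA);
                tq->tx_ring.base = NULL;
        }
        if (tq->data_ring.base) {
                dma_free_coherent(&adapter->pdev->dev,
                                  tq->data_ring.size * tq->txdata_desc_size,
                                  tq->data_ring.base, tq->data_ring.basePA);
                tq->data_ring.base = NULL;
        }
        if (tq->comp_ring.base) {
                dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
                                  sizeof(struct Vmxnet3_TxCompDesc),
                                  tq->comp_ring.base, tq->comp_ring.basePA);
                tq->comp_ring.base = NULL;
        }
        kfree(tq->buf_info);
        tq->buf_info = NULL;
}
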
524 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
530 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
532 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
533 tq->tx_ring.gen = VMXNET3_INIT_GEN;
535 memset(tq->data_ring.base, 0,
536 tq->data_ring.size * tq->txdata_desc_size);
539 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
541 tq->comp_ring.next2proc = 0;
542 tq->comp_ring.gen = VMXNET3_INIT_GEN;
545 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
546 for (i = 0; i < tq->tx_ring.size; i++)
547 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
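
Lines 524-547 zero all three rings, reset the fill/completion cursors and generation bits, and mark every buf_info slot unmapped. This one is nearly complete in the listing; reconstructed:

static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
                struct vmxnet3_adapter *adapter)
{
        int i;

        /* reset the tx ring contents and positions */
        memset(tq->tx_ring.base, 0, tq->tx_ring.size *
               sizeof(struct Vmxnet3_TxDesc));
        tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
        tq->tx_ring.gen = VMXNET3_INIT_GEN;

        memset(tq->data_ring.base, 0,
               tq->data_ring.size * tq->txdata_desc_size);

        /* reset the completion ring and its read cursor */
        memset(tq->comp_ring.base, 0, tq->comp_ring.size *
               sizeof(struct Vmxnet3_TxCompDesc));
        tq->comp_ring.next2proc = 0;
        tq->comp_ring.gen = VMXNET3_INIT_GEN;

        /* reset the bookkeeping data */
        memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
        for (i = 0; i < tq->tx_ring.size; i++)
                tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
}
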
554 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
557 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
558 tq->comp_ring.base || tq->buf_info);
560 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
561 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
562 &tq->tx_ring.basePA, GFP_KERNEL);
563 if (!tq->tx_ring.base) {
568 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
569 tq->data_ring.size * tq->txdata_desc_size,
570 &tq->data_ring.basePA, GFP_KERNEL);
571 if (!tq->data_ring.base) {
576 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
577 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
578 &tq->comp_ring.basePA, GFP_KERNEL);
579 if (!tq->comp_ring.base) {
584 tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
587 if (!tq->buf_info)
593 vmxnet3_tq_destroy(tq, adapter);
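
Lines 554-593 allocate the tx ring, the data ring, and the completion ring as DMA-coherent memory, plus a node-local buf_info array. On any failure the code falls through to vmxnet3_tq_destroy (line 593), which is safe because destroy checks each base pointer. A sketch; the per-allocation error messages are elided here:

static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
                  struct vmxnet3_adapter *adapter)
{
        BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
               tq->comp_ring.base || tq->buf_info);

        tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
                        tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
                        &tq->tx_ring.basePA, GFP_KERNEL);
        if (!tq->tx_ring.base)
                goto err;

        tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
                        tq->data_ring.size * tq->txdata_desc_size,
                        &tq->data_ring.basePA, GFP_KERNEL);
        if (!tq->data_ring.base)
                goto err;

        tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
                        tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
                        &tq->comp_ring.basePA, GFP_KERNEL);
        if (!tq->comp_ring.base)
                goto err;

        /* allocate bookkeeping close to the device's NUMA node */
        tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
                                    GFP_KERNEL,
                                    dev_to_node(&adapter->pdev->dev));
        if (!tq->buf_info)
                goto err;

        return 0;

err:
        vmxnet3_tq_destroy(tq, adapter);
        return -ENOMEM;
}
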
735 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
747 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
749 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
754 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
755 tq->tx_ring.next2fill *
756 tq->txdata_desc_size);
760 tbi = tq->buf_info + tq->tx_ring.next2fill;
765 tq->tx_ring.next2fill,
768 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
771 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
788 tbi = tq->buf_info + tq->tx_ring.next2fill;
798 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
799 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
807 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
809 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
810 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
823 tbi = tq->buf_info + tq->tx_ring.next2fill;
840 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
841 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
849 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
851 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
852 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
863 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
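
The dw2 assignments at lines 747, 771, 810, and 852 encode the descriptor-publishing discipline: the SOP descriptor is first written with an inverted generation bit, so the device treats it as not yet valid while the rest of the chain is filled with the current gen, and the producer flips the SOP bit only at the very end. An excerpt-style sketch of the pattern; the final barrier and flip live in the caller (vmxnet3_tq_xmit) and are quoted from memory, so treat them as assumptions:

/* SOP: inverted gen bit, i.e. invalid from the device's point of view */
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

/* every subsequent descriptor of the packet: current gen bit */
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

/* after the whole chain is written: order the stores, then flip the
 * SOP gen bit to hand the packet to the device atomically
 */
dma_wmb();
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
                              VMXNET3_TXD_GEN);
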
898 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
967 tq->txdata_desc_size,
979 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
980 tq->stats.oversized_hdr++;
1001 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1007 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
1008 tq->tx_ring.next2fill *
1009 tq->txdata_desc_size);
1014 ctx->copy_size, tq->tx_ring.next2fill);
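
vmxnet3_parse_hdr (lines 898-980) decides how many leading header bytes to copy (ctx->copy_size) and fails with the oversized_hdr stat when they exceed one data-ring descriptor (txdata_desc_size). vmxnet3_copy_hdr then copies those bytes into the data ring, whose slots stay in lock-step with tx_ring.next2fill. A sketch, with the copy itself (elided in the listing) marked as assumed:

static void
vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                 struct vmxnet3_tx_ctx *ctx,
                 struct vmxnet3_adapter *adapter)
{
        struct Vmxnet3_TxDataDesc *tdd;

        tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
                                            tq->tx_ring.next2fill *
                                            tq->txdata_desc_size);

        memcpy(tdd->data, skb->data, ctx->copy_size);   /* assumed body */
        netdev_dbg(adapter->netdev,
                   "copy %u bytes to dataRing[%u]\n",
                   ctx->copy_size, tq->tx_ring.next2fill);
}
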
1068 * Transmits a pkt thru a given tq
1076 * 2. tq stats may be updated accordingly
1081 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1106 tq->stats.drop_tso++;
1109 tq->stats.copy_skb_header++;
1116 tq->stats.drop_too_many_frags++;
1119 tq->stats.linearized++;
1124 tq->stats.drop_too_many_frags++;
1140 tq->stats.drop_too_many_frags++;
1143 tq->stats.linearized++;
1150 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1157 tq->stats.drop_oversized_hdr++;
1165 tq->stats.drop_oversized_hdr++;
1171 tq->stats.drop_hdr_inspect_err++;
1175 spin_lock_irqsave(&tq->tx_lock, flags);
1177 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1178 tq->stats.tx_ring_full++;
1182 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1184 vmxnet3_tq_stop(tq, adapter);
1185 spin_unlock_irqrestore(&tq->tx_lock, flags);
1190 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1193 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1207 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1254 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1281 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1284 spin_unlock_irqrestore(&tq->tx_lock, flags);
1286 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1287 tq->shared->txNumDeferred = 0;
1289 adapter->tx_prod_offset + tq->qid * 8,
1290 tq->tx_ring.next2fill);
1296 spin_unlock_irqrestore(&tq->tx_lock, flags);
1298 tq->stats.drop_total++;
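
Lines 1081-1298 are the hot transmit path: drop or linearize pathological skbs (updating the stats counters seen above), parse and copy headers, then, under tx_lock, stop the queue and return busy when descriptors run short, map the packet, and finally ring the doorbell only when enough packets have accumulated. A condensed excerpt-style sketch of the locking and doorbell logic, with elided steps noted in comments:

        spin_lock_irqsave(&tq->tx_lock, flags);

        if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
                tq->stats.tx_ring_full++;
                vmxnet3_tq_stop(tq, adapter);
                spin_unlock_irqrestore(&tq->tx_lock, flags);
                return NETDEV_TX_BUSY;          /* the stack will retry */
        }

        vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
        if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
                goto unlock_drop_pkt;           /* label assumed */

        tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
        /* ... fill SOP/EOP flags, then flip the SOP gen bit ... */
        le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);

        spin_unlock_irqrestore(&tq->tx_lock, flags);

        /* batched doorbell: touch the BAR0 register only once the number
         * of deferred packets crosses the device-suggested threshold
         */
        if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
                tq->shared->txNumDeferred = 0;
                VMXNET3_WRITE_BAR0_REG(adapter,
                                       adapter->tx_prod_offset + tq->qid * 8,
                                       tq->tx_ring.next2fill);
        }

        return NETDEV_TX_OK;
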
2267 struct vmxnet3_tx_queue *tq =
2269 vmxnet3_tq_tx_complete(tq, adapter);
2292 struct vmxnet3_tx_queue *tq = data;
2293 struct vmxnet3_adapter *adapter = tq->adapter;
2296 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
2306 vmxnet3_tq_tx_complete(tq, adapter);
2308 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
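
Two completion contexts match here: lines 2267-2269 are presumably a NAPI rx poll handler draining its buddy tx queue when the two share an interrupt vector, and lines 2292-2308 the dedicated per-queue MSI-X tx handler. A sketch of the latter, assuming the usual mask-work-unmask shape; the shared-vector special case is elided:

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
        struct vmxnet3_tx_queue *tq = data;
        struct vmxnet3_adapter *adapter = tq->adapter;

        /* mask the vector, reap completions, then unmask */
        vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
        vmxnet3_tq_tx_complete(tq, adapter);
        vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

        return IRQ_HANDLED;
}
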
2805 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2808 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2809 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2810 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2812 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2813 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2814 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2815 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2817 tqc->intrIdx = tq->comp_ring.intr_idx;
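
Lines 2805-2817 publish each queue's geometry to the device: DMA base addresses, ring sizes, and the completion interrupt index, everything crossing to the device converted to little-endian. A sketch of one loop iteration; tqc presumably points at the queue's Vmxnet3_TxQueueConf in the shared adapter->tqd_start area (the ctrl half of which is what tq->shared points at, per line 3298 below):

        struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
        struct Vmxnet3_TxQueueConf *tqc = &adapter->tqd_start[i].conf;

        tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
        tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
        tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);

        tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
        tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
        tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
        tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);

        tqc->intrIdx        = tq->comp_ring.intr_idx;
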
3293 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
3294 tq->tx_ring.size = tx_ring_size;
3295 tq->data_ring.size = tx_ring_size;
3296 tq->comp_ring.size = tx_ring_size;
3297 tq->txdata_desc_size = txdata_desc_size;
3298 tq->shared = &adapter->tqd_start[i].ctrl;
3299 tq->stopped = true;
3300 tq->adapter = adapter;
3301 tq->qid = i;
3302 err = vmxnet3_tq_create(tq, adapter);
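
Lines 3293-3302 size and wire up one tx queue before creating its rings; note the queue starts in the stopped state until the device is activated. A sketch of the surrounding loop, assuming the usual iteration over adapter->num_tx_queues and unwinding on a failed create:

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];

                tq->tx_ring.size     = tx_ring_size;
                tq->data_ring.size   = tx_ring_size;
                tq->comp_ring.size   = tx_ring_size;
                tq->txdata_desc_size = txdata_desc_size;
                tq->shared  = &adapter->tqd_start[i].ctrl;
                tq->stopped = true;
                tq->adapter = adapter;
                tq->qid     = i;
                err = vmxnet3_tq_create(tq, adapter);
                if (err)
                        goto queue_err; /* assumed: destroy queues created so far */
        }
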