Lines Matching defs:tx_q

60  * @tx_q: the queue that owns the buffer
63 static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
67 dma_unmap_single(tx_q->dev,
73 dma_unmap_page(tx_q->dev,
162 * @tx_q: queue for which the buffers are allocated
166 static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
174 buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
175 tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
176 if (!tx_q->tx_buf)
180 for (i = 0; i < tx_q->desc_count; i++)
181 tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
186 tx_q->buf_stack.bufs =
187 kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *),
189 if (!tx_q->buf_stack.bufs)
192 tx_q->buf_stack.size = tx_q->desc_count;
193 tx_q->buf_stack.top = tx_q->desc_count;
195 for (i = 0; i < tx_q->desc_count; i++) {
196 tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]),
198 if (!tx_q->buf_stack.bufs[i])
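
The idpf_tx_buf_alloc_all() matches above show the allocation pattern: one flat tx_buf array sized to the descriptor count, each entry's compl_tag set to the invalid tag, plus a buf_stack of preallocated stash entries whose size and top both start at desc_count. A minimal userspace sketch of that pattern, using simplified stand-in types rather than the driver's structs:

#include <stdlib.h>

#define INVAL_COMPL_TAG	-1	/* stand-in for IDPF_SPLITQ_TX_INVAL_COMPL_TAG */

struct tx_buf { int compl_tag; /* ... */ };
struct tx_stash { struct tx_buf buf; };
struct buf_stack { struct tx_stash **bufs; int size; int top; };

/* Mirrors the shape of idpf_tx_buf_alloc_all(): a zeroed buffer array plus a
 * full stack of preallocated stash entries, both sized to the ring.
 */
static int tx_buf_alloc_all(struct tx_buf **tx_bufs, struct buf_stack *stack,
			    int desc_count)
{
	int i;

	*tx_bufs = calloc(desc_count, sizeof(**tx_bufs));
	if (!*tx_bufs)
		return -1;

	for (i = 0; i < desc_count; i++)
		(*tx_bufs)[i].compl_tag = INVAL_COMPL_TAG;

	stack->bufs = calloc(desc_count, sizeof(*stack->bufs));
	if (!stack->bufs)
		return -1;

	/* A full stack: every stash slot starts out available. */
	stack->size = desc_count;
	stack->top = desc_count;

	for (i = 0; i < desc_count; i++) {
		stack->bufs[i] = calloc(1, sizeof(*stack->bufs[i]));
		if (!stack->bufs[i])
			return -1;	/* the real code releases everything on failure */
	}

	return 0;
}

int main(void)
{
	struct tx_buf *bufs;
	struct buf_stack stack;

	return tx_buf_alloc_all(&bufs, &stack, 512) ? 1 : 0;
}

Preallocating the stash entries up front means the flow-scheduling clean path never has to allocate in softirq context; it only pops and pushes pointers on the stack.
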
207 * @tx_q: the tx ring to set up
212 static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
214 struct device *dev = tx_q->dev;
219 err = idpf_tx_buf_alloc_all(tx_q);
228 tx_q->size = tx_q->desc_count * desc_sz;
231 tx_q->size = ALIGN(tx_q->size, 4096);
232 tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
234 if (!tx_q->desc_ring) {
236 tx_q->size);
241 tx_q->next_to_alloc = 0;
242 tx_q->next_to_use = 0;
243 tx_q->next_to_clean = 0;
244 set_bit(__IDPF_Q_GEN_CHK, tx_q->flags);
249 idpf_tx_desc_rel(tx_q, bufq);
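
idpf_tx_desc_alloc() sizes the descriptor ring as desc_count * desc_sz and rounds it up to a 4 KiB boundary before the dmam_alloc_coherent() call; the rounding is what ALIGN(tx_q->size, 4096) does. A small standalone sketch of that arithmetic (the descriptor size and count below are made-up example values, not driver defaults):

#include <stdio.h>
#include <stddef.h>

/* Round x up to the next multiple of a power-of-two a, as ALIGN() does. */
#define ALIGN_POW2(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t desc_count = 500;	/* example values only */
	size_t desc_sz = 16;
	size_t size = desc_count * desc_sz;

	size = ALIGN_POW2(size, 4096);	/* 8000 -> 8192 bytes */

	printf("ring allocation: %zu bytes\n", size);
	return 0;
}
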
1446 * @tx_q: tx queue to handle software marker
1448 static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
1450 struct idpf_vport *vport = tx_q->vport;
1453 clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
1472 * @tx_q: tx queue to clean buffer from
1477 static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
1485 dma_unmap_single(tx_q->dev,
1596 * @tx_q: Tx queue to clean
1610 static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
1617 s16 ntc = tx_q->next_to_clean;
1620 tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
1621 next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
1622 tx_buf = &tx_q->tx_buf[ntc];
1623 ntc -= tx_q->desc_count;
1642 if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
1646 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1650 if (idpf_stash_flow_sch_buffers(tx_q,
1656 idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
1661 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1666 dma_unmap_page(tx_q->dev,
1676 idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
1680 ntc += tx_q->desc_count;
1681 tx_q->next_to_clean = ntc;
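
The cleaning loop in idpf_tx_splitq_clean() keeps next_to_clean in a signed variable biased by -desc_count (the "ntc -= tx_q->desc_count" above), so the wrap test is simply "ntc hit zero" instead of a compare against desc_count, and the bias is added back before next_to_clean is stored. A small runnable sketch of just that index walk, with the per-descriptor processing elided:

#include <stdio.h>

/* Walk 'budget' ring slots starting at next_to_clean, using the same
 * bias-and-wrap idea as the cleaning loop: ntc is offset by -desc_count,
 * so reaching zero means the walk passed the last slot.
 */
static unsigned short clean_walk(unsigned short next_to_clean,
				 unsigned short desc_count,
				 unsigned int budget)
{
	short ntc = (short)next_to_clean - (short)desc_count;

	while (budget--) {
		/* ... process tx_buf[ntc + desc_count] and its descriptor ... */
		ntc++;
		if (!ntc)			/* passed the last slot */
			ntc -= desc_count;	/* wrap back to slot 0 */
	}

	return (unsigned short)(ntc + desc_count);	/* real next_to_clean */
}

int main(void)
{
	/* start 2 slots before the end of a 512-entry ring, clean 5 slots */
	printf("%u\n", clean_walk(510, 512, 5));	/* prints 3 */
	return 0;
}
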
1822 struct idpf_queue *tx_q;
1843 tx_q = complq->txq_grp->txqs[rel_tx_qid];
1852 idpf_tx_splitq_clean(tx_q, hw_head, budget,
1856 idpf_tx_handle_rs_completion(tx_q, tx_desc,
1860 idpf_tx_handle_sw_marker(tx_q);
1863 dev_err(&tx_q->vport->adapter->pdev->dev,
1869 u64_stats_update_begin(&tx_q->stats_sync);
1870 u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets);
1871 u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes);
1872 tx_q->cleaned_pkts += cleaned_stats.packets;
1873 tx_q->cleaned_bytes += cleaned_stats.bytes;
1875 u64_stats_update_end(&tx_q->stats_sync);
1901 struct idpf_queue *tx_q = complq->txq_grp->txqs[i];
1906 if (!tx_q->cleaned_bytes)
1909 *cleaned += tx_q->cleaned_pkts;
1912 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
1914 dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
1916 !netif_carrier_ok(tx_q->vport->netdev);
1918 __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
1919 IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
1925 tx_q->cleaned_bytes = 0;
1926 tx_q->cleaned_pkts = 0;
1974 * @tx_q: the queue to be checked
1979 int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
1983 if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
1986 u64_stats_update_begin(&tx_q->stats_sync);
1987 u64_stats_inc(&tx_q->q_stats.tx.q_busy);
1988 u64_stats_update_end(&tx_q->stats_sync);
1990 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
1992 return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
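
idpf_tx_maybe_stop_common() keeps transmitting while IDPF_DESC_UNUSED(tx_q) covers the descriptors the frame needs, and otherwise counts a q_busy event and lets netif_txq_maybe_stop() park the queue. The sketch below uses the conventional free-slot formula for a next_to_use/next_to_clean ring, where one slot is always kept empty so "full" and "empty" stay distinguishable; the driver's macro is assumed to follow the same convention rather than copied:

#include <stdio.h>

/* Free slots between the software producer (next_to_use) and the
 * consumer index advanced by cleaning (next_to_clean).
 */
static unsigned int desc_unused(unsigned int ntu, unsigned int ntc,
				unsigned int desc_count)
{
	return ((ntc > ntu) ? 0 : desc_count) + ntc - ntu - 1;
}

/* Stop when the ring cannot hold the descriptors this frame needs. */
static int maybe_stop(unsigned int ntu, unsigned int ntc,
		      unsigned int desc_count, unsigned int needed)
{
	return desc_unused(ntu, ntc, desc_count) < needed;
}

int main(void)
{
	printf("%u free\n", desc_unused(100, 40, 512));		/* 451 free */
	printf("stop=%d\n", maybe_stop(100, 40, 512, 500));	/* stop=1 */
	return 0;
}

The splitq variant layers two more checks on top of this one: pending completions against the completion-queue overflow threshold, and the stash-buffer reserve (IDPF_TX_BUF_RSV_LOW).
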
1997 * @tx_q: the queue to be checked
2002 static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
2005 if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
2012 if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
2013 IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
2019 if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
2025 u64_stats_update_begin(&tx_q->stats_sync);
2026 u64_stats_inc(&tx_q->q_stats.tx.q_busy);
2027 u64_stats_update_end(&tx_q->stats_sync);
2028 netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx);
2035 * @tx_q: queue to bump
2043 void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
2048 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
2049 tx_q->next_to_use = val;
2051 idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
2062 writel(val, tx_q->tail);
2176 * @tx_q: queue to send buffer on
2184 static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
2191 u16 i = tx_q->next_to_use;
2205 tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
2207 dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
2212 (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
2217 if (dma_mapping_error(tx_q->dev, dma))
2218 return idpf_tx_dma_map_error(tx_q, skb, first, i);
2277 if (i == tx_q->desc_count) {
2278 tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
2280 tx_q->compl_tag_cur_gen =
2281 IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2294 memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
2295 tx_q->tx_buf[i].compl_tag = params->compl_tag;
2322 if (i == tx_q->desc_count) {
2323 tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
2325 tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2331 dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
2334 tx_buf = &tx_q->tx_buf[i];
2343 i = idpf_tx_splitq_bump_ntu(tx_q, i);
2348 tx_q->txq_grp->num_completions_pending++;
2351 nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
2354 idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
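
In the flow-scheduling path, idpf_tx_splitq_map() builds the completion tag as (compl_tag_cur_gen << compl_tag_gen_s) | i, i.e. a generation counter packed above the ring index, and bumps the generation whenever the index wraps (IDPF_TX_ADJ_COMPL_TAG_GEN). A standalone sketch of that packing; the shift width here is illustrative, not the driver's derived value:

#include <stdio.h>

/* Illustrative shift width; the driver derives its own compl_tag_gen_s. */
#define GEN_SHIFT	10
#define IDX_MASK	((1u << GEN_SHIFT) - 1)

/* Generation counter in the upper bits, ring index in the lower bits. */
static unsigned int make_tag(unsigned int gen, unsigned int idx)
{
	return (gen << GEN_SHIFT) | (idx & IDX_MASK);
}

int main(void)
{
	unsigned int tag = make_tag(3, 257);

	printf("tag=0x%04x gen=%u idx=%u\n",
	       tag, tag >> GEN_SHIFT, tag & IDX_MASK);
	return 0;
}

Packing the generation with the index lets the completion-queue clean path tell a fresh use of a ring slot apart from a stale completion for the slot's previous occupant.
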
2564 * @tx_q: queue to send buffer on
2567 netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
2569 u64_stats_update_begin(&tx_q->stats_sync);
2570 u64_stats_inc(&tx_q->q_stats.tx.skb_drops);
2571 u64_stats_update_end(&tx_q->stats_sync);
2573 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2583 * @tx_q: queue to send buffer on
2588 struct idpf_queue *tx_q)
2595 count = idpf_tx_desc_count_required(tx_q, skb);
2597 return idpf_tx_drop_skb(tx_q, skb);
2601 return idpf_tx_drop_skb(tx_q, skb);
2605 if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
2606 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2614 idpf_tx_splitq_get_ctx_desc(tx_q);
2627 u64_stats_update_begin(&tx_q->stats_sync);
2628 u64_stats_inc(&tx_q->q_stats.tx.lso_pkts);
2629 u64_stats_update_end(&tx_q->stats_sync);
2633 first = &tx_q->tx_buf[tx_q->next_to_use];
2645 if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) {
2653 if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
2655 tx_q->txq_grp->num_completions_pending++;
2669 idpf_tx_splitq_map(tx_q, &tx_params, first);
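
When flow scheduling is enabled, idpf_tx_splitq_frame() asks hardware for a descriptor report event only when next_to_use lands on a multiple of IDPF_TX_SPLITQ_RE_MIN_GAP, counting one more pending completion each time it does. A toy sketch of that cadence; the gap value below is an assumption for illustration:

#include <stdio.h>

#define RE_MIN_GAP	64	/* illustrative stand-in for IDPF_TX_SPLITQ_RE_MIN_GAP */

/* Request a report event on every RE_MIN_GAP-th ring slot and account for
 * the completion it will eventually generate.
 */
static int want_re(unsigned int next_to_use, unsigned int *completions_pending)
{
	if (next_to_use % RE_MIN_GAP)
		return 0;

	(*completions_pending)++;
	return 1;
}

int main(void)
{
	unsigned int pending = 0, ntu;

	for (ntu = 0; ntu < 256; ntu++)
		want_re(ntu, &pending);

	printf("report events over 256 slots: %u\n", pending);	/* 4 */
	return 0;
}
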
2685 struct idpf_queue *tx_q;
2693 tx_q = vport->txqs[skb_get_queue_mapping(skb)];
2698 if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
2699 idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2704 return idpf_tx_splitq_frame(skb, tx_q);