Lines matching refs: tx_buf

36 	struct ice_tx_buf *tx_buf, *first;
68 first = &tx_ring->tx_buf[i];
75 tx_buf = &tx_ring->tx_buf[i];
80 memset(tx_buf, 0, sizeof(*tx_buf));
81 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
82 dma_unmap_addr_set(tx_buf, dma, dma);
88 tx_buf->type = ICE_TX_BUF_DUMMY;
89 tx_buf->raw_buf = raw_packet;
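
Lines 68-89 prepare a transmit buffer for a raw dummy packet; the ICE_FDIR_MAX_RAW_PKT_SIZE and ICE_TX_BUF_DUMMY names suggest this is the flow-director programming path. The slot is zeroed, the DMA length and address are recorded through the dma_unmap_len_set()/dma_unmap_addr_set() helpers so the cleanup path can undo the mapping later, and raw_buf keeps the pointer that will eventually be devm_kfree()d. A minimal sketch of that bookkeeping, assuming a cut-down sketch_tx_buf in place of struct ice_tx_buf (the real structure carries far more state):

        #include <linux/dma-mapping.h>
        #include <linux/errno.h>
        #include <linux/string.h>

        enum sketch_buf_type { SKETCH_BUF_EMPTY, SKETCH_BUF_DUMMY };

        struct sketch_tx_buf {
                void *raw_buf;                  /* dummy packet owned by this slot */
                enum sketch_buf_type type;      /* stands in for ICE_TX_BUF_DUMMY etc. */
                DEFINE_DMA_UNMAP_ADDR(dma);     /* address saved for a later unmap */
                DEFINE_DMA_UNMAP_LEN(len);      /* length saved for a later unmap */
        };

        /* Map a raw packet for transmit and record everything cleanup will need. */
        static int sketch_stash_raw_packet(struct device *dev,
                                           struct sketch_tx_buf *tx_buf,
                                           void *raw_packet, size_t size)
        {
                dma_addr_t dma = dma_map_single(dev, raw_packet, size, DMA_TO_DEVICE);

                if (dma_mapping_error(dev, dma))
                        return -ENOMEM;

                memset(tx_buf, 0, sizeof(*tx_buf));
                dma_unmap_len_set(tx_buf, len, size);
                dma_unmap_addr_set(tx_buf, dma, dma);
                tx_buf->type = SKETCH_BUF_DUMMY;        /* mirrors line 88 */
                tx_buf->raw_buf = raw_packet;           /* freed with devm_kfree() later */
                return 0;
        }
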
110 * @tx_buf: the buffer to free
113 ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
115 if (dma_unmap_len(tx_buf, len))
117 dma_unmap_addr(tx_buf, dma),
118 dma_unmap_len(tx_buf, len),
121 switch (tx_buf->type) {
123 devm_kfree(ring->dev, tx_buf->raw_buf);
126 dev_kfree_skb_any(tx_buf->skb);
129 page_frag_free(tx_buf->raw_buf);
132 xdp_return_frame(tx_buf->xdpf);
136 tx_buf->next_to_watch = NULL;
137 tx_buf->type = ICE_TX_BUF_EMPTY;
138 dma_unmap_len_set(tx_buf, len, 0);
139 /* tx_buf must be completely set up in the transmit path */
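
Lines 113-139 are ice_unmap_and_free_tx_buf(): the DMA mapping is undone only if a non-zero length was recorded, then the switch on tx_buf->type returns the payload to whichever allocator owns it (a dummy raw buffer via devm_kfree(), an skb via dev_kfree_skb_any(), an XDP page fragment via page_frag_free(), a redirected XDP frame via xdp_return_frame()), and the slot is reset to ICE_TX_BUF_EMPTY with a zero unmap length. A self-contained sketch of that dispatch, with sketch_* names standing in for the driver's buffer-type values:

        #include <linux/device.h>
        #include <linux/dma-mapping.h>
        #include <linux/gfp.h>
        #include <linux/skbuff.h>
        #include <net/xdp.h>

        enum sketch_buf_type {
                SKETCH_BUF_EMPTY,       /* nothing to free */
                SKETCH_BUF_DUMMY,       /* devm-allocated raw packet */
                SKETCH_BUF_SKB,         /* ordinary sk_buff */
                SKETCH_BUF_XDP_TX,      /* page-fragment backed XDP_TX buffer */
                SKETCH_BUF_XDP_XMIT,    /* redirected xdp_frame */
        };

        struct sketch_tx_buf {
                union {
                        void *raw_buf;
                        struct sk_buff *skb;
                        struct xdp_frame *xdpf;
                };
                enum sketch_buf_type type;
                DEFINE_DMA_UNMAP_ADDR(dma);
                DEFINE_DMA_UNMAP_LEN(len);
        };

        static void sketch_unmap_and_free(struct device *dev, struct sketch_tx_buf *tx_buf)
        {
                /* Undo the DMA mapping only if one was recorded (line 115). */
                if (dma_unmap_len(tx_buf, len))
                        dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
                                         dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);

                /* The type field says which allocator owns the payload. */
                switch (tx_buf->type) {
                case SKETCH_BUF_DUMMY:
                        devm_kfree(dev, tx_buf->raw_buf);
                        break;
                case SKETCH_BUF_SKB:
                        dev_kfree_skb_any(tx_buf->skb);
                        break;
                case SKETCH_BUF_XDP_TX:
                        page_frag_free(tx_buf->raw_buf);
                        break;
                case SKETCH_BUF_XDP_XMIT:
                        xdp_return_frame(tx_buf->xdpf);
                        break;
                default:
                        break;
                }

                /* Leave the slot fully reset for the next transmit (lines 136-139). */
                tx_buf->type = SKETCH_BUF_EMPTY;
                dma_unmap_len_set(tx_buf, len, 0);
        }
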
162 if (!tx_ring->tx_buf)
167 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
170 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
198 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
199 tx_ring->tx_buf = NULL;
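
Lines 162-199 show the teardown order: do nothing if the buffer array was never allocated, run ice_unmap_and_free_tx_buf() over every slot, memset() the whole array back to zero, and, when the ring itself is being freed, devm_kfree() the array and NULL the pointer so a repeated cleanup is harmless. A compressed sketch, reusing struct sketch_tx_buf and sketch_unmap_and_free() from the previous sketch:

        static void sketch_clean_ring(struct device *dev,
                                      struct sketch_tx_buf *bufs, u16 count)
        {
                u16 i;

                if (!bufs)              /* ring was never set up (line 162) */
                        return;

                /* Release every outstanding buffer before wiping the array. */
                for (i = 0; i < count; i++)
                        sketch_unmap_and_free(dev, &bufs[i]);
                memset(bufs, 0, sizeof(*bufs) * count);

                /* A full ring free would then devm_kfree() the array and
                 * NULL the pointer, as at lines 198-199. */
        }
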
224 struct ice_tx_buf *tx_buf;
229 tx_buf = &tx_ring->tx_buf[i];
236 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
243 prefetchw(&tx_buf->skb->users);
247 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
254 tx_buf->next_to_watch = NULL;
257 total_bytes += tx_buf->bytecount;
258 total_pkts += tx_buf->gso_segs;
261 napi_consume_skb(tx_buf->skb, napi_budget);
265 dma_unmap_addr(tx_buf, dma),
266 dma_unmap_len(tx_buf, len),
269 /* clear tx_buf data */
270 tx_buf->type = ICE_TX_BUF_EMPTY;
271 dma_unmap_len_set(tx_buf, len, 0);
275 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
276 tx_buf++;
281 tx_buf = tx_ring->tx_buf;
286 if (dma_unmap_len(tx_buf, len)) {
288 dma_unmap_addr(tx_buf, dma),
289 dma_unmap_len(tx_buf, len),
291 dma_unmap_len_set(tx_buf, len, 0);
294 ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
297 tx_buf++;
302 tx_buf = tx_ring->tx_buf;
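
Lines 229-302 are the completion loop (the ice_trace() hooks name it clean_tx_irq): tx_buf->next_to_watch holds the end-of-packet descriptor to check, bytecount and gso_segs feed the ring statistics, the head skb is returned through napi_consume_skb(), and the remaining fragment buffers are DMA-unmapped one by one while tx_buf and tx_desc advance; whenever the walk runs off the end of the ring (lines 281 and 302) it snaps back to tx_ring->tx_buf. A stripped-down sketch of one way to implement that wrap-around walk using the negative-index idiom common in these drivers, with the per-slot cleanup elided and hypothetical sketch_* types:

        #include <linux/types.h>

        struct sketch_slot {
                int unused;             /* per-slot unmap/free state elided */
        };

        struct sketch_ring {
                struct sketch_slot *slots;      /* one entry per descriptor */
                u16 count;
                u16 next_to_clean;
        };

        /* Retire "done" consecutive entries starting at next_to_clean. */
        static void sketch_completion_walk(struct sketch_ring *ring, int done)
        {
                struct sketch_slot *slot = &ring->slots[ring->next_to_clean];
                int i = ring->next_to_clean - ring->count;      /* negative until wrap */

                while (done--) {
                        /* ... unmap/free per-slot state, count bytes/packets ... */
                        slot++;
                        i++;
                        if (!i) {                       /* stepped past the last entry */
                                i -= ring->count;       /* back to -count */
                                slot = ring->slots;     /* restart at slot [0], as at 281/302 */
                        }
                }

                ring->next_to_clean = i + ring->count;
        }
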
350 WARN_ON(tx_ring->tx_buf);
351 tx_ring->tx_buf =
352 devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
354 if (!tx_ring->tx_buf)
374 devm_kfree(dev, tx_ring->tx_buf);
375 tx_ring->tx_buf = NULL;
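
Lines 350-375 cover allocation of the per-ring buffer array (likely ice_setup_tx_ring()): WARN_ON() flags a double setup, devm_kcalloc() sizes the array to one entry per descriptor, and if a later step fails the array is devm_kfree()d and the pointer cleared so the cleanup paths see a ring that was never set up. A small sketch of that pattern, with a hypothetical ring structure:

        #include <linux/bug.h>
        #include <linux/device.h>
        #include <linux/errno.h>
        #include <linux/gfp.h>
        #include <linux/types.h>

        struct sketch_ring_setup {
                struct device *dev;
                void *buf;              /* per-descriptor bookkeeping array */
                u16 count;              /* number of descriptors in the ring */
        };

        static int sketch_alloc_buf_array(struct sketch_ring_setup *ring, size_t entry_size)
        {
                WARN_ON(ring->buf);     /* setup must not run twice (line 350) */
                ring->buf = devm_kcalloc(ring->dev, ring->count, entry_size, GFP_KERNEL);
                if (!ring->buf)
                        return -ENOMEM;

                /*
                 * If a later step (e.g. descriptor memory) fails, the unwind
                 * frees the array and clears the pointer, as at lines 374-375:
                 *
                 *      devm_kfree(ring->dev, ring->buf);
                 *      ring->buf = NULL;
                 */
                return 0;
        }
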
643 struct ice_tx_buf *tx_buf;
666 tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
677 tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
1647 struct ice_tx_buf *tx_buf;
1670 tx_buf = first;
1679 dma_unmap_len_set(tx_buf, len, size);
1680 dma_unmap_addr_set(tx_buf, dma, dma);
1729 tx_buf = &tx_ring->tx_buf[i];
1730 tx_buf->type = ICE_TX_BUF_FRAG;
1770 /* clear DMA mappings for failed tx_buf map */
1772 tx_buf = &tx_ring->tx_buf[i];
1773 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1774 if (tx_buf == first)
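
Lines 1670-1774 belong to the DMA mapping loop on the transmit path: tx_buf starts at `first` (the head buffer that owns the skb), each fragment slot records its mapping with dma_unmap_len_set()/dma_unmap_addr_set() and is marked ICE_TX_BUF_FRAG, and the error path at line 1770 hands every slot it had already set up back to ice_unmap_and_free_tx_buf() until it reaches `first`, stepping backwards and (in the surrounding code, not shown here) wrapping from index 0 back to the top of the ring. A sketch of that backward unwind, with a hypothetical release_slot() callback standing in for ice_unmap_and_free_tx_buf():

        #include <linux/types.h>

        struct sketch_slot {
                int unused;             /* per-slot unmap/free state elided */
        };

        /*
         * Undo partially mapped slots, newest first, until the head slot that
         * owns the skb is reached.
         */
        static void sketch_unwind_mapping_error(struct sketch_slot *slots, u16 ring_count,
                                                u16 first_idx, u16 fail_idx,
                                                void (*release_slot)(struct sketch_slot *))
        {
                u16 i = fail_idx;

                for (;;) {
                        release_slot(&slots[i]);        /* unmap + free this slot */
                        if (i == first_idx)             /* head buffer reached: done */
                                break;
                        if (i == 0)                     /* wrap backwards over the ring */
                                i = ring_count;
                        i--;
                }
        }
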
2372 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2504 struct ice_tx_buf *tx_buf;
2506 tx_buf = &tx_ring->tx_buf[i];
2511 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2526 tx_buf->next_to_watch = NULL;
2531 tx_buf++;
2536 tx_buf = tx_ring->tx_buf;
2541 if (dma_unmap_len(tx_buf, len))
2543 dma_unmap_addr(tx_buf, dma),
2544 dma_unmap_len(tx_buf, len),
2546 if (tx_buf->type == ICE_TX_BUF_DUMMY)
2547 devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2550 tx_buf->type = ICE_TX_BUF_EMPTY;
2551 tx_buf->tx_flags = 0;
2552 tx_buf->next_to_watch = NULL;
2553 dma_unmap_len_set(tx_buf, len, 0);
2558 tx_buf++;
2563 tx_buf = tx_ring->tx_buf;