Lines Matching defs:tx_buf

1214 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1218 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
1220 dma_unmap_addr(tx_buf, dma_addr0),
1221 dma_unmap_len(tx_buf, dma_len0),
1223 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
1225 dma_unmap_addr(tx_buf, dma_addr0),
1226 dma_unmap_len(tx_buf, dma_len0),
1230 if (dma_unmap_len(tx_buf, dma_len0)) {
1232 dma_unmap_addr(tx_buf, dma_addr0),
1233 dma_unmap_len(tx_buf, dma_len0),
1237 if (dma_unmap_len(tx_buf, dma_len1)) {
1239 dma_unmap_addr(tx_buf, dma_addr1),
1240 dma_unmap_len(tx_buf, dma_len1),
1245 if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
1246 if (tx_buf->type == MTK_TYPE_SKB) {
1247 struct sk_buff *skb = tx_buf->data;
1254 struct xdp_frame *xdpf = tx_buf->data;
1256 if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
1264 tx_buf->flags = 0;
1265 tx_buf->data = NULL;
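
These matches appear to come from the MediaTek ethernet driver (mtk_eth_soc.c). The mtk_tx_unmap() fragments above undo DMA mappings through the kernel's dma_unmap_addr()/dma_unmap_len() bookkeeping macros, which read back whatever the map path stored with the matching *_set() helpers, and then free the attached skb or xdp_frame unless tx_buf->data only holds the MTK_DMA_DUMMY_DESC placeholder. A minimal sketch of that bookkeeping pattern follows; the example_* names and struct layout are illustrative assumptions, not the driver's definitions, and the driver additionally distinguishes single and page mappings via the MTK_TX_FLAGS_* bits, which the sketch folds into one call:

#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Illustrative bookkeeping entry; field names mirror the matches above. */
struct example_tx_buf {
	void *data;
	u32 flags;
	DEFINE_DMA_UNMAP_ADDR(dma_addr0);	/* compile away without CONFIG_NEED_DMA_MAP_STATE */
	DEFINE_DMA_UNMAP_LEN(dma_len0);
	DEFINE_DMA_UNMAP_ADDR(dma_addr1);
	DEFINE_DMA_UNMAP_LEN(dma_len1);
};

/* Sketch of the unmap side: only touch slots that were actually mapped. */
static void example_unmap(struct device *dev, struct example_tx_buf *buf)
{
	if (dma_unmap_len(buf, dma_len0))
		dma_unmap_single(dev, dma_unmap_addr(buf, dma_addr0),
				 dma_unmap_len(buf, dma_len0), DMA_TO_DEVICE);
	if (dma_unmap_len(buf, dma_len1))
		dma_unmap_single(dev, dma_unmap_addr(buf, dma_addr1),
				 dma_unmap_len(buf, dma_len1), DMA_TO_DEVICE);
	buf->flags = 0;
	buf->data = NULL;
}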
1268 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
1273 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1274 dma_unmap_len_set(tx_buf, dma_len0, size);
1279 dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
1280 dma_unmap_len_set(tx_buf, dma_len1, size);
1282 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1285 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
1286 dma_unmap_len_set(tx_buf, dma_len0, size);
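
setup_tx_buf() is the matching record side. With QDMA each tx_buf tracks one mapping in dma_addr0/dma_len0; with PDMA one tx_buf covers two hardware buffer slots, chosen by the parity of the buffer index, and the MTK_DMA_DUMMY_DESC placeholder in tx_buf->data marks entries that carry a mapping but no packet pointer, so mtk_tx_unmap() skips the free step for them. A hypothetical sketch of that slot selection, reusing the example_tx_buf layout above (record_mapping() and EXAMPLE_DUMMY_DESC are made-up names, not the driver's):

#define EXAMPLE_DUMMY_DESC	((void *)1)	/* stand-in for MTK_DMA_DUMMY_DESC */

/* Sketch: even indexes use slot 0 of the entry, odd indexes slot 1;
 * slot-0 entries are tagged as placeholders until a real skb or
 * xdp_frame pointer is attached. */
static void record_mapping(struct example_tx_buf *buf, int idx,
			   dma_addr_t addr, size_t size)
{
	if (idx & 1) {
		dma_unmap_addr_set(buf, dma_addr1, addr);
		dma_unmap_len_set(buf, dma_len1, size);
	} else {
		buf->data = EXAMPLE_DUMMY_DESC;
		dma_unmap_addr_set(buf, dma_addr0, addr);
		dma_unmap_len_set(buf, dma_len0, size);
	}
}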
1408 struct mtk_tx_buf *itx_buf, *tx_buf;
1472 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1475 memset(tx_buf, 0, sizeof(*tx_buf));
1476 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1477 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1478 tx_buf->mac_id = mac->id;
1480 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
1525 tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
1528 mtk_tx_unmap(eth, tx_buf, NULL, false);
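
The mtk_tx_map() fragments show the per-fragment bookkeeping (clear the entry, tag it with MTK_DMA_DUMMY_DESC and MTK_TX_FLAGS_PAGE0, remember the owning MAC in mac_id), while lines 1525-1528 belong to the error path, which walks back over every descriptor already filled and releases it through mtk_tx_unmap(). A rough, index-based sketch of that unwind; example_ring and the loop bounds are illustrative assumptions, not the driver's descriptor arithmetic:

/* Illustrative ring bookkeeping used by the sketches here and below. */
struct example_ring {
	struct example_tx_buf *buf;	/* one entry per descriptor */
	int size;
	int next_to_clean;
};

/* Sketch of the unwind: release everything mapped before the failure. */
static void example_unwind(struct device *dev, struct example_ring *ring,
			   int first, int failed)
{
	int i = first;

	do {
		example_unmap(dev, &ring->buf[i]);	/* frees mapping + data */
		i = (i + 1) % ring->size;
	} while (i != failed);
}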
1756 struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
1769 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
1780 tx_buf->mac_id = mac->id;
1781 tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
1782 tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
1785 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
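
In mtk_xdp_frame_map() the type tag records how the frame reached the driver: MTK_TYPE_XDP_NDO when it arrived via ndo_xdp_xmit() and had to be freshly DMA-mapped (which appears to be why MTK_TX_FLAGS_SINGLE0 is set at line 1769), MTK_TYPE_XDP_TX when it is bounced back from the local receive path. mtk_tx_unmap() later keys off that tag and the napi flag (line 1256) to pick an xdp_frame return variant. A small sketch of that decision, using the XDP return helpers from net/xdp.h; the function name and boolean parameters are illustrative:

#include <net/xdp.h>

/* Sketch: choose how to hand an xdp_frame back at TX-completion time.
 * from_rx_napi mirrors the napi argument to mtk_tx_unmap(); bq is an
 * optional bulk-return queue as used by the poll paths below. */
static void example_return_xdp_frame(struct xdp_frame *xdpf, bool xdp_tx,
				     bool from_rx_napi,
				     struct xdp_frame_bulk *bq)
{
	if (from_rx_napi && xdp_tx)
		xdp_return_frame_rx_napi(xdpf);	 /* still inside the RX softirq */
	else if (bq)
		xdp_return_frame_bulk(xdpf, bq); /* amortise page_pool returns */
	else
		xdp_return_frame(xdpf);
}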
1805 struct mtk_tx_buf *htx_buf, *tx_buf;
1825 tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
1826 memset(tx_buf, 0, sizeof(*tx_buf));
1827 htx_buf = tx_buf;
1830 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
1843 tx_buf = mtk_desc_to_tx_buf(ring, txd,
1845 memset(tx_buf, 0, sizeof(*tx_buf));
1893 tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
1894 mtk_tx_unmap(eth, tx_buf, NULL, false);
2264 struct mtk_tx_buf *tx_buf;
2282 tx_buf = mtk_desc_to_tx_buf(ring, desc,
2284 if (!tx_buf->data)
2287 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2288 if (tx_buf->type == MTK_TYPE_SKB)
2289 mtk_poll_tx_done(eth, state, tx_buf->mac_id,
2290 tx_buf->data);
2294 mtk_tx_unmap(eth, tx_buf, &bq, true);
2313 struct mtk_tx_buf *tx_buf;
2323 tx_buf = &ring->buf[cpu];
2324 if (!tx_buf->data)
2327 if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
2328 if (tx_buf->type == MTK_TYPE_SKB)
2329 mtk_poll_tx_done(eth, state, 0, tx_buf->data);
2332 mtk_tx_unmap(eth, tx_buf, &bq, true);
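
The two completion paths, mtk_poll_tx_qdma() (2264-2294) and mtk_poll_tx_pdma() (2313-2332), share the same walk: stop at the first empty entry, report completions only for real packets (entries whose data is not the MTK_DMA_DUMMY_DESC placeholder, and only SKB-typed ones feed mtk_poll_tx_done()), then release the entry with mtk_tx_unmap() using the bulk xdp return queue and napi=true. A condensed sketch of that shared shape, reusing the example types above; hw_idx() and report() are hypothetical stand-ins for reading the hardware's release position and for the stats/byte-queue accounting:

int hw_idx(struct example_ring *ring);	/* hypothetical: read HW release index */
void report(void *pkt);			/* hypothetical: stats/byte-queue accounting */

/* Sketch of the TX-completion walk shared by both DMA flavours. */
static void example_poll_tx(struct device *dev, struct example_ring *ring)
{
	while (ring->next_to_clean != hw_idx(ring)) {
		struct example_tx_buf *buf = &ring->buf[ring->next_to_clean];

		if (!buf->data)
			break;			/* nothing pending at this slot */

		if (buf->data != EXAMPLE_DUMMY_DESC)
			report(buf->data);	/* the driver also checks for MTK_TYPE_SKB here */

		example_unmap(dev, buf);	/* drops mapping, frees data, clears entry */
		ring->next_to_clean = (ring->next_to_clean + 1) % ring->size;
	}
}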