/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2020, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

/**
 * @file ice_iflib_txrx.c
 * @brief iflib Tx/Rx hotpath
 *
 * Main location for the iflib Tx/Rx hotpath implementation.
 *
 * Contains the implementation for the iflib function callbacks and the
 * if_txrx ops structure.
 */

#include "ice_iflib.h"

/* Tx/Rx hotpath utility functions */
#include "ice_common_txrx.h"

/*
 * iflib txrx method declarations
 */
static int ice_ift_txd_encap(void *arg, if_pkt_info_t pi);
static int ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri);
static void ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
/* Macro to help extract the NIC mode flexible Rx descriptor fields from the
 * advanced 32-byte Rx descriptors.
 */
#define RX_FLEX_NIC(desc, field) \
	(((struct ice_32b_rx_flex_desc_nic *)desc)->field)
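
/* For example, RX_FLEX_NIC(&desc->wb, rss_hash) reads the 32-bit RSS hash
 * field out of a writeback descriptor; see ice_ift_rxd_pkt_get() below.
 */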

/**
 * @var ice_txrx
 * @brief Tx/Rx operations for the iflib stack
 *
 * Structure defining the Tx and Rx related operations that iflib can request
 * the driver to perform. These are the main entry points for the hot path of
 * the transmit and receive paths in the iflib driver.
 */
struct if_txrx ice_txrx = {
	.ift_txd_encap = ice_ift_txd_encap,
	.ift_txd_flush = ice_ift_txd_flush,
	.ift_txd_credits_update = ice_ift_txd_credits_update,
	.ift_rxd_available = ice_ift_rxd_available,
	.ift_rxd_pkt_get = ice_ift_rxd_pkt_get,
	.ift_rxd_refill = ice_ift_rxd_refill,
	.ift_rxd_flush = ice_ift_rxd_flush,
};
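
/*
 * Note: iflib invokes these callbacks through the if_txrx pointer in the
 * driver's shared context; in this driver that hookup is expected to be
 * made during attach (e.g. by assigning &ice_txrx to the isc_txrx field).
 */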

/**
 * ice_ift_txd_encap - prepare Tx descriptors for a packet
 * @arg: the iflib softc structure pointer
 * @pi: packet info
 *
 * Prepares and encapsulates the given packet into Tx descriptors, in
 * preparation for sending to the transmit engine. Sets the necessary context
 * descriptors for TSO and other offloads, and prepares the last descriptor
 * for the writeback status.
 *
 * Returns 0 on success, or a non-zero error code on failure.
 */
static int
ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[pi->ipi_qsidx];
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	struct ice_tx_desc *txd = NULL;
	int i, j, mask, pidx_last;
	u32 cmd, off;

	cmd = off = 0;
	i = pi->ipi_pidx;

	/* Set up the TSO/CSUM offload */
	if (pi->ipi_csum_flags & ICE_CSUM_OFFLOAD) {
		/* Set up the TSO context descriptor if required */
		if (pi->ipi_csum_flags & CSUM_TSO) {
			if (ice_tso_detect_sparse(pi))
				return (EFBIG);
			i = ice_tso_setup(txq, pi);
		}
		ice_tx_setup_offload(txq, pi, &cmd, &off);
	}
	if (pi->ipi_mflags & M_VLANTAG)
		cmd |= ICE_TX_DESC_CMD_IL2TAG1;

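	/*
	 * Ring indexes wrap using a mask rather than a modulo; this relies
	 * on the descriptor count being a power of two.
	 */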
	mask = txq->desc_count - 1;
	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;

		txd = &txq->tx_base[i];
		seglen = segs[j].ds_len;

		txd->buf_addr = htole64(segs[j].ds_addr);
		txd->cmd_type_offset_bsz =
		    htole64(ICE_TX_DESC_DTYPE_DATA
		    | ((u64)cmd << ICE_TXD_QW1_CMD_S)
		    | ((u64)off << ICE_TXD_QW1_OFFSET_S)
		    | ((u64)seglen << ICE_TXD_QW1_TX_BUF_SZ_S)
		    | ((u64)htole16(pi->ipi_vtag) << ICE_TXD_QW1_L2TAG1_S));

		txq->stats.tx_bytes += seglen;
		pidx_last = i;
		i = (i+1) & mask;
	}

	/* Mark the last descriptor with EOP (end of packet) and RS (report
	 * status) so that hardware writes back completion status for the
	 * whole packet */
#define ICE_TXD_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
	txd->cmd_type_offset_bsz |=
	    htole64(((u64)ICE_TXD_CMD << ICE_TXD_QW1_CMD_S));

	/* Remember the position of the last descriptor, so that
	 * ice_ift_txd_credits_update() knows where to check for the
	 * descriptor writeback */
	txq->tx_rsq[txq->tx_rs_pidx] = pidx_last;
	txq->tx_rs_pidx = (txq->tx_rs_pidx+1) & mask;
	MPASS(txq->tx_rs_pidx != txq->tx_rs_cidx);

	pi->ipi_new_pidx = i;

	++txq->stats.tx_packets;
	return (0);
}

/**
 * ice_ift_txd_flush - Flush Tx descriptors to hardware
 * @arg: device specific softc pointer
 * @txqid: the Tx queue to flush
 * @pidx: descriptor index to advance tail to
 *
 * Advance the Transmit Descriptor Tail (TDT). This indicates to hardware that
 * frames are available for transmit.
 */
static void
ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
	struct ice_hw *hw = &sc->hw;

	wr32(hw, txq->tail, pidx);
}

/**
 * ice_ift_txd_credits_update - cleanup Tx descriptors
 * @arg: device private softc
 * @txqid: the Tx queue to update
 * @clear: if false, only report, do not actually clean
 *
 * If clear is false, iflib is asking if we *could* clean up any Tx
 * descriptors.
 *
 * If clear is true, iflib is requesting that we clean up and reclaim used Tx
 * descriptors.
 *
 * Returns the number of Tx descriptors cleaned when clear is true; when
 * clear is false, returns 1 if any descriptors could be cleaned, otherwise 0.
 */
static int
ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];

	qidx_t processed = 0;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	bool is_done;

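	/* If no descriptors are awaiting a report-status writeback, there is
	 * nothing to clean */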
	rs_cidx = txq->tx_rs_cidx;
	if (rs_cidx == txq->tx_rs_pidx)
		return (0);
	cur = txq->tx_rsq[rs_cidx];
	MPASS(cur != QIDX_INVALID);
	is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);

	if (!is_done)
		return (0);
	else if (clear == false)
		return (1);

	prev = txq->tx_cidx_processed;
	ntxd = txq->desc_count;
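	/*
	 * Walk the report-status array, accumulating the descriptor count
	 * between consecutive RS entries, until a descriptor that hardware
	 * has not finished with is found.
	 */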
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);
		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd-1);
		if (rs_cidx == txq->tx_rs_pidx)
			break;
		cur = txq->tx_rsq[rs_cidx];
		MPASS(cur != QIDX_INVALID);
		is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);
	} while (is_done);

	txq->tx_rs_cidx = rs_cidx;
	txq->tx_cidx_processed = prev;

	return (processed);
}

/**
 * ice_ift_rxd_available - Return number of available Rx packets
 * @arg: device private softc
 * @rxqid: the Rx queue id
 * @pidx: descriptor start point
 * @budget: maximum Rx budget
 *
 * Determines how many Rx packets are available on the queue, up to a maximum
 * of the given budget.
 */
static int
ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
	union ice_32b_rx_flex_desc *rxd;
	uint16_t status0;
	int cnt, i, nrxd;

	nrxd = rxq->desc_count;

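	/*
	 * Count whole packets: walk descriptors while the DD bit indicates
	 * a completed writeback, incrementing cnt only on EOF descriptors
	 * so that multi-buffer packets are counted once.
	 */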
	for (cnt = 0, i = pidx; cnt < nrxd - 1 && cnt < budget;) {
		rxd = &rxq->rx_base[i];
		status0 = le16toh(rxd->wb.status_error0);

		if ((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) == 0)
			break;
		if (++i == nrxd)
			i = 0;
		if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))
			cnt++;
	}

	return (cnt);
}

/**
 * ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer
 * @arg: device specific softc
 * @ri: receive packet info
 *
 * This function executes in ithread context. It is called by iflib to obtain
 * packet data which has been DMA'ed into host memory.
 * Returns zero on success, and an error code on failure.
 */
static int
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx];
	union ice_32b_rx_flex_desc *cur;
	u16 status0, plen, vtag, ptype;
	bool eop;
	size_t cidx;
	int i;

	cidx = ri->iri_cidx;
	i = 0;
	do {
		/* 5 descriptor receive limit */
		MPASS(i < ICE_MAX_RX_SEGS);

		cur = &rxq->rx_base[cidx];
		status0 = le16toh(cur->wb.status_error0);
		plen = le16toh(cur->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;
		ptype = le16toh(cur->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		/* we should never be called without a valid descriptor */
		MPASS((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) != 0);

		ri->iri_len += plen;

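		/* Clear the status bits so that a stale DD bit is not
		 * mistaken for a new completion on a later pass around the
		 * ring */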
		cur->wb.status_error0 = 0;
		eop = (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));
		if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
			vtag = le16toh(cur->wb.l2tag1);
		else
			vtag = 0;

		/*
		 * Make sure packets with bad L2 values are discarded.
		 * NOTE: Only the EOP descriptor has valid error results.
		 */
		if (eop && (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S))) {
			rxq->stats.desc_errs++;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = plen;
		if (++cidx == rxq->desc_count)
			cidx = 0;
		i++;
	} while (!eop);

	/* capture soft statistics for this Rx queue */
	rxq->stats.rx_packets++;
	rxq->stats.rx_bytes += ri->iri_len;

	if ((iflib_get_ifp(sc->ctx)->if_capenable & IFCAP_RXCSUM) != 0)
		ice_rx_checksum(rxq, &ri->iri_csum_flags,
				&ri->iri_csum_data, status0, ptype);
	ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash));
	ri->iri_rsstype = ice_ptype_to_hash(ptype);
	ri->iri_vtag = vtag;
	ri->iri_nfrags = i;
	if (vtag)
		ri->iri_flags |= M_VLANTAG;
	return (0);
}

/**
 * ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware
 * @arg: device specific softc structure
 * @iru: the Rx descriptor update structure
 *
 * Update the Rx descriptor indices for a given queue, assigning new physical
 * addresses to the descriptors, preparing them for re-use by the hardware.
 */
static void
ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq;
	uint32_t next_pidx;
	int i;
	uint64_t *paddrs;
	uint32_t pidx;
	uint16_t qsidx, count;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	qsidx = iru->iru_qsidx;
	count = iru->iru_count;

	rxq = &(sc->pf_vsi.rx_queues[qsidx]);

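	/*
	 * Program the new packet buffer addresses into the descriptors; the
	 * hardware tail is advanced separately, in ice_ift_rxd_flush().
	 */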
	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxq->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == (uint32_t)rxq->desc_count)
			next_pidx = 0;
	}
}

/**
 * ice_ift_rxd_flush - Flush Rx descriptors to hardware
 * @arg: device specific softc pointer
 * @rxqid: the Rx queue to flush
 * @flidx: unused parameter
 * @pidx: descriptor index to advance tail to
 *
 * Advance the Receive Descriptor Tail (RDT). This indicates to hardware that
 * software is done with the descriptor and it can be recycled.
 */
static void
ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
		  qidx_t pidx)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
	struct ice_hw *hw = &sc->hw;

	wr32(hw, rxq->tail, pidx);
}