1/*-
2 * Broadcom NetXtreme-C/E network driver.
3 *
4 * Copyright (c) 2016 Broadcom, All Rights Reserved.
5 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD$");
31
32#include <sys/types.h>
33#include <sys/socket.h>
34#include <sys/endian.h>
35#include <net/if.h>
36#include <net/if_var.h>
37#include <net/ethernet.h>
38#include <net/iflib.h>
39
40#include "opt_inet.h"
41#include "opt_inet6.h"
42#include "opt_rss.h"
43
44#include "bnxt.h"
45
46/*
47 * Function prototypes
48 */
49
50static int bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi);
51static void bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx);
52static int bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear);
53
54static void bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru);
55
56/*				uint16_t rxqid, uint8_t flid,
57    uint32_t pidx, uint64_t *paddrs, caddr_t *vaddrs, uint16_t count,
58    uint16_t buf_size);
59*/
60static void bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
61    qidx_t pidx);
62static int bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx,
63    qidx_t budget);
64static int bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri);
65
66static int bnxt_intr(void *sc);
67
68struct if_txrx bnxt_txrx  = {
69	.ift_txd_encap = bnxt_isc_txd_encap,
70	.ift_txd_flush = bnxt_isc_txd_flush,
71	.ift_txd_credits_update = bnxt_isc_txd_credits_update,
72	.ift_rxd_available = bnxt_isc_rxd_available,
73	.ift_rxd_pkt_get = bnxt_isc_rxd_pkt_get,
74	.ift_rxd_refill = bnxt_isc_rxd_refill,
75	.ift_rxd_flush = bnxt_isc_rxd_flush,
76	.ift_legacy_intr = bnxt_intr
77};
78
79/*
80 * Device Dependent Packet Transmit and Receive Functions
81 */
82
/*
 * TX length-hint flag values, indexed by packet length / 512
 * (pi->ipi_len >> 9) in bnxt_isc_txd_encap().  Packets of 2048 bytes
 * or more take TX_BD_SHORT_FLAGS_LHINT_GTE2K directly and never index
 * this table, so the final entry is defensive.
 */
static const uint16_t bnxt_tx_lhint[] = {
	TX_BD_SHORT_FLAGS_LHINT_LT512,	/* 0-511 bytes */
	TX_BD_SHORT_FLAGS_LHINT_LT1K,	/* 512-1023 bytes */
	TX_BD_SHORT_FLAGS_LHINT_LT2K,	/* 1024-1535 bytes */
	TX_BD_SHORT_FLAGS_LHINT_LT2K,	/* 1536-2047 bytes */
	TX_BD_SHORT_FLAGS_LHINT_GTE2K,	/* >= 2048 bytes (unreached; see above) */
};
90
/*
 * Encapsulate one iflib packet (pi) into TX BDs on the queue's ring.
 *
 * Writes one tx_bd_long per DMA segment, preceded (logically) by an
 * extra tx_bd_long_hi when offloads require it, then advances
 * pi->ipi_new_pidx one past the last descriptor consumed.  The BD
 * count and starting producer index are stashed in the first BD's
 * opaque field so bnxt_isc_txd_credits_update() can recover them from
 * the completion.  Always returns 0.
 */
static int
bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_ring *txr = &softc->tx_rings[pi->ipi_qsidx];
	struct tx_bd_long *tbd;
	struct tx_bd_long_hi *tbdh;
	bool need_hi = false;
	uint16_t flags_type;
	uint16_t lflags;
	uint32_t cfa_meta;
	int seg = 0;

	/* If we have offloads enabled, we need to use two BDs. */
	if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) ||
	    pi->ipi_mflags & M_VLANTAG)
		need_hi = true;

	/* TODO: Devices before Cu+B1 need to not mix long and short BDs */
	need_hi = true;	/* Unconditionally use long BDs until the above is resolved */

	pi->ipi_new_pidx = pi->ipi_pidx;
	tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
	pi->ipi_ndescs = 0;
	/* No need to byte-swap the opaque value */
	/* opaque = BD count in the top byte, starting pidx in the low bits */
	tbd->opaque = ((pi->ipi_nsegs + need_hi) << 24) | pi->ipi_new_pidx;
	tbd->len = htole16(pi->ipi_segs[seg].ds_len);
	tbd->addr = htole64(pi->ipi_segs[seg++].ds_addr);
	/* Total BD count (segments plus optional hi BD) for the hardware */
	flags_type = ((pi->ipi_nsegs + need_hi) <<
	    TX_BD_SHORT_FLAGS_BD_CNT_SFT) & TX_BD_SHORT_FLAGS_BD_CNT_MASK;
	/* Length hint helps the NIC size its DMA reads */
	if (pi->ipi_len >= 2048)
		flags_type |= TX_BD_SHORT_FLAGS_LHINT_GTE2K;
	else
		flags_type |= bnxt_tx_lhint[pi->ipi_len >> 9];

	if (need_hi) {
		flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;

		/* The hi BD immediately follows the first BD on the ring */
		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
		tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx];
		tbdh->mss = htole16(pi->ipi_tso_segsz);
		/* Header size is expressed in 16-bit words, hence >> 1 */
		tbdh->hdr_size = htole16((pi->ipi_ehdrlen + pi->ipi_ip_hlen +
		    pi->ipi_tcp_hlen) >> 1);
		tbdh->cfa_action = 0;
		lflags = 0;
		cfa_meta = 0;
		if (pi->ipi_mflags & M_VLANTAG) {
			/* TODO: Do we need to byte-swap the vtag here? */
			cfa_meta = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
			    pi->ipi_vtag;
			cfa_meta |= TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}
		tbdh->cfa_meta = htole32(cfa_meta);
		/* TSO implies checksum offload, so the cases are exclusive */
		if (pi->ipi_csum_flags & CSUM_TSO) {
			lflags |= TX_BD_LONG_LFLAGS_LSO |
			    TX_BD_LONG_LFLAGS_T_IPID;
		}
		else if(pi->ipi_csum_flags & CSUM_OFFLOAD) {
			lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM |
			    TX_BD_LONG_LFLAGS_IP_CHKSUM;
		}
		else if(pi->ipi_csum_flags & CSUM_IP) {
			lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
		}
		tbdh->lflags = htole16(lflags);
	}
	else {
		flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

	/*
	 * Remaining segments: commit the previous BD's flags_type, then
	 * fill the next BD.  flags_type for BDs after the first is just
	 * the short type with no BD count or length hint.
	 */
	for (; seg < pi->ipi_nsegs; seg++) {
		tbd->flags_type = htole16(flags_type);
		pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
		tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
		tbd->len = htole16(pi->ipi_segs[seg].ds_len);
		tbd->addr = htole64(pi->ipi_segs[seg].ds_addr);
		flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}
	/* Mark the final BD as the end of the packet */
	flags_type |= TX_BD_SHORT_FLAGS_PACKET_END;
	tbd->flags_type = htole16(flags_type);
	pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);

	return 0;
}
175
176static void
177bnxt_isc_txd_flush(void *sc, uint16_t txqid, qidx_t pidx)
178{
179	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
180	struct bnxt_ring *tx_ring = &softc->tx_rings[txqid];
181
182	/* pidx is what we last set ipi_new_pidx to */
183	BNXT_TX_DB(tx_ring, pidx);
184	/* TODO: Cumulus+ doesn't need the double doorbell */
185	BNXT_TX_DB(tx_ring, pidx);
186	return;
187}
188
/*
 * Count how many TX descriptors the hardware has completed.
 *
 * Walks the TX completion ring from the driver's current consumer
 * position, summing the per-packet BD counts that bnxt_isc_txd_encap()
 * stashed in the top byte of each completion's opaque field.  When
 * 'clear' is true, all valid completions are consumed and acknowledged
 * (consumer index / valid-bit phase committed and doorbell written);
 * otherwise the scan stops after the first completion, since iflib
 * only needs to know that at least one credit exists.
 *
 * Returns the number of TX BDs freed by the completions seen.
 */
static int
bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->tx_cp_rings[txqid];
	struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr;
	int avail = 0;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	bool last_v_bit;
	uint32_t last_cons;
	uint16_t type;
	uint16_t err;

	for (;;) {
		/* Remember the last fully-consumed position for the commit below */
		last_cons = cons;
		last_v_bit = v_bit;
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		CMPL_PREFETCH_NEXT(cpr, cons);

		/* Stop at the first entry the hardware hasn't written yet */
		if (!CMP_VALID(&cmpl[cons], v_bit))
			goto done;

		/*
		 * NOTE(review): flags_type is read without le16toh here,
		 * unlike errors_v below — harmless on little-endian;
		 * verify for big-endian targets.
		 */
		type = cmpl[cons].flags_type & TX_CMPL_TYPE_MASK;
		switch (type) {
		case TX_CMPL_TYPE_TX_L2:
			err = (le16toh(cmpl[cons].errors_v) &
			    TX_CMPL_ERRORS_BUFFER_ERROR_MASK) >>
			    TX_CMPL_ERRORS_BUFFER_ERROR_SFT;
			if (err)
				device_printf(softc->dev,
				    "TX completion error %u\n", err);
			/* No need to byte-swap the opaque value */
			/* Top byte of opaque is the BD count for this packet */
			avail += cmpl[cons].opaque >> 24;
			/*
			 * If we're not clearing, iflib only cares if there's
			 * at least one buffer.  Don't scan the whole ring in
			 * this case.
			 */
			if (!clear)
				goto done;
			break;
		default:
			/* Odd completion types occupy two ring entries */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				if (!CMP_VALID(&cmpl[cons], v_bit))
					goto done;
			}
			device_printf(softc->dev,
			    "Unhandled TX completion type %u\n", type);
			break;
		}
	}
done:

	/* Commit consumption and ack the ring only when actually clearing */
	if (clear && avail) {
		cpr->cons = last_cons;
		cpr->v_bit = last_v_bit;
		BNXT_CP_IDX_DISABLE_DB(&cpr->ring, cpr->cons);
	}

	return avail;
}
252
253static void
254bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
255{
256	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
257	struct bnxt_ring *rx_ring;
258	struct rx_prod_pkt_bd *rxbd;
259	uint16_t type;
260	uint16_t i;
261	uint16_t rxqid;
262	uint16_t count, len;
263	uint32_t pidx;
264	uint8_t flid;
265	uint64_t *paddrs;
266	qidx_t	*frag_idxs;
267
268	rxqid = iru->iru_qsidx;
269	count = iru->iru_count;
270	len = iru->iru_buf_size;
271	pidx = iru->iru_pidx;
272	flid = iru->iru_flidx;
273	paddrs = iru->iru_paddrs;
274	frag_idxs = iru->iru_idxs;
275
276	if (flid == 0) {
277		rx_ring = &softc->rx_rings[rxqid];
278		type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
279	}
280	else {
281		rx_ring = &softc->ag_rings[rxqid];
282		type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
283	}
284	rxbd = (void *)rx_ring->vaddr;
285
286	for (i=0; i<count; i++) {
287		rxbd[pidx].flags_type = htole16(type);
288		rxbd[pidx].len = htole16(len);
289		/* No need to byte-swap the opaque value */
290		rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16)
291		    | (frag_idxs[i]));
292		rxbd[pidx].addr = htole64(paddrs[i]);
293		if (++pidx == rx_ring->ring_size)
294			pidx = 0;
295	}
296	return;
297}
298
299static void
300bnxt_isc_rxd_flush(void *sc, uint16_t rxqid, uint8_t flid,
301    qidx_t pidx)
302{
303	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
304	struct bnxt_ring *rx_ring;
305
306	if (flid == 0)
307		rx_ring = &softc->rx_rings[rxqid];
308	else
309		rx_ring = &softc->ag_rings[rxqid];
310
311	/*
312	 * We *must* update the completion ring before updating the RX ring
313	 * or we will overrun the completion ring and the device will wedge for
314	 * RX.
315	 */
316	if (softc->rx_cp_rings[rxqid].cons != UINT32_MAX)
317		BNXT_CP_IDX_DISABLE_DB(&softc->rx_cp_rings[rxqid].ring,
318		    softc->rx_cp_rings[rxqid].cons);
319	BNXT_RX_DB(rx_ring, pidx);
320	/* TODO: Cumulus+ doesn't need the double doorbell */
321	BNXT_RX_DB(rx_ring, pidx);
322	return;
323}
324
/*
 * Count complete received packets waiting on the RX completion ring.
 *
 * Scans forward from the driver's consumer position without consuming
 * anything (cpr->cons / cpr->v_bit are not modified).  A packet only
 * counts toward 'avail' once every completion entry that belongs to it
 * — the second 16-byte half and one entry per aggregation buffer — is
 * valid, so bnxt_isc_rxd_pkt_get() can always consume a counted packet
 * without stalling mid-packet.  TPA-start and unknown completions are
 * skipped without incrementing 'avail'.
 *
 * Returns the number of complete packets seen (the 'idx' argument is
 * unused).  NOTE(review): the loop breaks on avail > budget, so up to
 * budget + 1 may be reported — confirm iflib tolerates this.
 */
static int
bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[rxqid];
	struct rx_pkt_cmpl *rcp;
	struct rx_tpa_end_cmpl *rtpae;
	struct cmpl_base *cmp = (struct cmpl_base *)cpr->ring.vaddr;
	int avail = 0;
	uint32_t cons = cpr->cons;
	bool v_bit = cpr->v_bit;
	uint8_t ags;
	int i;
	uint16_t type;

	for (;;) {
		NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
		CMPL_PREFETCH_NEXT(cpr, cons);

		if (!CMP_VALID(&cmp[cons], v_bit))
			goto cmpl_invalid;

		type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_RX_L2:
			/* L2 completion: two entries plus one per agg buffer */
			rcp = (void *)&cmp[cons];
			ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
			    RX_PKT_CMPL_AGG_BUFS_SFT;
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;

			/* Now account for all the AG completions */
			for (i=0; i<ags; i++) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);
				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			avail++;
			break;
		case CMPL_BASE_TYPE_RX_TPA_END:
			/* TPA end: same two-plus-aggs layout as L2 */
			rtpae = (void *)&cmp[cons];
			ags = (rtpae->agg_bufs_v1 &
			    RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
			    RX_TPA_END_CMPL_AGG_BUFS_SFT;
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;
			/* Now account for all the AG completions */
			for (i=0; i<ags; i++) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);
				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			avail++;
			break;
		case CMPL_BASE_TYPE_RX_TPA_START:
			/* TPA start: two entries, but no packet yet — not counted */
			NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
			CMPL_PREFETCH_NEXT(cpr, cons);

			if (!CMP_VALID(&cmp[cons], v_bit))
				goto cmpl_invalid;
			break;
		case CMPL_BASE_TYPE_RX_AGG:
			break;
		default:
			device_printf(softc->dev,
			    "Unhandled completion type %d on RXQ %d\n",
			    type, rxqid);

			/* Odd completion types use two completions */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
				CMPL_PREFETCH_NEXT(cpr, cons);

				if (!CMP_VALID(&cmp[cons], v_bit))
					goto cmpl_invalid;
			}
			break;
		}
		if (avail > budget)
			break;
	}
cmpl_invalid:

	return avail;
}
418
419static void
420bnxt_set_rsstype(if_rxd_info_t ri, uint8_t rss_hash_type)
421{
422	uint8_t rss_profile_id;
423
424	rss_profile_id = BNXT_GET_RSS_PROFILE_ID(rss_hash_type);
425	switch (rss_profile_id) {
426	case BNXT_RSS_HASH_TYPE_TCPV4:
427		ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV4;
428		break;
429	case BNXT_RSS_HASH_TYPE_UDPV4:
430		ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV4;
431		break;
432	case BNXT_RSS_HASH_TYPE_IPV4:
433		ri->iri_rsstype = M_HASHTYPE_RSS_IPV4;
434		break;
435	case BNXT_RSS_HASH_TYPE_TCPV6:
436		ri->iri_rsstype = M_HASHTYPE_RSS_TCP_IPV6;
437		break;
438	case BNXT_RSS_HASH_TYPE_UDPV6:
439		ri->iri_rsstype = M_HASHTYPE_RSS_UDP_IPV6;
440		break;
441	case BNXT_RSS_HASH_TYPE_IPV6:
442		ri->iri_rsstype = M_HASHTYPE_RSS_IPV6;
443		break;
444	default:
445		ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH;
446		break;
447	}
448}
449
/*
 * Fill in the iflib rxd_info (ri) from an L2 RX completion.
 *
 * Reads the first 16-byte completion at cpr->cons (RSS hash, first
 * fragment, length), advances to the second 16-byte half for VLAN and
 * checksum status, then consumes one additional completion entry per
 * aggregation buffer, appending each as a fragment.  Both cpr->cons /
 * cpr->v_bit and ri->iri_cidx are advanced in lockstep.  Returns 0.
 */
static int
bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
	struct rx_pkt_cmpl *rcp;
	struct rx_pkt_cmpl_hi *rcph;
	struct rx_abuf_cmpl *acp;
	uint32_t flags2;
	uint32_t errors;
	uint8_t	ags;
	int i;

	rcp = &((struct rx_pkt_cmpl *)cpr->ring.vaddr)[cpr->cons];

	/* Extract from the first 16-byte BD */
	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		ri->iri_flowid = le32toh(rcp->rss_hash);
		bnxt_set_rsstype(ri, rcp->rss_hash_type);
	}
	else {
		ri->iri_rsstype = M_HASHTYPE_NONE;
	}
	ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
	    RX_PKT_CMPL_AGG_BUFS_SFT;
	/* First fragment plus one fragment per aggregation buffer */
	ri->iri_nfrags = ags + 1;
	/* No need to byte-swap the opaque value */
	/* opaque encodes flid (bits 16-23) and frag index (low 16 bits) */
	ri->iri_frags[0].irf_flid = (rcp->opaque >> 16) & 0xff;
	ri->iri_frags[0].irf_idx = rcp->opaque & 0xffff;
	ri->iri_frags[0].irf_len = le16toh(rcp->len);
	ri->iri_len = le16toh(rcp->len);

	/* Now the second 16-byte BD */
	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
	rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];

	flags2 = le32toh(rcph->flags2);
	errors = le16toh(rcph->errors_v2);
	if ((flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		ri->iri_flags |= M_VLANTAG;
		/* TODO: Should this be the entire 16-bits? */
		ri->iri_vtag = le32toh(rcph->metadata) &
		    (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE |
		    RX_PKT_CMPL_METADATA_PRI_MASK);
	}
	/* Report IP checksum status if the hardware calculated it */
	if (flags2 & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
			ri->iri_csum_flags |= CSUM_IP_VALID;
	}
	/* L4 checksum covers both outer and tunneled (T_) cases */
	if (flags2 & (RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
		      RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)) {
		ri->iri_csum_flags |= CSUM_L4_CALC;
		if (!(errors & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
				RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))) {
			ri->iri_csum_flags |= CSUM_L4_VALID;
			ri->iri_csum_data = 0xffff;
		}
	}

	/* And finally the ag ring stuff. */
	for (i=1; i < ri->iri_nfrags; i++) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

		/* No need to byte-swap the opaque value */
		ri->iri_frags[i].irf_flid = (acp->opaque >> 16 & 0xff);
		ri->iri_frags[i].irf_idx = acp->opaque & 0xffff;
		ri->iri_frags[i].irf_len = le16toh(acp->len);
		ri->iri_len += le16toh(acp->len);
	}

	return 0;
}
526
/*
 * Fill in the iflib rxd_info (ri) from a TPA-end (LRO) completion.
 *
 * The RSS, VLAN, and checksum metadata come from the TPA-start
 * completion pair that bnxt_isc_rxd_pkt_get() previously saved in the
 * queue's tpa_start[agg_id] slot; this function combines that saved
 * state with the aggregation buffers listed after the TPA-end entry,
 * then appends the TPA-end completion's own buffer as the final
 * fragment.  cpr->cons / cpr->v_bit and ri->iri_cidx advance in
 * lockstep.  Returns 0.
 */
static int
bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
    struct bnxt_cp_ring *cpr, uint16_t flags_type)
{
	struct rx_tpa_end_cmpl *agend =
	    &((struct rx_tpa_end_cmpl *)cpr->ring.vaddr)[cpr->cons];
	struct rx_abuf_cmpl *acp;
	struct bnxt_full_tpa_start *tpas;
	uint32_t flags2;
	uint8_t	ags;
	uint8_t agg_id;
	int i;

	/* Get the agg_id */
	agg_id = (agend->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK) >>
	    RX_TPA_END_CMPL_AGG_ID_SFT;
	/* Saved TPA-start state for this aggregation stream */
	tpas = &(softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id]);

	/* Extract from the first 16-byte BD */
	if (le16toh(tpas->low.flags_type) & RX_TPA_START_CMPL_FLAGS_RSS_VALID) {
		ri->iri_flowid = le32toh(tpas->low.rss_hash);
		bnxt_set_rsstype(ri, tpas->low.rss_hash_type);
	}
	else {
		ri->iri_rsstype = M_HASHTYPE_NONE;
	}
	ags = (agend->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
	    RX_TPA_END_CMPL_AGG_BUFS_SFT;
	ri->iri_nfrags = ags + 1;
	/* No need to byte-swap the opaque value */
	/* opaque encodes flid (bits 16-23) and frag index (low 16 bits) */
	ri->iri_frags[0].irf_flid = ((tpas->low.opaque >> 16) & 0xff);
	ri->iri_frags[0].irf_idx = (tpas->low.opaque & 0xffff);
	ri->iri_frags[0].irf_len = le16toh(tpas->low.len);
	ri->iri_len = le16toh(tpas->low.len);

	/* Now the second 16-byte BD */
	NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
	ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);

	flags2 = le32toh(tpas->high.flags2);
	if ((flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN) {
		ri->iri_flags |= M_VLANTAG;
		/* TODO: Should this be the entire 16-bits? */
		ri->iri_vtag = le32toh(tpas->high.metadata) &
		    (RX_TPA_START_CMPL_METADATA_VID_MASK |
		    RX_TPA_START_CMPL_METADATA_DE |
		    RX_TPA_START_CMPL_METADATA_PRI_MASK);
	}
	/* TPA packets are marked valid unconditionally once calc'd */
	if (flags2 & RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC) {
		ri->iri_csum_flags |= CSUM_IP_CHECKED;
		ri->iri_csum_flags |= CSUM_IP_VALID;
	}
	if (flags2 & RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC) {
		ri->iri_csum_flags |= CSUM_L4_CALC;
		ri->iri_csum_flags |= CSUM_L4_VALID;
		ri->iri_csum_data = 0xffff;
	}

	/* Now the ag ring stuff. */
	for (i=1; i < ri->iri_nfrags; i++) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];

		/* No need to byte-swap the opaque value */
		ri->iri_frags[i].irf_flid = ((acp->opaque >> 16) & 0xff);
		ri->iri_frags[i].irf_idx = (acp->opaque & 0xffff);
		ri->iri_frags[i].irf_len = le16toh(acp->len);
		ri->iri_len += le16toh(acp->len);
	}

	/* And finally, the empty BD at the end... */
	/* (i still indexes one past the agg fragments here) */
	ri->iri_nfrags++;
	/* No need to byte-swap the opaque value */
	ri->iri_frags[i].irf_flid = ((agend->opaque >> 16) & 0xff);
	ri->iri_frags[i].irf_idx = (agend->opaque & 0xffff);
	ri->iri_frags[i].irf_len = le16toh(agend->len);
	ri->iri_len += le16toh(agend->len);

	return 0;
}
609
/* If we return anything but zero, iflib will assert... */
/*
 * Consume completions for exactly one received packet and describe it
 * in ri.
 *
 * Advances cpr->cons / cpr->v_bit and ri->iri_cidx over each
 * completion entry, dispatching on the completion type: L2 and
 * TPA-end completions terminate the loop via bnxt_pkt_get_l2() /
 * bnxt_pkt_get_tpa(); TPA-start pairs are saved into the queue's
 * tpa_start[] array for the later TPA-end; unknown types are logged
 * and skipped.  bnxt_isc_rxd_available() guarantees a complete packet
 * is present, so the loop always reaches an L2 or TPA-end entry.
 */
static int
bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
{
	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
	struct bnxt_cp_ring *cpr = &softc->rx_cp_rings[ri->iri_qsidx];
	struct cmpl_base *cmp_q = (struct cmpl_base *)cpr->ring.vaddr;
	struct cmpl_base *cmp;
	struct rx_tpa_start_cmpl *rtpa;
	uint16_t flags_type;
	uint16_t type;
	uint8_t agg_id;

	for (;;) {
		NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
		CMPL_PREFETCH_NEXT(cpr, cpr->cons);
		cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons];

		flags_type = le16toh(cmp->type);
		type = flags_type & CMPL_BASE_TYPE_MASK;

		switch (type) {
		case CMPL_BASE_TYPE_RX_L2:
			return bnxt_pkt_get_l2(softc, ri, cpr, flags_type);
		case CMPL_BASE_TYPE_RX_TPA_END:
			return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type);
		case CMPL_BASE_TYPE_RX_TPA_START:
			/* Stash both halves of the TPA-start for the TPA-end */
			rtpa = (void *)&cmp_q[cpr->cons];
			agg_id = (rtpa->agg_id &
			    RX_TPA_START_CMPL_AGG_ID_MASK) >>
			    RX_TPA_START_CMPL_AGG_ID_SFT;
			softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low = *rtpa;

			NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
			ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
			CMPL_PREFETCH_NEXT(cpr, cpr->cons);

			softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].high =
			    ((struct rx_tpa_start_cmpl_hi *)cmp_q)[cpr->cons];
			break;
		default:
			device_printf(softc->dev,
			    "Unhandled completion type %d on RXQ %d get\n",
			    type, ri->iri_qsidx);
			/* Odd completion types occupy two ring entries */
			if (type & 1) {
				NEXT_CP_CONS_V(&cpr->ring, cpr->cons,
				    cpr->v_bit);
				ri->iri_cidx = RING_NEXT(&cpr->ring,
				    ri->iri_cidx);
				CMPL_PREFETCH_NEXT(cpr, cpr->cons);
			}
			break;
		}
	}

	/* Not reached: the loop only exits via the returns above */
	return 0;
}
668
669static int
670bnxt_intr(void *sc)
671{
672	struct bnxt_softc *softc = (struct bnxt_softc *)sc;
673
674	device_printf(softc->dev, "STUB: %s @ %s:%d\n", __func__, __FILE__, __LINE__);
675	return ENOSYS;
676}
677