/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016 Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */
#include "if_em.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

#ifdef VERBOSE_DEBUG
#define DPRINTF device_printf
#else
#define DPRINTF(...)
#endif

/*********************************************************************
 *  Local Function prototypes
 *********************************************************************/
static int igb_isc_txd_encap(void *arg, if_pkt_info_t pi);
static void igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);

static void igb_isc_rxd_refill(void *arg, if_rxd_update_t iru);

static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
    qidx_t pidx);
static int igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
    qidx_t budget);

static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);

static int igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
    uint32_t *cmd_type_len, uint32_t *olinfo_status);
static int igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi,
    uint32_t *cmd_type_len, uint32_t *olinfo_status);

static void igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype);
static int igb_determine_rsstype(uint16_t pkt_info);

extern void igb_if_enable_intr(if_ctx_t ctx);
extern int em_intr(void *arg);

struct if_txrx igb_txrx = {
	.ift_txd_encap = igb_isc_txd_encap,
	.ift_txd_flush = igb_isc_txd_flush,
	.ift_txd_credits_update = igb_isc_txd_credits_update,
	.ift_rxd_available = igb_isc_rxd_available,
	.ift_rxd_pkt_get = igb_isc_rxd_pkt_get,
	.ift_rxd_refill = igb_isc_rxd_refill,
	.ift_rxd_flush = igb_isc_rxd_flush,
	.ift_legacy_intr = em_intr
};
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO) on
 *  adapters using advanced TX descriptors
 *
 **********************************************************************/
static int
igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
    uint32_t *olinfo_status)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct e1000_softc *sc = txr->sc;
	uint32_t type_tucmd_mlhl = 0, vlan_macip_lens = 0;
	uint32_t mss_l4len_idx = 0;
	uint32_t paylen;

	switch (pi->ipi_etype) {
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		/* Tell the transmit descriptor to also do the IPv4 checksum. */
		*olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		break;
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		      __func__, ntohs(pi->ipi_etype));
		break;
	}

	TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];

	/* The TSO payload length, used by encap in the transmit descriptor */
	paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= pi->ipi_ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (pi->ipi_tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (pi->ipi_tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->u.seqnum_seed = htole32(0);
	*cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
	*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	return (1);
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/
static int
igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
    uint32_t *olinfo_status)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct e1000_softc *sc = txr->sc;
	uint32_t vlan_macip_lens, type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
	mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;

	/* First check if TSO is to be used */
	if (pi->ipi_csum_flags & CSUM_TSO)
		return (igb_tso_setup(txr, pi, cmd_type_len, olinfo_status));

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the context descriptor. Hence
	** we need to make one even if not doing offloads.
	*/
	if (pi->ipi_mflags & M_VLANTAG) {
		vlan_macip_lens |= (pi->ipi_vtag << E1000_ADVTXD_VLAN_SHIFT);
	} else if ((pi->ipi_csum_flags & IGB_CSUM_OFFLOAD) == 0) {
		return (0);
	}

	/* Set the ether header length */
	vlan_macip_lens |= pi->ipi_ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

	switch (pi->ipi_etype) {
	case ETHERTYPE_IP:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	default:
		break;
	}

	vlan_macip_lens |= pi->ipi_ip_hlen;
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	switch (pi->ipi_ipproto) {
	case IPPROTO_TCP:
		if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_UDP:
		if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
#ifndef __HAIKU__
	case IPPROTO_SCTP:
		if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) {
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
#endif
	default:
		break;
	}

	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->u.seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	return (1);
}

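/*********************************************************************
 *
 *  Map a packet's DMA segments onto advanced TX descriptors,
 *  preceded by a context descriptor when VLAN tagging, checksum
 *  offload or TSO requires one.
 *
 **********************************************************************/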
static int
igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
	struct tx_ring *txr = &que->txr;
	int nsegs = pi->ipi_nsegs;
	bus_dma_segment_t *segs = pi->ipi_segs;
	union e1000_adv_tx_desc *txd = NULL;
	int i, j, pidx_last;
	uint32_t olinfo_status, cmd_type_len, txd_flags;
	qidx_t ntxd;

	pidx_last = olinfo_status = 0;
	/* Basic descriptor defines */
	cmd_type_len = (E1000_ADVTXD_DTYP_DATA |
			E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);

	if (pi->ipi_mflags & M_VLANTAG)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	i = pi->ipi_pidx;
	ntxd = scctx->isc_ntxd[0];
	txd_flags = pi->ipi_flags & IPI_TX_INTR ? E1000_ADVTXD_DCMD_RS : 0;
	/* Consume the first descriptor */
	i += igb_tx_ctx_setup(txr, pi, &cmd_type_len, &olinfo_status);
	if (i == scctx->isc_ntxd[0])
		i = 0;

	/* 82575 needs the queue index added */
	if (sc->hw.mac.type == e1000_82575)
		olinfo_status |= txr->me << 4;

	for (j = 0; j < nsegs; j++) {
		bus_size_t seglen;
		bus_addr_t segaddr;

		txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
		seglen = segs[j].ds_len;
		segaddr = htole64(segs[j].ds_addr);

		txd->read.buffer_addr = segaddr;
		txd->read.cmd_type_len = htole32(E1000_TXD_CMD_IFCS |
		    cmd_type_len | seglen);
		txd->read.olinfo_status = htole32(olinfo_status);
		pidx_last = i;
		if (++i == scctx->isc_ntxd[0]) {
			i = 0;
		}
	}
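	/*
	 * Report Status was requested: record the index of the last
	 * descriptor in the report-status queue so credits_update can
	 * find it (ring sizes are powers of two, hence the mask).
	 */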
	if (txd_flags) {
		txr->tx_rsq[txr->tx_rs_pidx] = pidx_last;
		txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1);
		MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx);
	}

	txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | txd_flags);
	pi->ipi_new_pidx = i;

	return (0);
}

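/*********************************************************************
 *
 *  Advance the Transmit Descriptor Tail (TDT), telling the hardware
 *  that frames are available to transmit.
 *
 **********************************************************************/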
static void
igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
	struct e1000_softc *sc	= arg;
	struct em_tx_queue *que	= &sc->tx_queues[txqid];
	struct tx_ring *txr	= &que->txr;

	E1000_WRITE_REG(&sc->hw, E1000_TDT(txr->me), pidx);
}

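/*********************************************************************
 *
 *  Walk the report-status queue, counting descriptors the hardware
 *  has marked done (DD); with clear false, only report whether any
 *  completed descriptors are pending reclamation.
 *
 **********************************************************************/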
static int
igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_tx_queue *que = &sc->tx_queues[txqid];
	struct tx_ring *txr = &que->txr;

	qidx_t processed = 0;
	int updated;
	qidx_t cur, prev, ntxd, rs_cidx;
	int32_t delta;
	uint8_t status;

	rs_cidx = txr->tx_rs_cidx;
	if (rs_cidx == txr->tx_rs_pidx)
		return (0);
	cur = txr->tx_rsq[rs_cidx];
	status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
	updated = !!(status & E1000_TXD_STAT_DD);

	if (!updated)
		return (0);

	/* If clear is false, just let the caller know that there
	 * are descriptors to reclaim */
	if (!clear)
		return (1);

	prev = txr->tx_cidx_processed;
	ntxd = scctx->isc_ntxd[0];
	do {
		MPASS(prev != cur);
		delta = (int32_t)cur - (int32_t)prev;
		if (delta < 0)
			delta += ntxd;
		MPASS(delta > 0);

		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (ntxd - 1);
		if (rs_cidx == txr->tx_rs_pidx)
			break;
		cur = txr->tx_rsq[rs_cidx];
		status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
	} while ((status & E1000_TXD_STAT_DD));

	txr->tx_rs_cidx = rs_cidx;
	txr->tx_cidx_processed = prev;
	return (processed);
}

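/*********************************************************************
 *
 *  Supply fresh receive buffers to the hardware by writing their
 *  physical addresses into the RX descriptor ring.
 *
 **********************************************************************/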
static void
igb_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	uint16_t rxqid = iru->iru_qsidx;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	union e1000_adv_rx_desc *rxd;
	struct rx_ring *rxr = &que->rxr;
	uint64_t *paddrs;
	uint32_t next_pidx, pidx;
	uint16_t count;
	int i;

	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;
	count = iru->iru_count;

	for (i = 0, next_pidx = pidx; i < count; i++) {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[next_pidx];

		rxd->read.pkt_addr = htole64(paddrs[i]);
		if (++next_pidx == scctx->isc_nrxd[0])
			next_pidx = 0;
	}
}

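/*********************************************************************
 *
 *  Advance the Receive Descriptor Tail (RDT), handing newly
 *  refilled buffers to the hardware.
 *
 **********************************************************************/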
static void
igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
{
	struct e1000_softc *sc = arg;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;

	E1000_WRITE_REG(&sc->hw, E1000_RDT(rxr->me), pidx);
}

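/*********************************************************************
 *
 *  Report how many complete packets (descriptors with DD set,
 *  each packet ending at an EOP descriptor) are waiting in the
 *  ring, stopping once the budget is reached.
 *
 **********************************************************************/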
static int
igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[rxqid];
	struct rx_ring *rxr = &que->rxr;
	union e1000_adv_rx_desc *rxd;
	uint32_t staterr = 0;
	int cnt, i;

	for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[i];
		staterr = le32toh(rxd->wb.upper.status_error);

		if ((staterr & E1000_RXD_STAT_DD) == 0)
			break;
		if (++i == scctx->isc_nrxd[0])
			i = 0;
		if (staterr & E1000_RXD_STAT_EOP)
			cnt++;
	}
	return (cnt);
}

/****************************************************************
 * This routine passes data that has been DMA'ed into host memory
 * up to the stack, initializing the ri structure as it goes.
 *
 * Returns 0 upon success, errno on failure
 ***************************************************************/

static int
igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
	struct e1000_softc *sc = arg;
	if_softc_ctx_t scctx = sc->shared;
	struct em_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
	struct rx_ring *rxr = &que->rxr;
	union e1000_adv_rx_desc *rxd;

	uint16_t pkt_info, len;
	uint32_t ptype, staterr;
	int i, cidx;
	bool eop;

	staterr = i = 0;
	cidx = ri->iri_cidx;

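	/*
	 * A packet may span several descriptors; gather each fragment
	 * until the descriptor with EOP (end of packet) is reached.
	 */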
	do {
		rxd = (union e1000_adv_rx_desc *)&rxr->rx_base[cidx];
		staterr = le32toh(rxd->wb.upper.status_error);
		pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info);

		MPASS((staterr & E1000_RXD_STAT_DD) != 0);

		len = le16toh(rxd->wb.upper.length);
		ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;

		ri->iri_len += len;
		rxr->rx_bytes += ri->iri_len;

		rxd->wb.upper.status_error = 0;
		eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);

		/* Make sure bad packets are discarded */
		if (eop && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
			sc->dropped_pkts++;
			++rxr->rx_discarded;
			return (EBADMSG);
		}
		ri->iri_frags[i].irf_flid = 0;
		ri->iri_frags[i].irf_idx = cidx;
		ri->iri_frags[i].irf_len = len;

		if (++cidx == scctx->isc_nrxd[0])
			cidx = 0;
#ifdef notyet
		if (rxr->hdr_split == true) {
			ri->iri_frags[i].irf_flid = 1;
			ri->iri_frags[i].irf_idx = cidx;
			if (++cidx == scctx->isc_nrxd[0])
				cidx = 0;
		}
#endif
		i++;
	} while (!eop);

	rxr->rx_packets++;

	if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
		igb_rx_checksum(staterr, ri, ptype);

	if (staterr & E1000_RXD_STAT_VP) {
		if (((sc->hw.mac.type == e1000_i350) ||
		    (sc->hw.mac.type == e1000_i354)) &&
		    (staterr & E1000_RXDEXT_STATERR_LB))
			ri->iri_vtag = be16toh(rxd->wb.upper.vlan);
		else
			ri->iri_vtag = le16toh(rxd->wb.upper.vlan);
		ri->iri_flags |= M_VLANTAG;
	}

	ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss);
	ri->iri_rsstype = igb_determine_rsstype(pkt_info);
	ri->iri_nfrags = i;

	return (0);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack of the checksum status so that it doesn't
 *  spend time verifying the checksum again.
 *
 *********************************************************************/
static void
igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
{
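	/* Status bits live in the low word of staterr; error bits in 31:24. */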
	uint16_t status = (uint16_t)staterr;
	uint8_t errors = (uint8_t)(staterr >> 24);

	if (__predict_false(status & E1000_RXD_STAT_IXSM))
		return;

	/* If there is a layer 3 or 4 error we are done */
	if (__predict_false(errors & (E1000_RXD_ERR_IPE | E1000_RXD_ERR_TCPE)))
		return;

	/* IP Checksum Good */
	if (status & E1000_RXD_STAT_IPCS)
		ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);

	/* Valid L4E checksum */
	if (__predict_true(status &
	    (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) {
		/* SCTP header present */
		if (__predict_false((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
		    (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)) {
			ri->iri_csum_flags |= CSUM_SCTP_VALID;
		} else {
			ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			ri->iri_csum_data = htons(0xffff);
		}
	}
}

/********************************************************************
 *
 *  Parse the packet type to determine the appropriate hash
 *
 ******************************************************************/
static int
igb_determine_rsstype(uint16_t pkt_info)
{
	switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		return M_HASHTYPE_RSS_TCP_IPV4;
	case E1000_RXDADV_RSSTYPE_IPV4:
		return M_HASHTYPE_RSS_IPV4;
	case E1000_RXDADV_RSSTYPE_IPV6_TCP:
		return M_HASHTYPE_RSS_TCP_IPV6;
	case E1000_RXDADV_RSSTYPE_IPV6_EX:
		return M_HASHTYPE_RSS_IPV6_EX;
	case E1000_RXDADV_RSSTYPE_IPV6:
		return M_HASHTYPE_RSS_IPV6;
	case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
		return M_HASHTYPE_RSS_TCP_IPV6_EX;
	default:
		return M_HASHTYPE_OPAQUE;
	}
}