/*	$OpenBSD: if_ixl.c,v 1.101 2024/05/24 06:02:53 jsg Exp $ */

/*
 * Copyright (c) 2013-2015, Intel Corporation
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "kstat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/task.h>
#include <sys/syslog.h>
#include <sys/intrmap.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/route.h>
#include <net/toeplitz.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NKSTAT > 0
#include <sys/kstat.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#endif

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64
#endif

#define IXL_MAX_VECTORS			8 /* XXX this is pretty arbitrary */

#define I40E_MASK(mask, shift)		((mask) << (shift))
#define I40E_PF_RESET_WAIT_COUNT	200
#define I40E_AQ_LARGE_BUF		512

/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE		0x0
#define I40E_QTX_CTL_VM_QUEUE		0x1
#define I40E_QTX_CTL_PF_QUEUE		0x2

#define I40E_QUEUE_TYPE_EOL		0x7ff
#define I40E_INTR_NOTX_QUEUE		0

#define I40E_QUEUE_TYPE_RX		0x0
#define I40E_QUEUE_TYPE_TX		0x1
#define I40E_QUEUE_TYPE_PE_CEQ		0x2
#define I40E_QUEUE_TYPE_UNKNOWN		0x3

#define I40E_ITR_INDEX_RX		0x0
#define I40E_ITR_INDEX_TX		0x1
#define I40E_ITR_INDEX_OTHER		0x2
#define I40E_ITR_INDEX_NONE		0x3

#include <dev/pci/if_ixlreg.h>

#define I40E_INTR_NOTX_QUEUE		0
#define I40E_INTR_NOTX_INTR		0
#define I40E_INTR_NOTX_RX_QUEUE		0
#define I40E_INTR_NOTX_TX_QUEUE		1
#define I40E_INTR_NOTX_RX_MASK		I40E_PFINT_ICR0_QUEUE_0_MASK
#define I40E_INTR_NOTX_TX_MASK		I40E_PFINT_ICR0_QUEUE_1_MASK

struct ixl_aq_desc {
	uint16_t	iaq_flags;
#define	IXL_AQ_DD		(1U << 0)
#define	IXL_AQ_CMP		(1U << 1)
#define IXL_AQ_ERR		(1U << 2)
#define IXL_AQ_VFE		(1U << 3)
#define IXL_AQ_LB		(1U << 9)
#define IXL_AQ_RD		(1U << 10)
#define IXL_AQ_VFC		(1U << 11)
#define IXL_AQ_BUF		(1U << 12)
#define IXL_AQ_SI		(1U << 13)
#define IXL_AQ_EI		(1U << 14)
#define IXL_AQ_FE		(1U << 15)

#define IXL_AQ_FLAGS_FMT	"\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
				    "\014VFC" "\013RD" "\012LB" "\004VFE" \
				    "\003ERR" "\002CMP" "\001DD"
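
/*
 * IXL_AQ_FLAGS_FMT is a kernel printf("%b") format string: the
 * leading "\020" prints the value in hex, and each "\NNN" that
 * follows gives the (1-based, octal) bit number of the flag whose
 * name comes after it, matching the IXL_AQ_* bits above.
 */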

	uint16_t	iaq_opcode;

	uint16_t	iaq_datalen;
	uint16_t	iaq_retval;

	uint64_t	iaq_cookie;

	uint32_t	iaq_param[4];
/*	iaq_data_hi	iaq_param[2] */
/*	iaq_data_lo	iaq_param[3] */
} __packed __aligned(8);

/* aq commands */
#define IXL_AQ_OP_GET_VERSION		0x0001
#define IXL_AQ_OP_DRIVER_VERSION	0x0002
#define IXL_AQ_OP_QUEUE_SHUTDOWN	0x0003
#define IXL_AQ_OP_SET_PF_CONTEXT	0x0004
#define IXL_AQ_OP_GET_AQ_ERR_REASON	0x0005
#define IXL_AQ_OP_REQUEST_RESOURCE	0x0008
#define IXL_AQ_OP_RELEASE_RESOURCE	0x0009
#define IXL_AQ_OP_LIST_FUNC_CAP		0x000a
#define IXL_AQ_OP_LIST_DEV_CAP		0x000b
#define IXL_AQ_OP_MAC_ADDRESS_READ	0x0107
#define IXL_AQ_OP_CLEAR_PXE_MODE	0x0110
#define IXL_AQ_OP_SWITCH_GET_CONFIG	0x0200
#define IXL_AQ_OP_RX_CTL_READ		0x0206
#define IXL_AQ_OP_RX_CTL_WRITE		0x0207
#define IXL_AQ_OP_ADD_VSI		0x0210
#define IXL_AQ_OP_UPD_VSI_PARAMS	0x0211
#define IXL_AQ_OP_GET_VSI_PARAMS	0x0212
#define IXL_AQ_OP_ADD_VEB		0x0230
#define IXL_AQ_OP_UPD_VEB_PARAMS	0x0231
#define IXL_AQ_OP_GET_VEB_PARAMS	0x0232
#define IXL_AQ_OP_ADD_MACVLAN		0x0250
#define IXL_AQ_OP_REMOVE_MACVLAN	0x0251
#define IXL_AQ_OP_SET_VSI_PROMISC	0x0254
#define IXL_AQ_OP_PHY_GET_ABILITIES	0x0600
#define IXL_AQ_OP_PHY_SET_CONFIG	0x0601
#define IXL_AQ_OP_PHY_SET_MAC_CONFIG	0x0603
#define IXL_AQ_OP_PHY_RESTART_AN	0x0605
#define IXL_AQ_OP_PHY_LINK_STATUS	0x0607
#define IXL_AQ_OP_PHY_SET_EVENT_MASK	0x0613
#define IXL_AQ_OP_PHY_SET_REGISTER	0x0628
#define IXL_AQ_OP_PHY_GET_REGISTER	0x0629
#define IXL_AQ_OP_LLDP_GET_MIB		0x0a00
#define IXL_AQ_OP_LLDP_MIB_CHG_EV	0x0a01
#define IXL_AQ_OP_LLDP_ADD_TLV		0x0a02
#define IXL_AQ_OP_LLDP_UPD_TLV		0x0a03
#define IXL_AQ_OP_LLDP_DEL_TLV		0x0a04
#define IXL_AQ_OP_LLDP_STOP_AGENT	0x0a05
#define IXL_AQ_OP_LLDP_START_AGENT	0x0a06
#define IXL_AQ_OP_LLDP_GET_CEE_DCBX	0x0a07
#define IXL_AQ_OP_LLDP_SPECIFIC_AGENT	0x0a09
#define IXL_AQ_OP_SET_RSS_KEY		0x0b02 /* 722 only */
#define IXL_AQ_OP_SET_RSS_LUT		0x0b03 /* 722 only */
#define IXL_AQ_OP_GET_RSS_KEY		0x0b04 /* 722 only */
#define IXL_AQ_OP_GET_RSS_LUT		0x0b05 /* 722 only */

struct ixl_aq_mac_addresses {
	uint8_t		pf_lan[ETHER_ADDR_LEN];
	uint8_t		pf_san[ETHER_ADDR_LEN];
	uint8_t		port[ETHER_ADDR_LEN];
	uint8_t		pf_wol[ETHER_ADDR_LEN];
} __packed;

#define IXL_AQ_MAC_PF_LAN_VALID		(1U << 4)
#define IXL_AQ_MAC_PF_SAN_VALID		(1U << 5)
#define IXL_AQ_MAC_PORT_VALID		(1U << 6)
#define IXL_AQ_MAC_PF_WOL_VALID		(1U << 7)

struct ixl_aq_capability {
	uint16_t	cap_id;
#define IXL_AQ_CAP_SWITCH_MODE		0x0001
#define IXL_AQ_CAP_MNG_MODE		0x0002
#define IXL_AQ_CAP_NPAR_ACTIVE		0x0003
#define IXL_AQ_CAP_OS2BMC_CAP		0x0004
#define IXL_AQ_CAP_FUNCTIONS_VALID	0x0005
#define IXL_AQ_CAP_ALTERNATE_RAM	0x0006
#define IXL_AQ_CAP_WOL_AND_PROXY	0x0008
#define IXL_AQ_CAP_SRIOV		0x0012
#define IXL_AQ_CAP_VF			0x0013
#define IXL_AQ_CAP_VMDQ			0x0014
#define IXL_AQ_CAP_8021QBG		0x0015
#define IXL_AQ_CAP_8021QBR		0x0016
#define IXL_AQ_CAP_VSI			0x0017
#define IXL_AQ_CAP_DCB			0x0018
#define IXL_AQ_CAP_FCOE			0x0021
#define IXL_AQ_CAP_ISCSI		0x0022
#define IXL_AQ_CAP_RSS			0x0040
#define IXL_AQ_CAP_RXQ			0x0041
#define IXL_AQ_CAP_TXQ			0x0042
#define IXL_AQ_CAP_MSIX			0x0043
#define IXL_AQ_CAP_VF_MSIX		0x0044
#define IXL_AQ_CAP_FLOW_DIRECTOR	0x0045
#define IXL_AQ_CAP_1588			0x0046
#define IXL_AQ_CAP_IWARP		0x0051
#define IXL_AQ_CAP_LED			0x0061
#define IXL_AQ_CAP_SDP			0x0062
#define IXL_AQ_CAP_MDIO			0x0063
#define IXL_AQ_CAP_WSR_PROT		0x0064
#define IXL_AQ_CAP_NVM_MGMT		0x0080
#define IXL_AQ_CAP_FLEX10		0x00F1
#define IXL_AQ_CAP_CEM			0x00F2
	uint8_t		major_rev;
	uint8_t		minor_rev;
	uint32_t	number;
	uint32_t	logical_id;
	uint32_t	phys_id;
	uint8_t		_reserved[16];
} __packed __aligned(4);

#define IXL_LLDP_SHUTDOWN		0x1

struct ixl_aq_switch_config {
	uint16_t	num_reported;
	uint16_t	num_total;
	uint8_t		_reserved[12];
} __packed __aligned(4);

struct ixl_aq_switch_config_element {
	uint8_t		type;
#define IXL_AQ_SW_ELEM_TYPE_MAC		1
#define IXL_AQ_SW_ELEM_TYPE_PF		2
#define IXL_AQ_SW_ELEM_TYPE_VF		3
#define IXL_AQ_SW_ELEM_TYPE_EMP		4
#define IXL_AQ_SW_ELEM_TYPE_BMC		5
#define IXL_AQ_SW_ELEM_TYPE_PV		16
#define IXL_AQ_SW_ELEM_TYPE_VEB		17
#define IXL_AQ_SW_ELEM_TYPE_PA		18
#define IXL_AQ_SW_ELEM_TYPE_VSI		19
	uint8_t		revision;
#define IXL_AQ_SW_ELEM_REV_1		1
	uint16_t	seid;

	uint16_t	uplink_seid;
	uint16_t	downlink_seid;

	uint8_t		_reserved[3];
	uint8_t		connection_type;
#define IXL_AQ_CONN_TYPE_REGULAR	0x1
#define IXL_AQ_CONN_TYPE_DEFAULT	0x2
#define IXL_AQ_CONN_TYPE_CASCADED	0x3

	uint16_t	scheduler_id;
	uint16_t	element_info;
} __packed __aligned(4);

#define IXL_PHY_TYPE_SGMII		0x00
#define IXL_PHY_TYPE_1000BASE_KX	0x01
#define IXL_PHY_TYPE_10GBASE_KX4	0x02
#define IXL_PHY_TYPE_10GBASE_KR		0x03
#define IXL_PHY_TYPE_40GBASE_KR4	0x04
#define IXL_PHY_TYPE_XAUI		0x05
#define IXL_PHY_TYPE_XFI		0x06
#define IXL_PHY_TYPE_SFI		0x07
#define IXL_PHY_TYPE_XLAUI		0x08
#define IXL_PHY_TYPE_XLPPI		0x09
#define IXL_PHY_TYPE_40GBASE_CR4_CU	0x0a
#define IXL_PHY_TYPE_10GBASE_CR1_CU	0x0b
#define IXL_PHY_TYPE_10GBASE_AOC	0x0c
#define IXL_PHY_TYPE_40GBASE_AOC	0x0d
#define IXL_PHY_TYPE_100BASE_TX		0x11
#define IXL_PHY_TYPE_1000BASE_T		0x12
#define IXL_PHY_TYPE_10GBASE_T		0x13
#define IXL_PHY_TYPE_10GBASE_SR		0x14
#define IXL_PHY_TYPE_10GBASE_LR		0x15
#define IXL_PHY_TYPE_10GBASE_SFPP_CU	0x16
#define IXL_PHY_TYPE_10GBASE_CR1	0x17
#define IXL_PHY_TYPE_40GBASE_CR4	0x18
#define IXL_PHY_TYPE_40GBASE_SR4	0x19
#define IXL_PHY_TYPE_40GBASE_LR4	0x1a
#define IXL_PHY_TYPE_1000BASE_SX	0x1b
#define IXL_PHY_TYPE_1000BASE_LX	0x1c
#define IXL_PHY_TYPE_1000BASE_T_OPTICAL	0x1d
#define IXL_PHY_TYPE_20GBASE_KR2	0x1e

#define IXL_PHY_TYPE_25GBASE_KR		0x1f
#define IXL_PHY_TYPE_25GBASE_CR		0x20
#define IXL_PHY_TYPE_25GBASE_SR		0x21
#define IXL_PHY_TYPE_25GBASE_LR		0x22
#define IXL_PHY_TYPE_25GBASE_AOC	0x23
#define IXL_PHY_TYPE_25GBASE_ACC	0x24

struct ixl_aq_module_desc {
	uint8_t		oui[3];
	uint8_t		_reserved1;
	uint8_t		part_number[16];
	uint8_t		revision[4];
	uint8_t		_reserved2[8];
} __packed __aligned(4);

struct ixl_aq_phy_abilities {
	uint32_t	phy_type;

	uint8_t		link_speed;
#define IXL_AQ_PHY_LINK_SPEED_100MB	(1 << 1)
#define IXL_AQ_PHY_LINK_SPEED_1000MB	(1 << 2)
#define IXL_AQ_PHY_LINK_SPEED_10GB	(1 << 3)
#define IXL_AQ_PHY_LINK_SPEED_40GB	(1 << 4)
#define IXL_AQ_PHY_LINK_SPEED_20GB	(1 << 5)
#define IXL_AQ_PHY_LINK_SPEED_25GB	(1 << 6)
	uint8_t		abilities;
	uint16_t	eee_capability;

	uint32_t	eeer_val;

	uint8_t		d3_lpan;
	uint8_t		phy_type_ext;
#define IXL_AQ_PHY_TYPE_EXT_25G_KR	0x01
#define IXL_AQ_PHY_TYPE_EXT_25G_CR	0x02
#define IXL_AQ_PHY_TYPE_EXT_25G_SR	0x04
#define IXL_AQ_PHY_TYPE_EXT_25G_LR	0x08
	uint8_t		fec_cfg_curr_mod_ext_info;
#define IXL_AQ_ENABLE_FEC_KR		0x01
#define IXL_AQ_ENABLE_FEC_RS		0x02
#define IXL_AQ_REQUEST_FEC_KR		0x04
#define IXL_AQ_REQUEST_FEC_RS		0x08
#define IXL_AQ_ENABLE_FEC_AUTO		0x10
#define IXL_AQ_MODULE_TYPE_EXT_MASK	0xe0
#define IXL_AQ_MODULE_TYPE_EXT_SHIFT	5
	uint8_t		ext_comp_code;

	uint8_t		phy_id[4];

	uint8_t		module_type[3];
#define IXL_SFF8024_ID_SFP		0x03
#define IXL_SFF8024_ID_QSFP		0x0c
#define IXL_SFF8024_ID_QSFP_PLUS	0x0d
#define IXL_SFF8024_ID_QSFP28		0x11
	uint8_t		qualified_module_count;
#define IXL_AQ_PHY_MAX_QMS		16
	struct ixl_aq_module_desc
			qualified_module[IXL_AQ_PHY_MAX_QMS];
} __packed __aligned(4);

struct ixl_aq_link_param {
	uint8_t		notify;
#define IXL_AQ_LINK_NOTIFY	0x03
	uint8_t		_reserved1;
	uint8_t		phy;
	uint8_t		speed;
	uint8_t		status;
	uint8_t		_reserved2[11];
} __packed __aligned(4);

struct ixl_aq_vsi_param {
	uint16_t	uplink_seid;
	uint8_t		connect_type;
#define IXL_AQ_VSI_CONN_TYPE_NORMAL	(0x1)
#define IXL_AQ_VSI_CONN_TYPE_DEFAULT	(0x2)
#define IXL_AQ_VSI_CONN_TYPE_CASCADED	(0x3)
	uint8_t		_reserved1;

	uint8_t		vf_id;
	uint8_t		_reserved2;
	uint16_t	vsi_flags;
#define IXL_AQ_VSI_TYPE_SHIFT		0x0
#define IXL_AQ_VSI_TYPE_MASK		(0x3 << IXL_AQ_VSI_TYPE_SHIFT)
#define IXL_AQ_VSI_TYPE_VF		0x0
#define IXL_AQ_VSI_TYPE_VMDQ2		0x1
#define IXL_AQ_VSI_TYPE_PF		0x2
#define IXL_AQ_VSI_TYPE_EMP_MNG		0x3
#define IXL_AQ_VSI_FLAG_CASCADED_PV	0x4

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_add_macvlan {
	uint16_t	num_addrs;
	uint16_t	seid0;
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_add_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint16_t	flags;
#define IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN	0x0004
	uint16_t	queue;
	uint32_t	_reserved;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan {
	uint16_t	num_addrs;
	uint16_t	seid0;
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint8_t		flags;
#define IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN	0x0008
	uint8_t		_reserved[7];
} __packed __aligned(16);

struct ixl_aq_vsi_reply {
	uint16_t	seid;
	uint16_t	vsi_number;

	uint16_t	vsis_used;
	uint16_t	vsis_free;

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_vsi_data {
	/* the first 96 bytes are written by SW */
	uint16_t	valid_sections;
#define IXL_AQ_VSI_VALID_SWITCH		(1 << 0)
#define IXL_AQ_VSI_VALID_SECURITY	(1 << 1)
#define IXL_AQ_VSI_VALID_VLAN		(1 << 2)
#define IXL_AQ_VSI_VALID_CAS_PV		(1 << 3)
#define IXL_AQ_VSI_VALID_INGRESS_UP	(1 << 4)
#define IXL_AQ_VSI_VALID_EGRESS_UP	(1 << 5)
#define IXL_AQ_VSI_VALID_QUEUE_MAP	(1 << 6)
#define IXL_AQ_VSI_VALID_QUEUE_OPT	(1 << 7)
#define IXL_AQ_VSI_VALID_OUTER_UP	(1 << 8)
#define IXL_AQ_VSI_VALID_SCHED		(1 << 9)
	/* switch section */
	uint16_t	switch_id;
#define IXL_AQ_VSI_SWITCH_ID_SHIFT	0
#define IXL_AQ_VSI_SWITCH_ID_MASK	(0xfff << IXL_AQ_VSI_SWITCH_ID_SHIFT)
#define IXL_AQ_VSI_SWITCH_NOT_STAG	(1 << 12)
#define IXL_AQ_VSI_SWITCH_LOCAL_LB	(1 << 14)

	uint8_t		_reserved1[2];
	/* security section */
	uint8_t		sec_flags;
#define IXL_AQ_VSI_SEC_ALLOW_DEST_OVRD	(1 << 0)
#define IXL_AQ_VSI_SEC_ENABLE_VLAN_CHK	(1 << 1)
#define IXL_AQ_VSI_SEC_ENABLE_MAC_CHK	(1 << 2)
	uint8_t		_reserved2;

	/* vlan section */
	uint16_t	pvid;
	uint16_t	fcoe_pvid;

	uint8_t		port_vlan_flags;
#define IXL_AQ_VSI_PVLAN_MODE_SHIFT	0
#define IXL_AQ_VSI_PVLAN_MODE_MASK	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_TAGGED	(0x1 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_UNTAGGED	(0x2 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_ALL	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_INSERT_PVID	(0x4 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_SHIFT	0x3
#define IXL_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR	(0x2 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
	uint8_t		_reserved3[3];

	/* ingress egress up section */
	uint32_t	ingress_table;
#define IXL_AQ_VSI_UP_SHIFT(_up)	((_up) * 3)
#define IXL_AQ_VSI_UP_MASK(_up)		(0x7 << IXL_AQ_VSI_UP_SHIFT(_up))
	uint32_t	egress_table;

	/* cascaded pv section */
	uint16_t	cas_pv_tag;
	uint8_t		cas_pv_flags;
#define IXL_AQ_VSI_CAS_PV_TAGX_SHIFT	0
#define IXL_AQ_VSI_CAS_PV_TAGX_MASK	(0x3 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_LEAVE	(0x0 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_REMOVE	(0x1 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_COPY	(0x2 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_INSERT_TAG	(1 << 4)
#define IXL_AQ_VSI_CAS_PV_ETAG_PRUNE	(1 << 5)
#define IXL_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG \
					(1 << 6)
	uint8_t		_reserved4;

	/* queue mapping section */
	uint16_t	mapping_flags;
#define IXL_AQ_VSI_QUE_MAP_MASK		0x1
#define IXL_AQ_VSI_QUE_MAP_CONTIG	0x0
#define IXL_AQ_VSI_QUE_MAP_NONCONTIG	0x1
	uint16_t	queue_mapping[16];
#define IXL_AQ_VSI_QUEUE_SHIFT		0x0
#define IXL_AQ_VSI_QUEUE_MASK		(0x7ff << IXL_AQ_VSI_QUEUE_SHIFT)
	uint16_t	tc_mapping[8];
#define IXL_AQ_VSI_TC_Q_OFFSET_SHIFT	0
#define IXL_AQ_VSI_TC_Q_OFFSET_MASK	(0x1ff << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT)
#define IXL_AQ_VSI_TC_Q_NUMBER_SHIFT	9
#define IXL_AQ_VSI_TC_Q_NUMBER_MASK	(0x7 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)

	/* queueing option section */
	uint8_t		queueing_opt_flags;
#define IXL_AQ_VSI_QUE_OPT_MCAST_UDP_EN	(1 << 2)
#define IXL_AQ_VSI_QUE_OPT_UCAST_UDP_EN	(1 << 3)
#define IXL_AQ_VSI_QUE_OPT_TCP_EN	(1 << 4)
#define IXL_AQ_VSI_QUE_OPT_FCOE_EN	(1 << 5)
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_PF	0
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_VSI	(1 << 6)
	uint8_t		_reserved5[3];

	/* scheduler section */
	uint8_t		up_enable_bits;
	uint8_t		_reserved6;

	/* outer up section */
	uint32_t	outer_up_table; /* same as ingress/egress tables */
	uint8_t		_reserved7[8];

	/* last 32 bytes are written by FW */
	uint16_t	qs_handle[8];
#define IXL_AQ_VSI_QS_HANDLE_INVALID	0xffff
	uint16_t	stat_counter_idx;
	uint16_t	sched_id;

	uint8_t		_reserved8[12];
} __packed __aligned(8);

CTASSERT(sizeof(struct ixl_aq_vsi_data) == 128);

struct ixl_aq_vsi_promisc_param {
	uint16_t	flags;
	uint16_t	valid_flags;
#define IXL_AQ_VSI_PROMISC_FLAG_UCAST	(1 << 0)
#define IXL_AQ_VSI_PROMISC_FLAG_MCAST	(1 << 1)
#define IXL_AQ_VSI_PROMISC_FLAG_BCAST	(1 << 2)
#define IXL_AQ_VSI_PROMISC_FLAG_DFLT	(1 << 3)
#define IXL_AQ_VSI_PROMISC_FLAG_VLAN	(1 << 4)
#define IXL_AQ_VSI_PROMISC_FLAG_RXONLY	(1 << 15)

	uint16_t	seid;
#define IXL_AQ_VSI_PROMISC_SEID_VALID	(1 << 15)
	uint16_t	vlan;
#define IXL_AQ_VSI_PROMISC_VLAN_VALID	(1 << 15)
	uint32_t	reserved[2];
} __packed __aligned(8);

struct ixl_aq_veb_param {
	uint16_t	uplink_seid;
	uint16_t	downlink_seid;
	uint16_t	veb_flags;
#define IXL_AQ_ADD_VEB_FLOATING		(1 << 0)
#define IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT	1
#define IXL_AQ_ADD_VEB_PORT_TYPE_MASK	(0x3 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DEFAULT \
					(0x2 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DATA	(0x4 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_ENABLE_L2_FILTER	(1 << 3) /* deprecated */
#define IXL_AQ_ADD_VEB_DISABLE_STATS	(1 << 4)
	uint8_t		enable_tcs;
	uint8_t		_reserved[9];
} __packed __aligned(16);

struct ixl_aq_veb_reply {
	uint16_t	_reserved1;
	uint16_t	_reserved2;
	uint16_t	_reserved3;
	uint16_t	switch_seid;
	uint16_t	veb_seid;
#define IXL_AQ_VEB_ERR_FLAG_NO_VEB	(1 << 0)
#define IXL_AQ_VEB_ERR_FLAG_NO_SCHED	(1 << 1)
#define IXL_AQ_VEB_ERR_FLAG_NO_COUNTER	(1 << 2)
#define IXL_AQ_VEB_ERR_FLAG_NO_ENTRY	(1 << 3)
	uint16_t	statistic_index;
	uint16_t	vebs_used;
	uint16_t	vebs_free;
} __packed __aligned(16);

/* GET PHY ABILITIES param[0] */
#define IXL_AQ_PHY_REPORT_QUAL		(1 << 0)
#define IXL_AQ_PHY_REPORT_INIT		(1 << 1)

struct ixl_aq_phy_reg_access {
	uint8_t		phy_iface;
#define IXL_AQ_PHY_IF_INTERNAL		0
#define IXL_AQ_PHY_IF_EXTERNAL		1
#define IXL_AQ_PHY_IF_MODULE		2
	uint8_t		dev_addr;
	uint16_t	recall;
#define IXL_AQ_PHY_QSFP_DEV_ADDR	0
#define IXL_AQ_PHY_QSFP_LAST		1
	uint32_t	reg;
	uint32_t	val;
	uint32_t	_reserved2;
} __packed __aligned(16);

/* RESTART_AN param[0] */
#define IXL_AQ_PHY_RESTART_AN		(1 << 1)
#define IXL_AQ_PHY_LINK_ENABLE		(1 << 2)

struct ixl_aq_link_status { /* this occupies the iaq_param space */
	uint16_t	command_flags; /* only field set on command */
#define IXL_AQ_LSE_MASK			0x3
#define IXL_AQ_LSE_NOP			0x0
#define IXL_AQ_LSE_DISABLE		0x2
#define IXL_AQ_LSE_ENABLE		0x3
#define IXL_AQ_LSE_IS_ENABLED		0x1 /* only set in response */
	uint8_t		phy_type;
	uint8_t		link_speed;
#define IXL_AQ_LINK_SPEED_1GB		(1 << 2)
#define IXL_AQ_LINK_SPEED_10GB		(1 << 3)
#define IXL_AQ_LINK_SPEED_40GB		(1 << 4)
#define IXL_AQ_LINK_SPEED_25GB		(1 << 6)
	uint8_t		link_info;
#define IXL_AQ_LINK_UP_FUNCTION		0x01
#define IXL_AQ_LINK_FAULT		0x02
#define IXL_AQ_LINK_FAULT_TX		0x04
#define IXL_AQ_LINK_FAULT_RX		0x08
#define IXL_AQ_LINK_FAULT_REMOTE	0x10
#define IXL_AQ_LINK_UP_PORT		0x20
#define IXL_AQ_MEDIA_AVAILABLE		0x40
#define IXL_AQ_SIGNAL_DETECT		0x80
	uint8_t		an_info;
#define IXL_AQ_AN_COMPLETED		0x01
#define IXL_AQ_LP_AN_ABILITY		0x02
#define IXL_AQ_PD_FAULT			0x04
#define IXL_AQ_FEC_EN			0x08
#define IXL_AQ_PHY_LOW_POWER		0x10
#define IXL_AQ_LINK_PAUSE_TX		0x20
#define IXL_AQ_LINK_PAUSE_RX		0x40
#define IXL_AQ_QUALIFIED_MODULE		0x80

	uint8_t		ext_info;
#define IXL_AQ_LINK_PHY_TEMP_ALARM	0x01
#define IXL_AQ_LINK_XCESSIVE_ERRORS	0x02
#define IXL_AQ_LINK_TX_SHIFT		0x02
#define IXL_AQ_LINK_TX_MASK		(0x03 << IXL_AQ_LINK_TX_SHIFT)
#define IXL_AQ_LINK_TX_ACTIVE		0x00
#define IXL_AQ_LINK_TX_DRAINED		0x01
#define IXL_AQ_LINK_TX_FLUSHED		0x03
#define IXL_AQ_LINK_FORCED_40G		0x10
/* 25G Error Codes */
#define IXL_AQ_25G_NO_ERR		0x00
#define IXL_AQ_25G_NOT_PRESENT		0x01
#define IXL_AQ_25G_NVM_CRC_ERR		0x02
#define IXL_AQ_25G_SBUS_UCODE_ERR	0x03
#define IXL_AQ_25G_SERDES_UCODE_ERR	0x04
#define IXL_AQ_25G_NIMB_UCODE_ERR	0x05
	uint8_t		loopback;
	uint16_t	max_frame_size;

	uint8_t		config;
#define IXL_AQ_CONFIG_FEC_KR_ENA	0x01
#define IXL_AQ_CONFIG_FEC_RS_ENA	0x02
#define IXL_AQ_CONFIG_CRC_ENA		0x04
#define IXL_AQ_CONFIG_PACING_MASK	0x78
	uint8_t		power_desc;
#define IXL_AQ_LINK_POWER_CLASS_1	0x00
#define IXL_AQ_LINK_POWER_CLASS_2	0x01
#define IXL_AQ_LINK_POWER_CLASS_3	0x02
#define IXL_AQ_LINK_POWER_CLASS_4	0x03
#define IXL_AQ_PWR_CLASS_MASK		0x03

	uint8_t		reserved[4];
} __packed __aligned(4);
/* event mask command flags for param[2] */
#define IXL_AQ_PHY_EV_MASK		0x3ff
#define IXL_AQ_PHY_EV_LINK_UPDOWN	(1 << 1)
#define IXL_AQ_PHY_EV_MEDIA_NA		(1 << 2)
#define IXL_AQ_PHY_EV_LINK_FAULT	(1 << 3)
#define IXL_AQ_PHY_EV_PHY_TEMP_ALARM	(1 << 4)
#define IXL_AQ_PHY_EV_EXCESS_ERRORS	(1 << 5)
#define IXL_AQ_PHY_EV_SIGNAL_DETECT	(1 << 6)
#define IXL_AQ_PHY_EV_AN_COMPLETED	(1 << 7)
#define IXL_AQ_PHY_EV_MODULE_QUAL_FAIL	(1 << 8)
#define IXL_AQ_PHY_EV_PORT_TX_SUSPENDED	(1 << 9)

struct ixl_aq_rss_lut { /* 722 */
#define IXL_AQ_SET_RSS_LUT_VSI_VALID	(1 << 15)
#define IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT	0
#define IXL_AQ_SET_RSS_LUT_VSI_ID_MASK	\
	(0x3FF << IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT)

	uint16_t	vsi_number;
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_MASK \
	(0x1 << IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_VSI	0
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_PF	1
	uint16_t	flags;
	uint8_t		_reserved[4];
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_get_set_rss_key { /* 722 */
#define IXL_AQ_SET_RSS_KEY_VSI_VALID	(1 << 15)
#define IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT	0
#define IXL_AQ_SET_RSS_KEY_VSI_ID_MASK	\
	(0x3FF << IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT)
	uint16_t	vsi_number;
	uint8_t		_reserved[6];
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

/* aq response codes */
#define IXL_AQ_RC_OK			0  /* success */
#define IXL_AQ_RC_EPERM			1  /* Operation not permitted */
#define IXL_AQ_RC_ENOENT		2  /* No such element */
#define IXL_AQ_RC_ESRCH			3  /* Bad opcode */
#define IXL_AQ_RC_EINTR			4  /* operation interrupted */
#define IXL_AQ_RC_EIO			5  /* I/O error */
#define IXL_AQ_RC_ENXIO			6  /* No such resource */
#define IXL_AQ_RC_E2BIG			7  /* Arg too long */
#define IXL_AQ_RC_EAGAIN		8  /* Try again */
#define IXL_AQ_RC_ENOMEM		9  /* Out of memory */
#define IXL_AQ_RC_EACCES		10 /* Permission denied */
#define IXL_AQ_RC_EFAULT		11 /* Bad address */
#define IXL_AQ_RC_EBUSY			12 /* Device or resource busy */
#define IXL_AQ_RC_EEXIST		13 /* object already exists */
#define IXL_AQ_RC_EINVAL		14 /* invalid argument */
#define IXL_AQ_RC_ENOTTY		15 /* not a typewriter */
#define IXL_AQ_RC_ENOSPC		16 /* No space or alloc failure */
#define IXL_AQ_RC_ENOSYS		17 /* function not implemented */
#define IXL_AQ_RC_ERANGE		18 /* parameter out of range */
#define IXL_AQ_RC_EFLUSHED		19 /* cmd flushed due to prev error */
#define IXL_AQ_RC_BAD_ADDR		20 /* contains a bad pointer */
#define IXL_AQ_RC_EMODE			21 /* not allowed in current mode */
#define IXL_AQ_RC_EFBIG			22 /* file too large */

struct ixl_tx_desc {
	uint64_t		addr;
	uint64_t		cmd;
#define IXL_TX_DESC_DTYPE_SHIFT		0
#define IXL_TX_DESC_DTYPE_MASK		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DATA		(0x0ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_NOP		(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_CONTEXT	(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FCOE_CTX	(0x2ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FD		(0x8ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DDP_CTX	(0x9ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_DATA	(0xbULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_1	(0xcULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_2	(0xdULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DONE		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)

#define IXL_TX_DESC_CMD_SHIFT		4
#define IXL_TX_DESC_CMD_MASK		(0x3ffULL << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_EOP		(0x001 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_RS		(0x002 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_ICRC		(0x004 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IL2TAG1		(0x008 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_DUMMY		(0x010 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_MASK	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_NONIP	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV6	(0x020 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4	(0x040 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4_CSUM	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_FCOET		(0x080 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_MASK	(0x300 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UNK	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_TCP	(0x100 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_SCTP	(0x200 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UDP	(0x300 << IXL_TX_DESC_CMD_SHIFT)

#define IXL_TX_DESC_MACLEN_SHIFT	16
#define IXL_TX_DESC_MACLEN_MASK		(0x7fULL << IXL_TX_DESC_MACLEN_SHIFT)
#define IXL_TX_DESC_IPLEN_SHIFT		23
#define IXL_TX_DESC_IPLEN_MASK		(0x7fULL << IXL_TX_DESC_IPLEN_SHIFT)
#define IXL_TX_DESC_L4LEN_SHIFT		30
#define IXL_TX_DESC_L4LEN_MASK		(0xfULL << IXL_TX_DESC_L4LEN_SHIFT)
#define IXL_TX_DESC_FCLEN_SHIFT		30
#define IXL_TX_DESC_FCLEN_MASK		(0xfULL << IXL_TX_DESC_FCLEN_SHIFT)

#define IXL_TX_DESC_BSIZE_SHIFT		34
#define IXL_TX_DESC_BSIZE_MAX		0x3fffULL
#define IXL_TX_DESC_BSIZE_MASK		\
	(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)

#define IXL_TX_CTX_DESC_CMD_TSO		0x10
#define IXL_TX_CTX_DESC_TLEN_SHIFT	30
#define IXL_TX_CTX_DESC_MSS_SHIFT	50

#define IXL_TX_DESC_L2TAG1_SHIFT	48
} __packed __aligned(16);
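
/*
 * an illustrative data descriptor for a single dma segment, roughly
 * what the transmit start path builds:
 *
 *	txd->addr = htole64(seg->ds_addr);
 *	txd->cmd = htole64(IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC |
 *	    (uint64_t)seg->ds_len << IXL_TX_DESC_BSIZE_SHIFT);
 *
 * the last descriptor of a packet also carries IXL_TX_DESC_CMD_EOP
 * and IXL_TX_DESC_CMD_RS so the hardware reports completion.
 */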

struct ixl_rx_rd_desc_16 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
} __packed __aligned(16);

struct ixl_rx_rd_desc_32 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
	uint64_t		_reserved1;
	uint64_t		_reserved2;
} __packed __aligned(16);

struct ixl_rx_wb_desc_16 {
	uint16_t		_reserved1;
	uint16_t		l2tag1;
	uint32_t		filter_status;
	uint64_t		qword1;
#define IXL_RX_DESC_DD			(1 << 0)
#define IXL_RX_DESC_EOP			(1 << 1)
#define IXL_RX_DESC_L2TAG1P		(1 << 2)
#define IXL_RX_DESC_L3L4P		(1 << 3)
#define IXL_RX_DESC_CRCP		(1 << 4)
#define IXL_RX_DESC_TSYNINDX_SHIFT	5	/* TSYNINDX */
#define IXL_RX_DESC_TSYNINDX_MASK	(7 << IXL_RX_DESC_TSYNINDX_SHIFT)
#define IXL_RX_DESC_UMB_SHIFT		9
#define IXL_RX_DESC_UMB_MASK		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_UCAST		(0x0 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MCAST		(0x1 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_BCAST		(0x2 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MIRROR		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_FLM			(1 << 11)
#define IXL_RX_DESC_FLTSTAT_SHIFT	12
#define IXL_RX_DESC_FLTSTAT_MASK	(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_NODATA	(0x0 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_FDFILTID	(0x1 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_RSS		(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_LPBK		(1 << 14)
#define IXL_RX_DESC_IPV6EXTADD		(1 << 15)
#define IXL_RX_DESC_INT_UDP_0		(1 << 18)

#define IXL_RX_DESC_RXE			(1 << 19)
#define IXL_RX_DESC_HBO			(1 << 21)
#define IXL_RX_DESC_IPE			(1 << 22)
#define IXL_RX_DESC_L4E			(1 << 23)
#define IXL_RX_DESC_EIPE		(1 << 24)
#define IXL_RX_DESC_OVERSIZE		(1 << 25)

#define IXL_RX_DESC_PTYPE_SHIFT		30
#define IXL_RX_DESC_PTYPE_MASK		(0xffULL << IXL_RX_DESC_PTYPE_SHIFT)

#define IXL_RX_DESC_PLEN_SHIFT		38
#define IXL_RX_DESC_PLEN_MASK		(0x3fffULL << IXL_RX_DESC_PLEN_SHIFT)
#define IXL_RX_DESC_HLEN_SHIFT		42
#define IXL_RX_DESC_HLEN_MASK		(0x7ffULL << IXL_RX_DESC_HLEN_SHIFT)
} __packed __aligned(16);

struct ixl_rx_wb_desc_32 {
	uint64_t		qword0;
	uint64_t		qword1;
	uint64_t		qword2;
	uint64_t		qword3;
} __packed __aligned(16);

#define IXL_TX_PKT_DESCS		8
#define IXL_TX_QUEUE_ALIGN		128
#define IXL_RX_QUEUE_ALIGN		128

#define IXL_HARDMTU			9712 /* 9726 - ETHER_HDR_LEN */
#define IXL_TSO_SIZE			((255 * 1024) - 1)
#define IXL_MAX_DMA_SEG_SIZE		((16 * 1024) - 1)

/*
 * Our TCP/IP stack is unable to handle packets greater than MAXMCLBYTES.
 * This interface is unable to handle packets greater than IXL_TSO_SIZE.
 */
CTASSERT(MAXMCLBYTES < IXL_TSO_SIZE);

#define IXL_PCIREG			PCI_MAPREG_START

#define IXL_ITR0			0x0
#define IXL_ITR1			0x1
#define IXL_ITR2			0x2
#define IXL_NOITR			0x2

#define IXL_AQ_NUM			256
#define IXL_AQ_MASK			(IXL_AQ_NUM - 1)
#define IXL_AQ_ALIGN			64 /* lol */
#define IXL_AQ_BUFLEN			4096
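
/*
 * IXL_AQ_NUM is a power of two, so admin queue producer/consumer
 * indexes can be advanced with a mask instead of a modulo, e.g.:
 *
 *	prod++;
 *	prod &= IXL_AQ_MASK;
 */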

/* Packet Classifier Types for filters */
/* bits 0-28 are reserved for future use */
#define IXL_PCT_NONF_IPV4_UDP_UCAST	(1ULL << 29)	/* 722 */
#define IXL_PCT_NONF_IPV4_UDP_MCAST	(1ULL << 30)	/* 722 */
#define IXL_PCT_NONF_IPV4_UDP		(1ULL << 31)
#define IXL_PCT_NONF_IPV4_TCP_SYN_NOACK	(1ULL << 32)	/* 722 */
#define IXL_PCT_NONF_IPV4_TCP		(1ULL << 33)
#define IXL_PCT_NONF_IPV4_SCTP		(1ULL << 34)
#define IXL_PCT_NONF_IPV4_OTHER		(1ULL << 35)
#define IXL_PCT_FRAG_IPV4		(1ULL << 36)
/* bits 37-38 are reserved for future use */
#define IXL_PCT_NONF_IPV6_UDP_UCAST	(1ULL << 39)	/* 722 */
#define IXL_PCT_NONF_IPV6_UDP_MCAST	(1ULL << 40)	/* 722 */
#define IXL_PCT_NONF_IPV6_UDP		(1ULL << 41)
#define IXL_PCT_NONF_IPV6_TCP_SYN_NOACK	(1ULL << 42)	/* 722 */
#define IXL_PCT_NONF_IPV6_TCP		(1ULL << 43)
#define IXL_PCT_NONF_IPV6_SCTP		(1ULL << 44)
#define IXL_PCT_NONF_IPV6_OTHER		(1ULL << 45)
#define IXL_PCT_FRAG_IPV6		(1ULL << 46)
/* bit 47 is reserved for future use */
#define IXL_PCT_FCOE_OX			(1ULL << 48)
#define IXL_PCT_FCOE_RX			(1ULL << 49)
#define IXL_PCT_FCOE_OTHER		(1ULL << 50)
/* bits 51-62 are reserved for future use */
#define IXL_PCT_L2_PAYLOAD		(1ULL << 63)

#define IXL_RSS_HENA_BASE_DEFAULT		\
	IXL_PCT_NONF_IPV4_UDP |			\
	IXL_PCT_NONF_IPV4_TCP |			\
	IXL_PCT_NONF_IPV4_SCTP |		\
	IXL_PCT_NONF_IPV4_OTHER |		\
	IXL_PCT_FRAG_IPV4 |			\
	IXL_PCT_NONF_IPV6_UDP |			\
	IXL_PCT_NONF_IPV6_TCP |			\
	IXL_PCT_NONF_IPV6_SCTP |		\
	IXL_PCT_NONF_IPV6_OTHER |		\
	IXL_PCT_FRAG_IPV6 |			\
	IXL_PCT_L2_PAYLOAD

#define IXL_RSS_HENA_BASE_710		IXL_RSS_HENA_BASE_DEFAULT
#define IXL_RSS_HENA_BASE_722		IXL_RSS_HENA_BASE_DEFAULT | \
	IXL_PCT_NONF_IPV4_UDP_UCAST |		\
	IXL_PCT_NONF_IPV4_UDP_MCAST |		\
	IXL_PCT_NONF_IPV6_UDP_UCAST |		\
	IXL_PCT_NONF_IPV6_UDP_MCAST |		\
	IXL_PCT_NONF_IPV4_TCP_SYN_NOACK |	\
	IXL_PCT_NONF_IPV6_TCP_SYN_NOACK

#define IXL_HMC_ROUNDUP			512
#define IXL_HMC_PGSIZE			4096
#define IXL_HMC_DVASZ			sizeof(uint64_t)
#define IXL_HMC_PGS			(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
#define IXL_HMC_L2SZ			(IXL_HMC_PGSIZE * IXL_HMC_PGS)
#define IXL_HMC_PDVALID			1ULL
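
/*
 * ie. a 4096 byte page of 8 byte pointers holds 512 entries, so a
 * fully populated page of page descriptors covers 512 * 4096 bytes
 * (2MB) of backing memory.
 */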

struct ixl_aq_regs {
	bus_size_t		atq_tail;
	bus_size_t		atq_head;
	bus_size_t		atq_len;
	bus_size_t		atq_bal;
	bus_size_t		atq_bah;

	bus_size_t		arq_tail;
	bus_size_t		arq_head;
	bus_size_t		arq_len;
	bus_size_t		arq_bal;
	bus_size_t		arq_bah;

	uint32_t		atq_len_enable;
	uint32_t		atq_tail_mask;
	uint32_t		atq_head_mask;

	uint32_t		arq_len_enable;
	uint32_t		arq_tail_mask;
	uint32_t		arq_head_mask;
};

struct ixl_phy_type {
	uint64_t	phy_type;
	uint64_t	ifm_type;
};

struct ixl_speed_type {
	uint8_t		dev_speed;
	uint64_t	net_speed;
};

struct ixl_aq_buf {
	SIMPLEQ_ENTRY(ixl_aq_buf)
				 aqb_entry;
	void			*aqb_data;
	bus_dmamap_t		 aqb_map;
};
SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);

struct ixl_dmamem {
	bus_dmamap_t		ixm_map;
	bus_dma_segment_t	ixm_seg;
	int			ixm_nsegs;
	size_t			ixm_size;
	caddr_t			ixm_kva;
};
#define IXL_DMA_MAP(_ixm)	((_ixm)->ixm_map)
#define IXL_DMA_DVA(_ixm)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
#define IXL_DMA_KVA(_ixm)	((void *)(_ixm)->ixm_kva)
#define IXL_DMA_LEN(_ixm)	((_ixm)->ixm_size)
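/*
 * IXL_DMA_DVA is the device (bus) address of the memory, while
 * IXL_DMA_KVA is the kernel mapping of the same memory.
 */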

struct ixl_hmc_entry {
	uint64_t		 hmc_base;
	uint32_t		 hmc_count;
	uint32_t		 hmc_size;
};

#define IXL_HMC_LAN_TX		 0
#define IXL_HMC_LAN_RX		 1
#define IXL_HMC_FCOE_CTX	 2
#define IXL_HMC_FCOE_FILTER	 3
#define IXL_HMC_COUNT		 4

struct ixl_hmc_pack {
	uint16_t		offset;
	uint16_t		width;
	uint16_t		lsb;
};

/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for c to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * no high bits in the fields being set, and the fields must be packed in the
 * same order as they are in the ctx structure.
 */
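
/*
 * e.g. the rxq entry { offsetof(struct ixl_hmc_rxq, qlen), 13, 89 }
 * below takes the low 13 bits of the little-endian value in
 * rxq->qlen and packs them into the context object starting at
 * bit 89.
 */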

struct ixl_hmc_rxq {
	uint16_t		 head;
	uint8_t			 cpuid;
	uint64_t		 base;
#define IXL_HMC_RXQ_BASE_UNIT		128
	uint16_t		 qlen;
	uint16_t		 dbuff;
#define IXL_HMC_RXQ_DBUFF_UNIT		128
	uint8_t			 hbuff;
#define IXL_HMC_RXQ_HBUFF_UNIT		64
	uint8_t			 dtype;
#define IXL_HMC_RXQ_DTYPE_NOSPLIT	0x0
#define IXL_HMC_RXQ_DTYPE_HSPLIT	0x1
#define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS	0x2
	uint8_t			 dsize;
#define IXL_HMC_RXQ_DSIZE_16		0
#define IXL_HMC_RXQ_DSIZE_32		1
	uint8_t			 crcstrip;
	uint8_t			 fc_ena;
	uint8_t			 l2tsel;
#define IXL_HMC_RXQ_L2TSEL_2ND_TAG_TO_L2TAG1 \
					0
#define IXL_HMC_RXQ_L2TSEL_1ST_TAG_TO_L2TAG1 \
					1
	uint8_t			 hsplit_0;
	uint8_t			 hsplit_1;
	uint8_t			 showiv;
	uint16_t		 rxmax;
	uint8_t			 tphrdesc_ena;
	uint8_t			 tphwdesc_ena;
	uint8_t			 tphdata_ena;
	uint8_t			 tphhead_ena;
	uint8_t			 lrxqthresh;
	uint8_t			 prefena;
};

static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
	{ offsetof(struct ixl_hmc_rxq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_rxq, cpuid),		8,	13 },
	{ offsetof(struct ixl_hmc_rxq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
	{ offsetof(struct ixl_hmc_rxq, dbuff),		7,	102 },
	{ offsetof(struct ixl_hmc_rxq, hbuff),		5,	109 },
	{ offsetof(struct ixl_hmc_rxq, dtype),		2,	114 },
	{ offsetof(struct ixl_hmc_rxq, dsize),		1,	116 },
	{ offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
	{ offsetof(struct ixl_hmc_rxq, fc_ena),		1,	118 },
	{ offsetof(struct ixl_hmc_rxq, l2tsel),		1,	119 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
	{ offsetof(struct ixl_hmc_rxq, showiv),		1,	127 },
	{ offsetof(struct ixl_hmc_rxq, rxmax),		14,	174 },
	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
	{ offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
	{ offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
	{ offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
	{ offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
};
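
/* the highest packed rxq field, prefena, occupies bit 201 */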

#define IXL_HMC_RXQ_MINSIZE (201 + 1)

struct ixl_hmc_txq {
	uint16_t		head;
	uint8_t			new_context;
	uint64_t		base;
#define IXL_HMC_TXQ_BASE_UNIT		128
	uint8_t			fc_ena;
	uint8_t			timesync_ena;
	uint8_t			fd_ena;
	uint8_t			alt_vlan_ena;
	uint16_t		thead_wb;
	uint8_t			cpuid;
	uint8_t			head_wb_ena;
#define IXL_HMC_TXQ_DESC_WB		0
#define IXL_HMC_TXQ_HEAD_WB		1
	uint16_t		qlen;
	uint8_t			tphrdesc_ena;
	uint8_t			tphrpacket_ena;
	uint8_t			tphwdesc_ena;
	uint64_t		head_wb_addr;
	uint32_t		crc;
	uint16_t		rdylist;
	uint8_t			rdylist_act;
};

static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
	{ offsetof(struct ixl_hmc_txq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
	{ offsetof(struct ixl_hmc_txq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_txq, fc_ena),		1,	89 },
	{ offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
	{ offsetof(struct ixl_hmc_txq, fd_ena),		1,	91 },
	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
	{ offsetof(struct ixl_hmc_txq, cpuid),		8,	96 },
/* line 1 */
	{ offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
	{ offsetof(struct ixl_hmc_txq, qlen),		13,	33 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
/* line 7 */
	{ offsetof(struct ixl_hmc_txq, crc),		32,	0 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
};
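
/* the highest packed txq field, rdylist_act, ends at bit 94 of line 7 */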

#define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)

struct ixl_rss_key {
	uint32_t		 key[13];
};

struct ixl_rss_lut_128 {
	uint32_t		 entries[128 / sizeof(uint32_t)];
};

struct ixl_rss_lut_512 {
	uint32_t		 entries[512 / sizeof(uint32_t)];
};

/* driver structures */

struct ixl_vector;
struct ixl_chip;

struct ixl_tx_map {
	struct mbuf		*txm_m;
	bus_dmamap_t		 txm_map;
	unsigned int		 txm_eop;
};

struct ixl_tx_ring {
	struct ixl_softc	*txr_sc;
	struct ixl_vector	*txr_vector;
	struct ifqueue		*txr_ifq;

	unsigned int		 txr_prod;
	unsigned int		 txr_cons;

	struct ixl_tx_map	*txr_maps;
	struct ixl_dmamem	 txr_mem;

	bus_size_t		 txr_tail;
	unsigned int		 txr_qid;
} __aligned(CACHE_LINE_SIZE);

struct ixl_rx_map {
	struct mbuf		*rxm_m;
	bus_dmamap_t		 rxm_map;
};

struct ixl_rx_ring {
	struct ixl_softc	*rxr_sc;
	struct ixl_vector	*rxr_vector;
	struct ifiqueue		*rxr_ifiq;

	struct if_rxring	 rxr_acct;
	struct timeout		 rxr_refill;

	unsigned int		 rxr_prod;
	unsigned int		 rxr_cons;

	struct ixl_rx_map	*rxr_maps;
	struct ixl_dmamem	 rxr_mem;

	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	bus_size_t		 rxr_tail;
	unsigned int		 rxr_qid;
} __aligned(CACHE_LINE_SIZE);

struct ixl_atq {
	struct ixl_aq_desc	  iatq_desc;
	void			 *iatq_arg;
	void			(*iatq_fn)(struct ixl_softc *, void *);
};
SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);

struct ixl_vector {
	struct ixl_softc	*iv_sc;
	struct ixl_rx_ring	*iv_rxr;
	struct ixl_tx_ring	*iv_txr;
	int			 iv_qid;
	void			*iv_ihc;
	char			 iv_name[16];
} __aligned(CACHE_LINE_SIZE);

struct ixl_softc {
	struct device		 sc_dev;
	const struct ixl_chip	*sc_chip;
	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	void			*sc_ihc;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	uint16_t		 sc_api_major;
	uint16_t		 sc_api_minor;
	uint8_t			 sc_pf_id;
	uint16_t		 sc_uplink_seid;	/* le */
	uint16_t		 sc_downlink_seid;	/* le */
	uint16_t		 sc_veb_seid;		/* le */
	uint16_t		 sc_vsi_number;		/* le */
	uint16_t		 sc_seid;
	unsigned int		 sc_base_queue;
	unsigned int		 sc_port;

	struct ixl_dmamem	 sc_scratch;

	const struct ixl_aq_regs *
				 sc_aq_regs;

	struct ixl_dmamem	 sc_atq;
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;

	struct mutex		 sc_atq_mtx;
	struct ixl_dmamem	 sc_arq;
	struct task		 sc_arq_task;
	struct ixl_aq_bufs	 sc_arq_idle;
	struct ixl_aq_bufs	 sc_arq_live;
	struct if_rxring	 sc_arq_ring;
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;

	struct mutex		 sc_link_state_mtx;
	struct task		 sc_link_state_task;
	struct ixl_atq		 sc_link_state_atq;

	struct ixl_dmamem	 sc_hmc_sd;
	struct ixl_dmamem	 sc_hmc_pd;
	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT];

	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueues;	/* 1 << sc_nqueues */

	struct intrmap		*sc_intrmap;
	struct ixl_vector	*sc_vectors;

	struct rwlock		 sc_cfg_lock;
	unsigned int		 sc_dead;

	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];

#if NKSTAT > 0
	struct mutex		 sc_kstat_mtx;
	struct timeout		 sc_kstat_tmo;
	struct kstat		*sc_port_kstat;
	struct kstat		*sc_vsi_kstat;
#endif
};
#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

#define delaymsec(_ms)	delay(1000 * (_ms))

static void	ixl_clear_hw(struct ixl_softc *);
static int	ixl_pf_reset(struct ixl_softc *);

static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
		    bus_size_t, u_int);
static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);

static int	ixl_arq_fill(struct ixl_softc *);
static void	ixl_arq_unfill(struct ixl_softc *);

static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
		    unsigned int);
static void	ixl_atq_set(struct ixl_atq *,
		    void (*)(struct ixl_softc *, void *), void *);
static void	ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
static void	ixl_atq_done(struct ixl_softc *);
static void	ixl_atq_exec(struct ixl_softc *, struct ixl_atq *,
		    const char *);
static int	ixl_get_version(struct ixl_softc *);
static int	ixl_pxe_clear(struct ixl_softc *);
static int	ixl_lldp_shut(struct ixl_softc *);
static int	ixl_get_mac(struct ixl_softc *);
static int	ixl_get_switch_config(struct ixl_softc *);
static int	ixl_phy_mask_ints(struct ixl_softc *);
static int	ixl_get_phy_types(struct ixl_softc *, uint64_t *);
static int	ixl_restart_an(struct ixl_softc *);
static int	ixl_hmc(struct ixl_softc *);
static void	ixl_hmc_free(struct ixl_softc *);
static int	ixl_get_vsi(struct ixl_softc *);
static int	ixl_set_vsi(struct ixl_softc *);
static int	ixl_get_link_status(struct ixl_softc *);
static int	ixl_set_link_status(struct ixl_softc *,
		    const struct ixl_aq_desc *);
static int	ixl_add_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static int	ixl_remove_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static void	ixl_link_state_update(void *);
static void	ixl_arq(void *);
static void	ixl_hmc_pack(void *, const void *,
		    const struct ixl_hmc_pack *, unsigned int);

static int	ixl_get_sffpage(struct ixl_softc *, struct if_sffpage *);
static int	ixl_sff_get_byte(struct ixl_softc *, uint8_t, uint32_t,
		    uint8_t *);
static int	ixl_sff_set_byte(struct ixl_softc *, uint8_t, uint32_t,
		    uint8_t);

static int	ixl_match(struct device *, void *, void *);
static void	ixl_attach(struct device *, struct device *, void *);

static void	ixl_media_add(struct ixl_softc *, uint64_t);
static int	ixl_media_change(struct ifnet *);
static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
static void	ixl_watchdog(struct ifnet *);
static int	ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_start(struct ifqueue *);
static int	ixl_intr0(void *);
static int	ixl_intr_vector(void *);
static int	ixl_up(struct ixl_softc *);
static int	ixl_down(struct ixl_softc *);
static int	ixl_iff(struct ixl_softc *);

static struct ixl_tx_ring *
		ixl_txr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
static void	ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *);

static struct ixl_rx_ring *
		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxrefill(void *);
static int	ixl_rxrinfo(struct ixl_softc *, struct if_rxrinfo *);
static void	ixl_rx_checksum(struct mbuf *, uint64_t);

#if NKSTAT > 0
static void	ixl_kstat_attach(struct ixl_softc *);
#endif

struct cfdriver ixl_cd = {
	NULL,
	"ixl",
	DV_IFNET,
};

const struct cfattach ixl_ca = {
	sizeof(struct ixl_softc),
	ixl_match,
	ixl_attach,
};

static const struct ixl_phy_type ixl_phy_type_map[] = {
	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
	{ 1ULL << IXL_PHY_TYPE_XAUI |
	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
	{ 1ULL << IXL_PHY_TYPE_XLAUI |
	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_SFP_CU },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_CR },
};

static const struct ixl_speed_type ixl_speed_type_map[] = {
	{ IXL_AQ_LINK_SPEED_40GB,		IF_Gbps(40) },
	{ IXL_AQ_LINK_SPEED_25GB,		IF_Gbps(25) },
	{ IXL_AQ_LINK_SPEED_10GB,		IF_Gbps(10) },
	{ IXL_AQ_LINK_SPEED_1GB,		IF_Gbps(1) },
};

static const struct ixl_aq_regs ixl_pf_aq_regs = {
	.atq_tail	= I40E_PF_ATQT,
	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
	.atq_head	= I40E_PF_ATQH,
	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
	.atq_len	= I40E_PF_ATQLEN,
	.atq_bal	= I40E_PF_ATQBAL,
	.atq_bah	= I40E_PF_ATQBAH,
	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,

	.arq_tail	= I40E_PF_ARQT,
	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
	.arq_head	= I40E_PF_ARQH,
	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
	.arq_len	= I40E_PF_ARQLEN,
	.arq_bal	= I40E_PF_ARQBAL,
	.arq_bah	= I40E_PF_ARQBAH,
	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
};

#define ixl_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define ixl_intr_enable(_s) \
	ixl_wr((_s), I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | \
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | \
	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT))

#define ixl_nqueues(_sc)	(1 << (_sc)->sc_nqueues)

#ifdef __LP64__
#define ixl_dmamem_hi(_ixm)	(uint32_t)(IXL_DMA_DVA(_ixm) >> 32)
#else
#define ixl_dmamem_hi(_ixm)	0
#endif

#define ixl_dmamem_lo(_ixm)	(uint32_t)IXL_DMA_DVA(_ixm)

static inline void
ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
{
#ifdef __LP64__
	htolem32(&iaq->iaq_param[2], addr >> 32);
#else
	iaq->iaq_param[2] = htole32(0);
#endif
	htolem32(&iaq->iaq_param[3], addr);
}
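
/* HTOLE16() is a constant-expression htole16() for static initialisers */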

#if _BYTE_ORDER == _BIG_ENDIAN
#define HTOLE16(_x)	(uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
#else
#define HTOLE16(_x)	(_x)
#endif

static struct rwlock ixl_sff_lock = RWLOCK_INITIALIZER("ixlsff");

/* deal with differences between chips */

struct ixl_chip {
	uint64_t		  ic_rss_hena;
	uint32_t		(*ic_rd_ctl)(struct ixl_softc *, uint32_t);
	void			(*ic_wr_ctl)(struct ixl_softc *, uint32_t,
				      uint32_t);

	int			(*ic_set_rss_key)(struct ixl_softc *,
				      const struct ixl_rss_key *);
	int			(*ic_set_rss_lut)(struct ixl_softc *,
				      const struct ixl_rss_lut_128 *);
};

static inline uint64_t
ixl_rss_hena(struct ixl_softc *sc)
{
	return (sc->sc_chip->ic_rss_hena);
}

static inline uint32_t
ixl_rd_ctl(struct ixl_softc *sc, uint32_t r)
{
	return ((*sc->sc_chip->ic_rd_ctl)(sc, r));
}

static inline void
ixl_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
{
	(*sc->sc_chip->ic_wr_ctl)(sc, r, v);
}

static inline int
ixl_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
{
	return ((*sc->sc_chip->ic_set_rss_key)(sc, rsskey));
}

static inline int
ixl_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
{
	return ((*sc->sc_chip->ic_set_rss_lut)(sc, lut));
}

/* 710 chip specifics */

static uint32_t		ixl_710_rd_ctl(struct ixl_softc *, uint32_t);
static void		ixl_710_wr_ctl(struct ixl_softc *, uint32_t, uint32_t);
static int		ixl_710_set_rss_key(struct ixl_softc *,
			    const struct ixl_rss_key *);
static int		ixl_710_set_rss_lut(struct ixl_softc *,
			    const struct ixl_rss_lut_128 *);

static const struct ixl_chip ixl_710 = {
	.ic_rss_hena =		IXL_RSS_HENA_BASE_710,
	.ic_rd_ctl =		ixl_710_rd_ctl,
	.ic_wr_ctl =		ixl_710_wr_ctl,
	.ic_set_rss_key =	ixl_710_set_rss_key,
	.ic_set_rss_lut =	ixl_710_set_rss_lut,
};

/* 722 chip specifics */

static uint32_t		ixl_722_rd_ctl(struct ixl_softc *, uint32_t);
static void		ixl_722_wr_ctl(struct ixl_softc *, uint32_t, uint32_t);
static int		ixl_722_set_rss_key(struct ixl_softc *,
			    const struct ixl_rss_key *);
static int		ixl_722_set_rss_lut(struct ixl_softc *,
			    const struct ixl_rss_lut_128 *);

static const struct ixl_chip ixl_722 = {
	.ic_rss_hena =		IXL_RSS_HENA_BASE_722,
	.ic_rd_ctl =		ixl_722_rd_ctl,
	.ic_wr_ctl =		ixl_722_wr_ctl,
	.ic_set_rss_key =	ixl_722_set_rss_key,
	.ic_set_rss_lut =	ixl_722_set_rss_lut,
};

1614/*
1615 * 710 chips running an older firmware/API use the same ctl ops as
1616 * 722 chips; ixl_attach() switches such chips over once it has
1617 * read the firmware API version.
1618 */
1619
1620static const struct ixl_chip ixl_710_decrepit = {
1621	.ic_rss_hena =		IXL_RSS_HENA_BASE_710,
1622	.ic_rd_ctl =		ixl_722_rd_ctl,
1623	.ic_wr_ctl =		ixl_722_wr_ctl,
1624	.ic_set_rss_key =	ixl_710_set_rss_key,
1625	.ic_set_rss_lut =	ixl_710_set_rss_lut,
1626};
1627
1628/* driver code */
1629
1630struct ixl_device {
1631	const struct ixl_chip	*id_chip;
1632	pci_vendor_id_t		 id_vid;
1633	pci_product_id_t	 id_pid;
1634};
1635
1636static const struct ixl_device ixl_devices[] = {
1637	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP },
1638	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP_2 },
1639	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_40G_BP },
1640	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BP, },
1641	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_1 },
1642	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_2 },
1643	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_QSFP },
1644	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BASET },
1645	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1646	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1647	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1648	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1649	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28, },
1650	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T, },
1651	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_KX },
1652	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_QSFP },
1653	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_SFP_1 },
1654	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G },
1655	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_T },
1656	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_SFP_2 },
1657};
1658
1659static const struct ixl_device *
1660ixl_device_lookup(struct pci_attach_args *pa)
1661{
1662	pci_vendor_id_t vid = PCI_VENDOR(pa->pa_id);
1663	pci_product_id_t pid = PCI_PRODUCT(pa->pa_id);
1664	const struct ixl_device *id;
1665	unsigned int i;
1666
1667	for (i = 0; i < nitems(ixl_devices); i++) {
1668		id = &ixl_devices[i];
1669		if (id->id_vid == vid && id->id_pid == pid)
1670			return (id);
1671	}
1672
1673	return (NULL);
1674}
1675
1676static int
1677ixl_match(struct device *parent, void *match, void *aux)
1678{
1679	return (ixl_device_lookup(aux) != NULL);
1680}
1681
1682void
1683ixl_attach(struct device *parent, struct device *self, void *aux)
1684{
1685	struct ixl_softc *sc = (struct ixl_softc *)self;
1686	struct ifnet *ifp = &sc->sc_ac.ac_if;
1687	struct pci_attach_args *pa = aux;
1688	pcireg_t memtype;
1689	uint32_t port, ari, func;
1690	uint64_t phy_types = 0;
1691	unsigned int nqueues, i;
1692	int tries;
1693
1694	rw_init(&sc->sc_cfg_lock, "ixlcfg");
1695
1696	sc->sc_chip = ixl_device_lookup(pa)->id_chip;
1697	sc->sc_pc = pa->pa_pc;
1698	sc->sc_tag = pa->pa_tag;
1699	sc->sc_dmat = pa->pa_dmat;
1700	sc->sc_aq_regs = &ixl_pf_aq_regs;
1701
1702	sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
1703	sc->sc_tx_ring_ndescs = 1024;
1704	sc->sc_rx_ring_ndescs = 1024;
1705
1706	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IXL_PCIREG);
1707	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1708	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
1709		printf(": unable to map registers\n");
1710		return;
1711	}
1712
1713	sc->sc_base_queue = (ixl_rd(sc, I40E_PFLAN_QALLOC) &
1714	    I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1715	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1716
1717	ixl_clear_hw(sc);
1718	if (ixl_pf_reset(sc) == -1) {
1719		/* error printed by ixl_pf_reset */
1720		goto unmap;
1721	}
1722
1723	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1724	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1725	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1726	sc->sc_port = port;
1727	printf(": port %u", port);
1728
1729	ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1730	ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1731	ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1732
1733	func = ixl_rd(sc, I40E_PF_FUNC_RID);
1734	sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1735
1736	/* initialise the adminq */
1737
1738	mtx_init(&sc->sc_atq_mtx, IPL_NET);
1739
1740	if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1741	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1742		printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
1743		goto unmap;
1744	}
1745
1746	SIMPLEQ_INIT(&sc->sc_arq_idle);
1747	SIMPLEQ_INIT(&sc->sc_arq_live);
1748	if_rxr_init(&sc->sc_arq_ring, 2, IXL_AQ_NUM - 1);
1749	task_set(&sc->sc_arq_task, ixl_arq, sc);
1750	sc->sc_arq_cons = 0;
1751	sc->sc_arq_prod = 0;
1752
1753	if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1754	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1755		printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
1756		goto free_atq;
1757	}
1758
1759	if (!ixl_arq_fill(sc)) {
1760		printf("\n" "%s: unable to fill arq descriptors\n",
1761		    DEVNAME(sc));
1762		goto free_arq;
1763	}
1764
1765	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1766	    0, IXL_DMA_LEN(&sc->sc_atq),
1767	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1768
1769	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1770	    0, IXL_DMA_LEN(&sc->sc_arq),
1771	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1772
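	/*
	 * Bring the admin queues up: zero the head/tail pointers, program
	 * the ring base addresses and lengths, then check that the
	 * firmware answers a get-version command. The firmware may still
	 * be starting, so retry up to 10 times, 100ms apart.
	 */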
1773	for (tries = 0; tries < 10; tries++) {
1774		int rv;
1775
1776		sc->sc_atq_cons = 0;
1777		sc->sc_atq_prod = 0;
1778
1779		ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1780		ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1781		ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1782		ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1783
1784		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1785
1786		ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1787		    ixl_dmamem_lo(&sc->sc_atq));
1788		ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1789		    ixl_dmamem_hi(&sc->sc_atq));
1790		ixl_wr(sc, sc->sc_aq_regs->atq_len,
1791		    sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1792
1793		ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1794		    ixl_dmamem_lo(&sc->sc_arq));
1795		ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1796		    ixl_dmamem_hi(&sc->sc_arq));
1797		ixl_wr(sc, sc->sc_aq_regs->arq_len,
1798		    sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1799
1800		rv = ixl_get_version(sc);
1801		if (rv == 0)
1802			break;
1803		if (rv != ETIMEDOUT) {
1804			printf(", unable to get firmware version\n");
1805			goto shutdown;
1806		}
1807
1808		delaymsec(100);
1809	}
1810
1811	ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1812
1813	if (ixl_pxe_clear(sc) != 0) {
1814		/* error printed by ixl_pxe_clear */
1815		goto shutdown;
1816	}
1817
1818	if (ixl_get_mac(sc) != 0) {
1819		/* error printed by ixl_get_mac */
1820		goto shutdown;
1821	}
1822
1823	if (pci_intr_map_msix(pa, 0, &sc->sc_ih) == 0) {
1824		int nmsix = pci_intr_msix_count(pa);
1825		if (nmsix > 1) { /* we used 1 (the 0th) for the adminq */
1826			nmsix--;
1827
1828			sc->sc_intrmap = intrmap_create(&sc->sc_dev,
1829			    nmsix, IXL_MAX_VECTORS, INTRMAP_POWEROF2);
1830			nqueues = intrmap_count(sc->sc_intrmap);
1831			KASSERT(nqueues > 0);
1832			KASSERT(powerof2(nqueues));
1833			sc->sc_nqueues = fls(nqueues) - 1;
1834		}
1835	} else {
1836		if (pci_intr_map_msi(pa, &sc->sc_ih) != 0 &&
1837		    pci_intr_map(pa, &sc->sc_ih) != 0) {
1838			printf(", unable to map interrupt\n");
1839			goto shutdown;
1840		}
1841	}
1842
1843	nqueues = ixl_nqueues(sc);
1844
1845	printf(", %s, %d queue%s, address %s\n",
1846	    pci_intr_string(sc->sc_pc, sc->sc_ih), ixl_nqueues(sc),
1847	    (nqueues > 1 ? "s" : ""),
1848	    ether_sprintf(sc->sc_ac.ac_enaddr));
1849
1850	if (ixl_hmc(sc) != 0) {
1851		/* error printed by ixl_hmc */
1852		goto shutdown;
1853	}
1854
1855	if (ixl_lldp_shut(sc) != 0) {
1856		/* error printed by ixl_lldp_shut */
1857		goto free_hmc;
1858	}
1859
1860	if (ixl_phy_mask_ints(sc) != 0) {
1861		/* error printed by ixl_phy_mask_ints */
1862		goto free_hmc;
1863	}
1864
1865	if (ixl_restart_an(sc) != 0) {
1866		/* error printed by ixl_restart_an */
1867		goto free_hmc;
1868	}
1869
1870	if (ixl_get_switch_config(sc) != 0) {
1871		/* error printed by ixl_get_switch_config */
1872		goto free_hmc;
1873	}
1874
1875	if (ixl_get_phy_types(sc, &phy_types) != 0) {
1876		/* error printed by ixl_get_phy_abilities */
1877		goto free_hmc;
1878	}
1879
1880	mtx_init(&sc->sc_link_state_mtx, IPL_NET);
1881	if (ixl_get_link_status(sc) != 0) {
1882		/* error printed by ixl_get_link_status */
1883		goto free_hmc;
1884	}
1885
1886	if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1887	    sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1888		printf("%s: unable to allocate scratch buffer\n", DEVNAME(sc));
1889		goto free_hmc;
1890	}
1891
1892	if (ixl_get_vsi(sc) != 0) {
1893		/* error printed by ixl_get_vsi */
1894		goto free_hmc;
1895	}
1896
1897	if (ixl_set_vsi(sc) != 0) {
1898		/* error printed by ixl_set_vsi */
1899		goto free_scratch;
1900	}
1901
1902	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
1903	    IPL_NET | IPL_MPSAFE, ixl_intr0, sc, DEVNAME(sc));
1904	if (sc->sc_ihc == NULL) {
1905		printf("%s: unable to establish interrupt handler\n",
1906		    DEVNAME(sc));
1907		goto free_scratch;
1908	}
1909
1910	sc->sc_vectors = mallocarray(sizeof(*sc->sc_vectors), nqueues,
1911	    M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1912	if (sc->sc_vectors == NULL) {
1913		printf("%s: unable to allocate vectors\n", DEVNAME(sc));
1914		goto free_scratch;
1915	}
1916
1917	for (i = 0; i < nqueues; i++) {
1918		struct ixl_vector *iv = &sc->sc_vectors[i];
1919		iv->iv_sc = sc;
1920		iv->iv_qid = i;
1921		snprintf(iv->iv_name, sizeof(iv->iv_name),
1922		    "%s:%u", DEVNAME(sc), i); /* truncated? */
1923	}
1924
1925	if (sc->sc_intrmap) {
1926		for (i = 0; i < nqueues; i++) {
1927			struct ixl_vector *iv = &sc->sc_vectors[i];
1928			pci_intr_handle_t ih;
1929			int v = i + 1; /* 0 is used for adminq */
1930
1931			if (pci_intr_map_msix(pa, v, &ih)) {
1932				printf("%s: unable to map msi-x vector %d\n",
1933				    DEVNAME(sc), v);
1934				goto free_vectors;
1935			}
1936
1937			iv->iv_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
1938			    IPL_NET | IPL_MPSAFE,
1939			    intrmap_cpu(sc->sc_intrmap, i),
1940			    ixl_intr_vector, iv, iv->iv_name);
1941			if (iv->iv_ihc == NULL) {
1942				printf("%s: unable to establish interrupt %d\n",
1943				    DEVNAME(sc), v);
1944				goto free_vectors;
1945			}
1946
1947			ixl_wr(sc, I40E_PFINT_DYN_CTLN(i),
1948			    I40E_PFINT_DYN_CTLN_INTENA_MASK |
1949			    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1950			    (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1951		}
1952	}
1953
1954	/* fixup the chip ops for older fw releases */
1955	if (sc->sc_chip == &ixl_710 &&
1956	    sc->sc_api_major == 1 && sc->sc_api_minor < 5)
1957		sc->sc_chip = &ixl_710_decrepit;
1958
1959	ifp->if_softc = sc;
1960	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1961	ifp->if_xflags = IFXF_MPSAFE;
1962	ifp->if_ioctl = ixl_ioctl;
1963	ifp->if_qstart = ixl_start;
1964	ifp->if_watchdog = ixl_watchdog;
1965	ifp->if_hardmtu = IXL_HARDMTU;
1966	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
1967	ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1968
1969	ifp->if_capabilities = IFCAP_VLAN_HWTAGGING;
1970	ifp->if_capabilities |= IFCAP_CSUM_IPv4 |
1971	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
1972	    IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
1973	ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1974
1975	ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);
1976
1977	ixl_media_add(sc, phy_types);
1978	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1979	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1980
1981	if_attach(ifp);
1982	ether_ifattach(ifp);
1983
1984	if_attach_queues(ifp, nqueues);
1985	if_attach_iqueues(ifp, nqueues);
1986
1987	task_set(&sc->sc_link_state_task, ixl_link_state_update, sc);
1988	ixl_wr(sc, I40E_PFINT_ICR0_ENA,
1989	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
1990	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK);
1991	ixl_wr(sc, I40E_PFINT_STAT_CTL0,
1992	    IXL_NOITR << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
1993
1994	/* remove default mac filter and replace it so we can see vlans */
1995	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0, 0);
1996	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
1997	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1998	ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
1999	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2000	ixl_add_macvlan(sc, etherbroadcastaddr, 0,
2001	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2002	memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
2003
2004	ixl_intr_enable(sc);
2005
2006#if NKSTAT > 0
2007	ixl_kstat_attach(sc);
2008#endif
2009
2010	return;
2011free_vectors:
2012	if (sc->sc_intrmap != NULL) {
2013		for (i = 0; i < nqueues; i++) {
2014			struct ixl_vector *iv = &sc->sc_vectors[i];
2015			if (iv->iv_ihc == NULL)
2016				continue;
2017			pci_intr_disestablish(sc->sc_pc, iv->iv_ihc);
2018		}
2019	}
2020	free(sc->sc_vectors, M_DEVBUF, nqueues * sizeof(*sc->sc_vectors));
2021free_scratch:
2022	ixl_dmamem_free(sc, &sc->sc_scratch);
2023free_hmc:
2024	ixl_hmc_free(sc);
2025shutdown:
2026	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
2027	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
2028	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2029	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2030
2031	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
2032	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
2033	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
2034
2035	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
2036	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
2037	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
2038
2039	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2040	    0, IXL_DMA_LEN(&sc->sc_arq),
2041	    BUS_DMASYNC_POSTREAD);
2042	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2043	    0, IXL_DMA_LEN(&sc->sc_atq),
2044	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2045
2046	ixl_arq_unfill(sc);
2047
2048free_arq:
2049	ixl_dmamem_free(sc, &sc->sc_arq);
2050free_atq:
2051	ixl_dmamem_free(sc, &sc->sc_atq);
2052unmap:
2053	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
2054	sc->sc_mems = 0;
2055
2056	if (sc->sc_intrmap != NULL)
2057		intrmap_destroy(sc->sc_intrmap);
2058}
2059
2060static void
2061ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
2062{
2063	struct ifmedia *ifm = &sc->sc_media;
2064	const struct ixl_phy_type *itype;
2065	unsigned int i;
2066
2067	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
2068		itype = &ixl_phy_type_map[i];
2069
2070		if (ISSET(phy_types, itype->phy_type))
2071			ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 0, NULL);
2072	}
2073}
2074
2075static int
2076ixl_media_change(struct ifnet *ifp)
2077{
2078	/* changing media is not supported */
2079	return (EOPNOTSUPP);
2080}
2081
2082static void
2083ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
2084{
2085	struct ixl_softc *sc = ifp->if_softc;
2086
2087	KERNEL_ASSERT_LOCKED();
2088
2089	mtx_enter(&sc->sc_link_state_mtx);
2090	ifm->ifm_status = sc->sc_media_status;
2091	ifm->ifm_active = sc->sc_media_active;
2092	mtx_leave(&sc->sc_link_state_mtx);
2093}
2094
2095static void
2096ixl_watchdog(struct ifnet *ifp)
2097{
2098
2099}
2100
2101int
2102ixl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2103{
2104	struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
2105	struct ifreq *ifr = (struct ifreq *)data;
2106	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
2107	int aqerror, error = 0;
2108
2109	switch (cmd) {
2110	case SIOCSIFADDR:
2111		ifp->if_flags |= IFF_UP;
2112		/* FALLTHROUGH */
2113
2114	case SIOCSIFFLAGS:
2115		if (ISSET(ifp->if_flags, IFF_UP)) {
2116			if (ISSET(ifp->if_flags, IFF_RUNNING))
2117				error = ENETRESET;
2118			else
2119				error = ixl_up(sc);
2120		} else {
2121			if (ISSET(ifp->if_flags, IFF_RUNNING))
2122				error = ixl_down(sc);
2123		}
2124		break;
2125
2126	case SIOCGIFMEDIA:
2127	case SIOCSIFMEDIA:
2128		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
2129		break;
2130
2131	case SIOCGIFRXR:
2132		error = ixl_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
2133		break;
2134
2135	case SIOCADDMULTI:
2136		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
2137			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
2138			if (error != 0)
2139				return (error);
2140
2141			aqerror = ixl_add_macvlan(sc, addrlo, 0,
2142			    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2143			if (aqerror == IXL_AQ_RC_ENOSPC) {
2144				ether_delmulti(ifr, &sc->sc_ac);
2145				error = ENOSPC;
2146			}
2147
2148			if (sc->sc_ac.ac_multirangecnt > 0) {
2149				SET(ifp->if_flags, IFF_ALLMULTI);
2150				error = ENETRESET;
2151			}
2152		}
2153		break;
2154
2155	case SIOCDELMULTI:
2156		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
2157			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
2158			if (error != 0)
2159				return (error);
2160
2161			ixl_remove_macvlan(sc, addrlo, 0,
2162			    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
2163
2164			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
2165			    sc->sc_ac.ac_multirangecnt == 0) {
2166				CLR(ifp->if_flags, IFF_ALLMULTI);
2167				error = ENETRESET;
2168			}
2169		}
2170		break;
2171
2172	case SIOCGIFSFFPAGE:
2173		error = rw_enter(&ixl_sff_lock, RW_WRITE|RW_INTR);
2174		if (error != 0)
2175			break;
2176
2177		error = ixl_get_sffpage(sc, (struct if_sffpage *)data);
2178		rw_exit(&ixl_sff_lock);
2179		break;
2180
2181	default:
2182		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2183		break;
2184	}
2185
2186	if (error == ENETRESET)
2187		error = ixl_iff(sc);
2188
2189	return (error);
2190}
2191
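/*
 * The HMC (host memory cache) backing store is a single piece of DMA
 * memory; each object type occupies a contiguous region of fixed-size
 * entries, so entry i of a type is found with simple arithmetic.
 */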
2192static inline void *
2193ixl_hmc_kva(struct ixl_softc *sc, unsigned int type, unsigned int i)
2194{
2195	uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
2196	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
2197
2198	if (i >= e->hmc_count)
2199		return (NULL);
2200
2201	kva += e->hmc_base;
2202	kva += i * e->hmc_size;
2203
2204	return (kva);
2205}
2206
2207static inline size_t
2208ixl_hmc_len(struct ixl_softc *sc, unsigned int type)
2209{
2210	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
2211
2212	return (e->hmc_size);
2213}
2214
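/*
 * Configure RSS: program the Toeplitz hash key, stripe the rings
 * across the 128-entry lookup table, and enable the chip family's
 * base set of hashable packet types (HENA).
 */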
2215static int
2216ixl_configure_rss(struct ixl_softc *sc)
2217{
2218	struct ixl_rss_key rsskey;
2219	struct ixl_rss_lut_128 lut;
2220	uint8_t *lute = (uint8_t *)&lut;
2221	uint64_t rss_hena;
2222	unsigned int i, nqueues;
2223	int error;
2224
2225#if 0
2226	/* if we want to do a 512 entry LUT, do this. */
2227	uint32_t v = ixl_rd_ctl(sc, I40E_PFQF_CTL_0);
2228	SET(v, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
2229	ixl_wr_ctl(sc, I40E_PFQF_CTL_0, v);
2230#endif
2231
2232	stoeplitz_to_key(&rsskey, sizeof(rsskey));
2233
2234	nqueues = ixl_nqueues(sc);
2235	for (i = 0; i < sizeof(lut); i++) {
2236		/*
2237		 * ixl always has a power-of-2 number of rings, so
2238		 * populating the table with mod spreads it evenly.
2239		 */
2240		lute[i] = i % nqueues;
2241	}
2242
2243	error = ixl_set_rss_key(sc, &rsskey);
2244	if (error != 0)
2245		return (error);
2246
2247	rss_hena = (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(0));
2248	rss_hena |= (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(1)) << 32;
2249	rss_hena |= ixl_rss_hena(sc);
2250	ixl_wr_ctl(sc, I40E_PFQF_HENA(0), rss_hena);
2251	ixl_wr_ctl(sc, I40E_PFQF_HENA(1), rss_hena >> 32);
2252
2253	error = ixl_set_rss_lut(sc, &lut);
2254	if (error != 0)
2255		return (error);
2256
2257	/* nothing to clean up :( */
2258
2259	return (0);
2260}
2261
2262static int
2263ixl_up(struct ixl_softc *sc)
2264{
2265	struct ifnet *ifp = &sc->sc_ac.ac_if;
2266	struct ifqueue *ifq;
2267	struct ifiqueue *ifiq;
2268	struct ixl_vector *iv;
2269	struct ixl_rx_ring *rxr;
2270	struct ixl_tx_ring *txr;
2271	unsigned int nqueues, i;
2272	uint32_t reg;
2273	int rv = ENOMEM;
2274
2275	nqueues = ixl_nqueues(sc);
2276
2277	rw_enter_write(&sc->sc_cfg_lock);
2278	if (sc->sc_dead) {
2279		rw_exit_write(&sc->sc_cfg_lock);
2280		return (ENXIO);
2281	}
2282
2283	/* allocation is the only thing that can fail, so do it up front */
2284	for (i = 0; i < nqueues; i++) {
2285		rxr = ixl_rxr_alloc(sc, i);
2286		if (rxr == NULL)
2287			goto free;
2288
2289		txr = ixl_txr_alloc(sc, i);
2290		if (txr == NULL) {
2291			ixl_rxr_free(sc, rxr);
2292			goto free;
2293		}
2294
2295		/* wire everything together */
2296		iv = &sc->sc_vectors[i];
2297		iv->iv_rxr = rxr;
2298		iv->iv_txr = txr;
2299
2300		ifq = ifp->if_ifqs[i];
2301		ifq->ifq_softc = txr;
2302		txr->txr_ifq = ifq;
2303
2304		ifiq = ifp->if_iqs[i];
2305		ifiq->ifiq_softc = rxr;
2306		rxr->rxr_ifiq = ifiq;
2307	}
2308
2309	/* XXX wait 50ms from completion of last RX queue disable */
2310
2311	for (i = 0; i < nqueues; i++) {
2312		iv = &sc->sc_vectors[i];
2313		rxr = iv->iv_rxr;
2314		txr = iv->iv_txr;
2315
2316		ixl_txr_qdis(sc, txr, 1);
2317
2318		ixl_rxr_config(sc, rxr);
2319		ixl_txr_config(sc, txr);
2320
2321		ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2322		    (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2323
2324		ixl_wr(sc, rxr->rxr_tail, 0);
2325		ixl_rxfill(sc, rxr);
2326
2327		reg = ixl_rd(sc, I40E_QRX_ENA(i));
2328		SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2329		ixl_wr(sc, I40E_QRX_ENA(i), reg);
2330
2331		reg = ixl_rd(sc, I40E_QTX_ENA(i));
2332		SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2333		ixl_wr(sc, I40E_QTX_ENA(i), reg);
2334	}
2335
2336	for (i = 0; i < nqueues; i++) {
2337		iv = &sc->sc_vectors[i];
2338		rxr = iv->iv_rxr;
2339		txr = iv->iv_txr;
2340
2341		if (ixl_rxr_enabled(sc, rxr) != 0)
2342			goto down;
2343
2344		if (ixl_txr_enabled(sc, txr) != 0)
2345			goto down;
2346	}
2347
2348	ixl_configure_rss(sc);
2349
2350	SET(ifp->if_flags, IFF_RUNNING);
2351
2352	if (sc->sc_intrmap == NULL) {
2353		ixl_wr(sc, I40E_PFINT_LNKLST0,
2354		    (I40E_INTR_NOTX_QUEUE <<
2355		     I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2356		    (I40E_QUEUE_TYPE_RX <<
2357		     I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2358
2359		ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE),
2360		    (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2361		    (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2362		    (I40E_INTR_NOTX_RX_QUEUE <<
2363		     I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
2364		    (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2365		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2366		    I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2367
2368		ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE),
2369		    (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2370		    (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2371		    (I40E_INTR_NOTX_TX_QUEUE <<
2372		     I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
2373		    (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2374		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
2375		    I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2376	} else {
2377		/* vector 0 has no queues */
2378		ixl_wr(sc, I40E_PFINT_LNKLST0,
2379		    I40E_QUEUE_TYPE_EOL <<
2380		    I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT);
2381
2382		/* queue n is mapped to vector n+1 */
2383		for (i = 0; i < nqueues; i++) {
2384			/* LNKLSTN(i) configures vector i+1 */
2385			ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
2386			    (i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2387			    (I40E_QUEUE_TYPE_RX <<
2388			     I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2389			ixl_wr(sc, I40E_QINT_RQCTL(i),
2390			    ((i+1) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2391			    (I40E_ITR_INDEX_RX <<
2392			     I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2393			    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2394			    (I40E_QUEUE_TYPE_TX <<
2395			     I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2396			    I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2397			ixl_wr(sc, I40E_QINT_TQCTL(i),
2398			    ((i+1) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2399			    (I40E_ITR_INDEX_TX <<
2400			     I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2401			    (I40E_QUEUE_TYPE_EOL <<
2402			     I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2403			    (I40E_QUEUE_TYPE_RX <<
2404			     I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
2405			    I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2406
2407			ixl_wr(sc, I40E_PFINT_ITRN(0, i), 0x7a);
2408			ixl_wr(sc, I40E_PFINT_ITRN(1, i), 0x7a);
2409			ixl_wr(sc, I40E_PFINT_ITRN(2, i), 0);
2410		}
2411	}
2412
2413	ixl_wr(sc, I40E_PFINT_ITR0(0), 0x7a);
2414	ixl_wr(sc, I40E_PFINT_ITR0(1), 0x7a);
2415	ixl_wr(sc, I40E_PFINT_ITR0(2), 0);
2416
2417	rw_exit_write(&sc->sc_cfg_lock);
2418
2419	return (ENETRESET);
2420
2421free:
2422	for (i = 0; i < nqueues; i++) {
2423		iv = &sc->sc_vectors[i];
2424		rxr = iv->iv_rxr;
2425		txr = iv->iv_txr;
2426
2427		if (rxr == NULL) {
2428			/*
2429			 * tx and rx get set at the same time, so if one
2430			 * is NULL, the other is too.
2431			 */
2432			continue;
2433		}
2434
2435		ixl_txr_free(sc, txr);
2436		ixl_rxr_free(sc, rxr);
2437	}
2438	rw_exit_write(&sc->sc_cfg_lock);
2439	return (rv);
2440down:
2441	rw_exit_write(&sc->sc_cfg_lock);
2442	ixl_down(sc);
2443	return (ETIMEDOUT);
2444}
2445
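/*
 * Push the interface rx filter state to the chip: set the VSI
 * promiscuous mode flags via the adminq and, if the lladdr has
 * changed, move the MAC filter over to the new address.
 */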
2446static int
2447ixl_iff(struct ixl_softc *sc)
2448{
2449	struct ifnet *ifp = &sc->sc_ac.ac_if;
2450	struct ixl_atq iatq;
2451	struct ixl_aq_desc *iaq;
2452	struct ixl_aq_vsi_promisc_param *param;
2453
2454	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2455		return (0);
2456
2457	memset(&iatq, 0, sizeof(iatq));
2458
2459	iaq = &iatq.iatq_desc;
2460	iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2461
2462	param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2463	param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2464	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2465	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2466		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2467		    IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2468	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2469		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2470	}
2471	param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2472	    IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2473	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2474	param->seid = sc->sc_seid;
2475
2476	ixl_atq_exec(sc, &iatq, "ixliff");
2477
2478	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2479		return (EIO);
2480
2481	if (memcmp(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) != 0) {
2482		ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
2483		    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
2484		ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
2485		    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2486		memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
2487	}
2488	return (0);
2489}
2490
2491static int
2492ixl_down(struct ixl_softc *sc)
2493{
2494	struct ifnet *ifp = &sc->sc_ac.ac_if;
2495	struct ixl_vector *iv;
2496	struct ixl_rx_ring *rxr;
2497	struct ixl_tx_ring *txr;
2498	unsigned int nqueues, i;
2499	uint32_t reg;
2500	int error = 0;
2501
2502	nqueues = ixl_nqueues(sc);
2503
2504	rw_enter_write(&sc->sc_cfg_lock);
2505
2506	CLR(ifp->if_flags, IFF_RUNNING);
2507
2508	NET_UNLOCK();
2509
2510	/* mask interrupts */
2511	reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
2512	CLR(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2513	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
2514
2515	reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
2516	CLR(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2517	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
2518
2519	ixl_wr(sc, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL);
2520
2521	/* make sure no hw generated work is still in flight */
2522	intr_barrier(sc->sc_ihc);
2523	if (sc->sc_intrmap != NULL) {
2524		for (i = 0; i < nqueues; i++) {
2525			iv = &sc->sc_vectors[i];
2526			rxr = iv->iv_rxr;
2527			txr = iv->iv_txr;
2528
2529			ixl_txr_qdis(sc, txr, 0);
2530
2531			ifq_barrier(txr->txr_ifq);
2532
2533			timeout_del_barrier(&rxr->rxr_refill);
2534
2535			intr_barrier(iv->iv_ihc);
2536		}
2537	}
2538
2539	/* XXX wait at least 400 usec for all tx queues in one go */
2540	delay(500);
2541
2542	for (i = 0; i < nqueues; i++) {
2543		reg = ixl_rd(sc, I40E_QTX_ENA(i));
2544		CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2545		ixl_wr(sc, I40E_QTX_ENA(i), reg);
2546
2547		reg = ixl_rd(sc, I40E_QRX_ENA(i));
2548		CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2549		ixl_wr(sc, I40E_QRX_ENA(i), reg);
2550	}
2551
2552	for (i = 0; i < nqueues; i++) {
2553		iv = &sc->sc_vectors[i];
2554		rxr = iv->iv_rxr;
2555		txr = iv->iv_txr;
2556
2557		if (ixl_txr_disabled(sc, txr) != 0)
2558			goto die;
2559
2560		if (ixl_rxr_disabled(sc, rxr) != 0)
2561			goto die;
2562	}
2563
2564	for (i = 0; i < nqueues; i++) {
2565		iv = &sc->sc_vectors[i];
2566		rxr = iv->iv_rxr;
2567		txr = iv->iv_txr;
2568
2569		ixl_txr_unconfig(sc, txr);
2570		ixl_rxr_unconfig(sc, rxr);
2571
2572		ixl_txr_clean(sc, txr);
2573		ixl_rxr_clean(sc, rxr);
2574
2575		ixl_txr_free(sc, txr);
2576		ixl_rxr_free(sc, rxr);
2577
2578		ifp->if_iqs[i]->ifiq_softc = NULL;
2579		ifp->if_ifqs[i]->ifq_softc = NULL;
2580	}
2581
2582out:
2583	rw_exit_write(&sc->sc_cfg_lock);
2584	NET_LOCK();
2585	return (error);
2586die:
2587	sc->sc_dead = 1;
2588	log(LOG_CRIT, "%s: failed to shut down rings\n", DEVNAME(sc));
2589	error = ETIMEDOUT;
2590	goto out;
2591}
2592
2593static struct ixl_tx_ring *
2594ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2595{
2596	struct ixl_tx_ring *txr;
2597	struct ixl_tx_map *maps, *txm;
2598	unsigned int i;
2599
2600	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
2601	if (txr == NULL)
2602		return (NULL);
2603
2604	maps = mallocarray(sizeof(*maps),
2605	    sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
2606	if (maps == NULL)
2607		goto free;
2608
2609	if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2610	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2611	    IXL_TX_QUEUE_ALIGN) != 0)
2612		goto freemap;
2613
2614	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2615		txm = &maps[i];
2616
2617		if (bus_dmamap_create(sc->sc_dmat,
2618		    MAXMCLBYTES, IXL_TX_PKT_DESCS, IXL_MAX_DMA_SEG_SIZE, 0,
2619		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2620		    &txm->txm_map) != 0)
2621			goto uncreate;
2622
2623		txm->txm_eop = -1;
2624		txm->txm_m = NULL;
2625	}
2626
2627	txr->txr_cons = txr->txr_prod = 0;
2628	txr->txr_maps = maps;
2629
2630	txr->txr_tail = I40E_QTX_TAIL(qid);
2631	txr->txr_qid = qid;
2632
2633	return (txr);
2634
2635uncreate:
2636	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2637		txm = &maps[i];
2638
2639		if (txm->txm_map == NULL)
2640			continue;
2641
2642		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2643	}
2644
2645	ixl_dmamem_free(sc, &txr->txr_mem);
2646freemap:
2647	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2648free:
2649	free(txr, M_DEVBUF, sizeof(*txr));
2650	return (NULL);
2651}
2652
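/*
 * GLLAN_TXPRE_QDIS is an array of registers that each cover 128
 * queues: index the array with qid / 128, put qid % 128 in the QINDX
 * field, and set either the CLEAR_QDIS (enable) or SET_QDIS (disable)
 * bit.
 */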
2653static void
2654ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2655{
2656	unsigned int qid;
2657	bus_size_t reg;
2658	uint32_t r;
2659
2660	qid = txr->txr_qid + sc->sc_base_queue;
2661	reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2662	qid %= 128;
2663
2664	r = ixl_rd(sc, reg);
2665	CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2666	SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2667	SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2668	    I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2669	ixl_wr(sc, reg, r);
2670}
2671
2672static void
2673ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2674{
2675	struct ixl_hmc_txq txq;
2676	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2677	void *hmc;
2678
2679	memset(&txq, 0, sizeof(txq));
2680	txq.head = htole16(0);
2681	txq.new_context = 1;
2682	htolem64(&txq.base,
2683	    IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2684	txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2685	htolem16(&txq.qlen, sc->sc_tx_ring_ndescs);
2686	txq.tphrdesc_ena = 0;
2687	txq.tphrpacket_ena = 0;
2688	txq.tphwdesc_ena = 0;
2689	txq.rdylist = data->qs_handle[0];
2690
2691	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2692	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2693	ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, nitems(ixl_hmc_pack_txq));
2694}
2695
2696static void
2697ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2698{
2699	void *hmc;
2700
2701	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2702	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2703}
2704
2705static void
2706ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2707{
2708	struct ixl_tx_map *maps, *txm;
2709	bus_dmamap_t map;
2710	unsigned int i;
2711
2712	maps = txr->txr_maps;
2713	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2714		txm = &maps[i];
2715
2716		if (txm->txm_m == NULL)
2717			continue;
2718
2719		map = txm->txm_map;
2720		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2721		    BUS_DMASYNC_POSTWRITE);
2722		bus_dmamap_unload(sc->sc_dmat, map);
2723
2724		m_freem(txm->txm_m);
2725		txm->txm_m = NULL;
2726	}
2727}
2728
2729static int
2730ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2731{
2732	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2733	uint32_t reg;
2734	int i;
2735
2736	for (i = 0; i < 10; i++) {
2737		reg = ixl_rd(sc, ena);
2738		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2739			return (0);
2740
2741		delaymsec(10);
2742	}
2743
2744	return (ETIMEDOUT);
2745}
2746
2747static int
2748ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2749{
2750	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2751	uint32_t reg;
2752	int i;
2753
2754	for (i = 0; i < 20; i++) {
2755		reg = ixl_rd(sc, ena);
2756		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2757			return (0);
2758
2759		delaymsec(10);
2760	}
2761
2762	return (ETIMEDOUT);
2763}
2764
2765static void
2766ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2767{
2768	struct ixl_tx_map *maps, *txm;
2769	unsigned int i;
2770
2771	maps = txr->txr_maps;
2772	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2773		txm = &maps[i];
2774
2775		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2776	}
2777
2778	ixl_dmamem_free(sc, &txr->txr_mem);
2779	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2780	free(txr, M_DEVBUF, sizeof(*txr));
2781}
2782
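/*
 * Try to DMA map an mbuf chain; if it has too many segments (EFBIG),
 * compact the chain with m_defrag() and retry once.
 */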
2783static inline int
2784ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
2785{
2786	int error;
2787
2788	error = bus_dmamap_load_mbuf(dmat, map, m,
2789	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
2790	if (error != EFBIG)
2791		return (error);
2792
2793	error = m_defrag(m, M_DONTWAIT);
2794	if (error != 0)
2795		return (error);
2796
2797	return (bus_dmamap_load_mbuf(dmat, map, m,
2798	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
2799}
2800
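/*
 * Build the offload bits for a packet's tx data descriptors: VLAN tag
 * insertion, IP/TCP/UDP checksum commands and the header lengths they
 * need. For TSO, also write a context descriptor into the slot at
 * prod, which the caller accounts for.
 */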
2801static uint64_t
2802ixl_tx_setup_offload(struct mbuf *m0, struct ixl_tx_ring *txr,
2803    unsigned int prod)
2804{
2805	struct ether_extracted ext;
2806	uint64_t hlen;
2807	uint64_t offload = 0;
2808
2809	if (ISSET(m0->m_flags, M_VLANTAG)) {
2810		uint64_t vtag = m0->m_pkthdr.ether_vtag;
2811		offload |= IXL_TX_DESC_CMD_IL2TAG1;
2812		offload |= vtag << IXL_TX_DESC_L2TAG1_SHIFT;
2813	}
2814
2815	if (!ISSET(m0->m_pkthdr.csum_flags,
2816	    M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT|M_TCP_TSO))
2817		return (offload);
2818
2819	ether_extract_headers(m0, &ext);
2820
2821	if (ext.ip4) {
2822		offload |= ISSET(m0->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT) ?
2823		    IXL_TX_DESC_CMD_IIPT_IPV4_CSUM :
2824		    IXL_TX_DESC_CMD_IIPT_IPV4;
2825#ifdef INET6
2826	} else if (ext.ip6) {
2827		offload |= IXL_TX_DESC_CMD_IIPT_IPV6;
2828#endif
2829	} else {
2830		panic("CSUM_OUT set for non-IP packet");
2831		/* NOTREACHED */
2832	}
2833	hlen = ext.iphlen;
2834
2835	offload |= (ETHER_HDR_LEN >> 1) << IXL_TX_DESC_MACLEN_SHIFT;
2836	offload |= (hlen >> 2) << IXL_TX_DESC_IPLEN_SHIFT;
2837
2838	if (ext.tcp && ISSET(m0->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) {
2839		offload |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2840		offload |= (uint64_t)(ext.tcphlen >> 2)
2841		    << IXL_TX_DESC_L4LEN_SHIFT;
2842	} else if (ext.udp && ISSET(m0->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) {
2843		offload |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2844		offload |= (uint64_t)(sizeof(*ext.udp) >> 2)
2845		    << IXL_TX_DESC_L4LEN_SHIFT;
2846	}
2847
2848	if (ISSET(m0->m_pkthdr.csum_flags, M_TCP_TSO)) {
2849		if (ext.tcp && m0->m_pkthdr.ph_mss > 0) {
2850			struct ixl_tx_desc *ring, *txd;
2851			uint64_t cmd = 0, paylen, outlen;
2852
2853			hlen += ext.tcphlen;
2854
2855			/*
2856			 * The MSS must be at least 64 and at most
2857			 * 9668 bytes.
2858			 */
2859			outlen = MIN(9668, MAX(64, m0->m_pkthdr.ph_mss));
2860			paylen = m0->m_pkthdr.len - ETHER_HDR_LEN - hlen;
2861
2862			ring = IXL_DMA_KVA(&txr->txr_mem);
2863			txd = &ring[prod];
2864
2865			cmd |= IXL_TX_DESC_DTYPE_CONTEXT;
2866			cmd |= IXL_TX_CTX_DESC_CMD_TSO;
2867			cmd |= paylen << IXL_TX_CTX_DESC_TLEN_SHIFT;
2868			cmd |= outlen << IXL_TX_CTX_DESC_MSS_SHIFT;
2869
2870			htolem64(&txd->addr, 0);
2871			htolem64(&txd->cmd, cmd);
2872
2873			tcpstat_add(tcps_outpkttso,
2874			    (paylen + outlen - 1) / outlen);
2875		} else
2876			tcpstat_inc(tcps_outbadtso);
2877	}
2878
2879	return (offload);
2880}
2881
2882static void
2883ixl_start(struct ifqueue *ifq)
2884{
2885	struct ifnet *ifp = ifq->ifq_if;
2886	struct ixl_softc *sc = ifp->if_softc;
2887	struct ixl_tx_ring *txr = ifq->ifq_softc;
2888	struct ixl_tx_desc *ring, *txd;
2889	struct ixl_tx_map *txm;
2890	bus_dmamap_t map;
2891	struct mbuf *m;
2892	uint64_t cmd;
2893	unsigned int prod, free, last, i;
2894	unsigned int mask;
2895	int post = 0;
2896	uint64_t offload;
2897#if NBPFILTER > 0
2898	caddr_t if_bpf;
2899#endif
2900
2901	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
2902		ifq_purge(ifq);
2903		return;
2904	}
2905
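	/* count the free descriptors between prod and cons on the ring */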
2906	prod = txr->txr_prod;
2907	free = txr->txr_cons;
2908	if (free <= prod)
2909		free += sc->sc_tx_ring_ndescs;
2910	free -= prod;
2911
2912	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2913	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2914
2915	ring = IXL_DMA_KVA(&txr->txr_mem);
2916	mask = sc->sc_tx_ring_ndescs - 1;
2917
2918	for (;;) {
2919		/* We need one extra descriptor for TSO packets. */
2920		if (free <= (IXL_TX_PKT_DESCS + 1)) {
2921			ifq_set_oactive(ifq);
2922			break;
2923		}
2924
2925		m = ifq_dequeue(ifq);
2926		if (m == NULL)
2927			break;
2928
2929		offload = ixl_tx_setup_offload(m, txr, prod);
2930
2931		txm = &txr->txr_maps[prod];
2932		map = txm->txm_map;
2933
2934		if (ISSET(m->m_pkthdr.csum_flags, M_TCP_TSO)) {
2935			prod++;
2936			prod &= mask;
2937			free--;
2938		}
2939
2940		if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
2941			ifq->ifq_errors++;
2942			m_freem(m);
2943			continue;
2944		}
2945
2946		bus_dmamap_sync(sc->sc_dmat, map, 0,
2947		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2948
2949		for (i = 0; i < map->dm_nsegs; i++) {
2950			txd = &ring[prod];
2951
2952			cmd = (uint64_t)map->dm_segs[i].ds_len <<
2953			    IXL_TX_DESC_BSIZE_SHIFT;
2954			cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2955			cmd |= offload;
2956
2957			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
2958			htolem64(&txd->cmd, cmd);
2959
2960			last = prod;
2961
2962			prod++;
2963			prod &= mask;
2964		}
2965		cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2966		htolem64(&txd->cmd, cmd);
2967
2968		txm->txm_m = m;
2969		txm->txm_eop = last;
2970
2971#if NBPFILTER > 0
2972		if_bpf = ifp->if_bpf;
2973		if (if_bpf)
2974			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
2975#endif
2976
2977		free -= i;
2978		post = 1;
2979	}
2980
2981	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2982	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2983
2984	if (post) {
2985		txr->txr_prod = prod;
2986		ixl_wr(sc, txr->txr_tail, prod);
2987	}
2988}
2989
2990static int
2991ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2992{
2993	struct ifqueue *ifq = txr->txr_ifq;
2994	struct ixl_tx_desc *ring, *txd;
2995	struct ixl_tx_map *txm;
2996	bus_dmamap_t map;
2997	unsigned int cons, prod, last;
2998	unsigned int mask;
2999	uint64_t dtype;
3000	int done = 0;
3001
3002	prod = txr->txr_prod;
3003	cons = txr->txr_cons;
3004
3005	if (cons == prod)
3006		return (0);
3007
3008	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3009	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
3010
3011	ring = IXL_DMA_KVA(&txr->txr_mem);
3012	mask = sc->sc_tx_ring_ndescs - 1;
3013
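	/*
	 * The chip rewrites the descriptor type field of a packet's last
	 * descriptor (txm_eop) to DONE once it has been transmitted; walk
	 * forward freeing completed packets until we find one that is
	 * still in flight.
	 */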
3014	do {
3015		txm = &txr->txr_maps[cons];
3016		last = txm->txm_eop;
3017		txd = &ring[last];
3018
3019		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
3020		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
3021			break;
3022
3023		map = txm->txm_map;
3024
3025		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3026		    BUS_DMASYNC_POSTWRITE);
3027		bus_dmamap_unload(sc->sc_dmat, map);
3028		m_freem(txm->txm_m);
3029
3030		txm->txm_m = NULL;
3031		txm->txm_eop = -1;
3032
3033		cons = last + 1;
3034		cons &= mask;
3035
3036		done = 1;
3037	} while (cons != prod);
3038
3039	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3040	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
3041
3042	txr->txr_cons = cons;
3043
3046	if (ifq_is_oactive(ifq))
3047		ifq_restart(ifq);
3048
3049	return (done);
3050}
3051
3052static struct ixl_rx_ring *
3053ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
3054{
3055	struct ixl_rx_ring *rxr;
3056	struct ixl_rx_map *maps, *rxm;
3057	unsigned int i;
3058
3059	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
3060	if (rxr == NULL)
3061		return (NULL);
3062
3063	maps = mallocarray(sizeof(*maps),
3064	    sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
3065	if (maps == NULL)
3066		goto free;
3067
3068	if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
3069	    sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs,
3070	    IXL_RX_QUEUE_ALIGN) != 0)
3071		goto freemap;
3072
3073	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3074		rxm = &maps[i];
3075
3076		if (bus_dmamap_create(sc->sc_dmat,
3077		    IXL_HARDMTU, 1, IXL_HARDMTU, 0,
3078		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
3079		    &rxm->rxm_map) != 0)
3080			goto uncreate;
3081
3082		rxm->rxm_m = NULL;
3083	}
3084
3085	rxr->rxr_sc = sc;
3086	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
3087	timeout_set(&rxr->rxr_refill, ixl_rxrefill, rxr);
3088	rxr->rxr_cons = rxr->rxr_prod = 0;
3089	rxr->rxr_m_head = NULL;
3090	rxr->rxr_m_tail = &rxr->rxr_m_head;
3091	rxr->rxr_maps = maps;
3092
3093	rxr->rxr_tail = I40E_QRX_TAIL(qid);
3094	rxr->rxr_qid = qid;
3095
3096	return (rxr);
3097
3098uncreate:
3099	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3100		rxm = &maps[i];
3101
3102		if (rxm->rxm_map == NULL)
3103			continue;
3104
3105		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3106	}
3107
3108	ixl_dmamem_free(sc, &rxr->rxr_mem);
3109freemap:
3110	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
3111free:
3112	free(rxr, M_DEVBUF, sizeof(*rxr));
3113	return (NULL);
3114}
3115
3116static void
3117ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3118{
3119	struct ixl_rx_map *maps, *rxm;
3120	bus_dmamap_t map;
3121	unsigned int i;
3122
3123	timeout_del_barrier(&rxr->rxr_refill);
3124
3125	maps = rxr->rxr_maps;
3126	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3127		rxm = &maps[i];
3128
3129		if (rxm->rxm_m == NULL)
3130			continue;
3131
3132		map = rxm->rxm_map;
3133		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3134		    BUS_DMASYNC_POSTWRITE);
3135		bus_dmamap_unload(sc->sc_dmat, map);
3136
3137		m_freem(rxm->rxm_m);
3138		rxm->rxm_m = NULL;
3139	}
3140
3141	m_freem(rxr->rxr_m_head);
3142	rxr->rxr_m_head = NULL;
3143	rxr->rxr_m_tail = &rxr->rxr_m_head;
3144
3145	rxr->rxr_prod = rxr->rxr_cons = 0;
3146}
3147
3148static int
3149ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3150{
3151	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3152	uint32_t reg;
3153	int i;
3154
3155	for (i = 0; i < 10; i++) {
3156		reg = ixl_rd(sc, ena);
3157		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3158			return (0);
3159
3160		delaymsec(10);
3161	}
3162
3163	return (ETIMEDOUT);
3164}
3165
3166static int
3167ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3168{
3169	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3170	uint32_t reg;
3171	int i;
3172
3173	for (i = 0; i < 20; i++) {
3174		reg = ixl_rd(sc, ena);
3175		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3176			return (0);
3177
3178		delaymsec(10);
3179	}
3180
3181	return (ETIMEDOUT);
3182}
3183
3184static void
3185ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3186{
3187	struct ixl_hmc_rxq rxq;
3188	void *hmc;
3189
3190	memset(&rxq, 0, sizeof(rxq));
3191
3192	rxq.head = htole16(0);
3193	htolem64(&rxq.base,
3194	    IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3195	htolem16(&rxq.qlen, sc->sc_rx_ring_ndescs);
3196	rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3197	rxq.hbuff = 0;
3198	rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3199	rxq.dsize = IXL_HMC_RXQ_DSIZE_16;
3200	rxq.crcstrip = 1;
3201	rxq.l2tsel = IXL_HMC_RXQ_L2TSEL_1ST_TAG_TO_L2TAG1;
3202	rxq.showiv = 0;
3203	rxq.rxmax = htole16(IXL_HARDMTU);
3204	rxq.tphrdesc_ena = 0;
3205	rxq.tphwdesc_ena = 0;
3206	rxq.tphdata_ena = 0;
3207	rxq.tphhead_ena = 0;
3208	rxq.lrxqthresh = 0;
3209	rxq.prefena = 1;
3210
3211	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3212	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3213	ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, nitems(ixl_hmc_pack_rxq));
3214}
3215
3216static void
3217ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3218{
3219	void *hmc;
3220
3221	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3222	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3223}
3224
3225static void
3226ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3227{
3228	struct ixl_rx_map *maps, *rxm;
3229	unsigned int i;
3230
3231	maps = rxr->rxr_maps;
3232	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3233		rxm = &maps[i];
3234
3235		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3236	}
3237
3238	ixl_dmamem_free(sc, &rxr->rxr_mem);
3239	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
3240	free(rxr, M_DEVBUF, sizeof(*rxr));
3241}
3242
3243static int
3244ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3245{
3246	struct ifiqueue *ifiq = rxr->rxr_ifiq;
3247	struct ifnet *ifp = &sc->sc_ac.ac_if;
3248	struct ixl_rx_wb_desc_16 *ring, *rxd;
3249	struct ixl_rx_map *rxm;
3250	bus_dmamap_t map;
3251	unsigned int cons, prod;
3252	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3253	struct mbuf *m;
3254	uint64_t word;
3255	unsigned int len;
3256	unsigned int mask;
3257	int done = 0;
3258
3259	prod = rxr->rxr_prod;
3260	cons = rxr->rxr_cons;
3261
3262	if (cons == prod)
3263		return (0);
3264
3265	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3266	    0, IXL_DMA_LEN(&rxr->rxr_mem),
3267	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3268
3269	ring = IXL_DMA_KVA(&rxr->rxr_mem);
3270	mask = sc->sc_rx_ring_ndescs - 1;
3271
3272	do {
3273		rxd = &ring[cons];
3274
3275		word = lemtoh64(&rxd->qword1);
3276		if (!ISSET(word, IXL_RX_DESC_DD))
3277			break;
3278
3279		if_rxr_put(&rxr->rxr_acct, 1);
3280
3281		rxm = &rxr->rxr_maps[cons];
3282
3283		map = rxm->rxm_map;
3284		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3285		    BUS_DMASYNC_POSTREAD);
3286		bus_dmamap_unload(sc->sc_dmat, map);
3287
3288		m = rxm->rxm_m;
3289		rxm->rxm_m = NULL;
3290
3291		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3292		m->m_len = len;
3293		m->m_pkthdr.len = 0;
3294
3295		m->m_next = NULL;
3296		*rxr->rxr_m_tail = m;
3297		rxr->rxr_m_tail = &m->m_next;
3298
3299		m = rxr->rxr_m_head;
3300		m->m_pkthdr.len += len;
3301
3302		if (ISSET(word, IXL_RX_DESC_EOP)) {
3303			if (!ISSET(word,
3304			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3305				if ((word & IXL_RX_DESC_FLTSTAT_MASK) ==
3306				    IXL_RX_DESC_FLTSTAT_RSS) {
3307					m->m_pkthdr.ph_flowid =
3308					    lemtoh32(&rxd->filter_status);
3309					m->m_pkthdr.csum_flags |= M_FLOWID;
3310				}
3311
3312				if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3313					m->m_pkthdr.ether_vtag =
3314					    lemtoh16(&rxd->l2tag1);
3315					SET(m->m_flags, M_VLANTAG);
3316				}
3317
3318				ixl_rx_checksum(m, word);
3319				ml_enqueue(&ml, m);
3320			} else {
3321				ifp->if_ierrors++; /* XXX */
3322				m_freem(m);
3323			}
3324
3325			rxr->rxr_m_head = NULL;
3326			rxr->rxr_m_tail = &rxr->rxr_m_head;
3327		}
3328
3329		cons++;
3330		cons &= mask;
3331
3332		done = 1;
3333	} while (cons != prod);
3334
3335	if (done) {
3336		rxr->rxr_cons = cons;
3337		if (ifiq_input(ifiq, &ml))
3338			if_rxr_livelocked(&rxr->rxr_acct);
3339		ixl_rxfill(sc, rxr);
3340	}
3341
3342	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3343	    0, IXL_DMA_LEN(&rxr->rxr_mem),
3344	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3345
3346	return (done);
3347}
3348
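/*
 * Top up the rx ring with fresh clusters, taking slots from the
 * if_rxr accounting. If the ring ends up completely empty, retry from
 * a timeout; otherwise tell the chip about the new descriptors.
 */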
3349static void
3350ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3351{
3352	struct ixl_rx_rd_desc_16 *ring, *rxd;
3353	struct ixl_rx_map *rxm;
3354	bus_dmamap_t map;
3355	struct mbuf *m;
3356	unsigned int prod;
3357	unsigned int slots;
3358	unsigned int mask;
3359	int post = 0;
3360
3361	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
3362	if (slots == 0)
3363		return;
3364
3365	prod = rxr->rxr_prod;
3366
3367	ring = IXL_DMA_KVA(&rxr->rxr_mem);
3368	mask = sc->sc_rx_ring_ndescs - 1;
3369
3370	do {
3371		rxm = &rxr->rxr_maps[prod];
3372
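		/*
		 * Allocate a cluster and point m_data at the end of it,
		 * so exactly MCLBYTES + ETHER_ALIGN bytes are available
		 * to the chip.
		 */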
3373		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
3374		if (m == NULL)
3375			break;
3376		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
3377		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
3378
3379		map = rxm->rxm_map;
3380
3381		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3382		    BUS_DMA_NOWAIT) != 0) {
3383			m_freem(m);
3384			break;
3385		}
3386
3387		rxm->rxm_m = m;
3388
3389		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3390		    BUS_DMASYNC_PREREAD);
3391
3392		rxd = &ring[prod];
3393
3394		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
3395		rxd->haddr = htole64(0);
3396
3397		prod++;
3398		prod &= mask;
3399
3400		post = 1;
3401	} while (--slots);
3402
3403	if_rxr_put(&rxr->rxr_acct, slots);
3404
3405	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
3406		timeout_add(&rxr->rxr_refill, 1);
3407	else if (post) {
3408		rxr->rxr_prod = prod;
3409		ixl_wr(sc, rxr->rxr_tail, prod);
3410	}
3411}
3412
3413void
3414ixl_rxrefill(void *arg)
3415{
3416	struct ixl_rx_ring *rxr = arg;
3417	struct ixl_softc *sc = rxr->rxr_sc;
3418
3419	ixl_rxfill(sc, rxr);
3420}
3421
3422static int
3423ixl_rxrinfo(struct ixl_softc *sc, struct if_rxrinfo *ifri)
3424{
3425	struct ifnet *ifp = &sc->sc_ac.ac_if;
3426	struct if_rxring_info *ifr;
3427	struct ixl_rx_ring *ring;
3428	int i, rv;
3429
3430	if (!ISSET(ifp->if_flags, IFF_RUNNING))
3431		return (ENOTTY);
3432
3433	ifr = mallocarray(sizeof(*ifr), ixl_nqueues(sc), M_TEMP,
3434	    M_WAITOK|M_CANFAIL|M_ZERO);
3435	if (ifr == NULL)
3436		return (ENOMEM);
3437
3438	for (i = 0; i < ixl_nqueues(sc); i++) {
3439		ring = ifp->if_iqs[i]->ifiq_softc;
3440		ifr[i].ifr_size = MCLBYTES;
3441		snprintf(ifr[i].ifr_name, sizeof(ifr[i].ifr_name), "%d", i);
3442		ifr[i].ifr_info = ring->rxr_acct;
3443	}
3444
3445	rv = if_rxr_info_ioctl(ifri, ixl_nqueues(sc), ifr);
3446	free(ifr, M_TEMP, ixl_nqueues(sc) * sizeof(*ifr));
3447
3448	return (rv);
3449}
3450
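/*
 * L3L4P means the chip was able to parse the L3/L4 headers; IPE and
 * L4E report IP and L4 checksum errors respectively. Mark the mbuf
 * with the corresponding IN_OK flags only when no error is reported.
 */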
3451static void
3452ixl_rx_checksum(struct mbuf *m, uint64_t word)
3453{
3454	if (!ISSET(word, IXL_RX_DESC_L3L4P))
3455		return;
3456
3457	if (ISSET(word, IXL_RX_DESC_IPE))
3458		return;
3459
3460	m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3461
3462	if (ISSET(word, IXL_RX_DESC_L4E))
3463		return;
3464
3465	m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3466}
3467
3468static int
3469ixl_intr0(void *xsc)
3470{
3471	struct ixl_softc *sc = xsc;
3472	struct ifnet *ifp = &sc->sc_ac.ac_if;
3473	uint32_t icr;
3474	int rv = 0;
3475
3476	ixl_intr_enable(sc);
3477	icr = ixl_rd(sc, I40E_PFINT_ICR0);
3478
3479	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3480		ixl_atq_done(sc);
3481		task_add(systq, &sc->sc_arq_task);
3482		rv = 1;
3483	}
3484
3485	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3486		task_add(systq, &sc->sc_link_state_task);
3487		rv = 1;
3488	}
3489
3490	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3491		struct ixl_vector *iv = sc->sc_vectors;
3492		if (ISSET(icr, I40E_INTR_NOTX_RX_MASK))
3493			rv |= ixl_rxeof(sc, iv->iv_rxr);
3494		if (ISSET(icr, I40E_INTR_NOTX_TX_MASK))
3495			rv |= ixl_txeof(sc, iv->iv_txr);
3496	}
3497
3498	return (rv);
3499}
3500
3501static int
3502ixl_intr_vector(void *v)
3503{
3504	struct ixl_vector *iv = v;
3505	struct ixl_softc *sc = iv->iv_sc;
3506	struct ifnet *ifp = &sc->sc_ac.ac_if;
3507	int rv = 0;
3508
3509	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3510		rv |= ixl_rxeof(sc, iv->iv_rxr);
3511		rv |= ixl_txeof(sc, iv->iv_txr);
3512	}
3513
3514	ixl_wr(sc, I40E_PFINT_DYN_CTLN(iv->iv_qid),
3515	    I40E_PFINT_DYN_CTLN_INTENA_MASK |
3516	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3517	    (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
3518
3519	return (rv);
3520}
3521
3522static void
3523ixl_link_state_update_iaq(struct ixl_softc *sc, void *arg)
3524{
3525	struct ifnet *ifp = &sc->sc_ac.ac_if;
3526	struct ixl_aq_desc *iaq = arg;
3527	uint16_t retval;
3528	int link_state;
3529	int change = 0;
3530
3531	retval = lemtoh16(&iaq->iaq_retval);
3532	if (retval != IXL_AQ_RC_OK) {
3533		printf("%s: LINK STATUS error %u\n", DEVNAME(sc), retval);
3534		return;
3535	}
3536
3537	link_state = ixl_set_link_status(sc, iaq);
3538	mtx_enter(&sc->sc_link_state_mtx);
3539	if (ifp->if_link_state != link_state) {
3540		ifp->if_link_state = link_state;
3541		change = 1;
3542	}
3543	mtx_leave(&sc->sc_link_state_mtx);
3544
3545	if (change)
3546		if_link_state_change(ifp);
3547}
3548
3549static void
3550ixl_link_state_update(void *xsc)
3551{
3552	struct ixl_softc *sc = xsc;
3553	struct ixl_aq_desc *iaq;
3554	struct ixl_aq_link_param *param;
3555
3556	memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3557	iaq = &sc->sc_link_state_atq.iatq_desc;
3558	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3559	param = (struct ixl_aq_link_param *)iaq->iaq_param;
3560	param->notify = IXL_AQ_LINK_NOTIFY;
3561
3562	ixl_atq_set(&sc->sc_link_state_atq, ixl_link_state_update_iaq, iaq);
3563	ixl_atq_post(sc, &sc->sc_link_state_atq);
3564}
3565
3566#if 0
3567static void
3568ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3569{
3570	printf("%s: flags %b opcode %04x\n", DEVNAME(sc),
3571	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
3572	    lemtoh16(&iaq->iaq_opcode));
3573	printf("%s: datalen %u retval %u\n", DEVNAME(sc),
3574	    lemtoh16(&iaq->iaq_datalen), lemtoh16(&iaq->iaq_retval));
3575	printf("%s: cookie %016llx\n", DEVNAME(sc), iaq->iaq_cookie);
3576	printf("%s: %08x %08x %08x %08x\n", DEVNAME(sc),
3577	    lemtoh32(&iaq->iaq_param[0]), lemtoh32(&iaq->iaq_param[1]),
3578	    lemtoh32(&iaq->iaq_param[2]), lemtoh32(&iaq->iaq_param[3]));
3579}
3580#endif
3581
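/*
 * Drain the admin receive queue: for each descriptor the firmware has
 * filled in, sync the attached buffer, dispatch on the opcode, then
 * recycle the slot and buffer for reuse.
 */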
3582static void
3583ixl_arq(void *xsc)
3584{
3585	struct ixl_softc *sc = xsc;
3586	struct ixl_aq_desc *arq, *iaq;
3587	struct ixl_aq_buf *aqb;
3588	unsigned int cons = sc->sc_arq_cons;
3589	unsigned int prod;
3590	int done = 0;
3591
3592	prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3593	    sc->sc_aq_regs->arq_head_mask;
3594
3595	if (cons == prod)
3596		goto done;
3597
3598	arq = IXL_DMA_KVA(&sc->sc_arq);
3599
3600	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3601	    0, IXL_DMA_LEN(&sc->sc_arq),
3602	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3603
3604	do {
3605		iaq = &arq[cons];
3606
3607		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
3608		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
3609		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3610		    BUS_DMASYNC_POSTREAD);
3611
3612		switch (iaq->iaq_opcode) {
3613		case HTOLE16(IXL_AQ_OP_PHY_LINK_STATUS):
3614			ixl_link_state_update_iaq(sc, iaq);
3615			break;
3616		}
3617
3618		memset(iaq, 0, sizeof(*iaq));
3619		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3620		if_rxr_put(&sc->sc_arq_ring, 1);
3621
3622		cons++;
3623		cons &= IXL_AQ_MASK;
3624
3625		done = 1;
3626	} while (cons != prod);
3627
3628	if (done && ixl_arq_fill(sc))
3629		ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
3630
3631	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3632	    0, IXL_DMA_LEN(&sc->sc_arq),
3633	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3634
3635	sc->sc_arq_cons = cons;
3636
3637done:
3638	ixl_intr_enable(sc);
3639}
3640
3641static void
3642ixl_atq_set(struct ixl_atq *iatq,
3643    void (*fn)(struct ixl_softc *, void *), void *arg)
3644{
3645	iatq->iatq_fn = fn;
3646	iatq->iatq_arg = arg;
3647}
3648
3649static void
3650ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3651{
3652	struct ixl_aq_desc *atq, *slot;
3653	unsigned int prod;
3654
3655	mtx_enter(&sc->sc_atq_mtx);
3656
3657	atq = IXL_DMA_KVA(&sc->sc_atq);
3658	prod = sc->sc_atq_prod;
3659	slot = atq + prod;
3660
3661	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3662	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3663
3664	*slot = iatq->iatq_desc;
3665	slot->iaq_cookie = (uint64_t)iatq;
3666
3667	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3668	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3669
3670	prod++;
3671	prod &= IXL_AQ_MASK;
3672	sc->sc_atq_prod = prod;
3673	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3674
3675	mtx_leave(&sc->sc_atq_mtx);
3676}
3677
3678static void
3679ixl_atq_done(struct ixl_softc *sc)
3680{
3681	struct ixl_aq_desc *atq, *slot;
3682	struct ixl_atq *iatq;
3683	unsigned int cons;
3684	unsigned int prod;
3685
3686	mtx_enter(&sc->sc_atq_mtx);
3687
3688	prod = sc->sc_atq_prod;
3689	cons = sc->sc_atq_cons;
3690
3691	if (prod == cons) {
3692		mtx_leave(&sc->sc_atq_mtx);
3693		return;
3694	}
3695
3696	atq = IXL_DMA_KVA(&sc->sc_atq);
3697
3698	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3699	    0, IXL_DMA_LEN(&sc->sc_atq),
3700	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3701
3702	do {
3703		slot = &atq[cons];
3704		if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3705			break;
3706
3707		KASSERT(slot->iaq_cookie != 0);
3708		iatq = (struct ixl_atq *)slot->iaq_cookie;
3709		iatq->iatq_desc = *slot;
3710
3711		memset(slot, 0, sizeof(*slot));
3712
3713		(*iatq->iatq_fn)(sc, iatq->iatq_arg);
3714
3715		cons++;
3716		cons &= IXL_AQ_MASK;
3717	} while (cons != prod);
3718
3719	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3720	    0, IXL_DMA_LEN(&sc->sc_atq),
3721	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3722
3723	sc->sc_atq_cons = cons;
3724
3725	mtx_leave(&sc->sc_atq_mtx);
3726}
3727
3728static void
3729ixl_wakeup(struct ixl_softc *sc, void *arg)
3730{
3731	struct cond *c = arg;
3732
3733	cond_signal(c);
3734}
3735
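/*
 * ixl_atq_exec() is the synchronous form of the pattern above: it
 * posts the command with ixl_wakeup() as the callback and sleeps on
 * a condition variable until ixl_atq_done() signals completion.  The
 * wait-channel name, e.g. "ixlsffget" below, is only what shows up
 * while the caller is asleep.
 */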
3736static void
3737ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq, const char *wmesg)
3738{
3739	struct cond c = COND_INITIALIZER();
3740
3741	KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3742
3743	ixl_atq_set(iatq, ixl_wakeup, &c);
3744	ixl_atq_post(sc, iatq);
3745
3746	cond_wait(&c, wmesg);
3747}
3748
3749static int
3750ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3751{
3752	struct ixl_aq_desc *atq, *slot;
3753	unsigned int prod;
3754	unsigned int t = 0;
3755
3756	mtx_enter(&sc->sc_atq_mtx);
3757
3758	atq = IXL_DMA_KVA(&sc->sc_atq);
3759	prod = sc->sc_atq_prod;
3760	slot = atq + prod;
3761
3762	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3763	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3764
3765	*slot = *iaq;
3766	slot->iaq_flags |= htole16(IXL_AQ_SI);
3767
3768	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3769	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3770
3771	prod++;
3772	prod &= IXL_AQ_MASK;
3773	sc->sc_atq_prod = prod;
3774	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3775
3776	while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3777		delaymsec(1);
3778
3779		if (t++ > tm) {
3780			mtx_leave(&sc->sc_atq_mtx);
3781			return (ETIMEDOUT);
3782		}
3783	}
3784
3785	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3786	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3787	*iaq = *slot;
3788	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3789	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3790
3791	sc->sc_atq_cons = prod;
3792
3793	mtx_leave(&sc->sc_atq_mtx);
3794	return (0);
3795}
3796
3797static int
3798ixl_get_version(struct ixl_softc *sc)
3799{
3800	struct ixl_aq_desc iaq;
3801	uint32_t fwbuild, fwver, apiver;
3802
3803	memset(&iaq, 0, sizeof(iaq));
3804	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3805
3806	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3807		return (ETIMEDOUT);
3808	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3809		return (EIO);
3810
3811	fwbuild = lemtoh32(&iaq.iaq_param[1]);
3812	fwver = lemtoh32(&iaq.iaq_param[2]);
3813	apiver = lemtoh32(&iaq.iaq_param[3]);
3814
3815	sc->sc_api_major = apiver & 0xffff;
3816	sc->sc_api_minor = (apiver >> 16) & 0xffff;
3817
3818	printf(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3819	    (uint16_t)(fwver >> 16), fwbuild,
3820	    sc->sc_api_major, sc->sc_api_minor);
3821
3822	return (0);
3823}
3824
3825static int
3826ixl_pxe_clear(struct ixl_softc *sc)
3827{
3828	struct ixl_aq_desc iaq;
3829
3830	memset(&iaq, 0, sizeof(iaq));
3831	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3832	iaq.iaq_param[0] = htole32(0x2);
3833
3834	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3835		printf(", CLEAR PXE MODE timeout\n");
3836		return (-1);
3837	}
3838
3839	switch (iaq.iaq_retval) {
3840	case HTOLE16(IXL_AQ_RC_OK):
3841	case HTOLE16(IXL_AQ_RC_EEXIST):
3842		break;
3843	default:
3844		printf(", CLEAR PXE MODE error\n");
3845		return (-1);
3846	}
3847
3848	return (0);
3849}
3850
3851static int
3852ixl_lldp_shut(struct ixl_softc *sc)
3853{
3854	struct ixl_aq_desc iaq;
3855
3856	memset(&iaq, 0, sizeof(iaq));
3857	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3858	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3859
3860	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3861		printf(", STOP LLDP AGENT timeout\n");
3862		return (-1);
3863	}
3864
3865	switch (iaq.iaq_retval) {
3866	case HTOLE16(IXL_AQ_RC_EMODE):
3867	case HTOLE16(IXL_AQ_RC_EPERM):
3868		/* ignore silently */
3869	default:
3870		break;
3871	}
3872
3873	return (0);
3874}
3875
3876static int
3877ixl_get_mac(struct ixl_softc *sc)
3878{
3879	struct ixl_dmamem idm;
3880	struct ixl_aq_desc iaq;
3881	struct ixl_aq_mac_addresses *addrs;
3882	int rv;
3883
3884#ifdef __sparc64__
3885	if (OF_getprop(PCITAG_NODE(sc->sc_tag), "local-mac-address",
3886	    sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
3887		return (0);
3888#endif
3889
3890	if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3891		printf(", unable to allocate mac addresses\n");
3892		return (-1);
3893	}
3894
3895	memset(&iaq, 0, sizeof(iaq));
3896	iaq.iaq_flags = htole16(IXL_AQ_BUF);
3897	iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3898	iaq.iaq_datalen = htole16(sizeof(*addrs));
3899	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3900
3901	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3902	    BUS_DMASYNC_PREREAD);
3903
3904	rv = ixl_atq_poll(sc, &iaq, 250);
3905
3906	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3907	    BUS_DMASYNC_POSTREAD);
3908
3909	if (rv != 0) {
3910		printf(", MAC ADDRESS READ timeout\n");
3911		rv = -1;
3912		goto done;
3913	}
3914	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3915		printf(", MAC ADDRESS READ error\n");
3916		rv = -1;
3917		goto done;
3918	}
3919
3920	addrs = IXL_DMA_KVA(&idm);
3921	if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3922		printf(", port address is not valid\n");
		rv = -1;
3923		goto done;
3924	}
3925
3926	memcpy(sc->sc_ac.ac_enaddr, addrs->port, ETHER_ADDR_LEN);
3927	rv = 0;
3928
3929done:
3930	ixl_dmamem_free(sc, &idm);
3931	return (rv);
3932}
3933
3934static int
3935ixl_get_switch_config(struct ixl_softc *sc)
3936{
3937	struct ixl_dmamem idm;
3938	struct ixl_aq_desc iaq;
3939	struct ixl_aq_switch_config *hdr;
3940	struct ixl_aq_switch_config_element *elms, *elm;
3941	unsigned int nelm;
3942	int rv;
3943
3944	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3945		printf("%s: unable to allocate switch config buffer\n",
3946		    DEVNAME(sc));
3947		return (-1);
3948	}
3949
3950	memset(&iaq, 0, sizeof(iaq));
3951	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3952	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3953	iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
3954	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3955	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3956
3957	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3958	    BUS_DMASYNC_PREREAD);
3959
3960	rv = ixl_atq_poll(sc, &iaq, 250);
3961
3962	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3963	    BUS_DMASYNC_POSTREAD);
3964
3965	if (rv != 0) {
3966		printf("%s: GET SWITCH CONFIG timeout\n", DEVNAME(sc));
3967		rv = -1;
3968		goto done;
3969	}
3970	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3971		printf("%s: GET SWITCH CONFIG error\n", DEVNAME(sc));
3972		rv = -1;
3973		goto done;
3974	}
3975
3976	hdr = IXL_DMA_KVA(&idm);
3977	elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
3978
3979	nelm = lemtoh16(&hdr->num_reported);
3980	if (nelm < 1) {
3981		printf("%s: no switch config available\n", DEVNAME(sc));
3982		rv = -1;
3983		goto done;
3984	}
3985
3986#if 0
3987	for (unsigned int i = 0; i < nelm; i++) {
3988		elm = &elms[i];
3989
3990		printf("%s: type %x revision %u seid %04x\n", DEVNAME(sc),
3991		    elm->type, elm->revision, lemtoh16(&elm->seid));
3992		printf("%s: uplink %04x downlink %04x\n", DEVNAME(sc),
3993		    lemtoh16(&elm->uplink_seid),
3994		    lemtoh16(&elm->downlink_seid));
3995		printf("%s: conntype %x scheduler %04x extra %04x\n",
3996		    DEVNAME(sc), elm->connection_type,
3997		    lemtoh16(&elm->scheduler_id),
3998		    lemtoh16(&elm->element_info));
3999	}
4000#endif
4001
4002	elm = &elms[0];
4003
4004	sc->sc_uplink_seid = elm->uplink_seid;
4005	sc->sc_downlink_seid = elm->downlink_seid;
4006	sc->sc_seid = elm->seid;
4007
4008	if ((sc->sc_uplink_seid == htole16(0)) !=
4009	    (sc->sc_downlink_seid == htole16(0))) {
4010		printf("%s: SEIDs are misconfigured\n", DEVNAME(sc));
4011		rv = -1;
4012		goto done;
4013	}
4014
4015done:
4016	ixl_dmamem_free(sc, &idm);
4017	return (rv);
4018}
4019
4020static int
4021ixl_phy_mask_ints(struct ixl_softc *sc)
4022{
4023	struct ixl_aq_desc iaq;
4024
4025	memset(&iaq, 0, sizeof(iaq));
4026	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4027	iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4028	    ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4029	      IXL_AQ_PHY_EV_MEDIA_NA));
4030
4031	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4032		printf("%s: SET PHY EVENT MASK timeout\n", DEVNAME(sc));
4033		return (-1);
4034	}
4035	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4036		printf("%s: SET PHY EVENT MASK error\n", DEVNAME(sc));
4037		return (-1);
4038	}
4039
4040	return (0);
4041}
4042
4043static int
4044	ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4045{
4046	struct ixl_aq_desc iaq;
4047	int rv;
4048
4049	memset(&iaq, 0, sizeof(iaq));
4050	iaq.iaq_flags = htole16(IXL_AQ_BUF |
4051	    (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4052	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4053	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(idm));
4054	iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4055	ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4056
4057	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4058	    BUS_DMASYNC_PREREAD);
4059
4060	rv = ixl_atq_poll(sc, &iaq, 250);
4061
4062	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4063	    BUS_DMASYNC_POSTREAD);
4064
4065	if (rv != 0)
4066		return (-1);
4067
4068	return (lemtoh16(&iaq.iaq_retval));
4069}
4070
4071static int
4072ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
4073{
4074	struct ixl_dmamem idm;
4075	struct ixl_aq_phy_abilities *phy;
4076	uint64_t phy_types;
4077	int rv;
4078
4079	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4080		printf("%s: unable to allocate phy abilities buffer\n",
4081		    DEVNAME(sc));
4082		return (-1);
4083	}
4084
4085	rv = ixl_get_phy_abilities(sc, &idm);
4086	switch (rv) {
4087	case -1:
4088		printf("%s: GET PHY ABILITIES timeout\n", DEVNAME(sc));
4089		goto err;
4090	case IXL_AQ_RC_OK:
4091		break;
4092	case IXL_AQ_RC_EIO:
4093		/* API is too old to handle this command */
4094		phy_types = 0;
4095		goto done;
4096	default:
4097		printf("%s: GET PHY ABILITIES error %u\n", DEVNAME(sc), rv);
4098		goto err;
4099	}
4100
4101	phy = IXL_DMA_KVA(&idm);
4102
4103	phy_types = lemtoh32(&phy->phy_type);
4104	phy_types |= (uint64_t)phy->phy_type_ext << 32;
4105
4106done:
4107	*phy_types_ptr = phy_types;
4108
4109	rv = 0;
4110
4111err:
4112	ixl_dmamem_free(sc, &idm);
4113	return (rv);
4114}
4115
4116	/*
4117	 * This returns -2 on a software/driver failure, -1 for problems
4118	 * talking to the hardware, or the SFF-8024 module type on success.
4119	 */
4121static int
4122ixl_get_module_type(struct ixl_softc *sc)
4123{
4124	struct ixl_dmamem idm;
4125	struct ixl_aq_phy_abilities *phy;
4126	int rv;
4127
4128	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0)
4129		return (-2);
4130
4131	rv = ixl_get_phy_abilities(sc, &idm);
4132	if (rv != IXL_AQ_RC_OK) {
4133		rv = -1;
4134		goto done;
4135	}
4136
4137	phy = IXL_DMA_KVA(&idm);
4138
4139	rv = phy->module_type[0];
4140
4141done:
4142	ixl_dmamem_free(sc, &idm);
4143	return (rv);
4144}
4145
4146static int
4147ixl_get_link_status(struct ixl_softc *sc)
4148{
4149	struct ixl_aq_desc iaq;
4150	struct ixl_aq_link_param *param;
4151
4152	memset(&iaq, 0, sizeof(iaq));
4153	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4154	param = (struct ixl_aq_link_param *)iaq.iaq_param;
4155	param->notify = IXL_AQ_LINK_NOTIFY;
4156
4157	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4158		printf("%s: GET LINK STATUS timeout\n", DEVNAME(sc));
4159		return (-1);
4160	}
4161	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4162		printf("%s: GET LINK STATUS error\n", DEVNAME(sc));
4163		return (0);
4164	}
4165
4166	sc->sc_ac.ac_if.if_link_state = ixl_set_link_status(sc, &iaq);
4167
4168	return (0);
4169}
4170
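/*
 * Module EEPROM access goes through a small vtable so SFP and QSFP
 * modules can share ixl_get_sffpage(): open() selects the requested
 * page (SFP modules bank-switch via byte 127 of the EEPROM at
 * IFSFF_ADDR_EEPROM, and the previous selection is remembered),
 * get() fetches one byte, and close() restores the page that open()
 * saved.  QSFP modules instead pass the page number straight through
 * to the PHY register access, so their open() and close() are mostly
 * no-ops.
 */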
4171struct ixl_sff_ops {
4172	int (*open)(struct ixl_softc *sc, struct if_sffpage *, uint8_t *);
4173	int (*get)(struct ixl_softc *sc, struct if_sffpage *, size_t);
4174	int (*close)(struct ixl_softc *sc, struct if_sffpage *, uint8_t);
4175};
4176
4177static int
4178ixl_sfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
4179{
4180	int error;
4181
4182	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4183		return (0);
4184
4185	error = ixl_sff_get_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
4186	if (error != 0)
4187		return (error);
4188	if (*page == sff->sff_page)
4189		return (0);
4190	error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, sff->sff_page);
4191	if (error != 0)
4192		return (error);
4193
4194	return (0);
4195}
4196
4197static int
4198ixl_sfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
4199{
4200	return (ixl_sff_get_byte(sc, sff->sff_addr, i, &sff->sff_data[i]));
4201}
4202
4203static int
4204ixl_sfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
4205{
4206	int error;
4207
4208	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4209		return (0);
4210
4211	if (page == sff->sff_page)
4212		return (0);
4213
4214	error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
4215	if (error != 0)
4216		return (error);
4217
4218	return (0);
4219}
4220
4221static const struct ixl_sff_ops ixl_sfp_ops = {
4222	ixl_sfp_open,
4223	ixl_sfp_get,
4224	ixl_sfp_close,
4225};
4226
4227static int
4228ixl_qsfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
4229{
4230	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4231		return (EIO);
4232
4233	return (0);
4234}
4235
4236static int
4237ixl_qsfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
4238{
4239	return (ixl_sff_get_byte(sc, sff->sff_page, i, &sff->sff_data[i]));
4240}
4241
4242static int
4243ixl_qsfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
4244{
4245	return (0);
4246}
4247
4248static const struct ixl_sff_ops ixl_qsfp_ops = {
4249	ixl_qsfp_open,
4250	ixl_qsfp_get,
4251	ixl_qsfp_close,
4252};
4253
4254static int
4255ixl_get_sffpage(struct ixl_softc *sc, struct if_sffpage *sff)
4256{
4257	const struct ixl_sff_ops *ops;
4258	uint8_t page;
4259	size_t i;
4260	int error;
4261
4262	switch (ixl_get_module_type(sc)) {
4263	case -2:
4264		return (ENOMEM);
4265	case -1:
4266		return (ENXIO);
4267	case IXL_SFF8024_ID_SFP:
4268		ops = &ixl_sfp_ops;
4269		break;
4270	case IXL_SFF8024_ID_QSFP:
4271	case IXL_SFF8024_ID_QSFP_PLUS:
4272	case IXL_SFF8024_ID_QSFP28:
4273		ops = &ixl_qsfp_ops;
4274		break;
4275	default:
4276		return (EOPNOTSUPP);
4277	}
4278
4279	error = (*ops->open)(sc, sff, &page);
4280	if (error != 0)
4281		return (error);
4282
4283	for (i = 0; i < sizeof(sff->sff_data); i++) {
4284		error = (*ops->get)(sc, sff, i);
4285		if (error != 0)
4286			return (error);
4287	}
4288
4289	error = (*ops->close)(sc, sff, page);
4290
4291	return (error);
4292}
4293
4294static int
4295ixl_sff_get_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t *p)
4296{
4297	struct ixl_atq iatq;
4298	struct ixl_aq_desc *iaq;
4299	struct ixl_aq_phy_reg_access *param;
4300
4301	memset(&iatq, 0, sizeof(iatq));
4302	iaq = &iatq.iatq_desc;
4303	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_REGISTER);
4304	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
4305	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
4306	param->dev_addr = dev;
4307	htolem32(&param->reg, reg);
4308
4309	ixl_atq_exec(sc, &iatq, "ixlsffget");
4310
4311	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
4312		printf("%s: %s(dev 0x%02x, reg 0x%02x) -> %04x\n",
4313		    DEVNAME(sc), __func__,
4314		    dev, reg, lemtoh16(&iaq->iaq_retval));
4315	}
4316
4317	switch (iaq->iaq_retval) {
4318	case htole16(IXL_AQ_RC_OK):
4319		break;
4320	case htole16(IXL_AQ_RC_EBUSY):
4321		return (EBUSY);
4322	case htole16(IXL_AQ_RC_ESRCH):
4323		return (ENODEV);
4324	case htole16(IXL_AQ_RC_EIO):
4325	case htole16(IXL_AQ_RC_EINVAL):
4326	default:
4327		return (EIO);
4328	}
4329
4330	*p = lemtoh32(&param->val);
4331
4332	return (0);
4333}
4334
4335static int
4336ixl_sff_set_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t v)
4337{
4338	struct ixl_atq iatq;
4339	struct ixl_aq_desc *iaq;
4340	struct ixl_aq_phy_reg_access *param;
4341
4342	memset(&iatq, 0, sizeof(iatq));
4343	iaq = &iatq.iatq_desc;
4344	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_REGISTER);
4345	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
4346	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
4347	param->dev_addr = dev;
4348	htolem32(&param->reg, reg);
4349	htolem32(&param->val, v);
4350
4351	ixl_atq_exec(sc, &iatq, "ixlsffset");
4352
4353	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
4354		printf("%s: %s(dev 0x%02x, reg 0x%02x, val 0x%02x) -> %04x\n",
4355		    DEVNAME(sc), __func__,
4356		    dev, reg, v, lemtoh16(&iaq->iaq_retval));
4357	}
4358
4359	switch (iaq->iaq_retval) {
4360	case htole16(IXL_AQ_RC_OK):
4361		break;
4362	case htole16(IXL_AQ_RC_EBUSY):
4363		return (EBUSY);
4364	case htole16(IXL_AQ_RC_ESRCH):
4365		return (ENODEV);
4366	case htole16(IXL_AQ_RC_EIO):
4367	case htole16(IXL_AQ_RC_EINVAL):
4368	default:
4369		return (EIO);
4370	}
4371
4372	return (0);
4373}
4374
4375static int
4376ixl_get_vsi(struct ixl_softc *sc)
4377{
4378	struct ixl_dmamem *vsi = &sc->sc_scratch;
4379	struct ixl_aq_desc iaq;
4380	struct ixl_aq_vsi_param *param;
4381	struct ixl_aq_vsi_reply *reply;
4382	int rv;
4383
4384	/* grumble, vsi info isn't "known" at compile time */
4385
4386	memset(&iaq, 0, sizeof(iaq));
4387	htolem16(&iaq.iaq_flags, IXL_AQ_BUF |
4388	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4389	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4390	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
4391	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4392
4393	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4394	param->uplink_seid = sc->sc_seid;
4395
4396	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4397	    BUS_DMASYNC_PREREAD);
4398
4399	rv = ixl_atq_poll(sc, &iaq, 250);
4400
4401	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4402	    BUS_DMASYNC_POSTREAD);
4403
4404	if (rv != 0) {
4405		printf("%s: GET VSI timeout\n", DEVNAME(sc));
4406		return (-1);
4407	}
4408
4409	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4410		printf("%s: GET VSI error %u\n", DEVNAME(sc),
4411		    lemtoh16(&iaq.iaq_retval));
4412		return (-1);
4413	}
4414
4415	reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4416	sc->sc_vsi_number = reply->vsi_number;
4417
4418	return (0);
4419}
4420
4421static int
4422ixl_set_vsi(struct ixl_softc *sc)
4423{
4424	struct ixl_dmamem *vsi = &sc->sc_scratch;
4425	struct ixl_aq_desc iaq;
4426	struct ixl_aq_vsi_param *param;
4427	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4428	int rv;
4429
4430	data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4431	    IXL_AQ_VSI_VALID_VLAN);
4432
4433	CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4434	SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4435	data->queue_mapping[0] = htole16(0);
4436	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4437	    (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4438
4439	CLR(data->port_vlan_flags,
4440	    htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK));
4441	SET(data->port_vlan_flags, htole16(IXL_AQ_VSI_PVLAN_MODE_ALL |
4442	    IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH));
4443
4444	/* grumble, vsi info isn't "known" at compile time */
4445
4446	memset(&iaq, 0, sizeof(iaq));
4447	htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD |
4448	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4449	iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4450	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
4451	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4452
4453	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4454	param->uplink_seid = sc->sc_seid;
4455
4456	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4457	    BUS_DMASYNC_PREWRITE);
4458
4459	rv = ixl_atq_poll(sc, &iaq, 250);
4460
4461	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4462	    BUS_DMASYNC_POSTWRITE);
4463
4464	if (rv != 0) {
4465		printf("%s: UPDATE VSI timeout\n", DEVNAME(sc));
4466		return (-1);
4467	}
4468
4469	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4470		printf("%s: UPDATE VSI error %u\n", DEVNAME(sc),
4471		    lemtoh16(&iaq.iaq_retval));
4472		return (-1);
4473	}
4474
4475	return (0);
4476}
4477
4478static const struct ixl_phy_type *
4479ixl_search_phy_type(uint8_t phy_type)
4480{
4481	const struct ixl_phy_type *itype;
4482	uint64_t mask;
4483	unsigned int i;
4484
4485	if (phy_type >= 64)
4486		return (NULL);
4487
4488	mask = 1ULL << phy_type;
4489
4490	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
4491		itype = &ixl_phy_type_map[i];
4492
4493		if (ISSET(itype->phy_type, mask))
4494			return (itype);
4495	}
4496
4497	return (NULL);
4498}
4499
4500static uint64_t
4501ixl_search_link_speed(uint8_t link_speed)
4502{
4503	const struct ixl_speed_type *type;
4504	unsigned int i;
4505
4506	for (i = 0; i < nitems(ixl_speed_type_map); i++) {
4507		type = &ixl_speed_type_map[i];
4508
4509		if (ISSET(type->dev_speed, link_speed))
4510			return (type->net_speed);
4511	}
4512
4513	return (0);
4514}
4515
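/*
 * Translate a LINK STATUS reply into ifmedia and link-state terms.
 * The media and baudrate values are cached under sc_link_state_mtx
 * for the ifmedia status callback; the computed link state is
 * returned so callers can decide whether to post
 * if_link_state_change().
 */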
4516static int
4517ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
4518{
4519	const struct ixl_aq_link_status *status;
4520	const struct ixl_phy_type *itype;
4521	uint64_t ifm_active = IFM_ETHER;
4522	uint64_t ifm_status = IFM_AVALID;
4523	int link_state = LINK_STATE_DOWN;
4524	uint64_t baudrate = 0;
4525
4526	status = (const struct ixl_aq_link_status *)iaq->iaq_param;
4527	if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
4528		goto done;
4529
4530	ifm_active |= IFM_FDX;
4531	ifm_status |= IFM_ACTIVE;
4532	link_state = LINK_STATE_FULL_DUPLEX;
4533
4534	itype = ixl_search_phy_type(status->phy_type);
4535	if (itype != NULL)
4536		ifm_active |= itype->ifm_type;
4537
4538	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
4539		ifm_active |= IFM_ETH_TXPAUSE;
4540	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
4541		ifm_active |= IFM_ETH_RXPAUSE;
4542
4543	baudrate = ixl_search_link_speed(status->link_speed);
4544
4545done:
4546	mtx_enter(&sc->sc_link_state_mtx);
4547	sc->sc_media_active = ifm_active;
4548	sc->sc_media_status = ifm_status;
4549	sc->sc_ac.ac_if.if_baudrate = baudrate;
4550	mtx_leave(&sc->sc_link_state_mtx);
4551
4552	return (link_state);
4553}
4554
4555static int
4556ixl_restart_an(struct ixl_softc *sc)
4557{
4558	struct ixl_aq_desc iaq;
4559
4560	memset(&iaq, 0, sizeof(iaq));
4561	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4562	iaq.iaq_param[0] =
4563	    htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4564
4565	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4566		printf("%s: RESTART AN timeout\n", DEVNAME(sc));
4567		return (-1);
4568	}
4569	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4570		printf("%s: RESTART AN error\n", DEVNAME(sc));
4571		return (-1);
4572	}
4573
4574	return (0);
4575}
4576
4577static int
4578	ixl_add_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan,
	    uint16_t flags)
4579{
4580	struct ixl_aq_desc iaq;
4581	struct ixl_aq_add_macvlan *param;
4582	struct ixl_aq_add_macvlan_elem *elem;
4583
4584	memset(&iaq, 0, sizeof(iaq));
4585	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4586	iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4587	iaq.iaq_datalen = htole16(sizeof(*elem));
4588	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4589
4590	param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4591	param->num_addrs = htole16(1);
4592	param->seid0 = htole16(0x8000) | sc->sc_seid;
4593	param->seid1 = 0;
4594	param->seid2 = 0;
4595
4596	elem = IXL_DMA_KVA(&sc->sc_scratch);
4597	memset(elem, 0, sizeof(*elem));
4598	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4599	elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4600	elem->vlan = htole16(vlan);
4601
4602	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4603		printf("%s: ADD_MACVLAN timeout\n", DEVNAME(sc));
4604		return (IXL_AQ_RC_EINVAL);
4605	}
4606
4607	return (letoh16(iaq.iaq_retval));
4608}
4609
4610static int
4611	ixl_remove_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan,
	    uint16_t flags)
4612{
4613	struct ixl_aq_desc iaq;
4614	struct ixl_aq_remove_macvlan *param;
4615	struct ixl_aq_remove_macvlan_elem *elem;
4616
4617	memset(&iaq, 0, sizeof(iaq));
4618	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4619	iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4620	iaq.iaq_datalen = htole16(sizeof(*elem));
4621	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4622
4623	param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4624	param->num_addrs = htole16(1);
4625	param->seid0 = htole16(0x8000) | sc->sc_seid;
4626	param->seid1 = 0;
4627	param->seid2 = 0;
4628
4629	elem = IXL_DMA_KVA(&sc->sc_scratch);
4630	memset(elem, 0, sizeof(*elem));
4631	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4632	elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4633	elem->vlan = htole16(vlan);
4634
4635	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4636		printf("%s: REMOVE_MACVLAN timeout\n", DEVNAME(sc));
4637		return (IXL_AQ_RC_EINVAL);
4638	}
4639
4640	return (letoh16(iaq.iaq_retval));
4641}
4642
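/*
 * The Host Memory Cache (HMC) holds queue contexts in host memory on
 * behalf of the device.  ixl_hmc() sizes one object region per entry
 * in regs[] below, allocates page-aligned backing store ("pd" memory),
 * fills a table of segment descriptors ("sd" memory) with the physical
 * address of each backing page, and programs that table into the
 * function's HMC registers.  As a worked example (the real counts are
 * read from hardware, so these numbers are only illustrative): if the
 * LAN TX object size register reports 2^7 = 128 bytes and the queue
 * count is 1536, the region needs 192kB, i.e. 48 backing pages of
 * IXL_HMC_PGSIZE (4kB) each.
 */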
4643static int
4644ixl_hmc(struct ixl_softc *sc)
4645{
4646	struct {
4647		uint32_t   count;
4648		uint32_t   minsize;
4649		bus_size_t maxcnt;
4650		bus_size_t setoff;
4651		bus_size_t setcnt;
4652	} regs[] = {
4653		{
4654			0,
4655			IXL_HMC_TXQ_MINSIZE,
4656			I40E_GLHMC_LANTXOBJSZ,
4657			I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
4658			I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
4659		},
4660		{
4661			0,
4662			IXL_HMC_RXQ_MINSIZE,
4663			I40E_GLHMC_LANRXOBJSZ,
4664			I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
4665			I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
4666		},
4667		{
4668			0,
4669			0,
4670			I40E_GLHMC_FCOEMAX,
4671			I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
4672			I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
4673		},
4674		{
4675			0,
4676			0,
4677			I40E_GLHMC_FCOEFMAX,
4678			I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4679			I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4680		},
4681	};
4682	struct ixl_hmc_entry *e;
4683	uint64_t size, dva;
4684	uint8_t *kva;
4685	uint64_t *sdpage;
4686	unsigned int i;
4687	int npages, tables;
4688
4689	CTASSERT(nitems(regs) <= nitems(sc->sc_hmc_entries));
4690
4691	regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4692	    ixl_rd(sc, I40E_GLHMC_LANQMAX);
4693
4694	size = 0;
4695	for (i = 0; i < nitems(regs); i++) {
4696		e = &sc->sc_hmc_entries[i];
4697
4698		e->hmc_count = regs[i].count;
4699		e->hmc_size = 1U << ixl_rd(sc, regs[i].maxcnt);
4700		e->hmc_base = size;
4701
4702		if ((e->hmc_size * 8) < regs[i].minsize) {
4703			printf("%s: kernel hmc entry is too big\n",
4704			    DEVNAME(sc));
4705			return (-1);
4706		}
4707
4708		size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4709	}
4710	size = roundup(size, IXL_HMC_PGSIZE);
4711	npages = size / IXL_HMC_PGSIZE;
4712
4713	tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4714
4715	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4716		printf("%s: unable to allocate hmc pd memory\n", DEVNAME(sc));
4717		return (-1);
4718	}
4719
4720	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4721	    IXL_HMC_PGSIZE) != 0) {
4722		printf("%s: unable to allocate hmc sd memory\n", DEVNAME(sc));
4723		ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4724		return (-1);
4725	}
4726
4727	kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4728	memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4729
4730	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4731	    0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4732	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4733
4734	dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4735	sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4736	for (i = 0; i < npages; i++) {
4737		htolem64(sdpage++, dva | IXL_HMC_PDVALID);
4738
4739		dva += IXL_HMC_PGSIZE;
4740	}
4741
4742	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4743	    0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4744	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4745
4746	dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4747	for (i = 0; i < tables; i++) {
4748		uint32_t count;
4749
4750		KASSERT(npages >= 0);
4751
4752		count = (npages > IXL_HMC_PGS) ? IXL_HMC_PGS : npages;
4753
4754		ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4755		ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4756		    (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4757		    (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4758		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4759		ixl_wr(sc, I40E_PFHMC_SDCMD,
4760		    (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4761
4762		npages -= IXL_HMC_PGS;
4763		dva += IXL_HMC_PGSIZE;
4764	}
4765
4766	for (i = 0; i < nitems(regs); i++) {
4767		e = &sc->sc_hmc_entries[i];
4768
4769		ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
4770		ixl_wr(sc, regs[i].setcnt, e->hmc_count);
4771	}
4772
4773	return (0);
4774}
4775
4776static void
4777ixl_hmc_free(struct ixl_softc *sc)
4778{
4779	ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4780	ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4781}
4782
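/*
 * ixl_hmc_pack() scatters byte-aligned source fields into the packed
 * bit layout the hardware expects.  For each field, "offset" is the
 * source byte offset, "lsb" the destination bit position and "width"
 * the field width in bits.  For example, a field with offset 2,
 * width 3 and lsb 30 copies the low three bits of src[2] into
 * destination bits 30-32, i.e. bits 6-7 of dst[3] and bit 0 of
 * dst[4].
 */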
4783static void
4784ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4785    unsigned int npacking)
4786{
4787	uint8_t *dst = d;
4788	const uint8_t *src = s;
4789	unsigned int i;
4790
4791	for (i = 0; i < npacking; i++) {
4792		const struct ixl_hmc_pack *pack = &packing[i];
4793		unsigned int offset = pack->lsb / 8;
4794		unsigned int align = pack->lsb % 8;
4795		const uint8_t *in = src + pack->offset;
4796		uint8_t *out = dst + offset;
4797		int width = pack->width;
4798		unsigned int inbits = 0;
4799
4800		if (align) {
4801			inbits = (*in++) << align;
4802			*out++ |= (inbits & 0xff);
4803			inbits >>= 8;
4804
4805			width -= 8 - align;
4806		}
4807
4808		while (width >= 8) {
4809			inbits |= (*in++) << align;
4810			*out++ = (inbits & 0xff);
4811			inbits >>= 8;
4812
4813			width -= 8;
4814		}
4815
4816		if (width > 0) {
4817			inbits |= (*in) << align;
4818			*out |= (inbits & ((1 << width) - 1));
4819		}
4820	}
4821}
4822
4823static struct ixl_aq_buf *
4824ixl_aqb_alloc(struct ixl_softc *sc)
4825{
4826	struct ixl_aq_buf *aqb;
4827
4828	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4829	if (aqb == NULL)
4830		return (NULL);
4831
4832	aqb->aqb_data = dma_alloc(IXL_AQ_BUFLEN, PR_WAITOK);
4833	if (aqb->aqb_data == NULL)
4834		goto free;
4835
4836	if (bus_dmamap_create(sc->sc_dmat, IXL_AQ_BUFLEN, 1,
4837	    IXL_AQ_BUFLEN, 0,
4838	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
4839	    &aqb->aqb_map) != 0)
4840		goto dma_free;
4841
4842	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4843	    IXL_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
4844		goto destroy;
4845
4846	return (aqb);
4847
4848destroy:
4849	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4850dma_free:
4851	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4852free:
4853	free(aqb, M_DEVBUF, sizeof(*aqb));
4854
4855	return (NULL);
4856}
4857
4858static void
4859ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4860{
4861	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4862	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4863	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4864	free(aqb, M_DEVBUF, sizeof(*aqb));
4865}
4866
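/*
 * Keep the admin receive queue (ARQ) populated: take buffers from the
 * idle list (or allocate fresh ones), point the next free descriptors
 * at them, and move them to the live list.  Returns nonzero if any
 * descriptor was posted, in which case the caller is expected to bump
 * the ARQ tail register, as ixl_arq() does after processing events.
 */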
4867static int
4868ixl_arq_fill(struct ixl_softc *sc)
4869{
4870	struct ixl_aq_buf *aqb;
4871	struct ixl_aq_desc *arq, *iaq;
4872	unsigned int prod = sc->sc_arq_prod;
4873	unsigned int n;
4874	int post = 0;
4875
4876	n = if_rxr_get(&sc->sc_arq_ring, IXL_AQ_NUM);
4877	arq = IXL_DMA_KVA(&sc->sc_arq);
4878
4879	while (n > 0) {
4880		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4881		if (aqb != NULL)
4882			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
4883		else if ((aqb = ixl_aqb_alloc(sc)) == NULL)
4884			break;
4885
4886		memset(aqb->aqb_data, 0, IXL_AQ_BUFLEN);
4887
4888		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4889		    BUS_DMASYNC_PREREAD);
4890
4891		iaq = &arq[prod];
4892		iaq->iaq_flags = htole16(IXL_AQ_BUF |
4893		    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4894		iaq->iaq_opcode = 0;
4895		iaq->iaq_datalen = htole16(IXL_AQ_BUFLEN);
4896		iaq->iaq_retval = 0;
4897		iaq->iaq_cookie = 0;
4898		iaq->iaq_param[0] = 0;
4899		iaq->iaq_param[1] = 0;
4900		ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4901
4902		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);
4903
4904		prod++;
4905		prod &= IXL_AQ_MASK;
4906
4907		post = 1;
4908
4909		n--;
4910	}
4911
4912	if_rxr_put(&sc->sc_arq_ring, n);
4913	sc->sc_arq_prod = prod;
4914
4915	return (post);
4916}
4917
4918static void
4919ixl_arq_unfill(struct ixl_softc *sc)
4920{
4921	struct ixl_aq_buf *aqb;
4922
4923	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
4924		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
4925
4926		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4927		    BUS_DMASYNC_POSTREAD);
4928		ixl_aqb_free(sc, aqb);
4929	}
4930}
4931
4932static void
4933ixl_clear_hw(struct ixl_softc *sc)
4934{
4935	uint32_t num_queues, base_queue;
4936	uint32_t num_pf_int;
4937	uint32_t num_vf_int;
4938	uint32_t num_vfs;
4939	uint32_t i, j;
4940	uint32_t val;
4941
4942	/* get number of interrupts, queues, and vfs */
4943	val = ixl_rd(sc, I40E_GLPCI_CNF2);
4944	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4945	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4946	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4947	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4948
4949	val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4950	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4951	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4952	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4953	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4954	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4955		num_queues = (j - base_queue) + 1;
4956	else
4957		num_queues = 0;
4958
4959	val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4960	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4961	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4962	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4963	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4964	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4965		num_vfs = (j - i) + 1;
4966	else
4967		num_vfs = 0;
4968
4969	/* stop all the interrupts */
4970	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4971	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4972	for (i = 0; i < num_pf_int - 2; i++)
4973		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
4974
4975	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4976	val = I40E_QUEUE_TYPE_EOL << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4977	ixl_wr(sc, I40E_PFINT_LNKLST0, val);
4978	for (i = 0; i < num_pf_int - 2; i++)
4979		ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
4980	val = I40E_QUEUE_TYPE_EOL << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4981	for (i = 0; i < num_vfs; i++)
4982		ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
4983	for (i = 0; i < num_vf_int - 2; i++)
4984		ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
4985
4986	/* warn the HW of the coming Tx disables */
4987	for (i = 0; i < num_queues; i++) {
4988		uint32_t abs_queue_idx = base_queue + i;
4989		uint32_t reg_block = 0;
4990
4991		if (abs_queue_idx >= 128) {
4992			reg_block = abs_queue_idx / 128;
4993			abs_queue_idx %= 128;
4994		}
4995
4996		val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
4997		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
4998		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
4999		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5000
5001		ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5002	}
5003	delaymsec(400);
5004
5005	/* stop all the queues */
5006	for (i = 0; i < num_queues; i++) {
5007		ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5008		ixl_wr(sc, I40E_QTX_ENA(i), 0);
5009		ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5010		ixl_wr(sc, I40E_QRX_ENA(i), 0);
5011	}
5012
5013	/* short wait for all queue disables to settle */
5014	delaymsec(50);
5015}
5016
5017static int
5018ixl_pf_reset(struct ixl_softc *sc)
5019{
5020	uint32_t cnt = 0;
5021	uint32_t cnt1 = 0;
5022	uint32_t reg = 0;
5023	uint32_t grst_del;
5024
5025	/*
5026	 * Poll for Global Reset steady state in case of recent GRST.
5027	 * The grst delay value is in 100ms units; wait ten counts (a
5028	 * full extra second) past it so we don't just miss the end.
5029	 */
5030	grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5031	grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5032	grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5033	grst_del += 10;
5034
5035	for (cnt = 0; cnt < grst_del; cnt++) {
5036		reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5037		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5038			break;
5039		delaymsec(100);
5040	}
5041	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5042		printf(", Global reset polling failed to complete\n");
5043		return (-1);
5044	}
5045
5046	/* Now wait for the FW to be ready */
5047	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5048		reg = ixl_rd(sc, I40E_GLNVM_ULD);
5049		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5050		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5051		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5052		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5053			break;
5054
5055		delaymsec(10);
5056	}
5057	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5058	    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5059		printf(", timed out waiting for FW reset to complete "
5060		    "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5061		return (-1);
5062	}
5063
5064	/*
5065	 * If there was a Global Reset in progress when we got here,
5066	 * we don't need to do the PF Reset
5067	 */
5068	if (cnt == 0) {
5069		reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5070		ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5071		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5072			reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5073			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5074				break;
5075			delaymsec(1);
5076		}
5077		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5078			printf(", PF reset polling failed to complete "
5079			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
5080			return (-1);
5081		}
5082	}
5083
5084	return (0);
5085}
5086
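/*
 * On 710 series hardware some receive control registers must be read
 * and written via firmware admin queue commands (IXL_AQ_OP_RX_CTL_READ
 * and IXL_AQ_OP_RX_CTL_WRITE) rather than touched directly.  The 722
 * series has no such restriction, so its variants further below are
 * plain MMIO accesses.
 */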
5087static uint32_t
5088ixl_710_rd_ctl(struct ixl_softc *sc, uint32_t r)
5089{
5090	struct ixl_atq iatq;
5091	struct ixl_aq_desc *iaq;
5092	uint16_t retval;
5093
5094	memset(&iatq, 0, sizeof(iatq));
5095	iaq = &iatq.iatq_desc;
5096	iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_READ);
5097	htolem32(&iaq->iaq_param[1], r);
5098
5099	ixl_atq_exec(sc, &iatq, "ixl710rd");
5100
5101	retval = lemtoh16(&iaq->iaq_retval);
5102	if (retval != IXL_AQ_RC_OK) {
5103		printf("%s: %s failed (%u)\n", DEVNAME(sc), __func__, retval);
5104		return (~0U);
5105	}
5106
5107	return (lemtoh32(&iaq->iaq_param[3]));
5108}
5109
5110static void
5111ixl_710_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
5112{
5113	struct ixl_atq iatq;
5114	struct ixl_aq_desc *iaq;
5115	uint16_t retval;
5116
5117	memset(&iatq, 0, sizeof(iatq));
5118	iaq = &iatq.iatq_desc;
5119	iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_WRITE);
5120	htolem32(&iaq->iaq_param[1], r);
5121	htolem32(&iaq->iaq_param[3], v);
5122
5123	ixl_atq_exec(sc, &iatq, "ixl710wr");
5124
5125	retval = lemtoh16(&iaq->iaq_retval);
5126	if (retval != IXL_AQ_RC_OK) {
5127		printf("%s: %s %08x=%08x failed (%u)\n",
5128		    DEVNAME(sc), __func__, r, v, retval);
5129	}
5130}
5131
5132static int
5133ixl_710_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
5134{
5135	unsigned int i;
5136
5137	for (i = 0; i < nitems(rsskey->key); i++)
5138		ixl_wr_ctl(sc, I40E_PFQF_HKEY(i), rsskey->key[i]);
5139
5140	return (0);
5141}
5142
5143static int
5144ixl_710_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
5145{
5146	unsigned int i;
5147
5148	for (i = 0; i < nitems(lut->entries); i++)
5149		ixl_wr(sc, I40E_PFQF_HLUT(i), lut->entries[i]);
5150
5151	return (0);
5152}
5153
5154static uint32_t
5155ixl_722_rd_ctl(struct ixl_softc *sc, uint32_t r)
5156{
5157	return (ixl_rd(sc, r));
5158}
5159
5160static void
5161ixl_722_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
5162{
5163	ixl_wr(sc, r, v);
5164}
5165
5166static int
5167ixl_722_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
5168{
5169	/* XXX */
5170
5171	return (0);
5172}
5173
5174static int
5175ixl_722_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
5176{
5177	/* XXX */
5178
5179	return (0);
5180}
5181
5182static int
5183ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5184    bus_size_t size, u_int align)
5185{
5186	ixm->ixm_size = size;
5187
5188	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5189	    ixm->ixm_size, 0,
5190	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
5191	    &ixm->ixm_map) != 0)
5192		return (1);
5193	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5194	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5195	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
5196		goto destroy;
5197	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5198	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5199		goto free;
5200	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5201	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5202		goto unmap;
5203
5204	return (0);
5205unmap:
5206	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5207free:
5208	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5209destroy:
5210	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5211	return (1);
5212}
5213
5214static void
5215ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5216{
5217	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5218	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5219	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5220	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5221}
5222
5223#if NKSTAT > 0
5224
5225CTASSERT(KSTAT_KV_U_NONE <= 0xffU);
5226CTASSERT(KSTAT_KV_U_PACKETS <= 0xffU);
5227CTASSERT(KSTAT_KV_U_BYTES <= 0xffU);
5228
5229struct ixl_counter {
5230	const char		*c_name;
5231	uint32_t		 c_base;
5232	uint8_t			 c_width;
5233	uint8_t			 c_type;
5234};
5235
5236const struct ixl_counter ixl_port_counters[] = {
5237	/* GORC */
5238	{ "rx bytes",		0x00300000, 48, KSTAT_KV_U_BYTES },
5239	/* MLFC */
5240	{ "mac local errs",	0x00300020, 32, KSTAT_KV_U_NONE },
5241	/* MRFC */
5242	{ "mac remote errs",	0x00300040, 32, KSTAT_KV_U_NONE },
5243	/* MSPDC */
5244	{ "mac short",		0x00300060, 32, KSTAT_KV_U_PACKETS },
5245	/* CRCERRS */
5246	{ "crc errs",		0x00300080, 32, KSTAT_KV_U_PACKETS },
5247	/* RLEC */
5248	{ "rx len errs",	0x003000a0, 32, KSTAT_KV_U_PACKETS },
5249	/* ERRBC */
5250	{ "byte errs",		0x003000c0, 32, KSTAT_KV_U_PACKETS },
5251	/* ILLERRC */
5252	{ "illegal byte",	0x003000d0, 32, KSTAT_KV_U_PACKETS },
5253	/* RUC */
5254	{ "rx undersize",	0x00300100, 32, KSTAT_KV_U_PACKETS },
5255	/* ROC */
5256	{ "rx oversize",	0x00300120, 32, KSTAT_KV_U_PACKETS },
5257	/* LXONRXCNT */
5258	{ "rx link xon",	0x00300140, 32, KSTAT_KV_U_PACKETS },
5259	/* LXOFFRXCNT */
5260	{ "rx link xoff",	0x00300160, 32, KSTAT_KV_U_PACKETS },
5261
5262	/* Priority XON Received Count */
5263	/* Priority XOFF Received Count */
5264	/* Priority XON to XOFF Count */
5265
5266	/* PRC64 */
5267	{ "rx 64B",		0x00300480, 48, KSTAT_KV_U_PACKETS },
5268	/* PRC127 */
5269	{ "rx 65-127B",		0x003004A0, 48, KSTAT_KV_U_PACKETS },
5270	/* PRC255 */
5271	{ "rx 128-255B",	0x003004C0, 48, KSTAT_KV_U_PACKETS },
5272	/* PRC511 */
5273	{ "rx 256-511B",	0x003004E0, 48, KSTAT_KV_U_PACKETS },
5274	/* PRC1023 */
5275	{ "rx 512-1023B",	0x00300500, 48, KSTAT_KV_U_PACKETS },
5276	/* PRC1522 */
5277	{ "rx 1024-1522B",	0x00300520, 48, KSTAT_KV_U_PACKETS },
5278	/* PRC9522 */
5279	{ "rx 1523-9522B",	0x00300540, 48, KSTAT_KV_U_PACKETS },
5280	/* RFC */
5281	{ "rx fragment",	0x00300560, 32, KSTAT_KV_U_PACKETS },
5282	/* RJC */
5283	{ "rx jabber",		0x00300580, 32, KSTAT_KV_U_PACKETS },
5284	/* UPRC */
5285	{ "rx ucasts",		0x003005a0, 48, KSTAT_KV_U_PACKETS },
5286	/* MPRC */
5287	{ "rx mcasts",		0x003005c0, 48, KSTAT_KV_U_PACKETS },
5288	/* BPRC */
5289	{ "rx bcasts",		0x003005e0, 48, KSTAT_KV_U_PACKETS },
5290	/* RDPC */
5291	{ "rx discards",	0x00300600, 32, KSTAT_KV_U_PACKETS },
5292	/* LDPC */
5293	{ "rx lo discards",	0x00300620, 32, KSTAT_KV_U_PACKETS },
5294	/* RUPP */
5295	{ "rx no dest",		0x00300660, 32, KSTAT_KV_U_PACKETS },
5296
5297	/* GOTC */
5298	{ "tx bytes",		0x00300680, 48, KSTAT_KV_U_BYTES },
5299	/* PTC64 */
5300	{ "tx 64B",		0x003006A0, 48, KSTAT_KV_U_PACKETS },
5301	/* PTC127 */
5302	{ "tx 65-127B",		0x003006C0, 48, KSTAT_KV_U_PACKETS },
5303	/* PTC255 */
5304	{ "tx 128-255B",	0x003006E0, 48, KSTAT_KV_U_PACKETS },
5305	/* PTC511 */
5306	{ "tx 256-511B",	0x00300700, 48, KSTAT_KV_U_PACKETS },
5307	/* PTC1023 */
5308	{ "tx 512-1023B",	0x00300720, 48, KSTAT_KV_U_PACKETS },
5309	/* PTC1522 */
5310	{ "tx 1024-1522B",	0x00300740, 48, KSTAT_KV_U_PACKETS },
5311	/* PTC9522 */
5312	{ "tx 1523-9522B",	0x00300760, 48, KSTAT_KV_U_PACKETS },
5313
5314	/* Priority XON Transmitted Count */
5315	/* Priority XOFF Transmitted Count */
5316
5317	/* LXONTXC */
5318	{ "tx link xon",	0x00300980, 48, KSTAT_KV_U_PACKETS },
5319	/* LXOFFTXC */
5320	{ "tx link xoff",	0x003009a0, 48, KSTAT_KV_U_PACKETS },
5321	/* UPTC */
5322	{ "tx ucasts",		0x003009c0, 48, KSTAT_KV_U_PACKETS },
5323	/* MPTC */
5324	{ "tx mcasts",		0x003009e0, 48, KSTAT_KV_U_PACKETS },
5325	/* BPTC */
5326	{ "tx bcasts",		0x00300a00, 48, KSTAT_KV_U_PACKETS },
5327	/* TDOLD */
5328	{ "tx link down",	0x00300a20, 48, KSTAT_KV_U_PACKETS },
5329};
5330
5331const struct ixl_counter ixl_vsi_counters[] = {
5332	/* VSI RDPC */
5333	{ "rx discards",	0x00310000, 32, KSTAT_KV_U_PACKETS },
5334	/* VSI GOTC */
5335	{ "tx bytes",		0x00328000, 48, KSTAT_KV_U_BYTES },
5336	/* VSI UPTC */
5337	{ "tx ucasts",		0x0033c000, 48, KSTAT_KV_U_PACKETS },
5338	/* VSI MPTC */
5339	{ "tx mcasts",		0x0033cc00, 48, KSTAT_KV_U_PACKETS },
5340	/* VSI BPTC */
5341	{ "tx bcasts",		0x0033d800, 48, KSTAT_KV_U_PACKETS },
5342	/* VSI TEPC */
5343	{ "tx errs",		0x00344000, 48, KSTAT_KV_U_PACKETS },
5344	/* VSI TDPC */
5345	{ "tx discards",	0x00348000, 48, KSTAT_KV_U_PACKETS },
5346	/* VSI GORC */
5347	{ "rx bytes",		0x00358000, 48, KSTAT_KV_U_BYTES },
5348	/* VSI UPRC */
5349	{ "rx ucasts",		0x0036c000, 48, KSTAT_KV_U_PACKETS },
5350	/* VSI MPRC */
5351	{ "rx mcasts",		0x0036cc00, 48, KSTAT_KV_U_PACKETS },
5352	/* VSI BPRC */
5353	{ "rx bcasts",		0x0036d800, 48, KSTAT_KV_U_PACKETS },
5354	/* VSI RUPP */
5355	{ "rx noproto",		0x0036e400, 32, KSTAT_KV_U_PACKETS },
5356};
5357
5358struct ixl_counter_state {
5359	const struct ixl_counter
5360				*counters;
5361	uint64_t		*values;
5362	size_t			 n;
5363	uint32_t		 index;
5364	unsigned int		 gen;
5365};
5366
5367static void
5368ixl_rd_counters(struct ixl_softc *sc, const struct ixl_counter_state *state,
5369    uint64_t *vs)
5370{
5371	const struct ixl_counter *c;
5372	bus_addr_t r;
5373	uint64_t v;
5374	size_t i;
5375
5376	for (i = 0; i < state->n; i++) {
5377		c = &state->counters[i];
5378
5379		r = c->c_base + (state->index * 8);
5380
5381		if (c->c_width == 32)
5382			v = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
5383		else
5384			v = bus_space_read_8(sc->sc_memt, sc->sc_memh, r);
5385
5386		vs[i] = v;
5387	}
5388}
5389
5390static int
5391ixl_kstat_read(struct kstat *ks)
5392{
5393	struct ixl_softc *sc = ks->ks_softc;
5394	struct kstat_kv *kvs = ks->ks_data;
5395	struct ixl_counter_state *state = ks->ks_ptr;
5396	unsigned int gen = (state->gen++) & 1;
5397	uint64_t *ovs = state->values + (gen * state->n);
5398	uint64_t *nvs = state->values + (!gen * state->n);
5399	size_t i;
5400
5401	ixl_rd_counters(sc, state, nvs);
5402	getnanouptime(&ks->ks_updated);
5403
5404	for (i = 0; i < state->n; i++) {
5405		const struct ixl_counter *c = &state->counters[i];
5406		uint64_t n = nvs[i], o = ovs[i];
5407
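		/*
		 * Counters narrower than 64 bits wrap.  If the new
		 * sample is smaller than the old one, assume a single
		 * wrap and add the counter's modulus back: a 32-bit
		 * counter stepping from 0xfffffff0 to 0x10 accounts
		 * for a delta of 0x20.
		 */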
5408		if (c->c_width < 64) {
5409			if (n < o)
5410				n += (1ULL << c->c_width);
5411		}
5412
5413		kstat_kv_u64(&kvs[i]) += (n - o);
5414	}
5415
5416	return (0);
5417}
5418
5419static void
5420ixl_kstat_tick(void *arg)
5421{
5422	struct ixl_softc *sc = arg;
5423
5424	timeout_add_sec(&sc->sc_kstat_tmo, 4);
5425
5426	mtx_enter(&sc->sc_kstat_mtx);
5427
5428	ixl_kstat_read(sc->sc_port_kstat);
5429	ixl_kstat_read(sc->sc_vsi_kstat);
5430
5431	mtx_leave(&sc->sc_kstat_mtx);
5432}
5433
5434static struct kstat *
5435ixl_kstat_create(struct ixl_softc *sc, const char *name,
5436    const struct ixl_counter *counters, size_t n, uint32_t index)
5437{
5438	struct kstat *ks;
5439	struct kstat_kv *kvs;
5440	struct ixl_counter_state *state;
5441	const struct ixl_counter *c;
5442	unsigned int i;
5443
5444	ks = kstat_create(DEVNAME(sc), 0, name, 0, KSTAT_T_KV, 0);
5445	if (ks == NULL) {
5446		/* unable to create kstats */
5447		return (NULL);
5448	}
5449
5450	kvs = mallocarray(n, sizeof(*kvs), M_DEVBUF, M_WAITOK|M_ZERO);
5451	for (i = 0; i < n; i++) {
5452		c = &counters[i];
5453
5454		kstat_kv_unit_init(&kvs[i], c->c_name,
5455		    KSTAT_KV_T_COUNTER64, c->c_type);
5456	}
5457
5458	ks->ks_data = kvs;
5459	ks->ks_datalen = n * sizeof(*kvs);
5460	ks->ks_read = ixl_kstat_read;
5461
5462	state = malloc(sizeof(*state), M_DEVBUF, M_WAITOK|M_ZERO);
5463	state->counters = counters;
5464	state->n = n;
5465	state->values = mallocarray(n * 2, sizeof(*state->values),
5466	    M_DEVBUF, M_WAITOK|M_ZERO);
5467	state->index = index;
5468	ks->ks_ptr = state;
5469
5470	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
5471	ks->ks_softc = sc;
5472	kstat_install(ks);
5473
5474	/* fetch a baseline */
5475	ixl_rd_counters(sc, state, state->values);
5476
5477	return (ks);
5478}
5479
5480static void
5481ixl_kstat_attach(struct ixl_softc *sc)
5482{
5483	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
5484	timeout_set(&sc->sc_kstat_tmo, ixl_kstat_tick, sc);
5485
5486	sc->sc_port_kstat = ixl_kstat_create(sc, "ixl-port",
5487	    ixl_port_counters, nitems(ixl_port_counters), sc->sc_port);
5488	sc->sc_vsi_kstat = ixl_kstat_create(sc, "ixl-vsi",
5489	    ixl_vsi_counters, nitems(ixl_vsi_counters),
5490	    lemtoh16(&sc->sc_vsi_number));
5491
5492	/* ixl counters go up even when the interface is down */
5493	timeout_add_sec(&sc->sc_kstat_tmo, 4);
5494}
5495
5496#endif /* NKSTAT > 0 */
5497