/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl.h"
#include "ixl_pf.h"

#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif

#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR	2
#define IXL_DRIVER_VERSION_MINOR	3
#define IXL_DRIVER_VERSION_BUILD	0

#define IXL_DRIVER_VERSION_STRING			\
    __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_MINOR) "."		\
    __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"
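
/*
 * With the values above, __XSTRING() stringifies each component, so
 * IXL_DRIVER_VERSION_STRING expands to "2.3.0-k".  This is the string
 * handed to iflib via isc_driver_version below and reported through
 * the per-device iflib sysctl tree (e.g. dev.ixl.0.iflib.driver_version).
 */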

/*********************************************************************
 *  PCI Device ID Table
 *
 *  Used by the probe routine to select the devices to attach to
 *
 *  ( Vendor ID, Device ID, Branding String )
 *********************************************************************/

static pci_vendor_info_t ixl_vendor_info_array[] =
{
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, "Intel(R) Ethernet Controller X710 for 10GBASE-T"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, "Intel(R) Ethernet Controller X710 for 10GbE backplane"),
	PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_5G_BASE_T_BC, "Intel(R) Ethernet Controller V710 for 5GBASE-T"),
	/* required last entry */
	PVID_END
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
/*** IFLIB interface ***/
static void	*ixl_register(device_t dev);
static int	 ixl_if_attach_pre(if_ctx_t ctx);
static int	 ixl_if_attach_post(if_ctx_t ctx);
static int	 ixl_if_detach(if_ctx_t ctx);
static int	 ixl_if_shutdown(if_ctx_t ctx);
static int	 ixl_if_suspend(if_ctx_t ctx);
static int	 ixl_if_resume(if_ctx_t ctx);
static int	 ixl_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void	 ixl_if_enable_intr(if_ctx_t ctx);
static void	 ixl_if_disable_intr(if_ctx_t ctx);
static int	 ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int	 ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int	 ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int	 ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static void	 ixl_if_queues_free(if_ctx_t ctx);
static void	 ixl_if_update_admin_status(if_ctx_t ctx);
static void	 ixl_if_multi_set(if_ctx_t ctx);
static int	 ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void	 ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int	 ixl_if_media_change(if_ctx_t ctx);
static int	 ixl_if_promisc_set(if_ctx_t ctx, int flags);
static void	 ixl_if_timer(if_ctx_t ctx, uint16_t qid);
static void	 ixl_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void	 ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static uint64_t	 ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt);
static int	 ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int	 ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static bool	 ixl_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
#ifdef PCI_IOV
static void	 ixl_if_vflr_handle(if_ctx_t ctx);
#endif

/*** Other ***/
static int	 ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int);
static void	 ixl_save_pf_tunables(struct ixl_pf *);
static int	 ixl_allocate_pci_resources(struct ixl_pf *);
static void	 ixl_setup_ssctx(struct ixl_pf *pf);
static void	 ixl_admin_timer(void *arg);

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixl_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
#ifdef PCI_IOV
	DEVMETHOD(pci_iov_init, iflib_device_iov_init),
	DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
#endif
	DEVMETHOD_END
};

static driver_t ixl_driver = {
	"ixl", ixl_methods, sizeof(struct ixl_pf),
};

devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array);
MODULE_VERSION(ixl, 3);

MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
MODULE_DEPEND(ixl, iflib, 1, 1, 1);

static device_method_t ixl_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixl_if_attach_post),
	DEVMETHOD(ifdi_detach, ixl_if_detach),
	DEVMETHOD(ifdi_shutdown, ixl_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixl_if_suspend),
	DEVMETHOD(ifdi_resume, ixl_if_resume),
	DEVMETHOD(ifdi_init, ixl_if_init),
	DEVMETHOD(ifdi_stop, ixl_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixl_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixl_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixl_if_media_status),
	DEVMETHOD(ifdi_media_change, ixl_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixl_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixl_if_get_counter),
	DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req),
	DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl),
	DEVMETHOD(ifdi_needs_restart, ixl_if_needs_restart),
#ifdef PCI_IOV
	DEVMETHOD(ifdi_iov_init, ixl_if_iov_init),
	DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit),
	DEVMETHOD(ifdi_iov_vf_add, ixl_if_iov_vf_add),
	DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle),
#endif
	// ifdi_led_func
	// ifdi_debug
	DEVMETHOD_END
};

static driver_t ixl_if_driver = {
	"ixl_if", ixl_if_methods, sizeof(struct ixl_pf)
};

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
    "ixl driver parameters");

#ifdef IXL_DEBUG_FC
/*
 * Leave this on unless you need to send flow control
 * frames (or other control frames) from software
 */
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
    &ixl_enable_tx_fc_filter);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
    &ixl_enable_tx_fc_filter, 0,
    "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
#endif
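
/*
 * All of the hw.ixl.* knobs in this file are boot-time tunables
 * (CTLFLAG_RDTUN): they are read once when the module initializes and
 * cannot be changed on a running system.  A minimal usage sketch,
 * assuming the driver is loaded from loader.conf(5):
 *
 *	if_ixl_load="YES"
 *	hw.ixl.enable_tx_fc_filter="0"
 *
 * The current value can still be inspected at runtime, e.g. with
 * `sysctl hw.ixl.enable_tx_fc_filter`, but not modified.
 */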

#ifdef IXL_DEBUG
static int ixl_debug_recovery_mode = 0;
TUNABLE_INT("hw.ixl.debug_recovery_mode",
    &ixl_debug_recovery_mode);
SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
    &ixl_debug_recovery_mode, 0,
    "Act as if FW entered recovery mode (for debugging)");
#endif

static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
    &ixl_i2c_access_method);
SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
    &ixl_i2c_access_method, 0,
    IXL_SYSCTL_HELP_I2C_METHOD);

static int ixl_enable_vf_loopback = 1;
TUNABLE_INT("hw.ixl.enable_vf_loopback",
    &ixl_enable_vf_loopback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_vf_loopback, CTLFLAG_RDTUN,
    &ixl_enable_vf_loopback, 0,
    IXL_SYSCTL_HELP_VF_LOOPBACK);

/*
 * Selects the method used for processing TX descriptor
 * completion.
 */
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
    &ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
    &ixl_enable_head_writeback, 0,
    "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");

static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
    &ixl_core_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
    &ixl_core_debug_mask, 0,
    "Display debug statements that are printed in non-shared code");

static int ixl_shared_debug_mask = 0;
TUNABLE_INT("hw.ixl.shared_debug_mask",
    &ixl_shared_debug_mask);
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
    &ixl_shared_debug_mask, 0,
    "Display debug statements that are printed in shared code");

#if 0
/*
** Controls for Interrupt Throttling
**	- true/false for dynamic adjustment
**	- default values for static ITR
*/
static int ixl_dynamic_rx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");

static int ixl_dynamic_tx_itr = 0;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
#endif

static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
    &ixl_rx_itr, 0, "RX Interrupt Rate");

static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
    &ixl_tx_itr, 0, "TX Interrupt Rate");
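
/*
 * Worked example, assuming the hardware interprets ITR values in
 * 2-usec units (as documented for the X710/XL710 family): an "8K"
 * setting programs a minimum inter-interrupt gap of roughly 125 usec,
 * i.e. at most ~8000 RX interrupts per second per queue, and the "4K"
 * TX default halves that rate.  The tunables take the raw register
 * value, so to lower the RX interrupt rate from loader.conf(5):
 *
 *	hw.ixl.rx_itr="<raw ITR value, e.g. the value of IXL_ITR_4K>"
 */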

#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
    &ixl_enable_iwarp, 0, "iWARP enabled");

#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
    &ixl_limit_iwarp_msix, 0, "Limit MSI-X vectors assigned to iWARP");
#endif

extern struct if_txrx ixl_txrx_hwb;
extern struct if_txrx ixl_txrx_dwb;

static struct if_shared_ctx ixl_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE,
	.isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_rx_maxsize = 16384,
	.isc_rx_nsegments = IXL_MAX_RX_SEGS,
	.isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,

	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixl_vendor_info_array,
	.isc_driver_version = IXL_DRIVER_VERSION_STRING,
	.isc_driver = &ixl_if_driver,
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN,

	.isc_nrxd_min = {IXL_MIN_RING},
	.isc_ntxd_min = {IXL_MIN_RING},
	.isc_nrxd_max = {IXL_MAX_RING},
	.isc_ntxd_max = {IXL_MAX_RING},
	.isc_nrxd_default = {IXL_DEFAULT_RING},
	.isc_ntxd_default = {IXL_DEFAULT_RING},
};

if_shared_ctx_t ixl_sctx = &ixl_sctx_init;

/*** Functions ***/
static void *
ixl_register(device_t dev)
{
	return (ixl_sctx);
}

static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	device_t dev = iflib_get_dev(pf->vsi.ctx);
	struct i40e_hw *hw = &pf->hw;
	int             rid;

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
		return (ENXIO);
	}

	/* Save off the PCI information */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
	hw->subsystem_vendor_id =
	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
	hw->subsystem_device_id =
	    pci_read_config(dev, PCIR_SUBDEV_0, 2);

	hw->bus.device = pci_get_slot(dev);
	hw->bus.func = pci_get_function(dev);

	/* Save off register access information */
	pf->osdep.mem_bus_space_tag =
		rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
		rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->osdep.dev = dev;

	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
	pf->hw.back = &pf->osdep;

	return (0);
}
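
/*
 * The osdep fields saved above are what the driver's register
 * accessors are built on.  A minimal sketch of how a read is assumed
 * to go through them (the real rd32()/wr32() macros live in the
 * OS-dependent headers):
 *
 *	u32 val = bus_space_read_4(pf->osdep.mem_bus_space_tag,
 *	    pf->osdep.mem_bus_space_handle, I40E_GLGEN_STAT);
 *
 * flush_reg names a harmless register that can be read back to force
 * posted writes out to the device.
 */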

static void
ixl_setup_ssctx(struct ixl_pf *pf)
{
	if_softc_ctx_t scctx = pf->vsi.shared;
	struct i40e_hw *hw = &pf->hw;

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
		scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
	} else if (hw->mac.type == I40E_MAC_X722)
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
	else
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;

	if (pf->vsi.enable_head_writeback) {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_hwb;
	} else {
		scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
		    * sizeof(struct i40e_tx_desc), DBA_ALIGN);
		scctx->isc_txrx = &ixl_txrx_dwb;
	}

	scctx->isc_txrx->ift_legacy_intr = ixl_intr;
	scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
	    * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
	scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
	scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
	scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
	scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
	scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
	scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
	scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
}
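
/*
 * Worked example for the TX queue sizing above, assuming the default
 * ring of 1024 descriptors and a DBA_ALIGN of 128: an i40e_tx_desc is
 * 16 bytes, so the descriptor-writeback case needs 1024 * 16 = 16384
 * bytes, which is already 128-byte aligned.  The head-writeback case
 * appends one u32 for the hardware head index, and roundup2(16388,
 * 128) yields 16512 bytes per TX queue.
 */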

static void
ixl_admin_timer(void *arg)
{
	struct ixl_pf *pf = (struct ixl_pf *)arg;

	/* Fire off the admin task */
	iflib_admin_intr_deferred(pf->vsi.ctx);

	/* Reschedule the admin timer */
	callout_schedule(&pf->admin_timer, hz/2);
}
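
/*
 * The callout is re-armed with hz/2 ticks, so the admin task runs
 * roughly every 500 ms regardless of the kern.hz setting.  Note that
 * the timer only schedules work: the actual processing happens in
 * ixl_if_update_admin_status() on the iflib admin taskqueue.
 */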

static int
ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;

	device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");

	i40e_get_mac_addr(hw, hw->mac.addr);

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	ixl_setup_ssctx(pf);

	return (0);
}

static int
ixl_if_attach_pre(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	enum i40e_get_fw_lldp_status_resp lldp_status;
	struct i40e_filter_control_settings filter;
	enum i40e_status_code status;
	int error = 0;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->back = pf;
	pf->dev = dev;
	hw = &pf->hw;

	vsi->dev = dev;
	vsi->hw = &pf->hw;
	vsi->id = 0;
	vsi->num_vlans = 0;
	vsi->ctx = ctx;
	vsi->media = iflib_get_media(ctx);
	vsi->shared = iflib_get_softc_ctx(ctx);

	snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
	    "%s:admin", device_get_nameunit(dev));
	mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
	callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

	/* Save tunable values */
	ixl_save_pf_tunables(pf);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci_res;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	i40e_set_mac_type(hw);

	error = ixl_pf_reset(pf);
	if (error)
		goto err_out;

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}
	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		error = ixl_attach_pre_recovery_mode(pf);
		if (error)
			goto err_out;
		return (error);
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "get_hw_capabilities failed: %d\n",
		    error);
		goto err_get_cap;
	}

	/* Set up host memory cache */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err_mac_hmc;

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, true, false, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Try enabling Energy Efficient Ethernet (EEE) mode */
	if (i40e_enable_eee(hw, true) == I40E_SUCCESS)
		atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
	else
		atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	iflib_set_mac(ctx, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Set up the device filtering */
	bzero(&filter, sizeof(filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;
	filter.enable_fdir = FALSE;
	filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
	if (i40e_set_filter_control(hw, &filter))
		device_printf(dev, "i40e_set_filter_control() failed\n");

	/* Query device FW LLDP status */
	if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
		if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
			atomic_set_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		} else {
			atomic_clear_32(&pf->state,
			    IXL_PF_STATE_FW_LLDP_DISABLED);
		}
	}

	/* Tell FW to apply DCB config on link up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Fill out iflib parameters */
	ixl_setup_ssctx(pf);

	INIT_DBG_DEV(dev, "end");
	return (0);

err_mac_hmc:
	ixl_shutdown_hmc(pf);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
err_pci_res:
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);
	return (error);
}

static int
ixl_if_attach_post(if_ctx_t ctx)
{
	device_t dev;
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	int error = 0;
	enum i40e_status_code status;

	dev = iflib_get_dev(ctx);
	pf = iflib_get_softc(ctx);

	INIT_DBG_DEV(dev, "begin");

	vsi = &pf->vsi;
	vsi->ifp = iflib_get_ifp(ctx);
	hw = &pf->hw;

	/* Save off determined number of queues for interface */
	vsi->num_rx_queues = vsi->shared->isc_nrxqsets;
	vsi->num_tx_queues = vsi->shared->isc_ntxqsets;

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, pf)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err;
	}

	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
		/* Keep admin queue interrupts active while driver is loaded */
		if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
			ixl_configure_intr0_msix(pf);
			ixl_enable_intr0(hw);
		}

		ixl_add_sysctls_recovery_mode(pf);

		/* Start the admin timer */
		mtx_lock(&pf->admin_mtx);
		callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
		mtx_unlock(&pf->admin_mtx);
		return (0);
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		     error);
		goto err;
	}

	/* Add protocol filters to list */
	ixl_init_filters(vsi);

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr,
	    max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Limit PHY interrupts to link, autoneg, and module failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
	if (status) {
		device_printf(dev, "i40e_aq_set_phy_int_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err;
	}

	/* Get the bus configuration and set the shared code */
	ixl_get_bus_info(pf);

	/* Keep admin queue interrupts active while driver is loaded */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);
	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	hw->phy.get_link_info = true;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iWARP driver failed: %d\n",
				    error);
				goto err;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev, "iWARP disabled on this device "
			    "(no MSI-X vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif
	/* Start the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
	mtx_unlock(&pf->admin_mtx);

	INIT_DBG_DEV(dev, "end");
	return (0);

err:
	INIT_DEBUGOUT("end: error %d", error);
	/* ixl_if_detach() is called on error from this */
	return (error);
}

/**
 * XXX: iflib always ignores the return value of detach(), so this
 * routine is not allowed to fail.
 */
static int
ixl_if_detach(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	enum i40e_status_code	status;
#ifdef IXL_IW
	int			error;
#endif

	INIT_DBG_DEV(dev, "begin");

	/* Stop the admin timer */
	mtx_lock(&pf->admin_mtx);
	callout_stop(&pf->admin_timer);
	mtx_unlock(&pf->admin_mtx);
	mtx_destroy(&pf->admin_mtx);

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		error = ixl_iw_pf_detach(pf);
		if (error == EBUSY) {
			device_printf(dev, "iwarp in use; stop it first.\n");
			//return (error);
		}
	}
#endif
	/* Remove all previously allocated media types */
	ifmedia_removeall(vsi->media);

	/* Shutdown LAN HMC */
	ixl_shutdown_hmc(pf);

	/* Shutdown admin queue */
	ixl_disable_intr0(hw);
	status = i40e_shutdown_adminq(hw);
	if (status)
		device_printf(dev,
		    "i40e_shutdown_adminq() failed with status %s\n",
		    i40e_stat_str(hw, status));

	ixl_pf_qmgr_destroy(&pf->qmgr);
	ixl_free_pci_resources(pf);
	ixl_free_mac_filters(vsi);
	INIT_DBG_DEV(dev, "end");
	return (0);
}

static int
ixl_if_shutdown(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_shutdown: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_suspend(if_ctx_t ctx)
{
	int error = 0;

	INIT_DEBUGOUT("ixl_if_suspend: begin");

	/* TODO: Call ixl_if_stop()? */

	/* TODO: Then setup low power mode */

	return (error);
}

static int
ixl_if_resume(if_ctx_t ctx)
{
	struct ifnet *ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixl_if_resume: begin");

	/* Read & clear wake-up registers */

	/* Required after D3->D0 transition */
	if (ifp->if_flags & IFF_UP)
		ixl_if_init(ctx);

	return (0);
}

void
ixl_if_init(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = &pf->hw;
	struct ifnet *ifp = iflib_get_ifp(ctx);
	device_t 	dev = iflib_get_dev(ctx);
	u8		tmpaddr[ETHER_ADDR_LEN];
	int		ret;

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;
	/*
	 * If the aq is dead here, it probably means something outside of the
	 * driver did something to the adapter, like a PF reset.
	 * So, rebuild the driver's state here if that occurs.
	 */
	if (!i40e_check_asq_alive(&pf->hw)) {
		device_printf(dev, "Admin Queue is down; resetting...\n");
		ixl_teardown_hw_structs(pf);
		ixl_rebuild_hw_structs_after_reset(pf, false);
	}

	/* Get the latest mac address... User might use a LAA */
	bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN);
	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
		ret = i40e_aq_mac_address_write(hw,
		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
		    hw->mac.addr, NULL);
		if (ret) {
			device_printf(dev, "LLA address change failed!!\n");
			return;
		}
		ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
	}

	iflib_set_mac(ctx, hw->mac.addr);

	/* Prepare the VSI: rings, hmc contexts, etc... */
	if (ixl_initialize_vsi(vsi)) {
		device_printf(dev, "initialize vsi failed!!\n");
		return;
	}

	/* Reconfigure multicast filters in HW */
	ixl_if_multi_set(ctx);

	/* Set up RSS */
	ixl_config_rss(pf);

	/* Set up MSI-X routing and the ITR settings */
	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		ixl_configure_queue_intr_msix(pf);
		ixl_configure_itr(pf);
	} else
		ixl_configure_legacy(pf);

	if (vsi->enable_head_writeback)
		ixl_init_tx_cidx(vsi);
	else
		ixl_init_tx_rsqs(vsi);

	ixl_enable_rings(vsi);

	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);

	/* Re-add configured filters to HW */
	ixl_reconfigure_filters(vsi);

	/* Configure promiscuous mode */
	ixl_if_promisc_set(ctx, if_getflags(ifp));

#ifdef IXL_IW
	if (ixl_enable_iwarp && pf->iw_enabled) {
		ret = ixl_iw_pf_init(pf);
		if (ret)
			device_printf(dev,
			    "initialize iwarp failed, code %d\n", ret);
	}
#endif
}

void
ixl_if_stop(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	INIT_DEBUGOUT("ixl_if_stop: begin\n");

	if (IXL_PF_IN_RECOVERY_MODE(pf))
		return;

	// TODO: This may need to be reworked
#ifdef IXL_IW
	/* Stop iWARP device */
	if (ixl_enable_iwarp && pf->iw_enabled)
		ixl_iw_pf_stop(pf);
#endif

	ixl_disable_rings_intr(vsi);
	ixl_disable_rings(pf, vsi, &pf->qtag);
}

static int
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *rx_que = vsi->rx_queues;
	struct ixl_tx_queue *tx_que = vsi->tx_queues;
	int err, i, rid, vector = 0;
	char buf[16];

	MPASS(vsi->shared->isc_nrxqsets > 0);
	MPASS(vsi->shared->isc_ntxqsets > 0);

	/* Admin Queue must use vector 0 */
	rid = vector + 1;
	err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN,
	    ixl_msix_adminq, pf, 0, "aq");
	if (err) {
		iflib_irq_free(ctx, &vsi->irq);
		device_printf(iflib_get_dev(ctx),
		    "Failed to register Admin Queue handler\n");
		return (err);
	}
	/* Create soft IRQ for handling VFLRs */
	iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");

	/* Now set up the stations */
	for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf);
		/* XXX: Does the driver work as expected if there are fewer num_rx_queues than
		 * what's expected in the iflib context? */
		if (err) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate queue RX int vector %d, err: %d\n", i, err);
			vsi->num_rx_queues = i + 1;
			goto fail;
		}
		rx_que->msix = vector;
	}

	bzero(buf, sizeof(buf));

	for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		iflib_softirq_alloc_generic(ctx,
		    &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);

		/* TODO: Maybe call a strategy function for this to figure out which
		 * interrupts to map Tx queues to. I don't know if there's an immediately
		 * better way than this other than a user-supplied map, though. */
		tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1;
	}

	return (0);
fail:
	iflib_irq_free(ctx, &vsi->irq);
	rx_que = vsi->rx_queues;
	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);
	return (err);
}
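
/*
 * Resulting vector layout, as a worked example with 4 RX and 8 TX
 * queue sets: MSI-X vector 0 services the admin queue, vectors 1-4
 * service rxq0-rxq3, and TX queues are software-bound round-robin to
 * the RX vectors, so txq4 shares vector 1 with rxq0
 * (tx_que->msix = (4 % 4) + 1).  TX completion work therefore
 * piggybacks on RX interrupts rather than owning vectors of its own.
 */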

/*
 * Enable all interrupts
 *
 * Called in:
 * iflib_init_locked, after ixl_if_init()
 */
static void
ixl_if_enable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*que = vsi->rx_queues;

	ixl_enable_intr0(hw);
	/* Enable queue interrupts */
	for (int i = 0; i < vsi->num_rx_queues; i++, que++)
		/* TODO: Queue index parameter is probably wrong */
		ixl_enable_queue(hw, que->rxr.me);
}

/*
 * Disable queue interrupts
 *
 * Other interrupt causes need to remain active.
 */
static void
ixl_if_disable_intr(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*rx_que = vsi->rx_queues;

	if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
		for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
			ixl_disable_queue(hw, rx_que->msix - 1);
	} else {
		/*
		 * Setting PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF marks the
		 * interrupt linked list as empty, which stops queues
		 * from triggering interrupts.
		 */
		wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
	}
}

static int
ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw		*hw = vsi->hw;
	struct ixl_rx_queue	*rx_que = &vsi->rx_queues[rxqid];

	ixl_enable_queue(hw, rx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid];

	ixl_enable_queue(hw, tx_que->msix - 1);
	return (0);
}

static int
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_softc_ctx_t scctx = vsi->shared;
	struct ixl_tx_queue *que;
	int i, j, error = 0;

	MPASS(scctx->isc_ntxqsets > 0);
	MPASS(ntxqs == 1);
	MPASS(scctx->isc_ntxqsets == ntxqsets);

	/* Allocate queue structure memory */
	if (!(vsi->tx_queues =
	    (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) * ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		que->vsi = vsi;

		if (!vsi->enable_head_writeback) {
			/* Allocate report status array */
			if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) {
				device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n");
				error = ENOMEM;
				goto fail;
			}
			/* Init report status array */
			for (j = 0; j < scctx->isc_ntxd[0]; j++)
				txr->tx_rsq[j] = QIDX_INVALID;
		}
		/* get the virtual and physical address of the hardware queues */
		txr->tail = I40E_QTX_TAIL(txr->me);
		txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs];
		txr->tx_paddr = paddrs[i * ntxqs];
		txr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}
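
/*
 * Note that txr->tail saved above is a register *offset*, not a
 * pointer; the TX path is assumed to bump the hardware tail with
 * something along the lines of
 *
 *	wr32(vsi->hw, txr->tail, pidx);
 *
 * where pidx is the iflib producer index for that ring.
 */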

static int
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ixl_rx_queue *que;
	int i, error = 0;

#ifdef INVARIANTS
	if_softc_ctx_t scctx = vsi->shared;
	MPASS(scctx->isc_nrxqsets > 0);
	MPASS(nrxqs == 1);
	MPASS(scctx->isc_nrxqsets == nrxqsets);
#endif

	/* Allocate queue structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	if (!vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}

	if (!IXL_PF_IN_RECOVERY_MODE(pf))
		sysctl_ctx_free(&vsi->sysctl_ctx);
}

void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static void
ixl_handle_lan_overflow_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
{
	device_t dev = pf->dev;
	u32 rxq_idx, qtx_ctl;

	rxq_idx = (e->desc.params.external.param0 & I40E_PRTDCB_RUPTQ_RXQNUM_MASK) >>
	    I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT;
	qtx_ctl = e->desc.params.external.param1;

	device_printf(dev, "LAN overflow event: global rxq_idx %d\n", rxq_idx);
	device_printf(dev, "LAN overflow event: QTX_CTL 0x%08x\n", qtx_ctl);
}

static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
		 */
		case i40e_aqc_opc_event_lan_overflow:
			ixl_handle_lan_overflow_event(pf, &event);
			break;
		default:
			break;
		}
	} while (*pending && (loop++ < IXL_ADM_LIMIT));

	free(event.msg_buf, M_IXL);

	/* Re-enable admin queue interrupt cause */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);

	return (status);
}

static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
	struct ixl_pf	*pf = iflib_get_softc(ctx);
	struct i40e_hw	*hw = &pf->hw;
	u16		pending;

	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
		ixl_handle_empr_reset(pf);

	/*
	 * The Admin Queue is shut down while handling a reset.
	 * Don't proceed if it hasn't been re-initialized, e.g.
	 * due to an issue with new FW.
	 */
	if (!i40e_check_asq_alive(&pf->hw))
		return;

	if (pf->state & IXL_PF_STATE_MDD_PENDING)
		ixl_handle_mdd_event(pf);

	ixl_process_adminq(pf, &pending);
	ixl_update_link_status(pf);

	/*
	 * If there are still messages to process, reschedule ourselves.
	 * Otherwise, re-enable our interrupt and go to sleep.
	 */
	if (pending > 0)
		iflib_admin_intr_deferred(ctx);
	else
		ixl_enable_intr0(hw);
}

static void
ixl_if_multi_set(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = vsi->hw;
	int mcnt = 0, flags;
	int del_mcnt;

	IOCTL_DEBUGOUT("ixl_if_multi_set: begin");

	mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR);
	/* Delete filters for removed multicast addresses */
	del_mcnt = ixl_del_multi(vsi);
	vsi->num_macs -= del_mcnt;

	if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
		i40e_aq_set_vsi_multicast_promiscuous(hw,
		    vsi->seid, TRUE, NULL);
		return;
	}
	/* (re-)install filters for all mcast addresses */
	/* XXX: This bypasses filter count tracking code! */
	mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi);

	if (mcnt > 0) {
		vsi->num_macs += mcnt;
		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
		ixl_add_hw_filters(vsi, flags, mcnt);
	}

	ixl_dbg_filter(pf, "%s: filter mac total: %d\n",
	    __func__, vsi->num_macs);
	IOCTL_DEBUGOUT("ixl_if_multi_set: end");
}

static int
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN -
		ETHER_VLAN_ENCAP_LEN)
		return (EINVAL);

	vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
		ETHER_VLAN_ENCAP_LEN;

	return (0);
}
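
/*
 * Worked example, assuming IXL_MAX_FRAME is 9728 bytes: the largest
 * accepted MTU is 9728 - 14 (Ethernet header) - 4 (CRC) - 4 (VLAN
 * tag) = 9706, and setting that MTU makes isc_max_frame_size come
 * back out as the full 9728-byte on-wire frame.
 */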

static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw  *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	switch (hw->phy.link_info.phy_type) {
		/* 100 M */
		case I40E_PHY_TYPE_100BASE_TX:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		/* 1 G */
		case I40E_PHY_TYPE_1000BASE_T:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		case I40E_PHY_TYPE_1000BASE_SX:
			ifmr->ifm_active |= IFM_1000_SX;
			break;
		case I40E_PHY_TYPE_1000BASE_LX:
			ifmr->ifm_active |= IFM_1000_LX;
			break;
		case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		/* 2.5 G */
		case I40E_PHY_TYPE_2_5GBASE_T:
			ifmr->ifm_active |= IFM_2500_T;
			break;
		/* 5 G */
		case I40E_PHY_TYPE_5GBASE_T:
			ifmr->ifm_active |= IFM_5000_T;
			break;
		/* 10 G */
		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_SR:
			ifmr->ifm_active |= IFM_10G_SR;
			break;
		case I40E_PHY_TYPE_10GBASE_LR:
			ifmr->ifm_active |= IFM_10G_LR;
			break;
		case I40E_PHY_TYPE_10GBASE_T:
			ifmr->ifm_active |= IFM_10G_T;
			break;
		case I40E_PHY_TYPE_XAUI:
		case I40E_PHY_TYPE_XFI:
			ifmr->ifm_active |= IFM_10G_TWINAX;
			break;
		case I40E_PHY_TYPE_10GBASE_AOC:
			ifmr->ifm_active |= IFM_10G_AOC;
			break;
		/* 25 G */
		case I40E_PHY_TYPE_25GBASE_KR:
			ifmr->ifm_active |= IFM_25G_KR;
			break;
		case I40E_PHY_TYPE_25GBASE_CR:
			ifmr->ifm_active |= IFM_25G_CR;
			break;
		case I40E_PHY_TYPE_25GBASE_SR:
			ifmr->ifm_active |= IFM_25G_SR;
			break;
		case I40E_PHY_TYPE_25GBASE_LR:
			ifmr->ifm_active |= IFM_25G_LR;
			break;
		case I40E_PHY_TYPE_25GBASE_AOC:
			ifmr->ifm_active |= IFM_25G_AOC;
			break;
		case I40E_PHY_TYPE_25GBASE_ACC:
			ifmr->ifm_active |= IFM_25G_ACC;
			break;
		/* 40 G */
		case I40E_PHY_TYPE_40GBASE_CR4:
		case I40E_PHY_TYPE_40GBASE_CR4_CU:
			ifmr->ifm_active |= IFM_40G_CR4;
			break;
		case I40E_PHY_TYPE_40GBASE_SR4:
			ifmr->ifm_active |= IFM_40G_SR4;
			break;
		case I40E_PHY_TYPE_40GBASE_LR4:
			ifmr->ifm_active |= IFM_40G_LR4;
			break;
		case I40E_PHY_TYPE_XLAUI:
			ifmr->ifm_active |= IFM_OTHER;
			break;
		case I40E_PHY_TYPE_1000BASE_KX:
			ifmr->ifm_active |= IFM_1000_KX;
			break;
		case I40E_PHY_TYPE_SGMII:
			ifmr->ifm_active |= IFM_1000_SGMII;
			break;
		/* ERJ: What's the difference between these? */
		case I40E_PHY_TYPE_10GBASE_CR1_CU:
		case I40E_PHY_TYPE_10GBASE_CR1:
			ifmr->ifm_active |= IFM_10G_CR1;
			break;
		case I40E_PHY_TYPE_10GBASE_KX4:
			ifmr->ifm_active |= IFM_10G_KX4;
			break;
		case I40E_PHY_TYPE_10GBASE_KR:
			ifmr->ifm_active |= IFM_10G_KR;
			break;
		case I40E_PHY_TYPE_SFI:
			ifmr->ifm_active |= IFM_10G_SFI;
			break;
		/* Our single 20G media type */
		case I40E_PHY_TYPE_20GBASE_KR2:
			ifmr->ifm_active |= IFM_20G_KR2;
			break;
		case I40E_PHY_TYPE_40GBASE_KR4:
			ifmr->ifm_active |= IFM_40G_KR4;
			break;
		case I40E_PHY_TYPE_XLPPI:
		case I40E_PHY_TYPE_40GBASE_AOC:
			ifmr->ifm_active |= IFM_40G_XLPPI;
			break;
		/* Unknown to driver */
		default:
			ifmr->ifm_active |= IFM_UNKNOWN;
			break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}

static int
ixl_if_media_change(if_ctx_t ctx)
{
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixl_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n");
	return (ENODEV);
}

static int
ixl_if_promisc_set(if_ctx_t ctx, int flags)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct ifnet	*ifp = iflib_get_ifp(ctx);
	struct i40e_hw	*hw = vsi->hw;
	int		err;
	bool		uni = FALSE, multi = FALSE;

	if (flags & IFF_PROMISC)
		uni = multi = TRUE;
	else if (flags & IFF_ALLMULTI ||
		if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR)
		multi = TRUE;

	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
	    vsi->seid, uni, NULL, true);
	if (err)
		return (err);
	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
	    vsi->seid, multi, NULL);
	return (err);
}
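
/*
 * This is driven by the usual interface flags; e.g.
 * `ifconfig ixl0 promisc` sets IFF_PROMISC and lands here with both
 * unicast and multicast promiscuous mode enabled on the VSI.  Note
 * that exhausting the multicast filter slots (MAX_MULTICAST_ADDR)
 * also degrades the VSI to multicast promiscuous mode.
 */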

static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	if (qid != 0)
		return;

	ixl_update_stats_counters(pf);
}

static void
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	++vsi->num_vlans;
	ixl_add_filter(vsi, hw->mac.addr, vtag);
}

static void
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw	*hw = vsi->hw;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	--vsi->num_vlans;
	ixl_del_filter(vsi, hw->mac.addr, vtag);
}
static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are impossible in full-duplex 10G/40G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

#ifdef PCI_IOV
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);

	ixl_handle_vflr(pf);
}
#endif

static int
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
	struct ixl_pf		*pf = iflib_get_softc(ctx);

	if (pf->read_i2c_byte == NULL)
		return (EINVAL);

	for (int i = 0; i < req->len; i++)
		if (pf->read_i2c_byte(pf, req->offset + i,
		    req->dev_addr, &req->data[i]))
			return (EIO);
	return (0);
}
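
/*
 * This backs the SIOCGI2C ioctl, which ifconfig(8) uses to dump SFP
 * module EEPROM contents (e.g. `ifconfig -v ixl0` on a port with an
 * optical module plugged in).  read_i2c_byte points at one of the
 * access methods selected via the hw.ixl.i2c_access_method tunable.
 */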

static int
ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ifdrv *ifd = (struct ifdrv *)data;
	int error = 0;

	/*
	 * The iflib_if_ioctl forwards SIOCxDRVSPEC and SIOCGPRIVATE_0 without
	 * performing privilege checks. It is important that this function
	 * perform the necessary checks for commands which should only be
	 * executed by privileged threads.
	 */

	switch(command) {
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		/* NVM update command */
		if (ifd->ifd_cmd == I40E_NVM_ACCESS) {
			error = priv_check(curthread, PRIV_DRIVER);
			if (error)
				break;
			error = ixl_handle_nvmupd_cmd(pf, ifd);
		} else {
			error = EINVAL;
		}
		break;
	default:
		error = EOPNOTSUPP;
	}

	return (error);
}
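
/*
 * In practice this path is exercised by NVM/firmware update tools
 * (such as Intel's nvmupdate utility), which are assumed to drive the
 * update through SIOCSDRVSPEC with the I40E_NVM_ACCESS command; the
 * priv_check(9) call above keeps that ability restricted to
 * privileged threads.
 */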

/* ixl_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning false for every event.
 *
 * @returns true if iflib needs to reinit the interface, false otherwise
 */
static bool
ixl_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
	default:
		return (false);
	}
}

static int
ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused)
{
	struct ixl_vsi *vsi = arg;

	if (ifma->ifma_addr->sa_family != AF_LINK)
		return (0);
	ixl_add_mc_filter(vsi,
	    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
	return (1);
}

/*
 * Sanity check and save off tunable values.
 */
static void
ixl_save_pf_tunables(struct ixl_pf *pf)
{
	device_t dev = pf->dev;

	/* Save tunable information */
#ifdef IXL_DEBUG_FC
	pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
#endif
#ifdef IXL_DEBUG
	pf->recovery_mode = ixl_debug_recovery_mode;
#endif
	pf->dbg_mask = ixl_core_debug_mask;
	pf->hw.debug_mask = ixl_shared_debug_mask;
	pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
	pf->enable_vf_loopback = !!(ixl_enable_vf_loopback);
#if 0
	pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
	pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
#endif

	if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0)
		pf->i2c_access_method = 0;
	else
		pf->i2c_access_method = ixl_i2c_access_method;

	if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid tx_itr value of %d set!\n",
		    ixl_tx_itr);
		device_printf(dev, "tx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_4K);
		pf->tx_itr = IXL_ITR_4K;
	} else
		pf->tx_itr = ixl_tx_itr;

	if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
		device_printf(dev, "Invalid rx_itr value of %d set!\n",
		    ixl_rx_itr);
		device_printf(dev, "rx_itr must be between %d and %d, "
		    "inclusive\n",
		    0, IXL_MAX_ITR);
		device_printf(dev, "Using default value of %d instead\n",
		    IXL_ITR_8K);
		pf->rx_itr = IXL_ITR_8K;
	} else
		pf->rx_itr = ixl_rx_itr;
}
