/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/


#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include "ixgbe.h"
#include "ifdi_if.h"

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

/************************************************************************
 * Driver version
 ************************************************************************/
static const char ixv_driver_version[] = "2.0.1-k";

/************************************************************************
 * PCI Device ID Table
 *
 *   Used by probe to select the devices to load on.
 *   The last field of each entry is the device description string.
 *   The table must be terminated with a PVID_END sentinel (all zeros).
 *
 *   { Vendor ID, Device ID, SubVendor ID, SubDevice ID, Description String }
 ************************************************************************/
static const pci_vendor_info_t ixv_vendor_info_array[] =
{
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) X520 82599 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) X540 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) X550 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) X552 Virtual Function"),
	PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) X553 Virtual Function"),
	/* required last entry */
	PVID_END
};

/************************************************************************
 * Function prototypes
 ************************************************************************/
static void     *ixv_register(device_t);
static int      ixv_if_attach_pre(if_ctx_t);
static int      ixv_if_attach_post(if_ctx_t);
static int      ixv_if_detach(if_ctx_t);

static int      ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
static int      ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static int      ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
static void     ixv_if_queues_free(if_ctx_t);
static void     ixv_identify_hardware(if_ctx_t);
static void     ixv_init_device_features(struct ixgbe_softc *);
static int      ixv_allocate_pci_resources(if_ctx_t);
static void     ixv_free_pci_resources(if_ctx_t);
static int      ixv_setup_interface(if_ctx_t);
static void     ixv_if_media_status(if_ctx_t, struct ifmediareq *);
static int      ixv_if_media_change(if_ctx_t);
static void     ixv_if_update_admin_status(if_ctx_t);
static int      ixv_if_msix_intr_assign(if_ctx_t, int);

static int      ixv_if_mtu_set(if_ctx_t, uint32_t);
static void     ixv_if_init(if_ctx_t);
static void     ixv_if_local_timer(if_ctx_t, uint16_t);
static void     ixv_if_stop(if_ctx_t);
static int      ixv_negotiate_api(struct ixgbe_softc *);

static void     ixv_initialize_transmit_units(if_ctx_t);
static void     ixv_initialize_receive_units(if_ctx_t);
static void     ixv_initialize_rss_mapping(struct ixgbe_softc *);

static void     ixv_setup_vlan_support(if_ctx_t);
static void     ixv_configure_ivars(struct ixgbe_softc *);
static void     ixv_if_enable_intr(if_ctx_t);
static void     ixv_if_disable_intr(if_ctx_t);
static void     ixv_if_multi_set(if_ctx_t);

static void     ixv_if_register_vlan(if_ctx_t, u16);
static void     ixv_if_unregister_vlan(if_ctx_t, u16);

static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter);
static bool     ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event);

static void     ixv_save_stats(struct ixgbe_softc *);
static void     ixv_init_stats(struct ixgbe_softc *);
static void     ixv_update_stats(struct ixgbe_softc *);
static void     ixv_add_stats_sysctls(struct ixgbe_softc *);

static int      ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
static void     ixv_set_ivar(struct ixgbe_softc *, u8, u8, s8);

static u8       *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);

/* The MSI-X Interrupt handlers */
static int      ixv_msix_que(void *);
static int      ixv_msix_mbx(void *);

/************************************************************************
 * FreeBSD Device Interface Entry Points
 ************************************************************************/
static device_method_t ixv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t ixv_driver = {
	"ixv", ixv_methods, sizeof(struct ixgbe_softc),
};

DRIVER_MODULE(ixv, pci, ixv_driver, 0, 0);
IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array);
MODULE_DEPEND(ixv, iflib, 1, 1, 1);
MODULE_DEPEND(ixv, pci, 1, 1, 1);
MODULE_DEPEND(ixv, ether, 1, 1, 1);
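/* Each (1, 1, 1) triple is the dependency's (min, preferred, max) version. */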

static device_method_t ixv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixv_if_detach),
	DEVMETHOD(ifdi_init, ixv_if_init),
	DEVMETHOD(ifdi_stop, ixv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr),
	DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set),
	DEVMETHOD(ifdi_media_status, ixv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixv_if_media_change),
	DEVMETHOD(ifdi_timer, ixv_if_local_timer),
	DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan),
	DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan),
	DEVMETHOD(ifdi_get_counter, ixv_if_get_counter),
	DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart),
	DEVMETHOD_END
};

static driver_t ixv_if_driver = {
	"ixv_if", ixv_if_methods, sizeof(struct ixgbe_softc)
};

/*
 * TUNEABLE PARAMETERS:
 */

/* Flow control setting, default to full */
static int ixv_flow_control = ixgbe_fc_full;
TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);

/*
 * Header split: this causes the hardware to DMA
 * the header into a separate mbuf from the payload.
 * It can be a performance win in some workloads, but
 * in others it actually hurts, so it is off by default.
 */
static int ixv_header_split = false;
TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
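/*
 * Note: both knobs above are boot-time tunables fetched from the kernel
 * environment, e.g. via /boot/loader.conf:
 *     hw.ixv.hdr_split="1"
 */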

extern struct if_txrx ixgbe_txrx;

static struct if_shared_ctx ixv_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = PAGE_SIZE, /* max(DBA_ALIGN, PAGE_SIZE) */
	.isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header),
	.isc_tso_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = MJUM16BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM16BYTES,
	.isc_nfl = 1,
	.isc_ntxqs = 1,
	.isc_nrxqs = 1,
	.isc_admin_intrcnt = 1,
	.isc_vendor_info = ixv_vendor_info_array,
	.isc_driver_version = ixv_driver_version,
	.isc_driver = &ixv_if_driver,
	.isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP,

	.isc_nrxd_min = {MIN_RXD},
	.isc_ntxd_min = {MIN_TXD},
	.isc_nrxd_max = {MAX_RXD},
	.isc_ntxd_max = {MAX_TXD},
	.isc_nrxd_default = {DEFAULT_RXD},
	.isc_ntxd_default = {DEFAULT_TXD},
};
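
/*
 * Note: iflib consumes this template when ixv_register() hands it back:
 * one TX and one RX descriptor ring per queue pair (isc_ntxqs and
 * isc_nrxqs are 1), with ring sizes bounded by the MIN_*XD/MAX_*XD
 * limits above.
 */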

static void *
ixv_register(device_t dev)
{
	return (&ixv_sctx_init);
}

/************************************************************************
 * ixv_if_tx_queues_alloc
 ************************************************************************/
static int
ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int ntxqs, int ntxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que;
	int                i, j, error;

	MPASS(sc->num_tx_queues == ntxqsets);
	MPASS(ntxqs == 1);

	/* Allocate queue structure memory */
	sc->tx_queues =
	    (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->tx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate TX ring memory\n");
		return (ENOMEM);
	}

	for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
		struct tx_ring *txr = &que->txr;

		txr->me = i;
		txr->sc = que->sc = sc;

		/* Allocate report status array */
		if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
		    scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
			error = ENOMEM;
			goto fail;
		}
		for (j = 0; j < scctx->isc_ntxd[0]; j++)
			txr->tx_rsq[j] = QIDX_INVALID;
		/* get the virtual and physical address of the hardware queues */
		txr->tail = IXGBE_VFTDT(txr->me);
		txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
		txr->tx_paddr = paddrs[i*ntxqs];

		txr->bytes = 0;
		txr->total_packets = 0;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
	    sc->num_tx_queues);

	return (0);

 fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_tx_queues_alloc */

/************************************************************************
 * ixv_if_rx_queues_alloc
 ************************************************************************/
static int
ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
    int nrxqs, int nrxqsets)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que;
	int                i, error;

	MPASS(sc->num_rx_queues == nrxqsets);
	MPASS(nrxqs == 1);

	/* Allocate queue structure memory */
	sc->rx_queues =
	    (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
	                                 M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!sc->rx_queues) {
		device_printf(iflib_get_dev(ctx),
		    "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		rxr->me = i;
		rxr->sc = que->sc = sc;

		/* get the virtual and physical address of the hw queues */
		rxr->tail = IXGBE_VFRDT(rxr->me);
		rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
		rxr->rx_paddr = paddrs[i*nrxqs];
		rxr->bytes = 0;
		rxr->que = que;
	}

	device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
	    sc->num_rx_queues);

	return (0);

fail:
	ixv_if_queues_free(ctx);

	return (error);
} /* ixv_if_rx_queues_alloc */

/************************************************************************
 * ixv_if_queues_free
 ************************************************************************/
static void
ixv_if_queues_free(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_tx_queue *que = sc->tx_queues;
	int                i;

	if (que == NULL)
		goto free;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		if (txr->tx_rsq == NULL)
			break;

		free(txr->tx_rsq, M_DEVBUF);
		txr->tx_rsq = NULL;
	}
	if (sc->tx_queues != NULL)
		free(sc->tx_queues, M_DEVBUF);
free:
	if (sc->rx_queues != NULL)
		free(sc->rx_queues, M_DEVBUF);
	sc->tx_queues = NULL;
	sc->rx_queues = NULL;
} /* ixv_if_queues_free */

/************************************************************************
 * ixv_if_attach_pre - Device initialization routine
 *
 *   Called when the driver is being loaded.
 *   Identifies the type of hardware, allocates all resources
 *   and initializes the hardware.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc;
	device_t        dev;
	if_softc_ctx_t  scctx;
	struct ixgbe_hw *hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_attach: begin");

	/* Allocate, clear, and link in our sc structure */
	dev = iflib_get_dev(ctx);
	sc = iflib_get_softc(ctx);
	sc->dev = dev;
	sc->ctx = ctx;
	sc->hw.back = sc;
	scctx = sc->shared = iflib_get_softc_ctx(ctx);
	sc->media = iflib_get_media(ctx);
	hw = &sc->hw;

	/* Do base PCI setup - map BAR0 */
	if (ixv_allocate_pci_resources(ctx)) {
		device_printf(dev, "ixv_allocate_pci_resources() failed!\n");
		error = ENXIO;
		goto err_out;
	}

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, ixv_sysctl_debug, "I", "Debug Info");

	/* Determine hardware revision */
	ixv_identify_hardware(ctx);
	ixv_init_device_features(sc);

	/* Initialize the shared code */
	error = ixgbe_init_ops_vf(hw);
	if (error) {
		device_printf(dev, "ixgbe_init_ops_vf() failed!\n");
		error = EIO;
		goto err_out;
	}

	/* Setup the mailbox */
	ixgbe_init_mbx_params_vf(hw);

	error = hw->mac.ops.reset_hw(hw);
	if (error == IXGBE_ERR_RESET_FAILED)
		device_printf(dev, "...reset_hw() failure: Reset Failed!\n");
	else if (error)
		device_printf(dev, "...reset_hw() failed with error %d\n",
		    error);
	if (error) {
		error = EIO;
		goto err_out;
	}

	error = hw->mac.ops.init_hw(hw);
	if (error) {
		device_printf(dev, "...init_hw() failed with error %d\n",
		    error);
		error = EIO;
		goto err_out;
	}

	/* Negotiate mailbox API version */
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed during attach!\n");
		goto err_out;
	}

	/* Check if VF was disabled by PF */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = true;
	}

	/* If no mac address was assigned, make a random one */
	if (!ixv_check_ether_addr(hw->mac.addr)) {
		ether_gen_addr(iflib_get_ifp(ctx),
		    (struct ether_addr *)hw->mac.addr);
		bcopy(hw->mac.addr, hw->mac.perm_addr,
		    sizeof(hw->mac.perm_addr));
	}

	/* Most of the iflib initialization... */

	iflib_set_mac(ctx, hw->mac.addr);
	switch (sc->hw.mac.type) {
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2;
		break;
	default:
		scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
	}
	scctx->isc_txqsizes[0] =
	    roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
	    sizeof(u32), DBA_ALIGN);
	scctx->isc_rxqsizes[0] =
	    roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
	    DBA_ALIGN);
	/* XXX */
	scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
	    CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
	scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
	scctx->isc_msix_bar = pci_msix_table_bar(dev);
	scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
	scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
	scctx->isc_tx_tso_segsize_max = PAGE_SIZE;

	scctx->isc_txrx = &ixgbe_txrx;

	/*
	 * Tell the upper layer(s) we support everything the PF
	 * driver does except...
	 *   Wake-on-LAN
	 */
	scctx->isc_capabilities = IXGBE_CAPS;
	scctx->isc_capabilities ^= IFCAP_WOL;
	scctx->isc_capenable = scctx->isc_capabilities;

	INIT_DEBUGOUT("ixv_if_attach_pre: end");

	return (0);

err_out:
	ixv_free_pci_resources(ctx);

	return (error);
} /* ixv_if_attach_pre */

static int
ixv_if_attach_post(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	int            error = 0;

	/* Setup OS specific network interface */
	error = ixv_setup_interface(ctx);
	if (error) {
		device_printf(dev, "Interface setup failed: %d\n", error);
		goto end;
	}

	/* Do the stats setup */
	ixv_save_stats(sc);
	ixv_init_stats(sc);
	ixv_add_stats_sysctls(sc);

end:
	return error;
} /* ixv_if_attach_post */

/************************************************************************
 * ixv_if_detach - Device removal routine
 *
 *   Called when the driver is being removed.
 *   Releases the PCI resources that were allocated
 *   for driver operation.
 *
 *   return 0 on success, positive on failure
 ************************************************************************/
static int
ixv_if_detach(if_ctx_t ctx)
{
	INIT_DEBUGOUT("ixv_detach: begin");

	ixv_free_pci_resources(ctx);

	return (0);
} /* ixv_if_detach */

/************************************************************************
 * ixv_if_mtu_set
 ************************************************************************/
static int
ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	int            error = 0;

	IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
	if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
		error = EINVAL;
	} else {
		if_setmtu(ifp, mtu);
		sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
	}

	return error;
} /* ixv_if_mtu_set */

/************************************************************************
 * ixv_if_init - Init entry point
 *
 *   Used in two ways: It is used by the stack as an init entry
 *   point in the network interface structure. It is also used
 *   by the driver as a hw/sw initialization routine to get
 *   to a consistent state.
 ************************************************************************/
static void
ixv_if_init(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	if_t ifp = iflib_get_ifp(ctx);
	device_t        dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	int             error = 0;

	INIT_DEBUGOUT("ixv_if_init: begin");
	hw->adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	/* Get the latest mac address, User can use a LAA */
	bcopy(if_getlladdr(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);

	/* Reset VF and renegotiate mailbox API version */
	hw->mac.ops.reset_hw(hw);
	hw->mac.ops.start_hw(hw);
	error = ixv_negotiate_api(sc);
	if (error) {
		device_printf(dev,
		    "Mailbox API negotiation failed in if_init!\n");
		return;
	}

	ixv_initialize_transmit_units(ctx);

	/* Setup Multicast table */
	ixv_if_multi_set(ctx);

	sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);

	/* Configure RX settings */
	ixv_initialize_receive_units(ctx);

	/* Set up VLAN offload and filter */
	ixv_setup_vlan_support(ctx);

	/* Set up MSI-X routing */
	ixv_configure_ivars(sc);

	/* Set up auto-mask */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);

	/* Set moderation on the Link interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(sc->vector), IXGBE_LINK_ITR);

	/* Stats init */
	ixv_init_stats(sc);

	/* Config/Enable Link */
	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
	if (error) {
		/* PF is not capable of controlling VF state. Enable the link. */
		sc->link_enabled = true;
	} else if (sc->link_enabled == false)
		device_printf(dev, "VF is disabled by PF\n");

	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
	    false);

	/* And now turn on interrupts */
	ixv_if_enable_intr(ctx);

	return;
} /* ixv_if_init */

/************************************************************************
 * ixv_enable_queue
 ************************************************************************/
static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32             queue = 1 << vector;
	u32             mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
} /* ixv_enable_queue */

/************************************************************************
 * ixv_disable_queue
 ************************************************************************/
static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
	struct ixgbe_hw *hw = &sc->hw;
	u64             queue = (u64)(1 << vector);
	u32             mask;

	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
} /* ixv_disable_queue */
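
/*
 * Note: VTEIMS and VTEIMC form a set/clear pair; writing a 1 to a bit
 * in VTEIMS unmasks the corresponding MSI-X vector, while writing a 1
 * to the same bit in VTEIMC masks it.  A queue's bit position matches
 * its MSI-X vector number.
 */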

/************************************************************************
 * ixv_msix_que - MSI-X Queue Interrupt Service routine
 ************************************************************************/
static int
ixv_msix_que(void *arg)
{
	struct ix_rx_queue *que = arg;
	struct ixgbe_softc *sc = que->sc;

	ixv_disable_queue(sc, que->msix);
	++que->irqs;

	return (FILTER_SCHEDULE_THREAD);
} /* ixv_msix_que */

/************************************************************************
 * ixv_msix_mbx
 ************************************************************************/
static int
ixv_msix_mbx(void *arg)
{
	struct ixgbe_softc  *sc = arg;
	struct ixgbe_hw *hw = &sc->hw;
	u32             reg;

	++sc->link_irq;

	/* First get the cause */
	reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	/* Clear interrupt with write */
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);

	/* Link status change */
	if (reg & IXGBE_EICR_LSC)
		iflib_admin_intr_deferred(sc->ctx);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);

	return (FILTER_HANDLED);
} /* ixv_msix_mbx */

/************************************************************************
 * ixv_if_media_status - Media Ioctl callback
 *
 *   Called whenever the user queries the status of
 *   the interface using ifconfig.
 ************************************************************************/
static void
ixv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);

	INIT_DEBUGOUT("ixv_media_status: begin");

	iflib_admin_intr_deferred(ctx);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	switch (sc->link_speed) {
	case IXGBE_LINK_SPEED_1GB_FULL:
		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10GB_FULL:
		ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
		break;
	case IXGBE_LINK_SPEED_10_FULL:
		ifmr->ifm_active |= IFM_10_T | IFM_FDX;
		break;
	}
} /* ixv_if_media_status */

/************************************************************************
 * ixv_if_media_change - Media Ioctl callback
 *
 *   Called when the user changes speed/duplex using
 *   media/mediaopt option with ifconfig.
 ************************************************************************/
static int
ixv_if_media_change(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ifmedia *ifm = iflib_get_media(ctx);

	INIT_DEBUGOUT("ixv_media_change: begin");

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	default:
		device_printf(sc->dev, "Only auto media type\n");
		return (EINVAL);
	}

	return (0);
} /* ixv_if_media_change */

/************************************************************************
 * ixv_negotiate_api
 *
 *   Negotiate the Mailbox API with the PF;
 *   start with the most featured API first.
 ************************************************************************/
static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	int             mbx_api[] = { ixgbe_mbox_api_12,
	                              ixgbe_mbox_api_11,
	                              ixgbe_mbox_api_10,
	                              ixgbe_mbox_api_unknown };
	int             i = 0;

	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
			return (0);
		i++;
	}

	return (EINVAL);
} /* ixv_negotiate_api */
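
/*
 * Note: the mbx_api[] list above is walked newest-first, so the VF
 * settles on the most capable mailbox API version the PF will accept;
 * ixgbe_mbox_api_10 is the baseline version.
 */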

static u_int
ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
{
	bcopy(LLADDR(addr), &((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
	    IXGBE_ETH_LENGTH_OF_ADDRESS);

	return (++cnt);
}
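
/*
 * The callback above packs each link-layer multicast address into the
 * caller's flat buffer, IXGBE_ETH_LENGTH_OF_ADDRESS (6) bytes per
 * entry, and the running count it returns becomes mcnt in
 * ixv_if_multi_set() below.
 */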

/************************************************************************
 * ixv_if_multi_set - Multicast Update
 *
 *   Called whenever multicast address list is updated.
 ************************************************************************/
static void
ixv_if_multi_set(if_ctx_t ctx)
{
	u8       mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u8                 *update_ptr;
	if_t               ifp = iflib_get_ifp(ctx);
	int                mcnt = 0;

	IOCTL_DEBUGOUT("ixv_if_multi_set: begin");

	mcnt = if_foreach_llmaddr(ifp, ixv_if_multi_set_cb, mta);

	update_ptr = mta;

	sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
	    ixv_mc_array_itr, true);
} /* ixv_if_multi_set */

/************************************************************************
 * ixv_mc_array_itr
 *
 *   An iterator function needed by the multicast shared code.
 *   It feeds the shared code routine the addresses in the
 *   mta array built by ixv_if_multi_set() one by one.
 ************************************************************************/
static u8 *
ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
{
	u8 *addr = *update_ptr;
	u8 *newptr;

	*vmdq = 0;

	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*update_ptr = newptr;

	return addr;
} /* ixv_mc_array_itr */

/************************************************************************
 * ixv_if_local_timer - Timer routine
 *
 *   Checks for link status, updates statistics,
 *   and runs the watchdog check.
 ************************************************************************/
static void
ixv_if_local_timer(if_ctx_t ctx, uint16_t qid)
{
	if (qid != 0)
		return;

	/* Fire off the adminq task */
	iflib_admin_intr_deferred(ctx);
} /* ixv_if_local_timer */

/************************************************************************
 * ixv_if_update_admin_status - Update OS on link state
 *
 * Note: Only updates the OS on the cached link state.
 *       The real check of the hardware only happens with
 *       a link interrupt.
 ************************************************************************/
static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	s32            status;

	sc->hw.mac.get_link_status = true;

	status = ixgbe_check_link(&sc->hw, &sc->link_speed,
	    &sc->link_up, false);

	if (status != IXGBE_SUCCESS && sc->hw.adapter_stopped == false) {
		/* Mailbox's Clear To Send status is lost or timeout occurred.
		 * We need reinitialization. */
		if_init(iflib_get_ifp(ctx), ctx);
	}

	if (sc->link_up && sc->link_enabled) {
		if (sc->link_active == false) {
			if (bootverbose)
				device_printf(dev, "Link is up %d Gbps %s\n",
				    ((sc->link_speed == 128) ? 10 : 1),
				    "Full Duplex");
			sc->link_active = true;
			iflib_link_state_change(ctx, LINK_STATE_UP,
			    ixgbe_link_speed_to_baudrate(sc->link_speed));
		}
	} else { /* Link down */
		if (sc->link_active == true) {
			if (bootverbose)
				device_printf(dev, "Link is Down\n");
			iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
			sc->link_active = false;
		}
	}

	/* Stats Update */
	ixv_update_stats(sc);
} /* ixv_if_update_admin_status */

/************************************************************************
 * ixv_if_stop - Stop the hardware
 *
 *   Disables all traffic on the adapter by issuing a
 *   global reset on the MAC and deallocates TX/RX buffers.
 ************************************************************************/
static void
ixv_if_stop(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	INIT_DEBUGOUT("ixv_stop: begin\n");

	ixv_if_disable_intr(ctx);

	hw->mac.ops.reset_hw(hw);
	sc->hw.adapter_stopped = false;
	hw->mac.ops.stop_adapter(hw);

	/* Update the stack */
	sc->link_up = false;
	ixv_if_update_admin_status(ctx);

	/* reprogram the RAR[0] in case user changed it. */
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
} /* ixv_if_stop */

/************************************************************************
 * ixv_identify_hardware - Determine hardware revision.
 ************************************************************************/
static void
ixv_identify_hardware(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	device_t        dev = iflib_get_dev(ctx);
	struct ixgbe_hw *hw = &sc->hw;

	/* Save off the information about this board */
	hw->vendor_id = pci_get_vendor(dev);
	hw->device_id = pci_get_device(dev);
	hw->revision_id = pci_get_revid(dev);
	hw->subsystem_vendor_id = pci_get_subvendor(dev);
	hw->subsystem_device_id = pci_get_subdevice(dev);

	/* A subset of set_mac_type */
	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_VF:
		hw->mac.type = ixgbe_mac_82599_vf;
		break;
	case IXGBE_DEV_ID_X540_VF:
		hw->mac.type = ixgbe_mac_X540_vf;
		break;
	case IXGBE_DEV_ID_X550_VF:
		hw->mac.type = ixgbe_mac_X550_vf;
		break;
	case IXGBE_DEV_ID_X550EM_X_VF:
		hw->mac.type = ixgbe_mac_X550EM_x_vf;
		break;
	case IXGBE_DEV_ID_X550EM_A_VF:
		hw->mac.type = ixgbe_mac_X550EM_a_vf;
		break;
	default:
		device_printf(dev, "unknown mac type\n");
		hw->mac.type = ixgbe_mac_unknown;
		break;
	}
} /* ixv_identify_hardware */

/************************************************************************
 * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers
 ************************************************************************/
static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t           dev = iflib_get_dev(ctx);
	struct ix_rx_queue *rx_que = sc->rx_queues;
	struct ix_tx_queue *tx_que;
	int                error, rid, vector = 0;
	char               buf[16];

	for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
		rid = vector + 1;

		snprintf(buf, sizeof(buf), "rxq%d", i);
		error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
		    IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);

		if (error) {
			device_printf(iflib_get_dev(ctx),
			    "Failed to allocate que int %d err: %d", i, error);
			sc->num_rx_queues = i + 1;
			goto fail;
		}

		rx_que->msix = vector;
	}

	for (int i = 0; i < sc->num_tx_queues; i++) {
		snprintf(buf, sizeof(buf), "txq%d", i);
		tx_que = &sc->tx_queues[i];
		tx_que->msix = i % sc->num_rx_queues;
		iflib_softirq_alloc_generic(ctx,
		    &sc->rx_queues[tx_que->msix].que_irq,
		    IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf);
	}
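
	/*
	 * Note: TX completions are serviced as softirqs on the RX queue
	 * vectors (tx_que->msix = i % num_rx_queues above), so this VF
	 * only needs num_rx_queues + 1 MSI-X vectors: one per queue pair
	 * plus the mailbox/admin vector allocated next.
	 */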
	rid = vector + 1;
	error = iflib_irq_alloc_generic(ctx, &sc->irq, rid,
	    IFLIB_INTR_ADMIN, ixv_msix_mbx, sc, 0, "aq");
	if (error) {
		device_printf(iflib_get_dev(ctx),
		    "Failed to register admin handler");
		return (error);
	}

	sc->vector = vector;
	/*
	 * Due to a broken design QEMU will fail to properly
	 * enable the guest for MSI-X unless the vectors in
	 * the table are all set up, so we must rewrite the
	 * ENABLE in the MSI-X control register again at this
	 * point to cause it to successfully initialize us.
	 */
	if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
		int msix_ctrl;
		pci_find_cap(dev, PCIY_MSIX, &rid);
		rid += PCIR_MSIX_CTRL;
		msix_ctrl = pci_read_config(dev, rid, 2);
		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, rid, msix_ctrl, 2);
	}

	return (0);

fail:
	iflib_irq_free(ctx, &sc->irq);
	rx_que = sc->rx_queues;
	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++)
		iflib_irq_free(ctx, &rx_que->que_irq);

	return (error);
} /* ixv_if_msix_intr_assign */

/************************************************************************
 * ixv_allocate_pci_resources
 ************************************************************************/
static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	device_t       dev = iflib_get_dev(ctx);
	int            rid;

	rid = PCIR_BAR(0);
	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);

	if (!(sc->pci_mem)) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
	sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;

	return (0);
} /* ixv_allocate_pci_resources */

/************************************************************************
 * ixv_free_pci_resources
 ************************************************************************/
static void
ixv_free_pci_resources(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	device_t           dev = iflib_get_dev(ctx);

	/* Release all MSI-X queue resources */
	if (sc->intr_type == IFLIB_INTR_MSIX)
		iflib_irq_free(ctx, &sc->irq);

	if (que != NULL) {
		for (int i = 0; i < sc->num_rx_queues; i++, que++) {
			iflib_irq_free(ctx, &que->que_irq);
		}
	}

	if (sc->pci_mem != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->pci_mem), sc->pci_mem);
} /* ixv_free_pci_resources */

/************************************************************************
 * ixv_setup_interface
 *
 *   Setup networking device structure and register an interface.
 ************************************************************************/
static int
ixv_setup_interface(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t scctx = sc->shared;
	if_t           ifp = iflib_get_ifp(ctx);

	INIT_DEBUGOUT("ixv_setup_interface: begin");

	if_setbaudrate(ifp, IF_Gbps(10));
	if_setsendqlen(ifp, scctx->isc_ntxd[0] - 2);

	sc->max_frame_size = if_getmtu(ifp) + IXGBE_MTU_HDR;
	ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);

	return 0;
} /* ixv_setup_interface */

/************************************************************************
 * ixv_if_get_counter
 ************************************************************************/
static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_t           ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (sc->ipackets);
	case IFCOUNTER_OPACKETS:
		return (sc->opackets);
	case IFCOUNTER_IBYTES:
		return (sc->ibytes);
	case IFCOUNTER_OBYTES:
		return (sc->obytes);
	case IFCOUNTER_IMCASTS:
		return (sc->imcasts);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
} /* ixv_if_get_counter */

/* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized
 * @ctx: iflib context
 * @event: event code to check
 *
 * Defaults to returning true for every event.
 *
 * @returns true if iflib needs to reinit the interface
 */
static bool
ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
{
	switch (event) {
	case IFLIB_RESTART_VLAN_CONFIG:
		/* XXX: This may not need to return true */
	default:
		return (true);
	}
}

/************************************************************************
 * ixv_initialize_transmit_units - Enable transmit unit.
 ************************************************************************/
static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw    *hw = &sc->hw;
	if_softc_ctx_t     scctx = sc->shared;
	struct ix_tx_queue *que = sc->tx_queues;
	int                i;

	for (i = 0; i < sc->num_tx_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		u64            tdba = txr->tx_paddr;
		u32            txctrl, txdctl;
		int            j = txr->me;

		/* Set WTHRESH to 8, burst writeback */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);

		/* Set the HW Tx Head and Tail indices */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDT(j), 0);

		/* Set Tx Tail register */
		txr->tail = IXGBE_VFTDT(j);

		txr->tx_rs_cidx = txr->tx_rs_pidx;
		/* Initialize the last processed descriptor to be the end of
		 * the ring, rather than the start, so that we avoid an
		 * off-by-one error when calculating how many descriptors are
		 * done in the credits_update function.
		 */
		txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1;
		for (int k = 0; k < scctx->isc_ntxd[0]; k++)
			txr->tx_rsq[k] = QIDX_INVALID;

		/* Set Ring parameters */
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
		    (tdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j),
		    scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc));
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);

		/* Now enable */
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	return;
} /* ixv_initialize_transmit_units */

/************************************************************************
 * ixv_initialize_rss_mapping
 ************************************************************************/
static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	u32             reta = 0, mrqc, rss_key[10];
	int             queue_id;
	int             i, j;
	u32             rss_hash_config;

	if (sc->feat_en & IXGBE_FEATURE_RSS) {
		/* Fetch the configured RSS key */
		rss_getkey((uint8_t *)&rss_key);
	} else {
		/* set up random bits */
		arc4rand(&rss_key, sizeof(rss_key), 0);
	}

	/* Now fill out hash function seeds */
	for (i = 0; i < 10; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);

	/* Set up the redirection table */
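	/*
	 * Note: each 32-bit VFRETA register packs four 8-bit entries and
	 * is written once every fourth iteration below; e.g. VFRETA(0)
	 * ends up holding entries 0-3 as
	 * (q3 << 24) | (q2 << 16) | (q1 << 8) | q0.
	 */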
	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == sc->num_rx_queues)
			j = 0;

		if (sc->feat_en & IXGBE_FEATURE_RSS) {
			/*
			 * Fetch the RSS bucket id for the given indirection
			 * entry. Cap it at the number of configured buckets
			 * (which is num_rx_queues.)
			 */
			queue_id = rss_get_indirection_to_bucket(i);
			queue_id = queue_id % sc->num_rx_queues;
		} else
			queue_id = j;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta >>= 8;
		reta |= ((uint32_t)queue_id) << 24;
		if ((i & 3) == 3) {
			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
			reta = 0;
		}
	}

	/* Perform hash on these packet types */
	if (sc->feat_en & IXGBE_FEATURE_RSS)
		rss_hash_config = rss_gethashconfig();
	else {
		/*
		 * Disable UDP - IP fragments aren't currently being handled
		 * and so we end up with a mix of 2-tuple and 4-tuple
		 * traffic.
		 */
		rss_hash_config = RSS_HASHTYPE_RSS_IPV4
		                | RSS_HASHTYPE_RSS_TCP_IPV4
		                | RSS_HASHTYPE_RSS_IPV6
		                | RSS_HASHTYPE_RSS_TCP_IPV6;
	}

	mrqc = IXGBE_MRQC_RSSEN;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
		    __func__);
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
		device_printf(sc->dev,
		    "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
		    __func__);
	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */

/************************************************************************
 * ixv_initialize_receive_units - Setup receive registers and features.
 ************************************************************************/
static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	if_softc_ctx_t     scctx;
	struct ixgbe_hw    *hw = &sc->hw;
	if_t               ifp = iflib_get_ifp(ctx);
	struct ix_rx_queue *que = sc->rx_queues;
	u32                bufsz, psrtype;

	if (if_getmtu(ifp) > ETHERMTU)
		bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	else
		bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
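
	/*
	 * Note: the SRRCTL BSIZEPKT field is in 1 KB units
	 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10), so the values above
	 * select 4 KB or 2 KB receive buffers.
	 */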

	psrtype = IXGBE_PSRTYPE_TCPHDR
	        | IXGBE_PSRTYPE_UDPHDR
	        | IXGBE_PSRTYPE_IPV4HDR
	        | IXGBE_PSRTYPE_IPV6HDR
	        | IXGBE_PSRTYPE_L2HDR;

	if (sc->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

	/* Tell PF our max_frame size */
	if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
		device_printf(sc->dev,
		    "There is a problem with the PF setup.  It is likely the"
		    " receive unit for this VF will not function correctly.\n");
	}
	scctx = sc->shared;

	for (int i = 0; i < sc->num_rx_queues; i++, que++) {
		struct rx_ring *rxr = &que->rxr;
		u64            rdba = rxr->rx_paddr;
		u32            reg, rxdctl;
		int            j = rxr->me;

		/* Disable the queue */
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int k = 0; k < 10; k++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				msec_delay(1);
			else
				break;
		}
		wmb();
		/* Setup the Base and Length of the Rx Descriptor Ring */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
		    (rdba & 0x00000000ffffffffULL));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j),
		    scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc));

		/* Reset the ring indices */
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);

		/* Set up the SRRCTL register */
		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j));
		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
		reg |= bufsz;
		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg);

		/* Capture Rx Tail index */
		rxr->tail = IXGBE_VFRDT(rxr->me);

		/* Do the queue enabling last */
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		for (int l = 0; l < 10; l++) {
			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) &
			    IXGBE_RXDCTL_ENABLE)
				break;
			msec_delay(1);
		}
		wmb();

		/* Set the Tail Pointer */
#ifdef DEV_NETMAP
		/*
		 * In netmap mode, we must preserve the buffers made
		 * available to userspace before the if_init()
		 * (this is true by default on the TX side, because
		 * init makes all buffers available to userspace).
		 *
		 * netmap_reset() and the device specific routines
		 * (e.g. ixgbe_setup_receive_rings()) map these
		 * buffers at the end of the NIC ring, so here we
		 * must set the RDT (tail) register to make sure
		 * they are not overwritten.
		 *
		 * In this driver the NIC ring starts at RDH = 0,
		 * RDT points to the last slot available for reception (?),
		 * so RDT = num_rx_desc - 1 means the whole ring is available.
		 */
		if (if_getcapenable(ifp) & IFCAP_NETMAP) {
			struct netmap_adapter *na = NA(ifp);
			struct netmap_kring *kring = na->rx_rings[j];
			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);

			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t);
		} else
#endif /* DEV_NETMAP */
			IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
			    scctx->isc_nrxd[0] - 1);
	}

	/*
	 * Do not touch RSS and RETA settings for older hardware
	 * as those are shared among PF and all VF.
	 */
	if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
		ixv_initialize_rss_mapping(sc);
} /* ixv_initialize_receive_units */

/************************************************************************
 * ixv_setup_vlan_support
 ************************************************************************/
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
	if_t            ifp = iflib_get_ifp(ctx);
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	u32             ctrl, vid, vfta, retry;

	/*
	 * We get here through if_init, meaning a soft reset;
	 * that has already cleared the VFTA and other state,
	 * so if no VLANs have been registered there is
	 * nothing to do.
	 */
	if (sc->num_vlans == 0)
		return;

	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
		/* Enable the queues */
		for (int i = 0; i < sc->num_rx_queues; i++) {
			ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
			/*
			 * Let Rx path know that it needs to store VLAN tag
			 * as part of extra mbuf info.
			 */
			sc->rx_queues[i].rxr.vtag_strip = true;
		}
	}

	/*
	 * If filtering VLAN tags is disabled,
	 * there is no need to fill VLAN Filter Table Array (VFTA).
	 */
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0)
		return;

	/*
	 * A soft reset zeros out the VFTA, so
	 * we need to repopulate it now.
	 */
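	/*
	 * Note: each 32-bit word of shadow_vfta covers 32 consecutive
	 * VLAN IDs, so bit j of word i corresponds to VLAN (i * 32 + j)
	 * and the IXGBE_VFTA_SIZE words span the full 0-4095 ID space.
	 */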
	for (int i = 0; i < IXGBE_VFTA_SIZE; i++) {
		if (sc->shadow_vfta[i] == 0)
			continue;
		vfta = sc->shadow_vfta[i];
		/*
		 * Reconstruct the VLAN IDs
		 * based on the bits set in each
		 * of the array ints.
		 */
		for (int j = 0; j < 32; j++) {
			retry = 0;
			if ((vfta & (1 << j)) == 0)
				continue;
			vid = (i * 32) + j;
			/* Call the shared code mailbox routine */
			while (hw->mac.ops.set_vfta(hw, vid, 0, true, false)) {
				if (++retry > 5)
					break;
			}
		}
	}
} /* ixv_setup_vlan_support */

/************************************************************************
 * ixv_if_register_vlan
 *
 *   Run via a vlan config EVENT, it enables us to use the
 *   HW Filter table since we can get the vlan id. This just
 *   creates the entry in the soft version of the VFTA, init
 *   will repopulate the real table.
 ************************************************************************/
static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16            index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	sc->shadow_vfta[index] |= (1 << bit);
	++sc->num_vlans;
} /* ixv_if_register_vlan */

/************************************************************************
 * ixv_if_unregister_vlan
 *
 *   Run via a vlan unconfig EVENT, remove our entry
 *   in the soft vfta.
 ************************************************************************/
static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	u16            index, bit;

	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	sc->shadow_vfta[index] &= ~(1 << bit);
	--sc->num_vlans;
} /* ixv_if_unregister_vlan */

/************************************************************************
 * ixv_if_enable_intr
 ************************************************************************/
static void
ixv_if_enable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw = &sc->hw;
	struct ix_rx_queue *que = sc->rx_queues;
	u32             mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);

	for (int i = 0; i < sc->num_rx_queues; i++, que++)
		ixv_enable_queue(sc, que->msix);

	IXGBE_WRITE_FLUSH(hw);
} /* ixv_if_enable_intr */

/************************************************************************
 * ixv_if_disable_intr
 ************************************************************************/
static void
ixv_if_disable_intr(if_ctx_t ctx)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_FLUSH(&sc->hw);
} /* ixv_if_disable_intr */

/************************************************************************
 * ixv_if_rx_queue_intr_enable
 ************************************************************************/
static int
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ix_rx_queue *que = &sc->rx_queues[rxqid];

	ixv_enable_queue(sc, que->rxr.me);

	return (0);
} /* ixv_if_rx_queue_intr_enable */

/************************************************************************
 * ixv_set_ivar
 *
 *   Setup the correct IVAR register for a particular MSI-X interrupt
 *    - entry is the register array entry
 *    - vector is the MSI-X vector for this queue
 *    - type is RX/TX/MISC
 ************************************************************************/
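/*
 * Note: each VTIVAR register covers two queue entries; with the index
 * computed below, entry 0's RX vector lands in bits 7:0 and its TX
 * vector in bits 15:8, while entry 1 uses bits 23:16 and 31:24.  The
 * IXGBE_IVAR_ALLOC_VAL bit marks a mapping as valid.
 */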
1654static void
1655ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
1656{
1657	struct ixgbe_hw *hw = &sc->hw;
1658	u32             ivar, index;
1659
1660	vector |= IXGBE_IVAR_ALLOC_VAL;
1661
1662	if (type == -1) { /* MISC IVAR */
1663		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1664		ivar &= ~0xFF;
1665		ivar |= vector;
1666		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1667	} else {          /* RX/TX IVARS */
1668		index = (16 * (entry & 1)) + (8 * type);
1669		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1670		ivar &= ~(0xFF << index);
1671		ivar |= (vector << index);
1672		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1673	}
1674} /* ixv_set_ivar */

/************************************************************************
 * ixv_configure_ivars
 ************************************************************************/
static void
ixv_configure_ivars(struct ixgbe_softc *sc)
{
	struct ix_rx_queue *que = sc->rx_queues;

	MPASS(sc->num_rx_queues == sc->num_tx_queues);

	for (int i = 0; i < sc->num_rx_queues; i++, que++) {
		/* First the RX queue entry */
		ixv_set_ivar(sc, i, que->msix, 0);
		/* ... and the TX */
		ixv_set_ivar(sc, i, que->msix, 1);
		/* Set an initial value in EITR */
		IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
		    IXGBE_EITR_DEFAULT);
	}

	/* For the mailbox interrupt */
	ixv_set_ivar(sc, 1, sc->vector, -1);
} /* ixv_configure_ivars */
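
/*
 * The net effect (illustrative summary): each queue's RX and TX
 * interrupts are steered to that queue's own MSI-X vector, while the
 * mailbox/link cause is steered through the MISC IVAR to sc->vector,
 * so queue traffic and mailbox events are handled on separate vectors.
 */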

/************************************************************************
 * ixv_save_stats
 *
 *   The VF stats registers are never reset to a truly clean
 *   starting point, so this routine creates an artificial one,
 *   treating the counts present at attach time as ground zero.
 ************************************************************************/
static void
ixv_save_stats(struct ixgbe_softc *sc)
{
	if (sc->stats.vf.vfgprc || sc->stats.vf.vfgptc) {
		sc->stats.vf.saved_reset_vfgprc +=
		    sc->stats.vf.vfgprc - sc->stats.vf.base_vfgprc;
		sc->stats.vf.saved_reset_vfgptc +=
		    sc->stats.vf.vfgptc - sc->stats.vf.base_vfgptc;
		sc->stats.vf.saved_reset_vfgorc +=
		    sc->stats.vf.vfgorc - sc->stats.vf.base_vfgorc;
		sc->stats.vf.saved_reset_vfgotc +=
		    sc->stats.vf.vfgotc - sc->stats.vf.base_vfgotc;
		sc->stats.vf.saved_reset_vfmprc +=
		    sc->stats.vf.vfmprc - sc->stats.vf.base_vfmprc;
	}
} /* ixv_save_stats */
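
/*
 * Worked example of the save/base arithmetic (illustrative values): if
 * base_vfgprc was 1000 when the counters were last initialized and
 * vfgprc has since reached 4500, the 3500 packets received in between
 * are folded into saved_reset_vfgprc before the base is re-established
 * by ixv_init_stats() below.
 */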

/************************************************************************
 * ixv_init_stats
 ************************************************************************/
static void
ixv_init_stats(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;

	sc->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	sc->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	sc->stats.vf.last_vfgorc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);

	sc->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	sc->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	sc->stats.vf.last_vfgotc |=
	    (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);

	sc->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	sc->stats.vf.base_vfgprc = sc->stats.vf.last_vfgprc;
	sc->stats.vf.base_vfgorc = sc->stats.vf.last_vfgorc;
	sc->stats.vf.base_vfgptc = sc->stats.vf.last_vfgptc;
	sc->stats.vf.base_vfgotc = sc->stats.vf.last_vfgotc;
	sc->stats.vf.base_vfmprc = sc->stats.vf.last_vfmprc;
} /* ixv_init_stats */

/*
 * Fold a wrapping 32-bit hardware counter into a 64-bit running count.
 * Wrapped in do { } while (0) so each invocation behaves as a single
 * statement.
 */
#define UPDATE_STAT_32(reg, last, count)                \
do {                                                    \
	u32 current = IXGBE_READ_REG(hw, reg);          \
	if (current < last) /* counter wrapped */       \
		count += 0x100000000LL;                 \
	last = current;                                 \
	count &= 0xFFFFFFFF00000000LL;                  \
	count |= current;                               \
} while (0)

/*
 * Same idea for the 36-bit octet counters, which are split across a
 * 32-bit LSB register and a 4-bit MSB register.
 */
#define UPDATE_STAT_36(lsb, msb, last, count)           \
do {                                                    \
	u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
	u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
	u64 current = ((cur_msb << 32) | cur_lsb);      \
	if (current < last) /* counter wrapped */       \
		count += 0x1000000000LL;                \
	last = current;                                 \
	count &= 0xFFFFFFF000000000LL;                  \
	count |= current;                               \
} while (0)
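
/*
 * Worked example of the wrap handling (illustrative values): if last =
 * 0xFFFFFFF0 and the register now reads 0x00000010, the counter has
 * wrapped, so 0x100000000 is added to the running count before its low
 * 32 bits are replaced with the new reading.  The count therefore
 * advances by 0x20, exactly the increments that occurred across the
 * wrap.
 */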

/************************************************************************
 * ixv_update_stats - Update the board statistics counters.
 ************************************************************************/
void
ixv_update_stats(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	struct ixgbevf_hw_stats *stats = &sc->stats.vf;

	UPDATE_STAT_32(IXGBE_VFGPRC, stats->last_vfgprc, stats->vfgprc);
	UPDATE_STAT_32(IXGBE_VFGPTC, stats->last_vfgptc, stats->vfgptc);
	UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
	    stats->last_vfgorc, stats->vfgorc);
	UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
	    stats->last_vfgotc, stats->vfgotc);
	UPDATE_STAT_32(IXGBE_VFMPRC, stats->last_vfmprc, stats->vfmprc);

	/* Fill out the OS statistics structure */
	IXGBE_SET_IPACKETS(sc, stats->vfgprc);
	IXGBE_SET_OPACKETS(sc, stats->vfgptc);
	IXGBE_SET_IBYTES(sc, stats->vfgorc);
	IXGBE_SET_OBYTES(sc, stats->vfgotc);
	IXGBE_SET_IMCASTS(sc, stats->vfmprc);
} /* ixv_update_stats */

/************************************************************************
 * ixv_add_stats_sysctls - Add statistic sysctls for the VF.
 ************************************************************************/
static void
ixv_add_stats_sysctls(struct ixgbe_softc *sc)
{
	device_t                dev = sc->dev;
	struct ix_tx_queue      *tx_que = sc->tx_queues;
	struct ix_rx_queue      *rx_que = sc->rx_queues;
	struct sysctl_ctx_list  *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid       *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list  *child = SYSCTL_CHILDREN(tree);
	struct ixgbevf_hw_stats *stats = &sc->stats.vf;
	struct sysctl_oid       *stat_node, *queue_node;
	struct sysctl_oid_list  *stat_list, *queue_list;

#define QUEUE_NAME_LEN 32
	char                    namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
	    CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");

	for (int i = 0; i < sc->num_tx_queues; i++, tx_que++) {
		struct tx_ring *txr = &tx_que->txr;

		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
		    CTLFLAG_RD, &(txr->tso_tx), "TSO Packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &(txr->total_packets), "TX Packets");
	}

	for (int i = 0; i < sc->num_rx_queues; i++, rx_que++) {
		struct rx_ring *rxr = &rx_que->rxr;

		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
		    CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &(rxr->rx_packets), "RX packets");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes");
		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded",
		    CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets");
	}

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
	    "VF Statistics (read from HW registers)");
	stat_list = SYSCTL_CHILDREN(stat_node);

	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfgprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
	    CTLFLAG_RD, &stats->vfgorc, "Good Octets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
	    CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted");
} /* ixv_add_stats_sysctls */
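
/*
 * The nodes land under the device's sysctl tree, so they can be read
 * from userland once the driver is attached, e.g. (unit 0 is
 * illustrative):
 *
 *   sysctl dev.ixv.0.queue0.rx_packets
 *   sysctl dev.ixv.0.mac.good_pkts_rcvd
 */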

/************************************************************************
 * ixv_print_debug_info
 *
 *   Called only when the debug sysctl (handled below) is set.
 *   Provides a way to take a look at important statistics
 *   maintained by the driver and hardware.
 ************************************************************************/
static void
ixv_print_debug_info(struct ixgbe_softc *sc)
{
	device_t dev = sc->dev;
	struct ixgbe_hw *hw = &sc->hw;

	device_printf(dev, "Error Byte Count = %u\n",
	    IXGBE_READ_REG(hw, IXGBE_ERRBC));

	device_printf(dev, "MBX IRQ Handled: %lu\n",
	    (unsigned long)sc->link_irq);
} /* ixv_print_debug_info */

/************************************************************************
 * ixv_sysctl_debug
 ************************************************************************/
static int
ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	struct ixgbe_softc *sc;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	/* Writing a 1 triggers the debug dump; other values are ignored */
	if (result == 1) {
		sc = (struct ixgbe_softc *)arg1;
		ixv_print_debug_info(sc);
	}

	return (error);
} /* ixv_sysctl_debug */
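
/*
 * Usage sketch, assuming the handler is registered as a node named
 * "debug" under the device tree (the registration itself happens
 * elsewhere in the driver):
 *
 *   sysctl dev.ixv.0.debug=1
 *
 * Writing 1 dumps the debug info to the console; any other value is
 * accepted but ignored.
 */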

/************************************************************************
 * ixv_init_device_features
 ************************************************************************/
static void
ixv_init_device_features(struct ixgbe_softc *sc)
{
	sc->feat_cap = IXGBE_FEATURE_NETMAP |
	    IXGBE_FEATURE_VF |
	    IXGBE_FEATURE_LEGACY_TX;

	/* Few of the feature flags apply to VFs at the moment. */
	switch (sc->hw.mac.type) {
	case ixgbe_mac_82599_vf:
	case ixgbe_mac_X540_vf:
		break;
	case ixgbe_mac_X550_vf:
	case ixgbe_mac_X550EM_x_vf:
	case ixgbe_mac_X550EM_a_vf:
		sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
		sc->feat_cap |= IXGBE_FEATURE_RSS;
		break;
	default:
		break;
	}

	/* Enabled by default... */
	/* Is a virtual function (VF) */
	if (sc->feat_cap & IXGBE_FEATURE_VF)
		sc->feat_en |= IXGBE_FEATURE_VF;
	/* Netmap */
	if (sc->feat_cap & IXGBE_FEATURE_NETMAP)
		sc->feat_en |= IXGBE_FEATURE_NETMAP;
	/* Receive-Side Scaling (RSS) */
	if (sc->feat_cap & IXGBE_FEATURE_RSS)
		sc->feat_en |= IXGBE_FEATURE_RSS;
	/* Needs advanced context descriptor regardless of offloads req'd */
	if (sc->feat_cap & IXGBE_FEATURE_NEEDS_CTXD)
		sc->feat_en |= IXGBE_FEATURE_NEEDS_CTXD;
} /* ixv_init_device_features */
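
/*
 * Callers elsewhere in the driver test feat_en rather than feat_cap,
 * e.g. (illustrative):
 *
 *	if (sc->feat_en & IXGBE_FEATURE_RSS)
 *		... program the VF RSS key and indirection table ...
 *
 * feat_cap records what the hardware could support; feat_en records
 * what this attach actually enabled.
 */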