1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4 */
5
6#include "opt_rss.h"
7
8#include <sys/param.h>
9#include <sys/systm.h>
10#include <sys/kernel.h>
11#include <sys/endian.h>
12#include <sys/sockio.h>
13#include <sys/mbuf.h>
14#include <sys/malloc.h>
15#include <sys/module.h>
16#include <sys/socket.h>
17#include <sys/sysctl.h>
18#include <sys/smp.h>
19#include <vm/vm.h>
20#include <vm/pmap.h>
21
22#include <net/ethernet.h>
23#include <net/if.h>
24#include <net/if_var.h>
25#include <net/if_arp.h>
26#include <net/if_dl.h>
27#include <net/if_types.h>
28#include <net/if_media.h>
29#include <net/if_vlan_var.h>
30#include <net/iflib.h>
31#ifdef RSS
32#include <net/rss_config.h>
33#endif
34
35#include <netinet/in_systm.h>
36#include <netinet/in.h>
37#include <netinet/ip.h>
38#include <netinet/ip6.h>
39#include <netinet6/ip6_var.h>
40#include <netinet/udp.h>
41#include <netinet/tcp.h>
42
43#include <machine/bus.h>
44#include <machine/resource.h>
45#include <sys/bus.h>
46#include <sys/rman.h>
47
48#include <dev/pci/pcireg.h>
49#include <dev/pci/pcivar.h>
50
51#include "ifdi_if.h"
52#include "enic.h"
53
54#include "opt_inet.h"
55#include "opt_inet6.h"
56
/* Root sysctl node for this driver: hw.enic. */
static SYSCTL_NODE(_hw, OID_AUTO, enic, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "ENIC");
59
60static const pci_vendor_info_t enic_vendor_info_array[] =
61{
62	PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET,
63	     DRV_DESCRIPTION),
64		PVID(CISCO_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF,
65		     DRV_DESCRIPTION " VF"),
66	/* required last entry */
67
68		PVID_END
69};
70
71static void *enic_register(device_t);
72static int enic_attach_pre(if_ctx_t);
73static int enic_msix_intr_assign(if_ctx_t, int);
74
75static int enic_attach_post(if_ctx_t);
76static int enic_detach(if_ctx_t);
77
78static int enic_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
79static int enic_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
80static void enic_queues_free(if_ctx_t);
81static int enic_rxq_intr(void *);
82static int enic_event_intr(void *);
83static int enic_err_intr(void *);
84static void enic_stop(if_ctx_t);
85static void enic_init(if_ctx_t);
86static void enic_multi_set(if_ctx_t);
87static int enic_mtu_set(if_ctx_t, uint32_t);
88static void enic_media_status(if_ctx_t, struct ifmediareq *);
89static int enic_media_change(if_ctx_t);
90static int enic_promisc_set(if_ctx_t, int);
91static uint64_t enic_get_counter(if_ctx_t, ift_counter);
92static void enic_update_admin_status(if_ctx_t);
93static void enic_txq_timer(if_ctx_t, uint16_t);
94static int enic_link_is_up(struct enic_softc *);
95static void enic_link_status(struct enic_softc *);
96static void enic_set_lladdr(struct enic_softc *);
97static void enic_setup_txq_sysctl(struct vnic_wq *, int, struct sysctl_ctx_list *,
98    struct sysctl_oid_list *);
99static void enic_setup_rxq_sysctl(struct vnic_rq *, int,  struct sysctl_ctx_list *,
100    struct sysctl_oid_list *);
101static void enic_setup_sysctl(struct enic_softc *);
102static int enic_tx_queue_intr_enable(if_ctx_t, uint16_t);
103static int enic_rx_queue_intr_enable(if_ctx_t, uint16_t);
104static void enic_enable_intr(struct enic_softc *, int);
105static void enic_disable_intr(struct enic_softc *, int);
106static void enic_intr_enable_all(if_ctx_t);
107static void enic_intr_disable_all(if_ctx_t);
108static int enic_dev_open(struct enic *);
109static int enic_dev_init(struct enic *);
110static void *enic_alloc_consistent(void *, size_t, bus_addr_t *,
111    struct iflib_dma_info *, u8 *);
112static void enic_free_consistent(void *, size_t, void *, bus_addr_t,
113    struct iflib_dma_info *);
114static int enic_pci_mapping(struct enic_softc *);
115static void enic_pci_mapping_free(struct enic_softc *);
116static int enic_dev_wait(struct vnic_dev *, int (*) (struct vnic_dev *, int),
117    int (*) (struct vnic_dev *, int *), int arg);
118static int enic_map_bar(struct enic_softc *, struct enic_bar_info *, int, bool);
119static void enic_update_packet_filter(struct enic *enic);
120static bool enic_if_needs_restart(if_ctx_t, enum iflib_restart_event);
121
/*
 * Barrier direction selector (read / write / read-write).  Not referenced
 * in this file; presumably consumed by barrier helpers elsewhere in the
 * driver -- confirm before removing.
 */
typedef enum {
	ENIC_BARRIER_RD,
	ENIC_BARRIER_WR,
	ENIC_BARRIER_RDWR,
} enic_barrier_t;
127
/*
 * newbus device methods: probe/attach/detach and power management are
 * all delegated to the generic iflib handlers; only registration of the
 * shared context template is driver-specific.
 */
static device_method_t enic_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, enic_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};
139
/* newbus driver glue: attach "enic" instances on the pci bus. */
static driver_t enic_driver = {
	"enic", enic_methods, sizeof(struct enic_softc)
};

DRIVER_MODULE(enic, pci, enic_driver, 0, 0);
IFLIB_PNP_INFO(pci, enic, enic_vendor_info_array);
MODULE_VERSION(enic, 2);

/* The driver needs pci, ether, and iflib loaded before it can attach. */
MODULE_DEPEND(enic, pci, 1, 1, 1);
MODULE_DEPEND(enic, ether, 1, 1, 1);
MODULE_DEPEND(enic, iflib, 1, 1, 1);
151
/*
 * iflib driver interface (ifdi) methods: queue memory management,
 * attach/detach stages, interface control, and interrupt management.
 */
static device_method_t enic_iflib_methods[] = {
	DEVMETHOD(ifdi_tx_queues_alloc, enic_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, enic_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, enic_queues_free),

	DEVMETHOD(ifdi_attach_pre, enic_attach_pre),
	DEVMETHOD(ifdi_attach_post, enic_attach_post),
	DEVMETHOD(ifdi_detach, enic_detach),

	DEVMETHOD(ifdi_init, enic_init),
	DEVMETHOD(ifdi_stop, enic_stop),
	DEVMETHOD(ifdi_multi_set, enic_multi_set),
	DEVMETHOD(ifdi_mtu_set, enic_mtu_set),
	DEVMETHOD(ifdi_media_status, enic_media_status),
	DEVMETHOD(ifdi_media_change, enic_media_change),
	DEVMETHOD(ifdi_promisc_set, enic_promisc_set),
	DEVMETHOD(ifdi_get_counter, enic_get_counter),
	DEVMETHOD(ifdi_update_admin_status, enic_update_admin_status),
	DEVMETHOD(ifdi_timer, enic_txq_timer),

	DEVMETHOD(ifdi_tx_queue_intr_enable, enic_tx_queue_intr_enable),
	DEVMETHOD(ifdi_rx_queue_intr_enable, enic_rx_queue_intr_enable),
	DEVMETHOD(ifdi_intr_enable, enic_intr_enable_all),
	DEVMETHOD(ifdi_intr_disable, enic_intr_disable_all),
	DEVMETHOD(ifdi_msix_intr_assign, enic_msix_intr_assign),

	DEVMETHOD(ifdi_needs_restart, enic_if_needs_restart),

	DEVMETHOD_END
};
182
/* iflib-side driver descriptor, referenced from enic_sctx_init below. */
static driver_t enic_iflib_driver = {
	"enic", enic_iflib_methods, sizeof(struct enic_softc)
};

/* TX/RX fast-path routines, defined in the companion txrx source file. */
extern struct if_txrx enic_txrx;
188
/*
 * Shared context template handed to iflib from enic_register().
 * Describes DMA constraints and the queue/ring layout before the
 * adapter is probed.
 */
static struct if_shared_ctx enic_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_q_align = 512,

	.isc_tx_maxsize = ENIC_TX_MAX_PKT_SIZE,
	.isc_tx_maxsegsize = PAGE_SIZE,

	/*
	 * These values are used to configure the busdma tag used for receive
	 * descriptors.  Each receive descriptor only points to one buffer.
	 */
	.isc_rx_maxsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE,	/* One buf per
							 * descriptor */
	.isc_rx_nsegments = 1,	/* One mapping per descriptor */
	.isc_rx_maxsegsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE,
	/*
	 * Three admin vectors are reserved; only the event and err
	 * interrupts are registered in enic_msix_intr_assign() -- the
	 * third slot's consumer is not visible in this file.
	 */
	.isc_admin_intrcnt = 3,
	.isc_vendor_info = enic_vendor_info_array,
	.isc_driver_version = "1",
	.isc_driver = &enic_iflib_driver,
	.isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ,

	/*
	 * Number of receive queues per receive queue set, with associated
	 * descriptor settings for each.  Two rings per RX set: the
	 * completion ring (index 0) and the receive ring (index 1), as
	 * consumed in enic_rx_queues_alloc().
	 */

	.isc_nrxqs = 2,
	.isc_nfl = 1,		/* one free list for each receive command
				 * queue */
	.isc_nrxd_min = {16, 16},
	.isc_nrxd_max = {2048, 2048},
	.isc_nrxd_default = {64, 64},

	/*
	 * Number of transmit queues per transmit queue set, with associated
	 * descriptor settings for each.  Two rings per TX set: the work
	 * ring (index 0) and its completion ring (index 1), as consumed
	 * in enic_tx_queues_alloc().
	 */
	.isc_ntxqs = 2,
	.isc_ntxd_min = {16, 16},
	.isc_ntxd_max = {2048, 2048},
	.isc_ntxd_default = {64, 64},
};
231
/*
 * device_register method: hand iflib the shared context template that
 * describes this driver's DMA and queue layout.
 */
static void *
enic_register(device_t dev)
{
	return (&enic_sctx_init);
}
237
238static int
239enic_attach_pre(if_ctx_t ctx)
240{
241	if_softc_ctx_t	scctx;
242	struct enic_softc *softc;
243	struct vnic_dev *vdev;
244	struct enic *enic;
245	device_t dev;
246
247	int err = -1;
248	int rc = 0;
249	int i;
250	u64 a0 = 0, a1 = 0;
251	int wait = 1000;
252	struct vnic_stats *stats;
253	int ret;
254
255	dev = iflib_get_dev(ctx);
256	softc = iflib_get_softc(ctx);
257	softc->dev = dev;
258	softc->ctx = ctx;
259	softc->sctx = iflib_get_sctx(ctx);
260	softc->scctx = iflib_get_softc_ctx(ctx);
261	softc->ifp = iflib_get_ifp(ctx);
262	softc->media = iflib_get_media(ctx);
263	softc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
264		ENIC_MAX_MULTICAST_ADDRESSES, M_DEVBUF,
265		     M_NOWAIT | M_ZERO);
266	if (softc->mta == NULL)
267		return (ENOMEM);
268	scctx = softc->scctx;
269
270	mtx_init(&softc->enic_lock, "ENIC Lock", NULL, MTX_DEF);
271
272	pci_enable_busmaster(softc->dev);
273	if (enic_pci_mapping(softc))
274		return (ENXIO);
275
276	enic = &softc->enic;
277	enic->softc = softc;
278	vdev = &softc->vdev;
279	vdev->softc = softc;
280	enic->vdev = vdev;
281	vdev->priv = enic;
282
283	ENIC_LOCK(softc);
284	vnic_dev_register(vdev, &softc->mem, 1);
285	enic->vdev = vdev;
286	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
287
288	vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
289	vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
290
291	bcopy((u_int8_t *) & a0, softc->mac_addr, ETHER_ADDR_LEN);
292	iflib_set_mac(ctx, softc->mac_addr);
293
294	vnic_register_cbacks(enic->vdev, enic_alloc_consistent,
295	    enic_free_consistent);
296
297	/*
298	 * Allocate the consistent memory for stats and counters upfront so
299	 * both primary and secondary processes can access them.
300	 */
301	ENIC_UNLOCK(softc);
302	err = vnic_dev_alloc_stats_mem(enic->vdev);
303	ENIC_LOCK(softc);
304	if (err) {
305		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
306		goto err_out_unregister;
307	}
308	vnic_dev_stats_clear(enic->vdev);
309	ret = vnic_dev_stats_dump(enic->vdev, &stats);
310	if (ret) {
311		dev_err(enic, "Error in getting stats\n");
312		goto err_out_unregister;
313	}
314	err = vnic_dev_alloc_counter_mem(enic->vdev);
315	if (err) {
316		dev_err(enic, "Failed to allocate counter memory, aborting\n");
317		goto err_out_unregister;
318	}
319
320	/* Issue device open to get device in known state */
321	err = enic_dev_open(enic);
322	if (err) {
323		dev_err(enic, "vNIC dev open failed, aborting\n");
324		goto err_out_unregister;
325	}
326
327	/* Set ingress vlan rewrite mode before vnic initialization */
328	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
329	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
330						enic->ig_vlan_rewrite_mode);
331	if (err) {
332		dev_err(enic,
333		    "Failed to set ingress vlan rewrite mode, aborting.\n");
334		goto err_out_dev_close;
335	}
336
337	/*
338	 * Issue device init to initialize the vnic-to-switch link. We'll
339	 * start with carrier off and wait for link UP notification later to
340	 * turn on carrier.  We don't need to wait here for the
341	 * vnic-to-switch link initialization to complete; link UP
342	 * notification is the indication that the process is complete.
343	 */
344
345	err = vnic_dev_init(enic->vdev, 0);
346	if (err) {
347		dev_err(enic, "vNIC dev init failed, aborting\n");
348		goto err_out_dev_close;
349	}
350
351	err = enic_dev_init(enic);
352	if (err) {
353		dev_err(enic, "Device initialization failed, aborting\n");
354		goto err_out_dev_close;
355	}
356	ENIC_UNLOCK(softc);
357
358	enic->port_mtu = vnic_dev_mtu(enic->vdev);
359
360	softc->scctx = iflib_get_softc_ctx(ctx);
361	scctx = softc->scctx;
362	scctx->isc_txrx = &enic_txrx;
363	scctx->isc_capabilities = scctx->isc_capenable = 0;
364	scctx->isc_tx_csum_flags = 0;
365	scctx->isc_max_frame_size = enic->config.mtu + ETHER_HDR_LEN + \
366		ETHER_CRC_LEN;
367	scctx->isc_nrxqsets_max = enic->conf_rq_count;
368	scctx->isc_ntxqsets_max = enic->conf_wq_count;
369	scctx->isc_nrxqsets = enic->conf_rq_count;
370	scctx->isc_ntxqsets = enic->conf_wq_count;
371	for (i = 0; i < enic->conf_wq_count; i++) {
372		scctx->isc_ntxd[i] = enic->config.wq_desc_count;
373		scctx->isc_txqsizes[i] = sizeof(struct cq_enet_wq_desc)
374			* scctx->isc_ntxd[i];
375		scctx->isc_ntxd[i + enic->conf_wq_count] =
376		    enic->config.wq_desc_count;
377		scctx->isc_txqsizes[i + enic->conf_wq_count] =
378		    sizeof(struct cq_desc) * scctx->isc_ntxd[i +
379		    enic->conf_wq_count];
380	}
381	for (i = 0; i < enic->conf_rq_count; i++) {
382		scctx->isc_nrxd[i] = enic->config.rq_desc_count;
383		scctx->isc_rxqsizes[i] = sizeof(struct cq_enet_rq_desc) *
384		    scctx->isc_nrxd[i];
385		scctx->isc_nrxd[i + enic->conf_rq_count] =
386		    enic->config.rq_desc_count;
387		scctx->isc_rxqsizes[i + enic->conf_rq_count] = sizeof(struct
388		    cq_desc) * scctx->isc_nrxd[i + enic->conf_rq_count];
389	}
390	scctx->isc_tx_nsegments = 31;
391
392	scctx->isc_vectors = enic->conf_cq_count;
393	scctx->isc_msix_bar = -1;
394
395	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
396	ifmedia_add(softc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
397	ifmedia_add(softc->media, IFM_ETHER | IFM_10_FL, 0, NULL);
398
399	/*
400	 * Allocate the CQ here since TX is called first before RX for now
401	 * assume RX and TX are the same
402	 */
403	if (softc->enic.cq == NULL)
404		softc->enic.cq = malloc(sizeof(struct vnic_cq) *
405		     softc->enic.wq_count + softc->enic.rq_count, M_DEVBUF,
406		     M_NOWAIT | M_ZERO);
407	if (softc->enic.cq == NULL)
408		return (ENOMEM);
409
410	softc->enic.cq->ntxqsets = softc->enic.wq_count + softc->enic.rq_count;
411
412	/*
413	 * Allocate the consistent memory for stats and counters upfront so
414	 * both primary and secondary processes can access them.
415	 */
416	err = vnic_dev_alloc_stats_mem(enic->vdev);
417	if (err) {
418		dev_err(enic, "Failed to allocate cmd memory, aborting\n");
419	}
420
421	return (rc);
422
423err_out_dev_close:
424	vnic_dev_close(enic->vdev);
425err_out_unregister:
426	free(softc->vdev.devcmd, M_DEVBUF);
427	free(softc->enic.intr_queues, M_DEVBUF);
428	free(softc->enic.cq, M_DEVBUF);
429	free(softc->mta, M_DEVBUF);
430	rc = -1;
431	pci_disable_busmaster(softc->dev);
432	enic_pci_mapping_free(softc);
433	mtx_destroy(&softc->enic_lock);
434	return (rc);
435}
436
437static int
438enic_msix_intr_assign(if_ctx_t ctx, int msix)
439{
440	struct enic_softc *softc;
441	struct enic *enic;
442	if_softc_ctx_t scctx;
443
444	int error;
445	int i;
446	char irq_name[16];
447
448	softc = iflib_get_softc(ctx);
449	enic = &softc->enic;
450	scctx = softc->scctx;
451
452	ENIC_LOCK(softc);
453	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);
454	ENIC_UNLOCK(softc);
455
456	enic->intr_queues = malloc(sizeof(*enic->intr_queues) *
457	    enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO);
458	enic->intr = malloc(sizeof(*enic->intr) * msix, M_DEVBUF, M_NOWAIT
459	    | M_ZERO);
460	for (i = 0; i < scctx->isc_nrxqsets; i++) {
461		snprintf(irq_name, sizeof(irq_name), "erxq%d:%d", i,
462		    device_get_unit(softc->dev));
463
464		error = iflib_irq_alloc_generic(ctx,
465		    &enic->intr_queues[i].intr_irq, i + 1, IFLIB_INTR_RX,
466		    enic_rxq_intr, &enic->rq[i], i, irq_name);
467		if (error) {
468			device_printf(iflib_get_dev(ctx),
469			    "Failed to register rxq %d interrupt handler\n", i);
470			return (error);
471		}
472		enic->intr[i].index = i;
473		enic->intr[i].vdev = enic->vdev;
474		ENIC_LOCK(softc);
475		enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev,
476		    RES_TYPE_INTR_CTRL, i);
477		vnic_intr_mask(&enic->intr[i]);
478		ENIC_UNLOCK(softc);
479	}
480
481	for (i = scctx->isc_nrxqsets; i < scctx->isc_nrxqsets + scctx->isc_ntxqsets; i++) {
482		snprintf(irq_name, sizeof(irq_name), "etxq%d:%d", i -
483		    scctx->isc_nrxqsets, device_get_unit(softc->dev));
484
485
486		iflib_softirq_alloc_generic(ctx, &enic->intr_queues[i].intr_irq, IFLIB_INTR_TX, &enic->wq[i - scctx->isc_nrxqsets], i - scctx->isc_nrxqsets, irq_name);
487
488
489		enic->intr[i].index = i;
490		enic->intr[i].vdev = enic->vdev;
491		ENIC_LOCK(softc);
492		enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev,
493		    RES_TYPE_INTR_CTRL, i);
494		vnic_intr_mask(&enic->intr[i]);
495		ENIC_UNLOCK(softc);
496	}
497
498	i = scctx->isc_nrxqsets + scctx->isc_ntxqsets;
499	error = iflib_irq_alloc_generic(ctx, &softc->enic_event_intr_irq,
500		 i + 1, IFLIB_INTR_ADMIN, enic_event_intr, softc, 0, "event");
501	if (error) {
502		device_printf(iflib_get_dev(ctx),
503		    "Failed to register event interrupt handler\n");
504		return (error);
505	}
506
507	enic->intr[i].index = i;
508	enic->intr[i].vdev = enic->vdev;
509	ENIC_LOCK(softc);
510	enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL,
511	    i);
512	vnic_intr_mask(&enic->intr[i]);
513	ENIC_UNLOCK(softc);
514
515	i++;
516	error = iflib_irq_alloc_generic(ctx, &softc->enic_err_intr_irq,
517		   i + 1, IFLIB_INTR_ADMIN, enic_err_intr, softc, 0, "err");
518	if (error) {
519		device_printf(iflib_get_dev(ctx),
520		    "Failed to register event interrupt handler\n");
521		return (error);
522	}
523	enic->intr[i].index = i;
524	enic->intr[i].vdev = enic->vdev;
525	ENIC_LOCK(softc);
526	enic->intr[i].ctrl = vnic_dev_get_res(enic->vdev, RES_TYPE_INTR_CTRL,
527	    i);
528	vnic_intr_mask(&enic->intr[i]);
529	ENIC_UNLOCK(softc);
530
531	enic->intr_count = msix;
532
533	return (0);
534}
535
536static void
537enic_free_irqs(struct enic_softc *softc)
538{
539	if_softc_ctx_t	scctx;
540
541	struct enic    *enic;
542	int		i;
543
544	scctx = softc->scctx;
545	enic = &softc->enic;
546
547	for (i = 0; i < scctx->isc_nrxqsets + scctx->isc_ntxqsets; i++) {
548		iflib_irq_free(softc->ctx, &enic->intr_queues[i].intr_irq);
549	}
550
551	iflib_irq_free(softc->ctx, &softc->enic_event_intr_irq);
552	iflib_irq_free(softc->ctx, &softc->enic_err_intr_irq);
553	free(enic->intr_queues, M_DEVBUF);
554	free(enic->intr, M_DEVBUF);
555}
556
557static int
558enic_attach_post(if_ctx_t ctx)
559{
560	struct enic *enic;
561	struct enic_softc *softc;
562	int error = 0;
563
564	softc = iflib_get_softc(ctx);
565	enic = &softc->enic;
566
567	enic_setup_sysctl(softc);
568
569	enic_init_vnic_resources(enic);
570	enic_setup_finish(enic);
571
572	ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
573	ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
574
575	return (error);
576}
577
578static int
579enic_detach(if_ctx_t ctx)
580{
581	struct enic_softc *softc;
582	struct enic *enic;
583
584	softc = iflib_get_softc(ctx);
585	enic = &softc->enic;
586
587	vnic_dev_notify_unset(enic->vdev);
588
589	enic_free_irqs(softc);
590
591	ENIC_LOCK(softc);
592	vnic_dev_close(enic->vdev);
593	free(softc->vdev.devcmd, M_DEVBUF);
594	pci_disable_busmaster(softc->dev);
595	enic_pci_mapping_free(softc);
596	ENIC_UNLOCK(softc);
597
598	return 0;
599}
600
/*
 * iflib callback: bind driver state for `ntxqsets` transmit queue sets.
 * iflib has already DMA-allocated the rings; `vaddrs`/`paddrs` hold
 * `ntxqs` (= 2) entries per set: index 0 is the work ring, index 1 its
 * completion ring.  The CQ array itself was allocated in
 * enic_attach_pre().
 */
static int
enic_tx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
		     int ntxqs, int ntxqsets)
{
	struct enic_softc *softc;
	int q;

	softc = iflib_get_softc(ctx);
	/* Allocate the array of transmit queues */
	softc->enic.wq = malloc(sizeof(struct vnic_wq) *
				ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (softc->enic.wq == NULL)
		return (ENOMEM);

	/* Initialize driver state for each transmit queue */

	/*
	 * Allocate queue state that is shared with the device.  This check
	 * and call is performed in both enic_tx_queues_alloc() and
	 * enic_rx_queues_alloc() so that we don't have to care which order
	 * iflib invokes those routines in.
	 */

	/* Record descriptor ring vaddrs and paddrs */
	ENIC_LOCK(softc);
	for (q = 0; q < ntxqsets; q++) {
		struct vnic_wq *wq;
		struct vnic_cq *cq;
		unsigned int	cq_wq;

		wq = &softc->enic.wq[q];
		cq_wq = enic_cq_wq(&softc->enic, q);
		cq = &softc->enic.cq[cq_wq];

		/* Transmit work queue (disabled until enic_init()) */
		wq->vdev = softc->enic.vdev;
		wq->index = q;
		wq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_WQ,
		    wq->index);
		vnic_wq_disable(wq);

		/*
		 * NOTE(review): isc_ntxd is indexed by queue-set here,
		 * mirroring how enic_attach_pre() fills it -- confirm this
		 * matches iflib's per-ring indexing convention.
		 */
		wq->ring.desc_size = sizeof(struct wq_enet_desc);
		wq->ring.desc_count = softc->scctx->isc_ntxd[q];
		wq->ring.desc_avail = wq->ring.desc_count - 1;
		wq->ring.last_count = wq->ring.desc_count;
		wq->head_idx = 0;
		wq->tail_idx = 0;

		wq->ring.size = wq->ring.desc_count * wq->ring.desc_size;
		wq->ring.descs = vaddrs[q * ntxqs + 0];
		wq->ring.base_addr = paddrs[q * ntxqs + 0];

		/* Completion queue paired with this WQ via enic_cq_wq() */
		cq->vdev = softc->enic.vdev;
		cq->index = cq_wq;
		cq->ctrl = vnic_dev_get_res(softc->enic.vdev,
					    RES_TYPE_CQ, cq->index);
		cq->ring.desc_size = sizeof(struct cq_enet_wq_desc);
		cq->ring.desc_count = softc->scctx->isc_ntxd[q];
		cq->ring.desc_avail = cq->ring.desc_count - 1;

		cq->ring.size = cq->ring.desc_count * cq->ring.desc_size;
		cq->ring.descs = vaddrs[q * ntxqs + 1];
		cq->ring.base_addr = paddrs[q * ntxqs + 1];

	}

	ENIC_UNLOCK(softc);

	return (0);
}
672
673
674
/*
 * iflib callback: bind driver state for `nrxqsets` receive queue sets.
 * `vaddrs`/`paddrs` hold `nrxqs` (= 2) entries per set: index 0 is the
 * completion ring, index 1 the receive ring.  The CQ array itself was
 * allocated in enic_attach_pre().
 */
static int
enic_rx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
		     int nrxqs, int nrxqsets)
{
	struct enic_softc *softc;
	int q;

	softc = iflib_get_softc(ctx);
	/* Allocate the array of receive queues */
	softc->enic.rq = malloc(sizeof(struct vnic_rq) * nrxqsets, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (softc->enic.rq == NULL)
		return (ENOMEM);

	/* Initialize driver state for each receive queue */

	/*
	 * Allocate queue state that is shared with the device.  This check
	 * and call is performed in both enic_tx_queues_alloc() and
	 * enic_rx_queues_alloc() so that we don't have to care which order
	 * iflib invokes those routines in.
	 */

	/* Record descriptor ring vaddrs and paddrs */
	ENIC_LOCK(softc);
	for (q = 0; q < nrxqsets; q++) {
		struct vnic_rq *rq;
		struct vnic_cq *cq;
		unsigned int	cq_rq;

		rq = &softc->enic.rq[q];
		cq_rq = enic_cq_rq(&softc->enic, q);
		cq = &softc->enic.cq[cq_rq];

		/* Completion queue paired with this RQ via enic_cq_rq() */
		cq->vdev = softc->enic.vdev;
		cq->index = cq_rq;
		cq->ctrl = vnic_dev_get_res(softc->enic.vdev, RES_TYPE_CQ,
		    cq->index);
		/*
		 * NOTE(review): the RX completion ring is sized with the
		 * *WQ* completion descriptor type; presumably the two CQ
		 * descriptor layouts are the same size -- confirm.
		 */
		cq->ring.desc_size = sizeof(struct cq_enet_wq_desc);
		cq->ring.desc_count = softc->scctx->isc_nrxd[1];
		cq->ring.desc_avail = cq->ring.desc_count - 1;

		cq->ring.size = cq->ring.desc_count * cq->ring.desc_size;
		cq->ring.descs = vaddrs[q * nrxqs + 0];
		cq->ring.base_addr = paddrs[q * nrxqs + 0];

		/* Receive queue (disabled until enic_init()) */
		rq->vdev = softc->enic.vdev;

		rq->index = q;
		rq->ctrl = vnic_dev_get_res(softc->enic.vdev,
					    RES_TYPE_RQ, rq->index);
		vnic_rq_disable(rq);

		rq->ring.desc_size = sizeof(struct rq_enet_desc);
		rq->ring.desc_count = softc->scctx->isc_nrxd[0];
		rq->ring.desc_avail = rq->ring.desc_count - 1;

		rq->ring.size = rq->ring.desc_count * rq->ring.desc_size;
		rq->ring.descs = vaddrs[q * nrxqs + 1];
		rq->ring.base_addr = paddrs[q * nrxqs + 1];
		/* First buffer post happens on the first refill. */
		rq->need_initial_post = true;
	}

	ENIC_UNLOCK(softc);

	return (0);
}
744
745static void
746enic_queues_free(if_ctx_t ctx)
747{
748	struct enic_softc *softc;
749	softc = iflib_get_softc(ctx);
750
751	free(softc->enic.rq, M_DEVBUF);
752	free(softc->enic.wq, M_DEVBUF);
753	free(softc->enic.cq, M_DEVBUF);
754}
755
756static int
757enic_rxq_intr(void *rxq)
758{
759	struct vnic_rq *rq;
760	if_t ifp;
761
762	rq = (struct vnic_rq *)rxq;
763	ifp = iflib_get_ifp(rq->vdev->softc->ctx);
764	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
765		return (FILTER_HANDLED);
766
767	return (FILTER_SCHEDULE_THREAD);
768}
769
770static int
771enic_event_intr(void *vsc)
772{
773	struct enic_softc *softc;
774	struct enic    *enic;
775	uint32_t mtu;
776
777	softc = vsc;
778	enic = &softc->enic;
779
780	mtu = vnic_dev_mtu(enic->vdev);
781	if (mtu && mtu != enic->port_mtu) {
782		enic->port_mtu = mtu;
783	}
784
785	enic_link_status(softc);
786
787	return (FILTER_HANDLED);
788}
789
790static int
791enic_err_intr(void *vsc)
792{
793	struct enic_softc *softc;
794
795	softc = vsc;
796
797	enic_stop(softc->ctx);
798	enic_init(softc->ctx);
799
800	return (FILTER_HANDLED);
801}
802
803static void
804enic_stop(if_ctx_t ctx)
805{
806	struct enic_softc *softc;
807	struct enic    *enic;
808	if_softc_ctx_t	scctx;
809	unsigned int	index;
810
811	softc = iflib_get_softc(ctx);
812	scctx = softc->scctx;
813	enic = &softc->enic;
814
815	if (softc->stopped)
816		return;
817	softc->link_active = 0;
818	softc->stopped = 1;
819
820	for (index = 0; index < scctx->isc_ntxqsets; index++) {
821		enic_stop_wq(enic, index);
822		vnic_wq_clean(&enic->wq[index]);
823		vnic_cq_clean(&enic->cq[enic_cq_rq(enic, index)]);
824	}
825
826	for (index = 0; index < scctx->isc_nrxqsets; index++) {
827		vnic_rq_clean(&enic->rq[index]);
828		vnic_cq_clean(&enic->cq[enic_cq_wq(enic, index)]);
829	}
830
831	for (index = 0; index < scctx->isc_vectors; index++) {
832		vnic_intr_clean(&enic->intr[index]);
833	}
834}
835
836static void
837enic_init(if_ctx_t ctx)
838{
839	struct enic_softc *softc;
840	struct enic *enic;
841	if_softc_ctx_t scctx;
842	unsigned int index;
843
844	softc = iflib_get_softc(ctx);
845	scctx = softc->scctx;
846	enic = &softc->enic;
847
848	for (index = 0; index < scctx->isc_ntxqsets; index++)
849		enic_prep_wq_for_simple_tx(&softc->enic, index);
850
851	for (index = 0; index < scctx->isc_ntxqsets; index++)
852		enic_start_wq(enic, index);
853
854	for (index = 0; index < scctx->isc_nrxqsets; index++)
855		enic_start_rq(enic, index);
856
857	/* Use the current MAC address. */
858	bcopy(if_getlladdr(softc->ifp), softc->lladdr, ETHER_ADDR_LEN);
859	enic_set_lladdr(softc);
860
861	ENIC_LOCK(softc);
862	vnic_dev_enable_wait(enic->vdev);
863	ENIC_UNLOCK(softc);
864
865	enic_link_status(softc);
866}
867
868static void
869enic_del_mcast(struct enic_softc *softc) {
870	struct enic *enic;
871	int i;
872
873	enic = &softc->enic;
874	for (i=0; i < softc->mc_count; i++) {
875		vnic_dev_del_addr(enic->vdev, &softc->mta[i * ETHER_ADDR_LEN]);
876	}
877	softc->multicast = 0;
878	softc->mc_count = 0;
879}
880
881static void
882enic_add_mcast(struct enic_softc *softc) {
883	struct enic *enic;
884	int i;
885
886	enic = &softc->enic;
887	for (i=0; i < softc->mc_count; i++) {
888		vnic_dev_add_addr(enic->vdev, &softc->mta[i * ETHER_ADDR_LEN]);
889	}
890	softc->multicast = 1;
891}
892
893static u_int
894enic_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
895{
896	uint8_t *mta = arg;
897
898	if (idx == ENIC_MAX_MULTICAST_ADDRESSES)
899		return (0);
900
901	bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
902	return (1);
903}
904
905static void
906enic_multi_set(if_ctx_t ctx)
907{
908	if_t ifp;
909	struct enic_softc *softc;
910	u_int count;
911
912	softc = iflib_get_softc(ctx);
913	ifp = iflib_get_ifp(ctx);
914
915	ENIC_LOCK(softc);
916	enic_del_mcast(softc);
917	count = if_foreach_llmaddr(ifp, enic_copy_maddr, softc->mta);
918	softc->mc_count = count;
919	enic_add_mcast(softc);
920	ENIC_UNLOCK(softc);
921
922	if (if_getflags(ifp) & IFF_PROMISC) {
923		softc->promisc = 1;
924	} else {
925		softc->promisc = 0;
926	}
927	if (if_getflags(ifp) & IFF_ALLMULTI) {
928		softc->allmulti = 1;
929	} else {
930		softc->allmulti = 0;
931	}
932	enic_update_packet_filter(&softc->enic);
933}
934
935static int
936enic_mtu_set(if_ctx_t ctx, uint32_t mtu)
937{
938	struct enic_softc *softc;
939	struct enic *enic;
940	if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
941
942	softc = iflib_get_softc(ctx);
943	enic = &softc->enic;
944
945	if (mtu > enic->port_mtu){
946		return (EINVAL);
947	}
948
949	enic->config.mtu = mtu;
950	scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
951
952	return (0);
953}
954
955static void
956enic_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
957{
958	struct enic_softc *softc;
959	struct ifmedia_entry *next;
960	uint32_t speed;
961	uint64_t target_baudrate;
962
963	softc = iflib_get_softc(ctx);
964
965	ifmr->ifm_status = IFM_AVALID;
966	ifmr->ifm_active = IFM_ETHER;
967
968	if (enic_link_is_up(softc) != 0) {
969		ENIC_LOCK(softc);
970		speed = vnic_dev_port_speed(&softc->vdev);
971		ENIC_UNLOCK(softc);
972		target_baudrate = 1000ull * speed;
973		LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) {
974			if (ifmedia_baudrate(next->ifm_media) == target_baudrate) {
975				ifmr->ifm_active |= next->ifm_media;
976			}
977		}
978
979		ifmr->ifm_status |= IFM_ACTIVE;
980		ifmr->ifm_active |= IFM_AUTO;
981	} else
982		ifmr->ifm_active |= IFM_NONE;
983}
984
985static int
986enic_media_change(if_ctx_t ctx)
987{
988	return (ENODEV);
989}
990
991static int
992enic_promisc_set(if_ctx_t ctx, int flags)
993{
994	if_t ifp;
995	struct enic_softc *softc;
996
997	softc = iflib_get_softc(ctx);
998	ifp = iflib_get_ifp(ctx);
999
1000	if (if_getflags(ifp) & IFF_PROMISC) {
1001		softc->promisc = 1;
1002	} else {
1003		softc->promisc = 0;
1004	}
1005	if (if_getflags(ifp) & IFF_ALLMULTI) {
1006		softc->allmulti = 1;
1007	} else {
1008		softc->allmulti = 0;
1009	}
1010	enic_update_packet_filter(&softc->enic);
1011
1012	return (0);
1013}
1014
1015static uint64_t
1016enic_get_counter(if_ctx_t ctx, ift_counter cnt) {
1017	if_t ifp = iflib_get_ifp(ctx);
1018
1019	if (cnt < IFCOUNTERS)
1020		return if_get_counter_default(ifp, cnt);
1021
1022	return (0);
1023}
1024
1025static void
1026enic_update_admin_status(if_ctx_t ctx)
1027{
1028	struct enic_softc *softc;
1029
1030	softc = iflib_get_softc(ctx);
1031
1032	enic_link_status(softc);
1033}
1034
1035static void
1036enic_txq_timer(if_ctx_t ctx, uint16_t qid)
1037{
1038
1039	struct enic_softc *softc;
1040	struct enic *enic;
1041	struct vnic_stats *stats;
1042	int ret;
1043
1044	softc = iflib_get_softc(ctx);
1045	enic = &softc->enic;
1046
1047	ENIC_LOCK(softc);
1048	ret = vnic_dev_stats_dump(enic->vdev, &stats);
1049	ENIC_UNLOCK(softc);
1050	if (ret) {
1051		dev_err(enic, "Error in getting stats\n");
1052	}
1053}
1054
1055static int
1056enic_link_is_up(struct enic_softc *softc)
1057{
1058	return (vnic_dev_link_status(&softc->vdev) == 1);
1059}
1060
1061static void
1062enic_link_status(struct enic_softc *softc)
1063{
1064	if_ctx_t ctx;
1065	uint64_t speed;
1066	int link;
1067
1068	ctx = softc->ctx;
1069	link = enic_link_is_up(softc);
1070	speed = IF_Gbps(10);
1071
1072	ENIC_LOCK(softc);
1073	speed = vnic_dev_port_speed(&softc->vdev);
1074	ENIC_UNLOCK(softc);
1075
1076	if (link != 0 && softc->link_active == 0) {
1077		softc->link_active = 1;
1078		iflib_link_state_change(ctx, LINK_STATE_UP, speed);
1079	} else if (link == 0 && softc->link_active != 0) {
1080		softc->link_active = 0;
1081		iflib_link_state_change(ctx, LINK_STATE_DOWN, speed);
1082	}
1083}
1084
1085static void
1086enic_set_lladdr(struct enic_softc *softc)
1087{
1088	struct enic *enic;
1089	enic = &softc->enic;
1090
1091	ENIC_LOCK(softc);
1092	vnic_dev_add_addr(enic->vdev, softc->lladdr);
1093	ENIC_UNLOCK(softc);
1094}
1095
1096
1097static void
1098enic_setup_txq_sysctl(struct vnic_wq *wq, int i, struct sysctl_ctx_list *ctx,
1099    struct sysctl_oid_list *child)
1100{
1101	struct sysctl_oid *txsnode;
1102	struct sysctl_oid_list *txslist;
1103	struct vnic_stats *stats = wq[i].vdev->stats;
1104
1105	txsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats",
1106	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics");
1107	txslist = SYSCTL_CHILDREN(txsnode);
1108
1109	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_frames_ok", CTLFLAG_RD,
1110	   &stats->tx.tx_frames_ok, "TX Frames OK");
1111	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_frames_ok", CTLFLAG_RD,
1112	   &stats->tx.tx_unicast_frames_ok, "TX unicast frames OK");
1113	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_frames_ok", CTLFLAG_RD,
1114	    &stats->tx.tx_multicast_frames_ok, "TX multicast framse OK");
1115	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_frames_ok", CTLFLAG_RD,
1116	    &stats->tx.tx_broadcast_frames_ok, "TX Broadcast frames OK");
1117	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_bytes_ok", CTLFLAG_RD,
1118	    &stats->tx.tx_bytes_ok, "TX bytes OK ");
1119	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_unicast_bytes_ok", CTLFLAG_RD,
1120	    &stats->tx.tx_unicast_bytes_ok, "TX unicast bytes OK");
1121	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_multicast_bytes_ok", CTLFLAG_RD,
1122	    &stats->tx.tx_multicast_bytes_ok, "TX multicast bytes OK");
1123	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_broadcast_bytes_ok", CTLFLAG_RD,
1124	    &stats->tx.tx_broadcast_bytes_ok, "TX broadcast bytes OK");
1125	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_drops", CTLFLAG_RD,
1126	    &stats->tx.tx_drops, "TX drops");
1127	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_errors", CTLFLAG_RD,
1128	    &stats->tx.tx_errors, "TX errors");
1129	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tx_tso", CTLFLAG_RD,
1130	    &stats->tx.tx_tso, "TX TSO");
1131}
1132
1133static void
1134enic_setup_rxq_sysctl(struct vnic_rq *rq, int i, struct sysctl_ctx_list *ctx,
1135    struct sysctl_oid_list *child)
1136{
1137	struct sysctl_oid *rxsnode;
1138	struct sysctl_oid_list *rxslist;
1139	struct vnic_stats *stats = rq[i].vdev->stats;
1140
1141	rxsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats",
1142	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics");
1143	rxslist = SYSCTL_CHILDREN(rxsnode);
1144
1145	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_ok", CTLFLAG_RD,
1146	    &stats->rx.rx_frames_ok, "RX Frames OK");
1147	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_total", CTLFLAG_RD,
1148	    &stats->rx.rx_frames_total, "RX frames total");
1149	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_frames_ok", CTLFLAG_RD,
1150	    &stats->rx.rx_unicast_frames_ok, "RX unicast frames ok");
1151	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_frames_ok", CTLFLAG_RD,
1152	    &stats->rx.rx_multicast_frames_ok, "RX multicast Frames ok");
1153	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_frames_ok", CTLFLAG_RD,
1154	    &stats->rx.rx_broadcast_frames_ok, "RX broadcast frames ok");
1155	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_bytes_ok", CTLFLAG_RD,
1156	    &stats->rx.rx_bytes_ok, "RX bytes ok");
1157	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_unicast_bytes_ok", CTLFLAG_RD,
1158	    &stats->rx.rx_unicast_bytes_ok, "RX unicast bytes ok");
1159	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_multicast_bytes_ok", CTLFLAG_RD,
1160	    &stats->rx.rx_multicast_bytes_ok, "RX multicast bytes ok");
1161	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_broadcast_bytes_ok", CTLFLAG_RD,
1162	    &stats->rx.rx_broadcast_bytes_ok, "RX broadcast bytes ok");
1163	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_drop", CTLFLAG_RD,
1164	    &stats->rx.rx_drop, "RX drop");
1165	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_errors", CTLFLAG_RD,
1166	    &stats->rx.rx_errors, "RX errors");
1167	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_rss", CTLFLAG_RD,
1168	    &stats->rx.rx_rss, "RX rss");
1169	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
1170	    &stats->rx.rx_crc_errors, "RX crc errors");
1171	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_64", CTLFLAG_RD,
1172	    &stats->rx.rx_frames_64, "RX frames 64");
1173	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_127", CTLFLAG_RD,
1174	    &stats->rx.rx_frames_127, "RX frames 127");
1175	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_255", CTLFLAG_RD,
1176	    &stats->rx.rx_frames_255, "RX frames 255");
1177	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_511", CTLFLAG_RD,
1178	    &stats->rx.rx_frames_511, "RX frames 511");
1179	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1023", CTLFLAG_RD,
1180	    &stats->rx.rx_frames_1023, "RX frames 1023");
1181	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_1518", CTLFLAG_RD,
1182	    &stats->rx.rx_frames_1518, "RX frames 1518");
1183	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "rx_frames_to_max", CTLFLAG_RD,
1184	    &stats->rx.rx_frames_to_max, "RX frames to max");
1185}
1186
1187static void
1188enic_setup_queue_sysctl(struct enic_softc *softc, struct sysctl_ctx_list *ctx,
1189    struct sysctl_oid_list *child)
1190{
1191	enic_setup_txq_sysctl(softc->enic.wq, 0, ctx, child);
1192	enic_setup_rxq_sysctl(softc->enic.rq, 0, ctx, child);
1193}
1194
1195static void
1196enic_setup_sysctl(struct enic_softc *softc)
1197{
1198	device_t dev;
1199	struct sysctl_ctx_list *ctx;
1200	struct sysctl_oid *tree;
1201	struct sysctl_oid_list *child;
1202
1203	dev = softc->dev;
1204	ctx = device_get_sysctl_ctx(dev);
1205	tree = device_get_sysctl_tree(dev);
1206	child = SYSCTL_CHILDREN(tree);
1207
1208	enic_setup_queue_sysctl(softc, ctx, child);
1209}
1210
1211static void
1212enic_enable_intr(struct enic_softc *softc, int irq)
1213{
1214	struct enic *enic = &softc->enic;
1215
1216	vnic_intr_unmask(&enic->intr[irq]);
1217	vnic_intr_return_all_credits(&enic->intr[irq]);
1218}
1219
1220static void
1221enic_disable_intr(struct enic_softc *softc, int irq)
1222{
1223	struct enic *enic = &softc->enic;
1224
1225	vnic_intr_mask(&enic->intr[irq]);
1226	vnic_intr_masked(&enic->intr[irq]);	/* flush write */
1227}
1228
1229static int
1230enic_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1231{
1232	struct enic_softc *softc;
1233	if_softc_ctx_t scctx;
1234
1235	softc = iflib_get_softc(ctx);
1236	scctx = softc->scctx;
1237
1238	enic_enable_intr(softc, qid + scctx->isc_nrxqsets);
1239
1240	return 0;
1241}
1242
1243static int
1244enic_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
1245{
1246	struct enic_softc *softc;
1247
1248	softc = iflib_get_softc(ctx);
1249	enic_enable_intr(softc, qid);
1250
1251	return 0;
1252}
1253
1254static void
1255enic_intr_enable_all(if_ctx_t ctx)
1256{
1257	struct enic_softc *softc;
1258	if_softc_ctx_t scctx;
1259	int i;
1260
1261	softc = iflib_get_softc(ctx);
1262	scctx = softc->scctx;
1263
1264	for (i = 0; i < scctx->isc_vectors; i++) {
1265		enic_enable_intr(softc, i);
1266	}
1267}
1268
1269static void
1270enic_intr_disable_all(if_ctx_t ctx)
1271{
1272	struct enic_softc *softc;
1273	if_softc_ctx_t scctx;
1274	int i;
1275
1276	softc = iflib_get_softc(ctx);
1277	scctx = softc->scctx;
1278	/*
1279	 * iflib may invoke this routine before enic_attach_post() has run,
1280	 * which is before the top level shared data area is initialized and
1281	 * the device made aware of it.
1282	 */
1283
1284	for (i = 0; i < scctx->isc_vectors; i++) {
1285		enic_disable_intr(softc, i);
1286	}
1287}
1288
1289static int
1290enic_dev_open(struct enic *enic)
1291{
1292	int err;
1293	int flags = CMD_OPENF_IG_DESCCACHE;
1294
1295	err = enic_dev_wait(enic->vdev, vnic_dev_open,
1296			    vnic_dev_open_done, flags);
1297	if (err)
1298		dev_err(enic_get_dev(enic),
1299			"vNIC device open failed, err %d\n", err);
1300
1301	return err;
1302}
1303
1304static int
1305enic_dev_init(struct enic *enic)
1306{
1307	int err;
1308
1309	vnic_dev_intr_coal_timer_info_default(enic->vdev);
1310
1311	/*
1312	 * Get vNIC configuration
1313	 */
1314	err = enic_get_vnic_config(enic);
1315	if (err) {
1316		dev_err(dev, "Get vNIC configuration failed, aborting\n");
1317		return err;
1318	}
1319
1320	/* Get available resource counts */
1321	enic_get_res_counts(enic);
1322
1323	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
1324	enic->intr_queues = malloc(sizeof(*enic->intr_queues) *
1325	    enic->conf_intr_count, M_DEVBUF, M_NOWAIT | M_ZERO);
1326
1327	vnic_dev_set_reset_flag(enic->vdev, 0);
1328	enic->max_flow_counter = -1;
1329
1330	/* set up link status checking */
1331	vnic_dev_notify_set(enic->vdev, -1);	/* No Intr for notify */
1332
1333	enic->overlay_offload = false;
1334	if (enic->disable_overlay && enic->vxlan) {
1335		/*
1336		 * Explicitly disable overlay offload as the setting is
1337		 * sticky, and resetting vNIC does not disable it.
1338		 */
1339		if (vnic_dev_overlay_offload_ctrl(enic->vdev,
1340		    OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_DISABLE)) {
1341			dev_err(enic, "failed to disable overlay offload\n");
1342		} else {
1343			dev_info(enic, "Overlay offload is disabled\n");
1344		}
1345	}
1346	if (!enic->disable_overlay && enic->vxlan &&
1347	/* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
1348	    vnic_dev_overlay_offload_ctrl(enic->vdev,
1349	    OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_ENABLE) == 0) {
1350		enic->overlay_offload = true;
1351		enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
1352		dev_info(enic, "Overlay offload is enabled\n");
1353		/*
1354		 * Reset the vxlan port to the default, as the NIC firmware
1355		 * does not reset it automatically and keeps the old setting.
1356		 */
1357		if (vnic_dev_overlay_offload_cfg(enic->vdev,
1358		   OVERLAY_CFG_VXLAN_PORT_UPDATE, ENIC_DEFAULT_VXLAN_PORT)) {
1359			dev_err(enic, "failed to update vxlan port\n");
1360			return -EINVAL;
1361		}
1362	}
1363	return 0;
1364}
1365
1366static void    *
1367enic_alloc_consistent(void *priv, size_t size, bus_addr_t * dma_handle,
1368    struct iflib_dma_info *res, u8 * name)
1369{
1370	void	       *vaddr;
1371	*dma_handle = 0;
1372	struct enic    *enic = (struct enic *)priv;
1373	int		rz;
1374
1375	rz = iflib_dma_alloc(enic->softc->ctx, size, res, BUS_DMA_NOWAIT);
1376	if (rz) {
1377		pr_err("%s : Failed to allocate memory requested for %s\n",
1378		    __func__, name);
1379		return NULL;
1380	}
1381
1382	vaddr = res->idi_vaddr;
1383	*dma_handle = res->idi_paddr;
1384
1385	return vaddr;
1386}
1387
1388static void
1389enic_free_consistent(void *priv, size_t size, void *vaddr,
1390    bus_addr_t dma_handle, struct iflib_dma_info *res)
1391{
1392	iflib_dma_free(res);
1393}
1394
1395static int
1396enic_pci_mapping(struct enic_softc *softc)
1397{
1398	int rc;
1399
1400	rc = enic_map_bar(softc, &softc->mem, 0, true);
1401	if (rc)
1402		return rc;
1403
1404	rc = enic_map_bar(softc, &softc->io, 2, false);
1405
1406	return rc;
1407}
1408
1409static void
1410enic_pci_mapping_free(struct enic_softc *softc)
1411{
1412	if (softc->mem.res != NULL)
1413		bus_release_resource(softc->dev, SYS_RES_MEMORY,
1414				     softc->mem.rid, softc->mem.res);
1415	softc->mem.res = NULL;
1416
1417	if (softc->io.res != NULL)
1418		bus_release_resource(softc->dev, SYS_RES_MEMORY,
1419				     softc->io.rid, softc->io.res);
1420	softc->io.res = NULL;
1421}
1422
/*
 * Kick off an asynchronous device command via 'start' and poll
 * 'finished' once a millisecond, for at most two seconds, until it
 * reports completion.  Returns 0 on completion, the callback's error if
 * either callback fails, or -ETIMEDOUT.
 */
static int
enic_dev_wait(struct vnic_dev *vdev, int (*start) (struct vnic_dev *, int),
    int (*finished) (struct vnic_dev *, int *), int arg)
{
	int done, err, tries;

	err = start(vdev, arg);
	if (err != 0)
		return (err);

	/* Wait for func to complete...2 seconds max */
	for (tries = 0; tries < 2000; tries++) {
		err = finished(vdev, &done);
		if (err != 0)
			return (err);
		if (done != 0)
			return (0);
		usleep(1000);
	}

	return (-ETIMEDOUT);
}
1446
1447static int
1448enic_map_bar(struct enic_softc *softc, struct enic_bar_info *bar, int bar_num,
1449    bool shareable)
1450{
1451	uint32_t flag;
1452
1453	if (bar->res != NULL) {
1454		device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
1455		return EDOOFUS;
1456	}
1457
1458	bar->rid = PCIR_BAR(bar_num);
1459	flag = RF_ACTIVE;
1460	if (shareable)
1461		flag |= RF_SHAREABLE;
1462
1463	if ((bar->res = bus_alloc_resource_any(softc->dev,
1464	   SYS_RES_MEMORY, &bar->rid, flag)) == NULL) {
1465		device_printf(softc->dev,
1466			      "PCI BAR%d mapping failure\n", bar_num);
1467		return (ENXIO);
1468	}
1469	bar->tag = rman_get_bustag(bar->res);
1470	bar->handle = rman_get_bushandle(bar->res);
1471	bar->size = rman_get_size(bar->res);
1472
1473	return 0;
1474}
1475
1476void
1477enic_init_vnic_resources(struct enic *enic)
1478{
1479	unsigned int error_interrupt_enable = 1;
1480	unsigned int error_interrupt_offset = 0;
1481	unsigned int rxq_interrupt_enable = 0;
1482	unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
1483	unsigned int txq_interrupt_enable = 0;
1484	unsigned int txq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
1485	unsigned int index = 0;
1486	unsigned int cq_idx;
1487	if_softc_ctx_t scctx;
1488
1489	scctx = enic->softc->scctx;
1490
1491
1492	rxq_interrupt_enable = 1;
1493	txq_interrupt_enable = 1;
1494
1495	rxq_interrupt_offset = 0;
1496	txq_interrupt_offset = enic->intr_count - 2;
1497	txq_interrupt_offset = 1;
1498
1499	for (index = 0; index < enic->intr_count; index++) {
1500		vnic_intr_alloc(enic->vdev, &enic->intr[index], index);
1501	}
1502
1503	for (index = 0; index < scctx->isc_nrxqsets; index++) {
1504		cq_idx = enic_cq_rq(enic, index);
1505
1506		vnic_rq_clean(&enic->rq[index]);
1507		vnic_rq_init(&enic->rq[index], cq_idx, error_interrupt_enable,
1508		    error_interrupt_offset);
1509
1510		vnic_cq_clean(&enic->cq[cq_idx]);
1511		vnic_cq_init(&enic->cq[cq_idx],
1512		    0 /* flow_control_enable */ ,
1513		    1 /* color_enable */ ,
1514		    0 /* cq_head */ ,
1515		    0 /* cq_tail */ ,
1516		    1 /* cq_tail_color */ ,
1517		    rxq_interrupt_enable,
1518		    1 /* cq_entry_enable */ ,
1519		    0 /* cq_message_enable */ ,
1520		    rxq_interrupt_offset,
1521		    0 /* cq_message_addr */ );
1522		if (rxq_interrupt_enable)
1523			rxq_interrupt_offset++;
1524	}
1525
1526	for (index = 0; index < scctx->isc_ntxqsets; index++) {
1527		cq_idx = enic_cq_wq(enic, index);
1528		vnic_wq_clean(&enic->wq[index]);
1529		vnic_wq_init(&enic->wq[index], cq_idx, error_interrupt_enable,
1530		    error_interrupt_offset);
1531		/* Compute unsupported ol flags for enic_prep_pkts() */
1532		enic->wq[index].tx_offload_notsup_mask = 0;
1533
1534		vnic_cq_clean(&enic->cq[cq_idx]);
1535		vnic_cq_init(&enic->cq[cq_idx],
1536		   0 /* flow_control_enable */ ,
1537		   1 /* color_enable */ ,
1538		   0 /* cq_head */ ,
1539		   0 /* cq_tail */ ,
1540		   1 /* cq_tail_color */ ,
1541		   txq_interrupt_enable,
1542		   1,
1543		   0,
1544		   txq_interrupt_offset,
1545		   0 /* (u64)enic->wq[index].cqmsg_rz->iova */ );
1546
1547	}
1548
1549	for (index = 0; index < enic->intr_count; index++) {
1550		vnic_intr_init(&enic->intr[index], 125,
1551		    enic->config.intr_timer_type, /* mask_on_assertion */ 1);
1552	}
1553}
1554
1555static void
1556enic_update_packet_filter(struct enic *enic)
1557{
1558	struct enic_softc *softc = enic->softc;
1559
1560	ENIC_LOCK(softc);
1561	vnic_dev_packet_filter(enic->vdev,
1562	    softc->directed,
1563	    softc->multicast,
1564	    softc->broadcast,
1565	    softc->promisc,
1566	    softc->allmulti);
1567	ENIC_UNLOCK(softc);
1568}
1569
1570static bool
1571enic_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
1572{
1573	switch (event) {
1574	case IFLIB_RESTART_VLAN_CONFIG:
1575	default:
1576		return (false);
1577	}
1578}
1579
1580int
1581enic_setup_finish(struct enic *enic)
1582{
1583	struct enic_softc *softc = enic->softc;
1584
1585	/* Default conf */
1586	softc->directed = 1;
1587	softc->multicast = 0;
1588	softc->broadcast = 1;
1589	softc->promisc = 0;
1590	softc->allmulti = 1;
1591	enic_update_packet_filter(enic);
1592
1593	return 0;
1594}
1595