1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 *
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: stable/11/sys/dev/ena/ena.c 369337 2021-02-22 20:47:05Z mw $");
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/bus.h>
36#include <sys/endian.h>
37#include <sys/kernel.h>
38#include <sys/kthread.h>
39#include <sys/malloc.h>
40#include <sys/mbuf.h>
41#include <sys/module.h>
42#include <sys/rman.h>
43#include <sys/smp.h>
44#include <sys/socket.h>
45#include <sys/sockio.h>
46#include <sys/sysctl.h>
47#include <sys/taskqueue.h>
48#include <sys/time.h>
49#include <sys/eventhandler.h>
50
51#include <machine/bus.h>
52#include <machine/resource.h>
53#include <machine/in_cksum.h>
54
55#include <net/bpf.h>
56#include <net/ethernet.h>
57#include <net/if.h>
58#include <net/if_var.h>
59#include <net/if_arp.h>
60#include <net/if_dl.h>
61#include <net/if_media.h>
62#include <net/if_types.h>
63#include <net/if_vlan_var.h>
64
65#include <netinet/in_systm.h>
66#include <netinet/in.h>
67#include <netinet/if_ether.h>
68#include <netinet/ip.h>
69#include <netinet/ip6.h>
70#include <netinet/tcp.h>
71#include <netinet/udp.h>
72
73#include <dev/pci/pcivar.h>
74#include <dev/pci/pcireg.h>
75
76#include <vm/vm.h>
77#include <vm/pmap.h>
78
79#include "ena_datapath.h"
80#include "ena.h"
81#include "ena_sysctl.h"
82
83#ifdef DEV_NETMAP
84#include "ena_netmap.h"
85#endif /* DEV_NETMAP */
86
87/*********************************************************
88 *  Function prototypes
89 *********************************************************/
90static int	ena_probe(device_t);
91static void	ena_intr_msix_mgmnt(void *);
92static void	ena_free_pci_resources(struct ena_adapter *);
93static int	ena_change_mtu(if_t, int);
94static inline void ena_alloc_counters(counter_u64_t *, int);
95static inline void ena_free_counters(counter_u64_t *, int);
96static inline void ena_reset_counters(counter_u64_t *, int);
97static void	ena_init_io_rings_common(struct ena_adapter *,
98    struct ena_ring *, uint16_t);
99static void	ena_init_io_rings_basic(struct ena_adapter *);
100static void	ena_init_io_rings_advanced(struct ena_adapter *);
101static void	ena_init_io_rings(struct ena_adapter *);
102static void	ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
103static void	ena_free_all_io_rings_resources(struct ena_adapter *);
104static int	ena_setup_tx_dma_tag(struct ena_adapter *);
105static int	ena_free_tx_dma_tag(struct ena_adapter *);
106static int	ena_setup_rx_dma_tag(struct ena_adapter *);
107static int	ena_free_rx_dma_tag(struct ena_adapter *);
108static void	ena_release_all_tx_dmamap(struct ena_ring *);
109static int	ena_setup_tx_resources(struct ena_adapter *, int);
110static void	ena_free_tx_resources(struct ena_adapter *, int);
111static int	ena_setup_all_tx_resources(struct ena_adapter *);
112static void	ena_free_all_tx_resources(struct ena_adapter *);
113static int	ena_setup_rx_resources(struct ena_adapter *, unsigned int);
114static void	ena_free_rx_resources(struct ena_adapter *, unsigned int);
115static int	ena_setup_all_rx_resources(struct ena_adapter *);
116static void	ena_free_all_rx_resources(struct ena_adapter *);
117static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
118    struct ena_rx_buffer *);
119static void	ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
120    struct ena_rx_buffer *);
121static void	ena_free_rx_bufs(struct ena_adapter *, unsigned int);
122static void	ena_refill_all_rx_bufs(struct ena_adapter *);
123static void	ena_free_all_rx_bufs(struct ena_adapter *);
124static void	ena_free_tx_bufs(struct ena_adapter *, unsigned int);
125static void	ena_free_all_tx_bufs(struct ena_adapter *);
126static void	ena_destroy_all_tx_queues(struct ena_adapter *);
127static void	ena_destroy_all_rx_queues(struct ena_adapter *);
128static void	ena_destroy_all_io_queues(struct ena_adapter *);
129static int	ena_create_io_queues(struct ena_adapter *);
130static int	ena_handle_msix(void *);
131static int	ena_enable_msix(struct ena_adapter *);
132static void	ena_setup_mgmnt_intr(struct ena_adapter *);
133static int	ena_setup_io_intr(struct ena_adapter *);
134static int	ena_request_mgmnt_irq(struct ena_adapter *);
135static int	ena_request_io_irq(struct ena_adapter *);
136static void	ena_free_mgmnt_irq(struct ena_adapter *);
137static void	ena_free_io_irq(struct ena_adapter *);
138static void	ena_free_irqs(struct ena_adapter*);
139static void	ena_disable_msix(struct ena_adapter *);
140static void	ena_unmask_all_io_irqs(struct ena_adapter *);
141static int	ena_rss_configure(struct ena_adapter *);
142static int	ena_up_complete(struct ena_adapter *);
143static uint64_t	ena_get_counter(if_t, ift_counter);
144static int	ena_media_change(if_t);
145static void	ena_media_status(if_t, struct ifmediareq *);
146static void	ena_init(void *);
147static int	ena_ioctl(if_t, u_long, caddr_t);
148static int	ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
149static void	ena_update_host_info(struct ena_admin_host_info *, if_t);
150static void	ena_update_hwassist(struct ena_adapter *);
151static int	ena_setup_ifnet(device_t, struct ena_adapter *,
152    struct ena_com_dev_get_features_ctx *);
153static int	ena_enable_wc(struct resource *);
154static int	ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
155    struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
156static uint32_t	ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
157    struct ena_com_dev_get_features_ctx *);
158static int	ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
159static int	ena_rss_init_default(struct ena_adapter *);
160static void	ena_rss_init_default_deferred(void *);
161static void	ena_config_host_info(struct ena_com_dev *, device_t);
162static int	ena_attach(device_t);
163static int	ena_detach(device_t);
164static int	ena_device_init(struct ena_adapter *, device_t,
165    struct ena_com_dev_get_features_ctx *, int *);
166static int	ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
167static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
168static void	unimplemented_aenq_handler(void *,
169    struct ena_admin_aenq_entry *);
170static int	ena_copy_eni_metrics(struct ena_adapter *);
171static void	ena_timer_service(void *);
172
173static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;
174
175static ena_vendor_info_t ena_vendor_info_array[] = {
176    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0},
177    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0},
178    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0},
179    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0},
180    /* Last entry */
181    { 0, 0, 0 }
182};
183
184/*
185 * Contains pointers to event handlers, e.g. link state chage.
186 */
187static struct ena_aenq_handlers aenq_handlers;
188
189void
190ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
191{
192	if (error != 0)
193		return;
194	*(bus_addr_t *) arg = segs[0].ds_addr;
195}
196
197int
198ena_dma_alloc(device_t dmadev, bus_size_t size,
199    ena_mem_handle_t *dma, int mapflags, bus_size_t alignment)
200{
201	struct ena_adapter* adapter = device_get_softc(dmadev);
202	uint32_t maxsize;
203	uint64_t dma_space_addr;
204	int error;
205
206	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
207
208	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
209	if (unlikely(dma_space_addr == 0))
210		dma_space_addr = BUS_SPACE_MAXADDR;
211
212	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
213	    alignment, 0,     /* alignment, bounds 		*/
214	    dma_space_addr,   /* lowaddr of exclusion window	*/
215	    BUS_SPACE_MAXADDR,/* highaddr of exclusion window	*/
216	    NULL, NULL,	      /* filter, filterarg 		*/
217	    maxsize,	      /* maxsize 			*/
218	    1,		      /* nsegments 			*/
219	    maxsize,	      /* maxsegsize 			*/
220	    BUS_DMA_ALLOCNOW, /* flags 				*/
221	    NULL,	      /* lockfunc 			*/
222	    NULL,	      /* lockarg 			*/
223	    &dma->tag);
224	if (unlikely(error != 0)) {
225		ena_trace(NULL, ENA_ALERT, "bus_dma_tag_create failed: %d\n", error);
226		goto fail_tag;
227	}
228
229	error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr,
230	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
231	if (unlikely(error != 0)) {
232		ena_trace(NULL, ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n",
233		    (uintmax_t)size, error);
234		goto fail_map_create;
235	}
236
237	dma->paddr = 0;
238	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
239	    size, ena_dmamap_callback, &dma->paddr, mapflags);
240	if (unlikely((error != 0) || (dma->paddr == 0))) {
241		ena_trace(NULL, ENA_ALERT, ": bus_dmamap_load failed: %d\n", error);
242		goto fail_map_load;
243	}
244
245	bus_dmamap_sync(dma->tag, dma->map,
246	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
247
248	return (0);
249
250fail_map_load:
251	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
252fail_map_create:
253	bus_dma_tag_destroy(dma->tag);
254fail_tag:
255	dma->tag = NULL;
256	dma->vaddr = NULL;
257	dma->paddr = 0;
258
259	return (error);
260}
261
262/*
263 * This function should generate unique key for the whole driver.
264 * If the key was already genereated in the previous call (for example
265 * for another adapter), then it should be returned instead.
266 */
267void
268ena_rss_key_fill(void *key, size_t size)
269{
270	static bool key_generated;
271	static uint8_t default_key[ENA_HASH_KEY_SIZE];
272
273	KASSERT(size <= ENA_HASH_KEY_SIZE, ("Requested more bytes than ENA RSS key can hold"));
274
275	if (!key_generated) {
276		arc4rand(default_key, ENA_HASH_KEY_SIZE, 0);
277		key_generated = true;
278	}
279
280	memcpy(key, default_key, size);
281}
282
283static void
284ena_free_pci_resources(struct ena_adapter *adapter)
285{
286	device_t pdev = adapter->pdev;
287
288	if (adapter->memory != NULL) {
289		bus_release_resource(pdev, SYS_RES_MEMORY,
290		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
291	}
292
293	if (adapter->registers != NULL) {
294		bus_release_resource(pdev, SYS_RES_MEMORY,
295		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
296	}
297
298	if (adapter->msix != NULL) {
299		bus_release_resource(pdev, SYS_RES_MEMORY,
300		    adapter->msix_rid, adapter->msix);
301	}
302}
303
304static int
305ena_probe(device_t dev)
306{
307	ena_vendor_info_t *ent;
308	char		adapter_name[60];
309	uint16_t	pci_vendor_id = 0;
310	uint16_t	pci_device_id = 0;
311
312	pci_vendor_id = pci_get_vendor(dev);
313	pci_device_id = pci_get_device(dev);
314
315	ent = ena_vendor_info_array;
316	while (ent->vendor_id != 0) {
317		if ((pci_vendor_id == ent->vendor_id) &&
318		    (pci_device_id == ent->device_id)) {
319			ena_trace(NULL, ENA_DBG, "vendor=%x device=%x\n",
320			    pci_vendor_id, pci_device_id);
321
322			sprintf(adapter_name, DEVICE_DESC);
323			device_set_desc_copy(dev, adapter_name);
324			return (BUS_PROBE_DEFAULT);
325		}
326
327		ent++;
328
329	}
330
331	return (ENXIO);
332}
333
334static int
335ena_change_mtu(if_t ifp, int new_mtu)
336{
337	struct ena_adapter *adapter = if_getsoftc(ifp);
338	int rc;
339
340	if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
341		device_printf(adapter->pdev, "Invalid MTU setting. "
342		    "new_mtu: %d max mtu: %d min mtu: %d\n",
343		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
344		return (EINVAL);
345	}
346
347	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
348	if (likely(rc == 0)) {
349		ena_trace(NULL, ENA_DBG, "set MTU to %d\n", new_mtu);
350		if_setmtu(ifp, new_mtu);
351	} else {
352		device_printf(adapter->pdev, "Failed to set MTU to %d\n",
353		    new_mtu);
354	}
355
356	return (rc);
357}
358
359static inline void
360ena_alloc_counters(counter_u64_t *begin, int size)
361{
362	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
363
364	for (; begin < end; ++begin)
365		*begin = counter_u64_alloc(M_WAITOK);
366}
367
368static inline void
369ena_free_counters(counter_u64_t *begin, int size)
370{
371	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
372
373	for (; begin < end; ++begin)
374		counter_u64_free(*begin);
375}
376
377static inline void
378ena_reset_counters(counter_u64_t *begin, int size)
379{
380	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
381
382	for (; begin < end; ++begin)
383		counter_u64_zero(*begin);
384}
385
386static void
387ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
388    uint16_t qid)
389{
390
391	ring->qid = qid;
392	ring->adapter = adapter;
393	ring->ena_dev = adapter->ena_dev;
394	ring->first_interrupt = false;
395	ring->no_interrupt_event_cnt = 0;
396}
397
398static void
399ena_init_io_rings_basic(struct ena_adapter *adapter)
400{
401	struct ena_com_dev *ena_dev;
402	struct ena_ring *txr, *rxr;
403	struct ena_que *que;
404	int i;
405
406	ena_dev = adapter->ena_dev;
407
408	for (i = 0; i < adapter->num_io_queues; i++) {
409		txr = &adapter->tx_ring[i];
410		rxr = &adapter->rx_ring[i];
411
412		/* TX/RX common ring state */
413		ena_init_io_rings_common(adapter, txr, i);
414		ena_init_io_rings_common(adapter, rxr, i);
415
416		/* TX specific ring state */
417		txr->tx_max_header_size = ena_dev->tx_max_header_size;
418		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
419
420		que = &adapter->que[i];
421		que->adapter = adapter;
422		que->id = i;
423		que->tx_ring = txr;
424		que->rx_ring = rxr;
425
426		txr->que = que;
427		rxr->que = que;
428
429		rxr->empty_rx_queue = 0;
430		rxr->rx_mbuf_sz = ena_mbuf_sz;
431	}
432}
433
434static void
435ena_init_io_rings_advanced(struct ena_adapter *adapter)
436{
437	struct ena_ring *txr, *rxr;
438	int i;
439
440	for (i = 0; i < adapter->num_io_queues; i++) {
441		txr = &adapter->tx_ring[i];
442		rxr = &adapter->rx_ring[i];
443
444		/* Allocate a buf ring */
445		txr->buf_ring_size = adapter->buf_ring_size;
446		txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF,
447		    M_WAITOK, &txr->ring_mtx);
448
449		/* Allocate Tx statistics. */
450		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
451		    sizeof(txr->tx_stats));
452
453		/* Allocate Rx statistics. */
454		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
455		    sizeof(rxr->rx_stats));
456
457		/* Initialize locks */
458		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
459		    device_get_nameunit(adapter->pdev), i);
460		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
461		    device_get_nameunit(adapter->pdev), i);
462
463		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
464	}
465}
466
467static void
468ena_init_io_rings(struct ena_adapter *adapter)
469{
470	/*
471	 * IO rings initialization can be divided into the 2 steps:
472	 *   1. Initialize variables and fields with initial values and copy
473	 *      them from adapter/ena_dev (basic)
474	 *   2. Allocate mutex, counters and buf_ring (advanced)
475	 */
476	ena_init_io_rings_basic(adapter);
477	ena_init_io_rings_advanced(adapter);
478}
479
480static void
481ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
482{
483	struct ena_ring *txr = &adapter->tx_ring[qid];
484	struct ena_ring *rxr = &adapter->rx_ring[qid];
485
486	ena_free_counters((counter_u64_t *)&txr->tx_stats,
487	    sizeof(txr->tx_stats));
488	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
489	    sizeof(rxr->rx_stats));
490
491	ENA_RING_MTX_LOCK(txr);
492	drbr_free(txr->br, M_DEVBUF);
493	ENA_RING_MTX_UNLOCK(txr);
494
495	mtx_destroy(&txr->ring_mtx);
496}
497
498static void
499ena_free_all_io_rings_resources(struct ena_adapter *adapter)
500{
501	int i;
502
503	for (i = 0; i < adapter->num_io_queues; i++)
504		ena_free_io_ring_resources(adapter, i);
505
506}
507
508static int
509ena_setup_tx_dma_tag(struct ena_adapter *adapter)
510{
511	int ret;
512
513	/* Create DMA tag for Tx buffers */
514	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
515	    1, 0,				  /* alignment, bounds 	     */
516	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
517	    BUS_SPACE_MAXADDR, 			  /* highaddr of excl window */
518	    NULL, NULL,				  /* filter, filterarg 	     */
519	    ENA_TSO_MAXSIZE,			  /* maxsize 		     */
520	    adapter->max_tx_sgl_size - 1,	  /* nsegments 		     */
521	    ENA_TSO_MAXSIZE,			  /* maxsegsize 	     */
522	    0,					  /* flags 		     */
523	    NULL,				  /* lockfunc 		     */
524	    NULL,				  /* lockfuncarg 	     */
525	    &adapter->tx_buf_tag);
526
527	return (ret);
528}
529
530static int
531ena_free_tx_dma_tag(struct ena_adapter *adapter)
532{
533	int ret;
534
535	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
536
537	if (likely(ret == 0))
538		adapter->tx_buf_tag = NULL;
539
540	return (ret);
541}
542
543static int
544ena_setup_rx_dma_tag(struct ena_adapter *adapter)
545{
546	int ret;
547
548	/* Create DMA tag for Rx buffers*/
549	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent   */
550	    1, 0,				  /* alignment, bounds 	     */
551	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
552	    BUS_SPACE_MAXADDR, 			  /* highaddr of excl window */
553	    NULL, NULL,				  /* filter, filterarg 	     */
554	    ena_mbuf_sz,			  /* maxsize 		     */
555	    adapter->max_rx_sgl_size,		  /* nsegments 		     */
556	    ena_mbuf_sz,			  /* maxsegsize 	     */
557	    0,					  /* flags 		     */
558	    NULL,				  /* lockfunc 		     */
559	    NULL,				  /* lockarg 		     */
560	    &adapter->rx_buf_tag);
561
562	return (ret);
563}
564
565static int
566ena_free_rx_dma_tag(struct ena_adapter *adapter)
567{
568	int ret;
569
570	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
571
572	if (likely(ret == 0))
573		adapter->rx_buf_tag = NULL;
574
575	return (ret);
576}
577
578static void
579ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
580{
581	struct ena_adapter *adapter = tx_ring->adapter;
582	struct ena_tx_buffer *tx_info;
583	bus_dma_tag_t tx_tag = adapter->tx_buf_tag;;
584	int i;
585#ifdef DEV_NETMAP
586	struct ena_netmap_tx_info *nm_info;
587	int j;
588#endif /* DEV_NETMAP */
589
590	for (i = 0; i < tx_ring->ring_size; ++i) {
591		tx_info = &tx_ring->tx_buffer_info[i];
592#ifdef DEV_NETMAP
593		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
594			nm_info = &tx_info->nm_info;
595			for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
596				if (nm_info->map_seg[j] != NULL) {
597					bus_dmamap_destroy(tx_tag,
598					    nm_info->map_seg[j]);
599					nm_info->map_seg[j] = NULL;
600				}
601			}
602		}
603#endif /* DEV_NETMAP */
604		if (tx_info->dmamap != NULL) {
605			bus_dmamap_destroy(tx_tag, tx_info->dmamap);
606			tx_info->dmamap = NULL;
607		}
608	}
609}
610
611/**
612 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
613 * @adapter: network interface device structure
614 * @qid: queue index
615 *
616 * Returns 0 on success, otherwise on failure.
617 **/
618static int
619ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
620{
621	struct ena_que *que = &adapter->que[qid];
622	struct ena_ring *tx_ring = que->tx_ring;
623	int size, i, err;
624#ifdef DEV_NETMAP
625	bus_dmamap_t *map;
626	int j;
627
628	ena_netmap_reset_tx_ring(adapter, qid);
629#endif /* DEV_NETMAP */
630
631	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
632
633	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
634	if (unlikely(tx_ring->tx_buffer_info == NULL))
635		return (ENOMEM);
636
637	size = sizeof(uint16_t) * tx_ring->ring_size;
638	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
639	if (unlikely(tx_ring->free_tx_ids == NULL))
640		goto err_buf_info_free;
641
642	size = tx_ring->tx_max_header_size;
643	tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
644	    M_NOWAIT | M_ZERO);
645	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
646		goto err_tx_ids_free;
647
648	/* Req id stack for TX OOO completions */
649	for (i = 0; i < tx_ring->ring_size; i++)
650		tx_ring->free_tx_ids[i] = i;
651
652	/* Reset TX statistics. */
653	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
654	    sizeof(tx_ring->tx_stats));
655
656	tx_ring->next_to_use = 0;
657	tx_ring->next_to_clean = 0;
658	tx_ring->acum_pkts = 0;
659
660	/* Make sure that drbr is empty */
661	ENA_RING_MTX_LOCK(tx_ring);
662	drbr_flush(adapter->ifp, tx_ring->br);
663	ENA_RING_MTX_UNLOCK(tx_ring);
664
665	/* ... and create the buffer DMA maps */
666	for (i = 0; i < tx_ring->ring_size; i++) {
667		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
668		    &tx_ring->tx_buffer_info[i].dmamap);
669		if (unlikely(err != 0)) {
670			ena_trace(NULL, ENA_ALERT,
671			    "Unable to create Tx DMA map for buffer %d\n",
672			    i);
673			goto err_map_release;
674		}
675
676#ifdef DEV_NETMAP
677		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
678			map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
679			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
680				err = bus_dmamap_create(adapter->tx_buf_tag, 0,
681				    &map[j]);
682				if (unlikely(err != 0)) {
683					ena_trace(NULL, ENA_ALERT, "Unable to create "
684					    "Tx DMA for buffer %d %d\n", i, j);
685					goto err_map_release;
686				}
687			}
688		}
689#endif /* DEV_NETMAP */
690	}
691
692	/* Allocate taskqueues */
693	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
694	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
695	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
696	if (unlikely(tx_ring->enqueue_tq == NULL)) {
697		ena_trace(NULL, ENA_ALERT,
698		    "Unable to create taskqueue for enqueue task\n");
699		i = tx_ring->ring_size;
700		goto err_map_release;
701	}
702
703	tx_ring->running = true;
704
705	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET,
706	    "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu);
707
708	return (0);
709
710err_map_release:
711	ena_release_all_tx_dmamap(tx_ring);
712err_tx_ids_free:
713	free(tx_ring->free_tx_ids, M_DEVBUF);
714	tx_ring->free_tx_ids = NULL;
715err_buf_info_free:
716	free(tx_ring->tx_buffer_info, M_DEVBUF);
717	tx_ring->tx_buffer_info = NULL;
718
719	return (ENOMEM);
720}
721
722/**
723 * ena_free_tx_resources - Free Tx Resources per Queue
724 * @adapter: network interface device structure
725 * @qid: queue index
726 *
727 * Free all transmit software resources
728 **/
729static void
730ena_free_tx_resources(struct ena_adapter *adapter, int qid)
731{
732	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
733#ifdef DEV_NETMAP
734	struct ena_netmap_tx_info *nm_info;
735	int j;
736#endif /* DEV_NETMAP */
737
738	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
739	    NULL))
740		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
741
742	taskqueue_free(tx_ring->enqueue_tq);
743
744	ENA_RING_MTX_LOCK(tx_ring);
745	/* Flush buffer ring, */
746	drbr_flush(adapter->ifp, tx_ring->br);
747
748	/* Free buffer DMA maps, */
749	for (int i = 0; i < tx_ring->ring_size; i++) {
750		bus_dmamap_sync(adapter->tx_buf_tag,
751		    tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
752		bus_dmamap_unload(adapter->tx_buf_tag,
753		    tx_ring->tx_buffer_info[i].dmamap);
754		bus_dmamap_destroy(adapter->tx_buf_tag,
755		    tx_ring->tx_buffer_info[i].dmamap);
756
757#ifdef DEV_NETMAP
758		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
759			nm_info = &tx_ring->tx_buffer_info[i].nm_info;
760			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
761				if (nm_info->socket_buf_idx[j] != 0) {
762					bus_dmamap_sync(adapter->tx_buf_tag,
763					    nm_info->map_seg[j],
764					    BUS_DMASYNC_POSTWRITE);
765					ena_netmap_unload(adapter,
766					    nm_info->map_seg[j]);
767				}
768				bus_dmamap_destroy(adapter->tx_buf_tag,
769				    nm_info->map_seg[j]);
770				nm_info->socket_buf_idx[j] = 0;
771			}
772		}
773#endif /* DEV_NETMAP */
774
775		m_freem(tx_ring->tx_buffer_info[i].mbuf);
776		tx_ring->tx_buffer_info[i].mbuf = NULL;
777	}
778	ENA_RING_MTX_UNLOCK(tx_ring);
779
780	/* And free allocated memory. */
781	free(tx_ring->tx_buffer_info, M_DEVBUF);
782	tx_ring->tx_buffer_info = NULL;
783
784	free(tx_ring->free_tx_ids, M_DEVBUF);
785	tx_ring->free_tx_ids = NULL;
786
787	free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
788	tx_ring->push_buf_intermediate_buf = NULL;
789}
790
791/**
792 * ena_setup_all_tx_resources - allocate all queues Tx resources
793 * @adapter: network interface device structure
794 *
795 * Returns 0 on success, otherwise on failure.
796 **/
797static int
798ena_setup_all_tx_resources(struct ena_adapter *adapter)
799{
800	int i, rc;
801
802	for (i = 0; i < adapter->num_io_queues; i++) {
803		rc = ena_setup_tx_resources(adapter, i);
804		if (rc != 0) {
805			device_printf(adapter->pdev,
806			    "Allocation for Tx Queue %u failed\n", i);
807			goto err_setup_tx;
808		}
809	}
810
811	return (0);
812
813err_setup_tx:
814	/* Rewind the index freeing the rings as we go */
815	while (i--)
816		ena_free_tx_resources(adapter, i);
817	return (rc);
818}
819
820/**
821 * ena_free_all_tx_resources - Free Tx Resources for All Queues
822 * @adapter: network interface device structure
823 *
824 * Free all transmit software resources
825 **/
826static void
827ena_free_all_tx_resources(struct ena_adapter *adapter)
828{
829	int i;
830
831	for (i = 0; i < adapter->num_io_queues; i++)
832		ena_free_tx_resources(adapter, i);
833}
834
835/**
836 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
837 * @adapter: network interface device structure
838 * @qid: queue index
839 *
840 * Returns 0 on success, otherwise on failure.
841 **/
842static int
843ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
844{
845	struct ena_que *que = &adapter->que[qid];
846	struct ena_ring *rx_ring = que->rx_ring;
847	int size, err, i;
848
849	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
850
851#ifdef DEV_NETMAP
852	ena_netmap_reset_rx_ring(adapter, qid);
853	rx_ring->initialized = false;
854#endif /* DEV_NETMAP */
855
856	/*
857	 * Alloc extra element so in rx path
858	 * we can always prefetch rx_info + 1
859	 */
860	size += sizeof(struct ena_rx_buffer);
861
862	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
863
864	size = sizeof(uint16_t) * rx_ring->ring_size;
865	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
866
867	for (i = 0; i < rx_ring->ring_size; i++)
868		rx_ring->free_rx_ids[i] = i;
869
870	/* Reset RX statistics. */
871	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
872	    sizeof(rx_ring->rx_stats));
873
874	rx_ring->next_to_clean = 0;
875	rx_ring->next_to_use = 0;
876
877	/* ... and create the buffer DMA maps */
878	for (i = 0; i < rx_ring->ring_size; i++) {
879		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
880		    &(rx_ring->rx_buffer_info[i].map));
881		if (err != 0) {
882			ena_trace(NULL, ENA_ALERT,
883			    "Unable to create Rx DMA map for buffer %d\n", i);
884			goto err_buf_info_unmap;
885		}
886	}
887
888	/* Create LRO for the ring */
889	if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) {
890		int err = tcp_lro_init(&rx_ring->lro);
891		if (err != 0) {
892			device_printf(adapter->pdev,
893			    "LRO[%d] Initialization failed!\n", qid);
894		} else {
895			ena_trace(NULL, ENA_INFO,
896			    "RX Soft LRO[%d] Initialized\n", qid);
897			rx_ring->lro.ifp = adapter->ifp;
898		}
899	}
900
901	return (0);
902
903err_buf_info_unmap:
904	while (i--) {
905		bus_dmamap_destroy(adapter->rx_buf_tag,
906		    rx_ring->rx_buffer_info[i].map);
907	}
908
909	free(rx_ring->free_rx_ids, M_DEVBUF);
910	rx_ring->free_rx_ids = NULL;
911	free(rx_ring->rx_buffer_info, M_DEVBUF);
912	rx_ring->rx_buffer_info = NULL;
913	return (ENOMEM);
914}
915
916/**
917 * ena_free_rx_resources - Free Rx Resources
918 * @adapter: network interface device structure
919 * @qid: queue index
920 *
921 * Free all receive software resources
922 **/
923static void
924ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
925{
926	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
927
928	/* Free buffer DMA maps, */
929	for (int i = 0; i < rx_ring->ring_size; i++) {
930		bus_dmamap_sync(adapter->rx_buf_tag,
931		    rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
932		m_freem(rx_ring->rx_buffer_info[i].mbuf);
933		rx_ring->rx_buffer_info[i].mbuf = NULL;
934		bus_dmamap_unload(adapter->rx_buf_tag,
935		    rx_ring->rx_buffer_info[i].map);
936		bus_dmamap_destroy(adapter->rx_buf_tag,
937		    rx_ring->rx_buffer_info[i].map);
938	}
939
940	/* free LRO resources, */
941	tcp_lro_free(&rx_ring->lro);
942
943	/* free allocated memory */
944	free(rx_ring->rx_buffer_info, M_DEVBUF);
945	rx_ring->rx_buffer_info = NULL;
946
947	free(rx_ring->free_rx_ids, M_DEVBUF);
948	rx_ring->free_rx_ids = NULL;
949}
950
951/**
952 * ena_setup_all_rx_resources - allocate all queues Rx resources
953 * @adapter: network interface device structure
954 *
955 * Returns 0 on success, otherwise on failure.
956 **/
957static int
958ena_setup_all_rx_resources(struct ena_adapter *adapter)
959{
960	int i, rc = 0;
961
962	for (i = 0; i < adapter->num_io_queues; i++) {
963		rc = ena_setup_rx_resources(adapter, i);
964		if (rc != 0) {
965			device_printf(adapter->pdev,
966			    "Allocation for Rx Queue %u failed\n", i);
967			goto err_setup_rx;
968		}
969	}
970	return (0);
971
972err_setup_rx:
973	/* rewind the index freeing the rings as we go */
974	while (i--)
975		ena_free_rx_resources(adapter, i);
976	return (rc);
977}
978
979/**
980 * ena_free_all_rx_resources - Free Rx resources for all queues
981 * @adapter: network interface device structure
982 *
983 * Free all receive software resources
984 **/
985static void
986ena_free_all_rx_resources(struct ena_adapter *adapter)
987{
988	int i;
989
990	for (i = 0; i < adapter->num_io_queues; i++)
991		ena_free_rx_resources(adapter, i);
992}
993
994static inline int
995ena_alloc_rx_mbuf(struct ena_adapter *adapter,
996    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
997{
998	struct ena_com_buf *ena_buf;
999	bus_dma_segment_t segs[1];
1000	int nsegs, error;
1001	int mlen;
1002
1003	/* if previous allocated frag is not used */
1004	if (unlikely(rx_info->mbuf != NULL))
1005		return (0);
1006
1007	/* Get mbuf using UMA allocator */
1008	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1009	    rx_ring->rx_mbuf_sz);
1010
1011	if (unlikely(rx_info->mbuf == NULL)) {
1012		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
1013		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1014		if (unlikely(rx_info->mbuf == NULL)) {
1015			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1016			return (ENOMEM);
1017		}
1018		mlen = MCLBYTES;
1019	} else {
1020		mlen = rx_ring->rx_mbuf_sz;
1021	}
1022	/* Set mbuf length*/
1023	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;
1024
1025	/* Map packets for DMA */
1026	ena_trace(NULL, ENA_DBG | ENA_RSC | ENA_RXPTH,
1027	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
1028	    adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len);
1029	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
1030	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
1031	if (unlikely((error != 0) || (nsegs != 1))) {
1032		ena_trace(NULL, ENA_WARNING, "failed to map mbuf, error: %d, "
1033		    "nsegs: %d\n", error, nsegs);
1034		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
1035		goto exit;
1036
1037	}
1038
1039	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
1040
1041	ena_buf = &rx_info->ena_buf;
1042	ena_buf->paddr = segs[0].ds_addr;
1043	ena_buf->len = mlen;
1044
1045	ena_trace(NULL, ENA_DBG | ENA_RSC | ENA_RXPTH,
1046	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
1047	    rx_info->mbuf, rx_info,ena_buf->len, (uintmax_t)ena_buf->paddr);
1048
1049	return (0);
1050
1051exit:
1052	m_freem(rx_info->mbuf);
1053	rx_info->mbuf = NULL;
1054	return (EFAULT);
1055}
1056
1057static void
1058ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
1059    struct ena_rx_buffer *rx_info)
1060{
1061
1062	if (rx_info->mbuf == NULL) {
1063		ena_trace(NULL, ENA_WARNING, "Trying to free unallocated buffer\n");
1064		return;
1065	}
1066
1067	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
1068	    BUS_DMASYNC_POSTREAD);
1069	bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
1070	m_freem(rx_info->mbuf);
1071	rx_info->mbuf = NULL;
1072}
1073
1074/**
1075 * ena_refill_rx_bufs - Refills ring with descriptors
1076 * @rx_ring: the ring which we want to feed with free descriptors
1077 * @num: number of descriptors to refill
1078 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
1079 **/
1080int
1081ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
1082{
1083	struct ena_adapter *adapter = rx_ring->adapter;
1084	uint16_t next_to_use, req_id;
1085	uint32_t i;
1086	int rc;
1087
1088	ena_trace(NULL, ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d\n",
1089	    rx_ring->qid);
1090
1091	next_to_use = rx_ring->next_to_use;
1092
1093	for (i = 0; i < num; i++) {
1094		struct ena_rx_buffer *rx_info;
1095
1096		ena_trace(NULL, ENA_DBG | ENA_RXPTH | ENA_RSC,
1097		    "RX buffer - next to use: %d\n", next_to_use);
1098
1099		req_id = rx_ring->free_rx_ids[next_to_use];
1100		rx_info = &rx_ring->rx_buffer_info[req_id];
1101#ifdef DEV_NETMAP
1102		if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
1103			rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info);
1104		else
1105#endif /* DEV_NETMAP */
1106			rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
1107		if (unlikely(rc != 0)) {
1108			ena_trace(NULL, ENA_WARNING,
1109			    "failed to alloc buffer for rx queue %d\n",
1110			    rx_ring->qid);
1111			break;
1112		}
1113		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1114		    &rx_info->ena_buf, req_id);
1115		if (unlikely(rc != 0)) {
1116			ena_trace(NULL, ENA_WARNING,
1117			    "failed to add buffer for rx queue %d\n",
1118			    rx_ring->qid);
1119			break;
1120		}
1121		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1122		    rx_ring->ring_size);
1123	}
1124
1125	if (unlikely(i < num)) {
1126		counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1127		ena_trace(NULL, ENA_WARNING,
1128		     "refilled rx qid %d with only %d mbufs (from %d)\n",
1129		     rx_ring->qid, i, num);
1130	}
1131
1132	if (likely(i != 0))
1133		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1134
1135	rx_ring->next_to_use = next_to_use;
1136	return (i);
1137}
1138
1139int
1140ena_update_buf_ring_size(struct ena_adapter *adapter,
1141    uint32_t new_buf_ring_size)
1142{
1143	uint32_t old_buf_ring_size;
1144	int rc = 0;
1145	bool dev_was_up;
1146
1147	ENA_LOCK_LOCK(adapter);
1148
1149	old_buf_ring_size = adapter->buf_ring_size;
1150	adapter->buf_ring_size = new_buf_ring_size;
1151
1152	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1153	ena_down(adapter);
1154
1155	/* Reconfigure buf ring for all Tx rings. */
1156	ena_free_all_io_rings_resources(adapter);
1157	ena_init_io_rings_advanced(adapter);
1158	if (dev_was_up) {
1159		/*
1160		 * If ena_up() fails, it's not because of recent buf_ring size
1161		 * changes. Because of that, we just want to revert old drbr
1162		 * value and trigger the reset because something else had to
1163		 * go wrong.
1164		 */
1165		rc = ena_up(adapter);
1166		if (unlikely(rc != 0)) {
1167			device_printf(adapter->pdev,
1168			    "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n",
1169			    new_buf_ring_size, old_buf_ring_size);
1170
1171			/* Revert old size and trigger the reset */
1172			adapter->buf_ring_size = old_buf_ring_size;
1173			ena_free_all_io_rings_resources(adapter);
1174			ena_init_io_rings_advanced(adapter);
1175
1176			ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
1177			    adapter);
1178			ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
1179
1180		}
1181	}
1182
1183	ENA_LOCK_UNLOCK(adapter);
1184
1185	return (rc);
1186}
1187
1188int
1189ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
1190    uint32_t new_rx_size)
1191{
1192	uint32_t old_tx_size, old_rx_size;
1193	int rc = 0;
1194	bool dev_was_up;
1195
1196	ENA_LOCK_LOCK(adapter);
1197
1198	old_tx_size = adapter->requested_tx_ring_size;
1199	old_rx_size = adapter->requested_rx_ring_size;
1200	adapter->requested_tx_ring_size = new_tx_size;
1201	adapter->requested_rx_ring_size = new_rx_size;
1202
1203	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1204	ena_down(adapter);
1205
1206	/* Configure queues with new size. */
1207	ena_init_io_rings_basic(adapter);
1208	if (dev_was_up) {
1209		rc = ena_up(adapter);
1210		if (unlikely(rc != 0)) {
1211			device_printf(adapter->pdev,
1212			    "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
1213			    new_tx_size, new_rx_size, old_tx_size, old_rx_size);
1214
1215			/* Revert old size. */
1216			adapter->requested_tx_ring_size = old_tx_size;
1217			adapter->requested_rx_ring_size = old_rx_size;
1218			ena_init_io_rings_basic(adapter);
1219
1220			/* And try again. */
1221			rc = ena_up(adapter);
1222			if (unlikely(rc != 0)) {
1223				device_printf(adapter->pdev,
1224				    "Failed to revert old queue sizes. Triggering device reset.\n");
1225				/*
1226				 * If we've failed again, something had to go
1227				 * wrong. After reset, the device should try to
1228				 * go up
1229				 */
1230				ENA_FLAG_SET_ATOMIC(
1231				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1232				ena_trigger_reset(adapter,
1233				    ENA_REGS_RESET_OS_TRIGGER);
1234			}
1235		}
1236	}
1237
1238	ENA_LOCK_UNLOCK(adapter);
1239
1240	return (rc);
1241}
1242
1243static void
1244ena_update_io_rings(struct ena_adapter *adapter, uint32_t num)
1245{
1246	ena_free_all_io_rings_resources(adapter);
1247	/* Force indirection table to be reinitialized */
1248	ena_com_rss_destroy(adapter->ena_dev);
1249
1250	adapter->num_io_queues = num;
1251	ena_init_io_rings(adapter);
1252}
1253
1254/* Caller should sanitize new_num */
1255int
1256ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
1257{
1258	uint32_t old_num;
1259	int rc = 0;
1260	bool dev_was_up;
1261
1262	ENA_LOCK_LOCK(adapter);
1263
1264	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1265	old_num = adapter->num_io_queues;
1266	ena_down(adapter);
1267
1268	ena_update_io_rings(adapter, new_num);
1269
1270	if (dev_was_up) {
1271		rc = ena_up(adapter);
1272		if (unlikely(rc != 0)) {
1273			device_printf(adapter->pdev,
1274			    "Failed to configure device with %u IO queues. "
1275			    "Reverting to previous value: %u\n",
1276			    new_num, old_num);
1277
1278			ena_update_io_rings(adapter, old_num);
1279
1280			rc = ena_up(adapter);
1281			if (unlikely(rc != 0)) {
1282				device_printf(adapter->pdev,
1283				    "Failed to revert to previous setup IO "
1284				    "queues. Triggering device reset.\n");
1285				ENA_FLAG_SET_ATOMIC(
1286				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1287				ena_trigger_reset(adapter,
1288				    ENA_REGS_RESET_OS_TRIGGER);
1289			}
1290		}
1291	}
1292
1293	ENA_LOCK_UNLOCK(adapter);
1294
1295	return (rc);
1296}
1297
1298static void
1299ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
1300{
1301	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1302	unsigned int i;
1303
1304	for (i = 0; i < rx_ring->ring_size; i++) {
1305		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1306
1307		if (rx_info->mbuf != NULL)
1308			ena_free_rx_mbuf(adapter, rx_ring, rx_info);
1309#ifdef DEV_NETMAP
1310		if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
1311		    (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
1312			if (rx_info->netmap_buf_idx != 0)
1313				ena_netmap_free_rx_slot(adapter, rx_ring,
1314				    rx_info);
1315		}
1316#endif /* DEV_NETMAP */
1317	}
1318}
1319
1320/**
1321 * ena_refill_all_rx_bufs - allocate all queues Rx buffers
1322 * @adapter: network interface device structure
1323 *
1324 */
1325static void
1326ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1327{
1328	struct ena_ring *rx_ring;
1329	int i, rc, bufs_num;
1330
1331	for (i = 0; i < adapter->num_io_queues; i++) {
1332		rx_ring = &adapter->rx_ring[i];
1333		bufs_num = rx_ring->ring_size - 1;
1334		rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1335		if (unlikely(rc != bufs_num))
1336			ena_trace(NULL, ENA_WARNING, "refilling Queue %d failed. "
1337			    "Allocated %d buffers from: %d\n", i, rc, bufs_num);
1338#ifdef DEV_NETMAP
1339		rx_ring->initialized = true;
1340#endif /* DEV_NETMAP */
1341	}
1342}
1343
1344static void
1345ena_free_all_rx_bufs(struct ena_adapter *adapter)
1346{
1347	int i;
1348
1349	for (i = 0; i < adapter->num_io_queues; i++)
1350		ena_free_rx_bufs(adapter, i);
1351}
1352
1353/**
1354 * ena_free_tx_bufs - Free Tx Buffers per Queue
1355 * @adapter: network interface device structure
1356 * @qid: queue index
1357 **/
1358static void
1359ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
1360{
1361	bool print_once = true;
1362	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
1363
1364	ENA_RING_MTX_LOCK(tx_ring);
1365	for (int i = 0; i < tx_ring->ring_size; i++) {
1366		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1367
1368		if (tx_info->mbuf == NULL)
1369			continue;
1370
1371		if (print_once) {
1372			device_printf(adapter->pdev,
1373			    "free uncompleted tx mbuf qid %d idx 0x%x\n",
1374			    qid, i);
1375			print_once = false;
1376		} else {
1377			ena_trace(NULL, ENA_DBG,
1378			    "free uncompleted tx mbuf qid %d idx 0x%x\n",
1379			     qid, i);
1380		}
1381
1382		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
1383		    BUS_DMASYNC_POSTWRITE);
1384		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
1385
1386		m_free(tx_info->mbuf);
1387		tx_info->mbuf = NULL;
1388	}
1389	ENA_RING_MTX_UNLOCK(tx_ring);
1390}
1391
1392static void
1393ena_free_all_tx_bufs(struct ena_adapter *adapter)
1394{
1395
1396	for (int i = 0; i < adapter->num_io_queues; i++)
1397		ena_free_tx_bufs(adapter, i);
1398}
1399
1400static void
1401ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1402{
1403	uint16_t ena_qid;
1404	int i;
1405
1406	for (i = 0; i < adapter->num_io_queues; i++) {
1407		ena_qid = ENA_IO_TXQ_IDX(i);
1408		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1409	}
1410}
1411
1412static void
1413ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1414{
1415	uint16_t ena_qid;
1416	int i;
1417
1418	for (i = 0; i < adapter->num_io_queues; i++) {
1419		ena_qid = ENA_IO_RXQ_IDX(i);
1420		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1421	}
1422}
1423
1424static void
1425ena_destroy_all_io_queues(struct ena_adapter *adapter)
1426{
1427	struct ena_que *queue;
1428	int i;
1429
1430	for (i = 0; i < adapter->num_io_queues; i++) {
1431		queue = &adapter->que[i];
1432		while (taskqueue_cancel(queue->cleanup_tq,
1433		    &queue->cleanup_task, NULL))
1434			taskqueue_drain(queue->cleanup_tq,
1435			    &queue->cleanup_task);
1436		taskqueue_free(queue->cleanup_tq);
1437	}
1438
1439	ena_destroy_all_tx_queues(adapter);
1440	ena_destroy_all_rx_queues(adapter);
1441}
1442
1443static int
1444ena_create_io_queues(struct ena_adapter *adapter)
1445{
1446	struct ena_com_dev *ena_dev = adapter->ena_dev;
1447	struct ena_com_create_io_ctx ctx;
1448	struct ena_ring *ring;
1449	struct ena_que *queue;
1450	uint16_t ena_qid;
1451	uint32_t msix_vector;
1452	int rc, i;
1453
1454	/* Create TX queues */
1455	for (i = 0; i < adapter->num_io_queues; i++) {
1456		msix_vector = ENA_IO_IRQ_IDX(i);
1457		ena_qid = ENA_IO_TXQ_IDX(i);
1458		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1459		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1460		ctx.queue_size = adapter->requested_tx_ring_size;
1461		ctx.msix_vector = msix_vector;
1462		ctx.qid = ena_qid;
1463		rc = ena_com_create_io_queue(ena_dev, &ctx);
1464		if (rc != 0) {
1465			device_printf(adapter->pdev,
1466			    "Failed to create io TX queue #%d rc: %d\n", i, rc);
1467			goto err_tx;
1468		}
1469		ring = &adapter->tx_ring[i];
1470		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1471		    &ring->ena_com_io_sq,
1472		    &ring->ena_com_io_cq);
1473		if (rc != 0) {
1474			device_printf(adapter->pdev,
1475			    "Failed to get TX queue handlers. TX queue num"
1476			    " %d rc: %d\n", i, rc);
1477			ena_com_destroy_io_queue(ena_dev, ena_qid);
1478			goto err_tx;
1479		}
1480	}
1481
1482	/* Create RX queues */
1483	for (i = 0; i < adapter->num_io_queues; i++) {
1484		msix_vector = ENA_IO_IRQ_IDX(i);
1485		ena_qid = ENA_IO_RXQ_IDX(i);
1486		ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1487		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1488		ctx.queue_size = adapter->requested_rx_ring_size;
1489		ctx.msix_vector = msix_vector;
1490		ctx.qid = ena_qid;
1491		rc = ena_com_create_io_queue(ena_dev, &ctx);
1492		if (unlikely(rc != 0)) {
1493			device_printf(adapter->pdev,
1494			    "Failed to create io RX queue[%d] rc: %d\n", i, rc);
1495			goto err_rx;
1496		}
1497
1498		ring = &adapter->rx_ring[i];
1499		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1500		    &ring->ena_com_io_sq,
1501		    &ring->ena_com_io_cq);
1502		if (unlikely(rc != 0)) {
1503			device_printf(adapter->pdev,
1504			    "Failed to get RX queue handlers. RX queue num"
1505			    " %d rc: %d\n", i, rc);
1506			ena_com_destroy_io_queue(ena_dev, ena_qid);
1507			goto err_rx;
1508		}
1509	}
1510
1511	for (i = 0; i < adapter->num_io_queues; i++) {
1512		queue = &adapter->que[i];
1513
1514		TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
1515		queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
1516		    M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
1517
1518		taskqueue_start_threads(&queue->cleanup_tq, 1, PI_NET,
1519		    "%s queue %d cleanup",
1520		    device_get_nameunit(adapter->pdev), i);
1521	}
1522
1523	return (0);
1524
1525err_rx:
1526	while (i--)
1527		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
1528	i = adapter->num_io_queues;
1529err_tx:
1530	while (i--)
1531		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1532
1533	return (ENXIO);
1534}
1535
1536/*********************************************************************
1537 *
1538 *  MSIX & Interrupt Service routine
1539 *
1540 **********************************************************************/
1541
1542/**
1543 * ena_handle_msix - MSIX Interrupt Handler for admin/async queue
1544 * @arg: interrupt number
1545 **/
1546static void
1547ena_intr_msix_mgmnt(void *arg)
1548{
1549	struct ena_adapter *adapter = (struct ena_adapter *)arg;
1550
1551	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1552	if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)))
1553		ena_com_aenq_intr_handler(adapter->ena_dev, arg);
1554}
1555
1556/**
1557 * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
1558 * @arg: queue
1559 **/
1560static int
1561ena_handle_msix(void *arg)
1562{
1563	struct ena_que *queue = arg;
1564	struct ena_adapter *adapter = queue->adapter;
1565	if_t ifp = adapter->ifp;
1566
1567	if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
1568		return (FILTER_STRAY);
1569
1570	taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
1571
1572	return (FILTER_HANDLED);
1573}
1574
1575static int
1576ena_enable_msix(struct ena_adapter *adapter)
1577{
1578	device_t dev = adapter->pdev;
1579	int msix_vecs, msix_req;
1580	int i, rc = 0;
1581
1582	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
1583		device_printf(dev, "Error, MSI-X is already enabled\n");
1584		return (EINVAL);
1585	}
1586
1587	/* Reserved the max msix vectors we might need */
1588	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
1589
1590	adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
1591	    M_DEVBUF, M_WAITOK | M_ZERO);
1592
1593	ena_trace(NULL, ENA_DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs);
1594
1595	for (i = 0; i < msix_vecs; i++) {
1596		adapter->msix_entries[i].entry = i;
1597		/* Vectors must start from 1 */
1598		adapter->msix_entries[i].vector = i + 1;
1599	}
1600
1601	msix_req = msix_vecs;
1602	rc = pci_alloc_msix(dev, &msix_vecs);
1603	if (unlikely(rc != 0)) {
1604		device_printf(dev,
1605		    "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc);
1606
1607		rc = ENOSPC;
1608		goto err_msix_free;
1609	}
1610
1611	if (msix_vecs != msix_req) {
1612		if (msix_vecs == ENA_ADMIN_MSIX_VEC) {
1613			device_printf(dev,
1614			    "Not enough number of MSI-x allocated: %d\n",
1615			    msix_vecs);
1616			pci_release_msi(dev);
1617			rc = ENOSPC;
1618			goto err_msix_free;
1619		}
1620		device_printf(dev, "Enable only %d MSI-x (out of %d), reduce "
1621		    "the number of queues\n", msix_vecs, msix_req);
1622	}
1623
1624	adapter->msix_vecs = msix_vecs;
1625	ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
1626
1627	return (0);
1628
1629err_msix_free:
1630	free(adapter->msix_entries, M_DEVBUF);
1631	adapter->msix_entries = NULL;
1632
1633	return (rc);
1634}
1635
1636static void
1637ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1638{
1639
1640	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1641	    ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1642	    device_get_nameunit(adapter->pdev));
1643	/*
1644	 * Handler is NULL on purpose, it will be set
1645	 * when mgmnt interrupt is acquired
1646	 */
1647	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
1648	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1649	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1650	    adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
1651}
1652
1653static int
1654ena_setup_io_intr(struct ena_adapter *adapter)
1655{
1656	static int last_bind_cpu = -1;
1657	int irq_idx;
1658
1659	if (adapter->msix_entries == NULL)
1660		return (EINVAL);
1661
1662	for (int i = 0; i < adapter->num_io_queues; i++) {
1663		irq_idx = ENA_IO_IRQ_IDX(i);
1664
1665		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1666		    "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
1667		adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
1668		adapter->irq_tbl[irq_idx].data = &adapter->que[i];
1669		adapter->irq_tbl[irq_idx].vector =
1670		    adapter->msix_entries[irq_idx].vector;
1671		ena_trace(NULL, ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n",
1672		    adapter->msix_entries[irq_idx].vector);
1673
1674		/*
1675		 * We want to bind rings to the corresponding cpu
1676		 * using something similar to the RSS round-robin technique.
1677		 */
1678		if (unlikely(last_bind_cpu < 0))
1679			last_bind_cpu = CPU_FIRST();
1680		adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1681		    last_bind_cpu;
1682		last_bind_cpu = CPU_NEXT(last_bind_cpu);
1683	}
1684
1685	return (0);
1686}
1687
1688static int
1689ena_request_mgmnt_irq(struct ena_adapter *adapter)
1690{
1691	struct ena_irq *irq;
1692	unsigned long flags;
1693	int rc, rcc;
1694
1695	flags = RF_ACTIVE | RF_SHAREABLE;
1696
1697	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1698	irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1699	    &irq->vector, flags);
1700
1701	if (unlikely(irq->res == NULL)) {
1702		device_printf(adapter->pdev, "could not allocate "
1703		    "irq vector: %d\n", irq->vector);
1704		return (ENXIO);
1705	}
1706
1707	rc = bus_setup_intr(adapter->pdev, irq->res,
1708	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt,
1709	    irq->data, &irq->cookie);
1710	if (unlikely(rc != 0)) {
1711		device_printf(adapter->pdev, "failed to register "
1712		    "interrupt handler for irq %ju: %d\n",
1713		    rman_get_start(irq->res), rc);
1714		goto err_res_free;
1715	}
1716	irq->requested = true;
1717
1718	return (rc);
1719
1720err_res_free:
1721	ena_trace(NULL, ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n",
1722	    irq->vector);
1723	rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1724	    irq->vector, irq->res);
1725	if (unlikely(rcc != 0))
1726		device_printf(adapter->pdev, "dev has no parent while "
1727		    "releasing res for irq: %d\n", irq->vector);
1728	irq->res = NULL;
1729
1730	return (rc);
1731}
1732
1733static int
1734ena_request_io_irq(struct ena_adapter *adapter)
1735{
1736	struct ena_irq *irq;
1737	unsigned long flags = 0;
1738	int rc = 0, i, rcc;
1739
1740	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) {
1741		device_printf(adapter->pdev,
1742		    "failed to request I/O IRQ: MSI-X is not enabled\n");
1743		return (EINVAL);
1744	} else {
1745		flags = RF_ACTIVE | RF_SHAREABLE;
1746	}
1747
1748	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1749		irq = &adapter->irq_tbl[i];
1750
1751		if (unlikely(irq->requested))
1752			continue;
1753
1754		irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1755		    &irq->vector, flags);
1756		if (unlikely(irq->res == NULL)) {
1757			rc = ENOMEM;
1758			device_printf(adapter->pdev, "could not allocate "
1759			    "irq vector: %d\n", irq->vector);
1760			goto err;
1761		}
1762
1763		rc = bus_setup_intr(adapter->pdev, irq->res,
1764		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL,
1765		    irq->data, &irq->cookie);
1766		 if (unlikely(rc != 0)) {
1767			device_printf(adapter->pdev, "failed to register "
1768			    "interrupt handler for irq %ju: %d\n",
1769			    rman_get_start(irq->res), rc);
1770			goto err;
1771		}
1772		irq->requested = true;
1773
1774		ena_trace(NULL, ENA_INFO, "queue %d - cpu %d\n",
1775		    i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
1776	}
1777
1778	return (rc);
1779
1780err:
1781
1782	for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
1783		irq = &adapter->irq_tbl[i];
1784		rcc = 0;
1785
1786		/* Once we entered err: section and irq->requested is true we
1787		   free both intr and resources */
1788		if (irq->requested)
1789			rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1790		if (unlikely(rcc != 0))
1791			device_printf(adapter->pdev, "could not release"
1792			    " irq: %d, error: %d\n", irq->vector, rcc);
1793
1794		/* If we entred err: section without irq->requested set we know
1795		   it was bus_alloc_resource_any() that needs cleanup, provided
1796		   res is not NULL. In case res is NULL no work in needed in
1797		   this iteration */
1798		rcc = 0;
1799		if (irq->res != NULL) {
1800			rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1801			    irq->vector, irq->res);
1802		}
1803		if (unlikely(rcc != 0))
1804			device_printf(adapter->pdev, "dev has no parent while "
1805			    "releasing res for irq: %d\n", irq->vector);
1806		irq->requested = false;
1807		irq->res = NULL;
1808	}
1809
1810	return (rc);
1811}
1812
1813static void
1814ena_free_mgmnt_irq(struct ena_adapter *adapter)
1815{
1816	struct ena_irq *irq;
1817	int rc;
1818
1819	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1820	if (irq->requested) {
1821		ena_trace(NULL, ENA_INFO | ENA_ADMQ, "tear down irq: %d\n",
1822		    irq->vector);
1823		rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1824		if (unlikely(rc != 0))
1825			device_printf(adapter->pdev, "failed to tear "
1826			    "down irq: %d\n", irq->vector);
1827		irq->requested = 0;
1828	}
1829
1830	if (irq->res != NULL) {
1831		ena_trace(NULL, ENA_INFO | ENA_ADMQ, "release resource irq: %d\n",
1832		    irq->vector);
1833		rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1834		    irq->vector, irq->res);
1835		irq->res = NULL;
1836		if (unlikely(rc != 0))
1837			device_printf(adapter->pdev, "dev has no parent while "
1838			    "releasing res for irq: %d\n", irq->vector);
1839	}
1840}
1841
1842static void
1843ena_free_io_irq(struct ena_adapter *adapter)
1844{
1845	struct ena_irq *irq;
1846	int rc;
1847
1848	for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1849		irq = &adapter->irq_tbl[i];
1850		if (irq->requested) {
1851			ena_trace(NULL, ENA_INFO | ENA_IOQ, "tear down irq: %d\n",
1852			    irq->vector);
1853			rc = bus_teardown_intr(adapter->pdev, irq->res,
1854			    irq->cookie);
1855			if (unlikely(rc != 0)) {
1856				device_printf(adapter->pdev, "failed to tear "
1857				    "down irq: %d\n", irq->vector);
1858			}
1859			irq->requested = 0;
1860		}
1861
1862		if (irq->res != NULL) {
1863			ena_trace(NULL, ENA_INFO | ENA_IOQ, "release resource irq: %d\n",
1864			    irq->vector);
1865			rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1866			    irq->vector, irq->res);
1867			irq->res = NULL;
1868			if (unlikely(rc != 0)) {
1869				device_printf(adapter->pdev, "dev has no parent"
1870				    " while releasing res for irq: %d\n",
1871				    irq->vector);
1872			}
1873		}
1874	}
1875}
1876
1877static void
1878ena_free_irqs(struct ena_adapter* adapter)
1879{
1880
1881	ena_free_io_irq(adapter);
1882	ena_free_mgmnt_irq(adapter);
1883	ena_disable_msix(adapter);
1884}
1885
1886static void
1887ena_disable_msix(struct ena_adapter *adapter)
1888{
1889
1890	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
1891		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
1892		pci_release_msi(adapter->pdev);
1893	}
1894
1895	adapter->msix_vecs = 0;
1896	if (adapter->msix_entries != NULL)
1897		free(adapter->msix_entries, M_DEVBUF);
1898	adapter->msix_entries = NULL;
1899}
1900
1901static void
1902ena_unmask_all_io_irqs(struct ena_adapter *adapter)
1903{
1904	struct ena_com_io_cq* io_cq;
1905	struct ena_eth_io_intr_reg intr_reg;
1906	uint16_t ena_qid;
1907	int i;
1908
1909	/* Unmask interrupts for all queues */
1910	for (i = 0; i < adapter->num_io_queues; i++) {
1911		ena_qid = ENA_IO_TXQ_IDX(i);
1912		io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
1913		ena_com_update_intr_reg(&intr_reg, 0, 0, true);
1914		ena_com_unmask_intr(io_cq, &intr_reg);
1915	}
1916}
1917
1918/* Configure the Rx forwarding */
1919static int
1920ena_rss_configure(struct ena_adapter *adapter)
1921{
1922	struct ena_com_dev *ena_dev = adapter->ena_dev;
1923	int rc;
1924
1925	/* In case the RSS table was destroyed */
1926	if (!ena_dev->rss.tbl_log_size) {
1927		rc = ena_rss_init_default(adapter);
1928		if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
1929			device_printf(adapter->pdev,
1930			    "WARNING: RSS was not properly re-initialized,"
1931			    " it will affect bandwidth\n");
1932			ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
1933			return (rc);
1934		}
1935	}
1936
1937	/* Set indirect table */
1938	rc = ena_com_indirect_table_set(ena_dev);
1939	if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1940		return (rc);
1941
1942	/* Configure hash function (if supported) */
1943	rc = ena_com_set_hash_function(ena_dev);
1944	if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1945		return (rc);
1946
1947	/* Configure hash inputs (if supported) */
1948	rc = ena_com_set_hash_ctrl(ena_dev);
1949	if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1950		return (rc);
1951
1952	return (0);
1953}
1954
1955static int
1956ena_up_complete(struct ena_adapter *adapter)
1957{
1958	int rc;
1959
1960	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
1961		rc = ena_rss_configure(adapter);
1962		if (rc != 0) {
1963			device_printf(adapter->pdev,
1964			    "Failed to configure RSS\n");
1965			return (rc);
1966		}
1967	}
1968
1969	rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
1970	if (unlikely(rc != 0))
1971		return (rc);
1972
1973	ena_refill_all_rx_bufs(adapter);
1974	ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
1975	    sizeof(adapter->hw_stats));
1976
1977	return (0);
1978}
1979
1980static void
1981set_io_rings_size(struct ena_adapter *adapter, int new_tx_size,
1982    int new_rx_size)
1983{
1984	int i;
1985
1986	for (i = 0; i < adapter->num_io_queues; i++) {
1987		adapter->tx_ring[i].ring_size = new_tx_size;
1988		adapter->rx_ring[i].ring_size = new_rx_size;
1989	}
1990}
1991
1992static int
1993create_queues_with_size_backoff(struct ena_adapter *adapter)
1994{
1995	int rc;
1996	uint32_t cur_rx_ring_size, cur_tx_ring_size;
1997	uint32_t new_rx_ring_size, new_tx_ring_size;
1998
1999	/*
2000	 * Current queue sizes might be set to smaller than the requested
2001	 * ones due to past queue allocation failures.
2002	 */
2003	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2004	    adapter->requested_rx_ring_size);
2005
2006	while (1) {
2007		/* Allocate transmit descriptors */
2008		rc = ena_setup_all_tx_resources(adapter);
2009		if (unlikely(rc != 0)) {
2010			ena_trace(NULL, ENA_ALERT, "err_setup_tx\n");
2011			goto err_setup_tx;
2012		}
2013
2014		/* Allocate receive descriptors */
2015		rc = ena_setup_all_rx_resources(adapter);
2016		if (unlikely(rc != 0)) {
2017			ena_trace(NULL, ENA_ALERT, "err_setup_rx\n");
2018			goto err_setup_rx;
2019		}
2020
2021		/* Create IO queues for Rx & Tx */
2022		rc = ena_create_io_queues(adapter);
2023		if (unlikely(rc != 0)) {
2024			ena_trace(NULL, ENA_ALERT,
2025			    "create IO queues failed\n");
2026			goto err_io_que;
2027		}
2028
2029		return (0);
2030
2031err_io_que:
2032		ena_free_all_rx_resources(adapter);
2033err_setup_rx:
2034		ena_free_all_tx_resources(adapter);
2035err_setup_tx:
2036		/*
2037		 * Lower the ring size if ENOMEM. Otherwise, return the
2038		 * error straightaway.
2039		 */
2040		if (unlikely(rc != ENOMEM)) {
2041			ena_trace(NULL, ENA_ALERT,
2042			    "Queue creation failed with error code: %d\n", rc);
2043			return (rc);
2044		}
2045
2046		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2047		cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2048
2049		device_printf(adapter->pdev,
2050		    "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2051		    cur_tx_ring_size, cur_rx_ring_size);
2052
2053		new_tx_ring_size = cur_tx_ring_size;
2054		new_rx_ring_size = cur_rx_ring_size;
2055
2056		/*
2057		 * Decrease the size of a larger queue, or decrease both if they are
2058		 * the same size.
2059		 */
2060		if (cur_rx_ring_size <= cur_tx_ring_size)
2061			new_tx_ring_size = cur_tx_ring_size / 2;
2062		if (cur_rx_ring_size >= cur_tx_ring_size)
2063			new_rx_ring_size = cur_rx_ring_size / 2;
2064
2065		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2066		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
2067			device_printf(adapter->pdev,
2068			    "Queue creation failed with the smallest possible queue size"
2069			    "of %d for both queues. Not retrying with smaller queues\n",
2070			    ENA_MIN_RING_SIZE);
2071			return (rc);
2072		}
2073
2074		set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
2075	}
2076}
2077
2078int
2079ena_up(struct ena_adapter *adapter)
2080{
2081	int rc = 0;
2082
2083	if (unlikely(device_is_attached(adapter->pdev) == 0)) {
2084		device_printf(adapter->pdev, "device is not attached!\n");
2085		return (ENXIO);
2086	}
2087
2088	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2089		return (0);
2090
2091	device_printf(adapter->pdev, "device is going UP\n");
2092
2093	/* setup interrupts for IO queues */
2094	rc = ena_setup_io_intr(adapter);
2095	if (unlikely(rc != 0)) {
2096		ena_trace(NULL, ENA_ALERT, "error setting up IO interrupt\n");
2097		goto error;
2098	}
2099	rc = ena_request_io_irq(adapter);
2100	if (unlikely(rc != 0)) {
2101		ena_trace(NULL, ENA_ALERT, "err_req_irq\n");
2102		goto error;
2103	}
2104
2105	device_printf(adapter->pdev,
2106	    "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, "
2107	    "LLQ is %s\n",
2108	    adapter->num_io_queues,
2109	    adapter->requested_rx_ring_size,
2110	    adapter->requested_tx_ring_size,
2111	    (adapter->ena_dev->tx_mem_queue_type ==
2112	        ENA_ADMIN_PLACEMENT_POLICY_DEV) ?  "ENABLED" : "DISABLED");
2113
2114	rc = create_queues_with_size_backoff(adapter);
2115	if (unlikely(rc != 0)) {
2116		ena_trace(NULL, ENA_ALERT,
2117		    "error creating queues with size backoff\n");
2118		goto err_create_queues_with_backoff;
2119	}
2120
2121	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
2122		if_link_state_change(adapter->ifp, LINK_STATE_UP);
2123
2124	rc = ena_up_complete(adapter);
2125	if (unlikely(rc != 0))
2126		goto err_up_complete;
2127
2128	counter_u64_add(adapter->dev_stats.interface_up, 1);
2129
2130	ena_update_hwassist(adapter);
2131
2132	if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING,
2133		IFF_DRV_OACTIVE);
2134
2135	/* Activate timer service only if the device is running.
2136		* If this flag is not set, it means that the driver is being
2137		* reset and timer service will be activated afterwards.
2138		*/
2139	if (ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) {
2140		callout_reset_sbt(&adapter->timer_service, SBT_1S,
2141			SBT_1S, ena_timer_service, (void *)adapter, 0);
2142	}
2143
2144	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter);
2145
2146	ena_unmask_all_io_irqs(adapter);
2147
2148	return (0);
2149
2150err_up_complete:
2151	ena_destroy_all_io_queues(adapter);
2152	ena_free_all_rx_resources(adapter);
2153	ena_free_all_tx_resources(adapter);
2154err_create_queues_with_backoff:
2155	ena_free_io_irq(adapter);
2156error:
2157	return (rc);
2158}
2159
2160static uint64_t
2161ena_get_counter(if_t ifp, ift_counter cnt)
2162{
2163	struct ena_adapter *adapter;
2164	struct ena_hw_stats *stats;
2165
2166	adapter = if_getsoftc(ifp);
2167	stats = &adapter->hw_stats;
2168
2169	switch (cnt) {
2170	case IFCOUNTER_IPACKETS:
2171		return (counter_u64_fetch(stats->rx_packets));
2172	case IFCOUNTER_OPACKETS:
2173		return (counter_u64_fetch(stats->tx_packets));
2174	case IFCOUNTER_IBYTES:
2175		return (counter_u64_fetch(stats->rx_bytes));
2176	case IFCOUNTER_OBYTES:
2177		return (counter_u64_fetch(stats->tx_bytes));
2178	case IFCOUNTER_IQDROPS:
2179		return (counter_u64_fetch(stats->rx_drops));
2180	case IFCOUNTER_OQDROPS:
2181		return (counter_u64_fetch(stats->tx_drops));
2182	default:
2183		return (if_get_counter_default(ifp, cnt));
2184	}
2185}
2186
2187static int
2188ena_media_change(if_t ifp)
2189{
2190	/* Media Change is not supported by firmware */
2191	return (0);
2192}
2193
2194static void
2195ena_media_status(if_t ifp, struct ifmediareq *ifmr)
2196{
2197	struct ena_adapter *adapter = if_getsoftc(ifp);
2198	ena_trace(NULL, ENA_DBG, "enter\n");
2199
2200	ENA_LOCK_LOCK(adapter);
2201
2202	ifmr->ifm_status = IFM_AVALID;
2203	ifmr->ifm_active = IFM_ETHER;
2204
2205	if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
2206		ENA_LOCK_UNLOCK(adapter);
2207		ena_trace(NULL, ENA_INFO, "Link is down\n");
2208		return;
2209	}
2210
2211	ifmr->ifm_status |= IFM_ACTIVE;
2212	ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;
2213
2214	ENA_LOCK_UNLOCK(adapter);
2215}
2216
2217static void
2218ena_init(void *arg)
2219{
2220	struct ena_adapter *adapter = (struct ena_adapter *)arg;
2221
2222	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) {
2223		ENA_LOCK_LOCK(adapter);
2224		ena_up(adapter);
2225		ENA_LOCK_UNLOCK(adapter);
2226	}
2227}
2228
2229static int
2230ena_ioctl(if_t ifp, u_long command, caddr_t data)
2231{
2232	struct ena_adapter *adapter;
2233	struct ifreq *ifr;
2234	int rc;
2235
2236	adapter = ifp->if_softc;
2237	ifr = (struct ifreq *)data;
2238
2239	/*
2240	 * Acquiring lock to prevent from running up and down routines parallel.
2241	 */
2242	rc = 0;
2243	switch (command) {
2244	case SIOCSIFMTU:
2245		if (ifp->if_mtu == ifr->ifr_mtu)
2246			break;
2247		ENA_LOCK_LOCK(adapter);
2248		ena_down(adapter);
2249
2250		ena_change_mtu(ifp, ifr->ifr_mtu);
2251
2252		rc = ena_up(adapter);
2253		ENA_LOCK_UNLOCK(adapter);
2254		break;
2255
2256	case SIOCSIFFLAGS:
2257		if ((ifp->if_flags & IFF_UP) != 0) {
2258			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2259				if ((ifp->if_flags & (IFF_PROMISC |
2260				    IFF_ALLMULTI)) != 0) {
2261					device_printf(adapter->pdev,
2262					    "ioctl promisc/allmulti\n");
2263				}
2264			} else {
2265				ENA_LOCK_LOCK(adapter);
2266				rc = ena_up(adapter);
2267				ENA_LOCK_UNLOCK(adapter);
2268			}
2269		} else {
2270			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2271				ENA_LOCK_LOCK(adapter);
2272				ena_down(adapter);
2273				ENA_LOCK_UNLOCK(adapter);
2274			}
2275		}
2276		break;
2277
2278	case SIOCADDMULTI:
2279	case SIOCDELMULTI:
2280		break;
2281
2282	case SIOCSIFMEDIA:
2283	case SIOCGIFMEDIA:
2284		rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2285		break;
2286
2287	case SIOCSIFCAP:
2288		{
2289			int reinit = 0;
2290
2291			if (ifr->ifr_reqcap != ifp->if_capenable) {
2292				ifp->if_capenable = ifr->ifr_reqcap;
2293				reinit = 1;
2294			}
2295
2296			if ((reinit != 0) &&
2297			    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
2298				ENA_LOCK_LOCK(adapter);
2299				ena_down(adapter);
2300				rc = ena_up(adapter);
2301				ENA_LOCK_UNLOCK(adapter);
2302			}
2303		}
2304
2305		break;
2306	default:
2307		rc = ether_ioctl(ifp, command, data);
2308		break;
2309	}
2310
2311	return (rc);
2312}
2313
2314static int
2315ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
2316{
2317	int caps = 0;
2318
2319	if ((feat->offload.tx &
2320	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2321	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
2322	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
2323		caps |= IFCAP_TXCSUM;
2324
2325	if ((feat->offload.tx &
2326	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
2327	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
2328		caps |= IFCAP_TXCSUM_IPV6;
2329
2330	if ((feat->offload.tx &
2331	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
2332		caps |= IFCAP_TSO4;
2333
2334	if ((feat->offload.tx &
2335	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
2336		caps |= IFCAP_TSO6;
2337
2338	if ((feat->offload.rx_supported &
2339	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
2340	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
2341		caps |= IFCAP_RXCSUM;
2342
2343	if ((feat->offload.rx_supported &
2344	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
2345		caps |= IFCAP_RXCSUM_IPV6;
2346
2347	caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
2348
2349	return (caps);
2350}
2351
2352static void
2353ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
2354{
2355
2356	host_info->supported_network_features[0] =
2357	    (uint32_t)if_getcapabilities(ifp);
2358}
2359
2360static void
2361ena_update_hwassist(struct ena_adapter *adapter)
2362{
2363	if_t ifp = adapter->ifp;
2364	uint32_t feat = adapter->tx_offload_cap;
2365	int cap = if_getcapenable(ifp);
2366	int flags = 0;
2367
2368	if_clearhwassist(ifp);
2369
2370	if ((cap & IFCAP_TXCSUM) != 0) {
2371		if ((feat &
2372		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
2373			flags |= CSUM_IP;
2374		if ((feat &
2375		    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2376		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
2377			flags |= CSUM_IP_UDP | CSUM_IP_TCP;
2378	}
2379
2380	if ((cap & IFCAP_TXCSUM_IPV6) != 0)
2381		flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
2382
2383	if ((cap & IFCAP_TSO4) != 0)
2384		flags |= CSUM_IP_TSO;
2385
2386	if ((cap & IFCAP_TSO6) != 0)
2387		flags |= CSUM_IP6_TSO;
2388
2389	if_sethwassistbits(ifp, flags, 0);
2390}
2391
2392static int
2393ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
2394    struct ena_com_dev_get_features_ctx *feat)
2395{
2396	if_t ifp;
2397	int caps = 0;
2398
2399	ifp = adapter->ifp = if_gethandle(IFT_ETHER);
2400	if (unlikely(ifp == NULL)) {
2401		ena_trace(NULL, ENA_ALERT, "can not allocate ifnet structure\n");
2402		return (ENXIO);
2403	}
2404	if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
2405	if_setdev(ifp, pdev);
2406	if_setsoftc(ifp, adapter);
2407
2408	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2409	if_setinitfn(ifp, ena_init);
2410	if_settransmitfn(ifp, ena_mq_start);
2411	if_setqflushfn(ifp, ena_qflush);
2412	if_setioctlfn(ifp, ena_ioctl);
2413	if_setgetcounterfn(ifp, ena_get_counter);
2414
2415	if_setsendqlen(ifp, adapter->requested_tx_ring_size);
2416	if_setsendqready(ifp);
2417	if_setmtu(ifp, ETHERMTU);
2418	if_setbaudrate(ifp, 0);
2419	/* Zeroize capabilities... */
2420	if_setcapabilities(ifp, 0);
2421	if_setcapenable(ifp, 0);
2422	/* check hardware support */
2423	caps = ena_get_dev_offloads(feat);
2424	/* ... and set them */
2425	if_setcapabilitiesbit(ifp, caps, 0);
2426
2427	/* TSO parameters */
2428	ifp->if_hw_tsomax = ENA_TSO_MAXSIZE -
2429	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2430	ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1;
2431	ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE;
2432
2433	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2434	if_setcapenable(ifp, if_getcapabilities(ifp));
2435
2436	/*
2437	 * Specify the media types supported by this adapter and register
2438	 * callbacks to update media and link information
2439	 */
2440	ifmedia_init(&adapter->media, IFM_IMASK,
2441	    ena_media_change, ena_media_status);
2442	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2443	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2444
2445	ether_ifattach(ifp, adapter->mac_addr);
2446
2447	return (0);
2448}
2449
2450void
2451ena_down(struct ena_adapter *adapter)
2452{
2453	int rc;
2454
2455	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2456		return;
2457
2458	device_printf(adapter->pdev, "device is going DOWN\n");
2459
2460	callout_drain(&adapter->timer_service);
2461
2462	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter);
2463	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE,
2464		IFF_DRV_RUNNING);
2465
2466	ena_free_io_irq(adapter);
2467
2468	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) {
2469		rc = ena_com_dev_reset(adapter->ena_dev,
2470			adapter->reset_reason);
2471		if (unlikely(rc != 0))
2472			device_printf(adapter->pdev,
2473				"Device reset failed\n");
2474	}
2475
2476	ena_destroy_all_io_queues(adapter);
2477
2478	ena_free_all_tx_bufs(adapter);
2479	ena_free_all_rx_bufs(adapter);
2480	ena_free_all_tx_resources(adapter);
2481	ena_free_all_rx_resources(adapter);
2482
2483	counter_u64_add(adapter->dev_stats.interface_down, 1);
2484}
2485
2486static uint32_t
2487ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev,
2488    struct ena_com_dev_get_features_ctx *get_feat_ctx)
2489{
2490	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
2491
2492	/* Regular queues capabilities */
2493	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2494		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2495		    &get_feat_ctx->max_queue_ext.max_queue_ext;
2496		io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
2497			max_queue_ext->max_rx_cq_num);
2498
2499		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
2500		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
2501	} else {
2502		struct ena_admin_queue_feature_desc *max_queues =
2503		    &get_feat_ctx->max_queues;
2504		io_tx_sq_num = max_queues->max_sq_num;
2505		io_tx_cq_num = max_queues->max_cq_num;
2506		io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
2507	}
2508
2509	/* In case of LLQ use the llq fields for the tx SQ/CQ */
2510	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2511		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
2512
2513	max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
2514	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num);
2515	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num);
2516	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num);
2517	/* 1 IRQ for for mgmnt and 1 IRQ for each TX/RX pair */
2518	max_num_io_queues = min_t(uint32_t, max_num_io_queues,
2519	    pci_msix_count(pdev) - 1);
2520
2521	return (max_num_io_queues);
2522}
2523
2524static int
2525ena_enable_wc(struct resource *res)
2526{
2527#if defined(__i386) || defined(__amd64)
2528	vm_offset_t va;
2529	vm_size_t len;
2530	int rc;
2531
2532	va = (vm_offset_t)rman_get_virtual(res);
2533	len = rman_get_size(res);
2534	/* Enable write combining */
2535	rc = pmap_change_attr(va, len, PAT_WRITE_COMBINING);
2536	if (unlikely(rc != 0)) {
2537		ena_trace(NULL, ENA_ALERT, "pmap_change_attr failed, %d\n", rc);
2538		return (rc);
2539	}
2540
2541	return (0);
2542#endif
2543	return (EOPNOTSUPP);
2544}
2545
2546static int
2547ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
2548    struct ena_admin_feature_llq_desc *llq,
2549    struct ena_llq_configurations *llq_default_configurations)
2550{
2551	struct ena_adapter *adapter = device_get_softc(pdev);
2552	int rc, rid;
2553	uint32_t llq_feature_mask;
2554
2555	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
2556	if (!(ena_dev->supported_features & llq_feature_mask)) {
2557		device_printf(pdev,
2558		    "LLQ is not supported. Fallback to host mode policy.\n");
2559		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2560		return (0);
2561	}
2562
2563	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
2564	if (unlikely(rc != 0)) {
2565		device_printf(pdev, "Failed to configure the device mode. "
2566		    "Fallback to host mode policy.\n");
2567		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2568		return (0);
2569	}
2570
2571	/* Nothing to config, exit */
2572	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
2573		return (0);
2574
2575	/* Try to allocate resources for LLQ bar */
2576	rid = PCIR_BAR(ENA_MEM_BAR);
2577	adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
2578	    &rid, RF_ACTIVE);
2579	if (unlikely(adapter->memory == NULL)) {
2580		device_printf(pdev, "unable to allocate LLQ bar resource. "
2581		    "Fallback to host mode policy.\n");
2582		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2583		return (0);
2584	}
2585
2586	/* Enable write combining for better LLQ performance */
2587	rc = ena_enable_wc(adapter->memory);
2588	if (unlikely(rc != 0)) {
2589		device_printf(pdev, "failed to enable write combining.\n");
2590		return (rc);
2591	}
2592
2593	/*
2594	 * Save virtual address of the device's memory region
2595	 * for the ena_com layer.
2596	 */
2597	ena_dev->mem_bar = rman_get_virtual(adapter->memory);
2598
2599	return (0);
2600}
2601
2602static inline
2603void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
2604{
2605	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
2606	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
2607	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
2608	llq_config->llq_num_decs_before_header =
2609	    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
2610	llq_config->llq_ring_entry_size_value = 128;
2611}
2612
2613static int
2614ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
2615{
2616	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
2617	struct ena_com_dev *ena_dev = ctx->ena_dev;
2618	uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE;
2619	uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE;
2620	uint32_t max_tx_queue_size;
2621	uint32_t max_rx_queue_size;
2622
2623	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2624		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2625		    &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
2626		max_rx_queue_size = min_t(uint32_t,
2627		    max_queue_ext->max_rx_cq_depth,
2628		    max_queue_ext->max_rx_sq_depth);
2629		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
2630
2631		if (ena_dev->tx_mem_queue_type ==
2632		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
2633			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2634			    llq->max_llq_depth);
2635		else
2636			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2637			    max_queue_ext->max_tx_sq_depth);
2638
2639		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2640		    max_queue_ext->max_per_packet_tx_descs);
2641		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2642		    max_queue_ext->max_per_packet_rx_descs);
2643	} else {
2644		struct ena_admin_queue_feature_desc *max_queues =
2645		    &ctx->get_feat_ctx->max_queues;
2646		max_rx_queue_size = min_t(uint32_t,
2647		    max_queues->max_cq_depth,
2648		    max_queues->max_sq_depth);
2649		max_tx_queue_size = max_queues->max_cq_depth;
2650
2651		if (ena_dev->tx_mem_queue_type ==
2652		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
2653			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2654			    llq->max_llq_depth);
2655		else
2656			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2657			    max_queues->max_sq_depth);
2658
2659		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2660		    max_queues->max_packet_tx_descs);
2661		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2662		    max_queues->max_packet_rx_descs);
2663	}
2664
2665	/* round down to the nearest power of 2 */
2666	max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
2667	max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
2668
2669	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
2670	    max_tx_queue_size);
2671	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
2672	    max_rx_queue_size);
2673
2674	tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
2675	rx_queue_size = 1 << (flsl(rx_queue_size) - 1);
2676
2677	ctx->max_tx_queue_size = max_tx_queue_size;
2678	ctx->max_rx_queue_size = max_rx_queue_size;
2679	ctx->tx_queue_size = tx_queue_size;
2680	ctx->rx_queue_size = rx_queue_size;
2681
2682	return (0);
2683}
2684
2685static int
2686ena_rss_init_default(struct ena_adapter *adapter)
2687{
2688	struct ena_com_dev *ena_dev = adapter->ena_dev;
2689	device_t dev = adapter->pdev;
2690	int qid, rc, i;
2691
2692	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
2693	if (unlikely(rc != 0)) {
2694		device_printf(dev, "Cannot init indirect table\n");
2695		return (rc);
2696	}
2697
2698	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
2699		qid = i % adapter->num_io_queues;
2700		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
2701		    ENA_IO_RXQ_IDX(qid));
2702		if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2703			device_printf(dev, "Cannot fill indirect table\n");
2704			goto err_rss_destroy;
2705		}
2706	}
2707
2708	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
2709	    ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
2710	if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2711		device_printf(dev, "Cannot fill hash function\n");
2712		goto err_rss_destroy;
2713	}
2714
2715	rc = ena_com_set_default_hash_ctrl(ena_dev);
2716	if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2717		device_printf(dev, "Cannot fill hash control\n");
2718		goto err_rss_destroy;
2719	}
2720
2721	return (0);
2722
2723err_rss_destroy:
2724	ena_com_rss_destroy(ena_dev);
2725	return (rc);
2726}
2727
2728static void
2729ena_rss_init_default_deferred(void *arg)
2730{
2731	struct ena_adapter *adapter;
2732	devclass_t dc;
2733	int max;
2734	int rc;
2735
2736	dc = devclass_find("ena");
2737	if (unlikely(dc == NULL)) {
2738		ena_trace(NULL, ENA_ALERT, "No devclass ena\n");
2739		return;
2740	}
2741
2742	max = devclass_get_maxunit(dc);
2743	while (max-- >= 0) {
2744		adapter = devclass_get_softc(dc, max);
2745		if (adapter != NULL) {
2746			rc = ena_rss_init_default(adapter);
2747			ENA_FLAG_SET_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
2748			if (unlikely(rc != 0)) {
2749				device_printf(adapter->pdev,
2750				    "WARNING: RSS was not properly initialized,"
2751				    " it will affect bandwidth\n");
2752				ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
2753			}
2754		}
2755	}
2756}
2757SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL);
2758
2759static void
2760ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev)
2761{
2762	struct ena_admin_host_info *host_info;
2763	uintptr_t rid;
2764	int rc;
2765
2766	/* Allocate only the host info */
2767	rc = ena_com_allocate_host_info(ena_dev);
2768	if (unlikely(rc != 0)) {
2769		ena_trace(NULL, ENA_ALERT, "Cannot allocate host info\n");
2770		return;
2771	}
2772
2773	host_info = ena_dev->host_attr.host_info;
2774
2775	if (pci_get_id(dev, PCI_ID_RID, &rid) == 0)
2776		host_info->bdf = rid;
2777	host_info->os_type = ENA_ADMIN_OS_FREEBSD;
2778	host_info->kernel_ver = osreldate;
2779
2780	sprintf(host_info->kernel_ver_str, "%d", osreldate);
2781	host_info->os_dist = 0;
2782	strncpy(host_info->os_dist_str, osrelease,
2783	    sizeof(host_info->os_dist_str) - 1);
2784
2785	host_info->driver_version =
2786		(DRV_MODULE_VER_MAJOR) |
2787		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2788		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
2789	host_info->num_cpus = mp_ncpus;
2790	host_info->driver_supported_features =
2791	    ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;
2792
2793	rc = ena_com_set_host_attributes(ena_dev);
2794	if (unlikely(rc != 0)) {
2795		if (rc == EOPNOTSUPP)
2796			ena_trace(NULL, ENA_WARNING, "Cannot set host attributes\n");
2797		else
2798			ena_trace(NULL, ENA_ALERT, "Cannot set host attributes\n");
2799
2800		goto err;
2801	}
2802
2803	return;
2804
2805err:
2806	ena_com_delete_host_info(ena_dev);
2807}
2808
2809static int
2810ena_device_init(struct ena_adapter *adapter, device_t pdev,
2811    struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
2812{
2813	struct ena_com_dev* ena_dev = adapter->ena_dev;
2814	bool readless_supported;
2815	uint32_t aenq_groups;
2816	int dma_width;
2817	int rc;
2818
2819	rc = ena_com_mmio_reg_read_request_init(ena_dev);
2820	if (unlikely(rc != 0)) {
2821		device_printf(pdev, "failed to init mmio read less\n");
2822		return (rc);
2823	}
2824
2825	/*
2826	 * The PCIe configuration space revision id indicate if mmio reg
2827	 * read is disabled
2828	 */
2829	readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
2830	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2831
2832	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2833	if (unlikely(rc != 0)) {
2834		device_printf(pdev, "Can not reset device\n");
2835		goto err_mmio_read_less;
2836	}
2837
2838	rc = ena_com_validate_version(ena_dev);
2839	if (unlikely(rc != 0)) {
2840		device_printf(pdev, "device version is too low\n");
2841		goto err_mmio_read_less;
2842	}
2843
2844	dma_width = ena_com_get_dma_width(ena_dev);
2845	if (unlikely(dma_width < 0)) {
2846		device_printf(pdev, "Invalid dma width value %d", dma_width);
2847		rc = dma_width;
2848		goto err_mmio_read_less;
2849	}
2850	adapter->dma_width = dma_width;
2851
2852	/* ENA admin level init */
2853	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
2854	if (unlikely(rc != 0)) {
2855		device_printf(pdev,
2856		    "Can not initialize ena admin queue with device\n");
2857		goto err_mmio_read_less;
2858	}
2859
2860	/*
2861	 * To enable the msix interrupts the driver needs to know the number
2862	 * of queues. So the driver uses polling mode to retrieve this
2863	 * information
2864	 */
2865	ena_com_set_admin_polling_mode(ena_dev, true);
2866
2867	ena_config_host_info(ena_dev, pdev);
2868
2869	/* Get Device Attributes */
2870	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2871	if (unlikely(rc != 0)) {
2872		device_printf(pdev,
2873		    "Cannot get attribute for ena device rc: %d\n", rc);
2874		goto err_admin_init;
2875	}
2876
2877	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2878	    BIT(ENA_ADMIN_FATAL_ERROR) |
2879	    BIT(ENA_ADMIN_WARNING) |
2880	    BIT(ENA_ADMIN_NOTIFICATION) |
2881	    BIT(ENA_ADMIN_KEEP_ALIVE);
2882
2883	aenq_groups &= get_feat_ctx->aenq.supported_groups;
2884	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2885	if (unlikely(rc != 0)) {
2886		device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc);
2887		goto err_admin_init;
2888	}
2889
2890	*wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2891
2892	return (0);
2893
2894err_admin_init:
2895	ena_com_delete_host_info(ena_dev);
2896	ena_com_admin_destroy(ena_dev);
2897err_mmio_read_less:
2898	ena_com_mmio_reg_read_request_destroy(ena_dev);
2899
2900	return (rc);
2901}
2902
2903static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
2904{
2905	struct ena_com_dev *ena_dev = adapter->ena_dev;
2906	int rc;
2907
2908	rc = ena_enable_msix(adapter);
2909	if (unlikely(rc != 0)) {
2910		device_printf(adapter->pdev, "Error with MSI-X enablement\n");
2911		return (rc);
2912	}
2913
2914	ena_setup_mgmnt_intr(adapter);
2915
2916	rc = ena_request_mgmnt_irq(adapter);
2917	if (unlikely(rc != 0)) {
2918		device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n");
2919		goto err_disable_msix;
2920	}
2921
2922	ena_com_set_admin_polling_mode(ena_dev, false);
2923
2924	ena_com_admin_aenq_enable(ena_dev);
2925
2926	return (0);
2927
2928err_disable_msix:
2929	ena_disable_msix(adapter);
2930
2931	return (rc);
2932}
2933
2934/* Function called on ENA_ADMIN_KEEP_ALIVE event */
2935static void ena_keep_alive_wd(void *adapter_data,
2936    struct ena_admin_aenq_entry *aenq_e)
2937{
2938	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
2939	struct ena_admin_aenq_keep_alive_desc *desc;
2940	sbintime_t stime;
2941	uint64_t rx_drops;
2942	uint64_t tx_drops;
2943
2944	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
2945
2946	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
2947	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
2948	counter_u64_zero(adapter->hw_stats.rx_drops);
2949	counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
2950	counter_u64_zero(adapter->hw_stats.tx_drops);
2951	counter_u64_add(adapter->hw_stats.tx_drops, tx_drops);
2952
2953	stime = getsbinuptime();
2954	atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
2955}
2956
2957/* Check for keep alive expiration */
2958static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2959{
2960	sbintime_t timestamp, time;
2961
2962	if (adapter->wd_active == 0)
2963		return;
2964
2965	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2966		return;
2967
2968	timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
2969	time = getsbinuptime() - timestamp;
2970	if (unlikely(time > adapter->keep_alive_timeout)) {
2971		device_printf(adapter->pdev,
2972		    "Keep alive watchdog timeout.\n");
2973		counter_u64_add(adapter->dev_stats.wd_expired, 1);
2974		ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
2975	}
2976}
2977
2978/* Check if admin queue is enabled */
2979static void check_for_admin_com_state(struct ena_adapter *adapter)
2980{
2981	if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) ==
2982	    false)) {
2983		device_printf(adapter->pdev,
2984		    "ENA admin queue is not in running state!\n");
2985		counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
2986		ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
2987	}
2988}
2989
2990static int
2991check_for_rx_interrupt_queue(struct ena_adapter *adapter,
2992    struct ena_ring *rx_ring)
2993{
2994	if (likely(rx_ring->first_interrupt))
2995		return (0);
2996
2997	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
2998		return (0);
2999
3000	rx_ring->no_interrupt_event_cnt++;
3001
3002	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3003		device_printf(adapter->pdev, "Potential MSIX issue on Rx side "
3004		    "Queue = %d. Reset the device\n", rx_ring->qid);
3005		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3006		return (EIO);
3007	}
3008
3009	return (0);
3010}
3011
3012static int
3013check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3014    struct ena_ring *tx_ring)
3015{
3016	struct bintime curtime, time;
3017	struct ena_tx_buffer *tx_buf;
3018	sbintime_t time_offset;
3019	uint32_t missed_tx = 0;
3020	int i, rc = 0;
3021
3022	getbinuptime(&curtime);
3023
3024	for (i = 0; i < tx_ring->ring_size; i++) {
3025		tx_buf = &tx_ring->tx_buffer_info[i];
3026
3027		if (bintime_isset(&tx_buf->timestamp) == 0)
3028			continue;
3029
3030		time = curtime;
3031		bintime_sub(&time, &tx_buf->timestamp);
3032		time_offset = bttosbt(time);
3033
3034		if (unlikely(!tx_ring->first_interrupt &&
3035		    time_offset > 2 * adapter->missing_tx_timeout)) {
3036			/*
3037			 * If after graceful period interrupt is still not
3038			 * received, we schedule a reset.
3039			 */
3040			device_printf(adapter->pdev,
3041			    "Potential MSIX issue on Tx side Queue = %d. "
3042			    "Reset the device\n", tx_ring->qid);
3043			ena_trigger_reset(adapter,
3044			    ENA_REGS_RESET_MISS_INTERRUPT);
3045			return (EIO);
3046		}
3047
3048		/* Check again if packet is still waiting */
3049		if (unlikely(time_offset > adapter->missing_tx_timeout)) {
3050
3051			if (!tx_buf->print_once)
3052				ena_trace(NULL, ENA_WARNING, "Found a Tx that wasn't "
3053				    "completed on time, qid %d, index %d.\n",
3054				    tx_ring->qid, i);
3055
3056			tx_buf->print_once = true;
3057			missed_tx++;
3058		}
3059	}
3060
3061	if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
3062		device_printf(adapter->pdev,
3063		    "The number of lost tx completion is above the threshold "
3064		    "(%d > %d). Reset the device\n",
3065		    missed_tx, adapter->missing_tx_threshold);
3066		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
3067		rc = EIO;
3068	}
3069
3070	counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx);
3071
3072	return (rc);
3073}
3074
3075/*
3076 * Check for TX which were not completed on time.
3077 * Timeout is defined by "missing_tx_timeout".
3078 * Reset will be performed if number of incompleted
3079 * transactions exceeds "missing_tx_threshold".
3080 */
3081static void
3082check_for_missing_completions(struct ena_adapter *adapter)
3083{
3084	struct ena_ring *tx_ring;
3085	struct ena_ring *rx_ring;
3086	int i, budget, rc;
3087
3088	/* Make sure the driver doesn't turn the device in other process */
3089	rmb();
3090
3091	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3092		return;
3093
3094	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3095		return;
3096
3097	if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3098		return;
3099
3100	budget = adapter->missing_tx_max_queues;
3101
3102	for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
3103		tx_ring = &adapter->tx_ring[i];
3104		rx_ring = &adapter->rx_ring[i];
3105
3106		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3107		if (unlikely(rc != 0))
3108			return;
3109
3110		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
3111		if (unlikely(rc != 0))
3112			return;
3113
3114		budget--;
3115		if (budget == 0) {
3116			i++;
3117			break;
3118		}
3119	}
3120
3121	adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
3122}
3123
3124/* trigger rx cleanup after 2 consecutive detections */
3125#define EMPTY_RX_REFILL 2
3126/* For the rare case where the device runs out of Rx descriptors and the
3127 * msix handler failed to refill new Rx descriptors (due to a lack of memory
3128 * for example).
3129 * This case will lead to a deadlock:
3130 * The device won't send interrupts since all the new Rx packets will be dropped
3131 * The msix handler won't allocate new Rx descriptors so the device won't be
3132 * able to send new packets.
3133 *
3134 * When such a situation is detected - execute rx cleanup task in another thread
3135 */
3136static void
3137check_for_empty_rx_ring(struct ena_adapter *adapter)
3138{
3139	struct ena_ring *rx_ring;
3140	int i, refill_required;
3141
3142	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3143		return;
3144
3145	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3146		return;
3147
3148	for (i = 0; i < adapter->num_io_queues; i++) {
3149		rx_ring = &adapter->rx_ring[i];
3150
3151		refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3152		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3153			rx_ring->empty_rx_queue++;
3154
3155			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL)	{
3156				counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3157				    1);
3158
3159				device_printf(adapter->pdev,
3160				    "trigger refill for ring %d\n", i);
3161
3162				taskqueue_enqueue(rx_ring->que->cleanup_tq,
3163				    &rx_ring->que->cleanup_task);
3164				rx_ring->empty_rx_queue = 0;
3165			}
3166		} else {
3167			rx_ring->empty_rx_queue = 0;
3168		}
3169	}
3170}
3171
3172static void ena_update_hints(struct ena_adapter *adapter,
3173			     struct ena_admin_ena_hw_hints *hints)
3174{
3175	struct ena_com_dev *ena_dev = adapter->ena_dev;
3176
3177	if (hints->admin_completion_tx_timeout)
3178		ena_dev->admin_queue.completion_timeout =
3179		    hints->admin_completion_tx_timeout * 1000;
3180
3181	if (hints->mmio_read_timeout)
3182		/* convert to usec */
3183		ena_dev->mmio_read.reg_read_to =
3184		    hints->mmio_read_timeout * 1000;
3185
3186	if (hints->missed_tx_completion_count_threshold_to_reset)
3187		adapter->missing_tx_threshold =
3188		    hints->missed_tx_completion_count_threshold_to_reset;
3189
3190	if (hints->missing_tx_completion_timeout) {
3191		if (hints->missing_tx_completion_timeout ==
3192		     ENA_HW_HINTS_NO_TIMEOUT)
3193			adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3194		else
3195			adapter->missing_tx_timeout =
3196			    SBT_1MS * hints->missing_tx_completion_timeout;
3197	}
3198
3199	if (hints->driver_watchdog_timeout) {
3200		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3201			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3202		else
3203			adapter->keep_alive_timeout =
3204			    SBT_1MS * hints->driver_watchdog_timeout;
3205	}
3206}
3207
3208/**
3209 * ena_copy_eni_metrics - Get and copy ENI metrics from the HW.
3210 * @adapter: ENA device adapter
3211 *
3212 * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics
3213 * and other error codes on failure.
3214 *
3215 * This function can possibly cause a race with other calls to the admin queue.
3216 * Because of that, the caller should either lock this function or make sure
3217 * that there is no race in the current context.
3218 */
3219static int
3220ena_copy_eni_metrics(struct ena_adapter *adapter)
3221{
3222	static bool print_once = true;
3223	int rc;
3224
3225	rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics);
3226
3227	if (rc != 0) {
3228		if (rc == ENA_COM_UNSUPPORTED) {
3229			if (print_once) {
3230				device_printf(adapter->pdev,
3231				    "Retrieving ENI metrics is not supported.\n");
3232				print_once = false;
3233			} else {
3234				ena_trace(NULL, ENA_DBG,
3235				    "Retrieving ENI metrics is not supported.\n");
3236			}
3237		} else {
3238			device_printf(adapter->pdev,
3239			    "Failed to get ENI metrics: %d\n", rc);
3240		}
3241	}
3242
3243	return (rc);
3244}
3245
3246static void
3247ena_timer_service(void *data)
3248{
3249	struct ena_adapter *adapter = (struct ena_adapter *)data;
3250	struct ena_admin_host_info *host_info =
3251	    adapter->ena_dev->host_attr.host_info;
3252
3253	check_for_missing_keep_alive(adapter);
3254
3255	check_for_admin_com_state(adapter);
3256
3257	check_for_missing_completions(adapter);
3258
3259	check_for_empty_rx_ring(adapter);
3260
3261	/*
3262	 * User controller update of the ENI metrics.
3263	 * If the delay was set to 0, then the stats shouldn't be updated at
3264	 * all.
3265	 * Otherwise, wait 'eni_metrics_sample_interval' seconds, before
3266	 * updating stats.
3267	 * As timer service is executed every second, it's enough to increment
3268	 * appropriate counter each time the timer service is executed.
3269	 */
3270	if ((adapter->eni_metrics_sample_interval != 0) &&
3271	    (++adapter->eni_metrics_sample_interval_cnt >=
3272	     adapter->eni_metrics_sample_interval)) {
3273		/*
3274		 * There is no race with other admin queue calls, as:
3275		 *   - Timer service runs after interface is up, so all
3276		 *     configuration calls to the admin queue are finished.
3277		 *   - After interface is up, the driver doesn't use (at least
3278		 *     for now) other functions writing to the admin queue.
3279		 *
3280		 * It may change in the future, so in that situation, the lock
3281		 * will be needed. ENA_LOCK_*() cannot be used for that purpose,
3282		 * as callout ena_timer_service is protected by them. It could
3283		 * lead to the deadlock if callout_drain() would hold the lock
3284		 * before ena_copy_eni_metrics() was executed. It's advised to
3285		 * use separate lock in that situation which will be used only
3286		 * for the admin queue.
3287		 */
3288		(void)ena_copy_eni_metrics(adapter);
3289		adapter->eni_metrics_sample_interval_cnt = 0;
3290	}
3291
3292
3293	if (host_info != NULL)
3294		ena_update_host_info(host_info, adapter->ifp);
3295
3296	if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3297		device_printf(adapter->pdev, "Trigger reset is on\n");
3298		taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
3299		return;
3300	}
3301
3302	/*
3303	 * Schedule another timeout one second from now.
3304	 */
3305	callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0);
3306}
3307
3308void
3309ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3310{
3311	if_t ifp = adapter->ifp;
3312	struct ena_com_dev *ena_dev = adapter->ena_dev;
3313	bool dev_up;
3314
3315	if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))
3316		return;
3317
3318	if_link_state_change(ifp, LINK_STATE_DOWN);
3319
3320	callout_drain(&adapter->timer_service);
3321
3322	dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
3323	if (dev_up)
3324		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3325
3326	if (!graceful)
3327		ena_com_set_admin_running_state(ena_dev, false);
3328
3329	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3330		ena_down(adapter);
3331
3332	/*
3333	 * Stop the device from sending AENQ events (if the device was up, and
3334	 * the trigger reset was on, ena_down already performs device reset)
3335	 */
3336	if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up))
3337		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3338
3339	ena_free_mgmnt_irq(adapter);
3340
3341	ena_disable_msix(adapter);
3342
3343	/*
3344	 * IO rings resources should be freed because `ena_restore_device()`
3345	 * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
3346	 * vectors. The amount of MSIX vectors after destroy-restore may be
3347	 * different than before. Therefore, IO rings resources should be
3348	 * established from scratch each time.
3349	 */
3350	ena_free_all_io_rings_resources(adapter);
3351
3352	ena_com_abort_admin_commands(ena_dev);
3353
3354	ena_com_wait_for_abort_completion(ena_dev);
3355
3356	ena_com_admin_destroy(ena_dev);
3357
3358	ena_com_mmio_reg_read_request_destroy(ena_dev);
3359
3360	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3361
3362	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
3363	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3364}
3365
3366static int
3367ena_device_validate_params(struct ena_adapter *adapter,
3368    struct ena_com_dev_get_features_ctx *get_feat_ctx)
3369{
3370
3371	if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
3372	    ETHER_ADDR_LEN) != 0) {
3373		device_printf(adapter->pdev,
3374		    "Error, mac address are different\n");
3375		return (EINVAL);
3376	}
3377
3378	if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
3379		device_printf(adapter->pdev,
3380		    "Error, device max mtu is smaller than ifp MTU\n");
3381		return (EINVAL);
3382	}
3383
3384	return 0;
3385}
3386
3387int
3388ena_restore_device(struct ena_adapter *adapter)
3389{
3390	struct ena_com_dev_get_features_ctx get_feat_ctx;
3391	struct ena_com_dev *ena_dev = adapter->ena_dev;
3392	if_t ifp = adapter->ifp;
3393	device_t dev = adapter->pdev;
3394	int wd_active;
3395	int rc;
3396
3397	ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3398
3399	rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
3400	if (rc != 0) {
3401		device_printf(dev, "Cannot initialize device\n");
3402		goto err;
3403	}
3404	/*
3405	 * Only enable WD if it was enabled before reset, so it won't override
3406	 * value set by the user by the sysctl.
3407	 */
3408	if (adapter->wd_active != 0)
3409		adapter->wd_active = wd_active;
3410
3411	rc = ena_device_validate_params(adapter, &get_feat_ctx);
3412	if (rc != 0) {
3413		device_printf(dev, "Validation of device parameters failed\n");
3414		goto err_device_destroy;
3415	}
3416
3417	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3418	/* Make sure we don't have a race with AENQ Links state handler */
3419	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
3420		if_link_state_change(ifp, LINK_STATE_UP);
3421
3422	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3423	if (rc != 0) {
3424		device_printf(dev, "Enable MSI-X failed\n");
3425		goto err_device_destroy;
3426	}
3427
3428	/*
3429	 * Effective value of used MSIX vectors should be the same as before
3430	 * `ena_destroy_device()`, if possible, or closest to it if less vectors
3431	 * are available.
3432	 */
3433	if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
3434		adapter->num_io_queues =
3435		    adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3436
3437	/* Re-initialize rings basic information */
3438	ena_init_io_rings(adapter);
3439
3440	/* If the interface was up before the reset bring it up */
3441	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
3442		rc = ena_up(adapter);
3443		if (rc != 0) {
3444			device_printf(dev, "Failed to create I/O queues\n");
3445			goto err_disable_msix;
3446		}
3447	}
3448
3449	/* Indicate that device is running again and ready to work */
3450	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3451
3452	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
3453		/*
3454		 * As the AENQ handlers weren't executed during reset because
3455		 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the
3456		 * timestamp must be updated again That will prevent next reset
3457		 * caused by missing keep alive.
3458		 */
3459		adapter->keep_alive_timestamp = getsbinuptime();
3460		callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
3461		    ena_timer_service, (void *)adapter, 0);
3462	}
3463	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3464
3465	device_printf(dev,
3466	    "Device reset completed successfully, Driver info: %s\n", ena_version);
3467
3468	return (rc);
3469
3470err_disable_msix:
3471	ena_free_mgmnt_irq(adapter);
3472	ena_disable_msix(adapter);
3473err_device_destroy:
3474	ena_com_abort_admin_commands(ena_dev);
3475	ena_com_wait_for_abort_completion(ena_dev);
3476	ena_com_admin_destroy(ena_dev);
3477	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3478	ena_com_mmio_reg_read_request_destroy(ena_dev);
3479err:
3480	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3481	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3482	device_printf(dev, "Reset attempt failed. Can not reset the device\n");
3483
3484	return (rc);
3485}
3486
3487static void
3488ena_reset_task(void *arg, int pending)
3489{
3490	struct ena_adapter *adapter = (struct ena_adapter *)arg;
3491
3492	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3493		device_printf(adapter->pdev,
3494		    "device reset scheduled but trigger_reset is off\n");
3495		return;
3496	}
3497
3498	ENA_LOCK_LOCK(adapter);
3499	ena_destroy_device(adapter, false);
3500	ena_restore_device(adapter);
3501	ENA_LOCK_UNLOCK(adapter);
3502}
3503
3504/**
3505 * ena_attach - Device Initialization Routine
3506 * @pdev: device information struct
3507 *
3508 * Returns 0 on success, otherwise on failure.
3509 *
3510 * ena_attach initializes an adapter identified by a device structure.
3511 * The OS initialization, configuring of the adapter private structure,
3512 * and a hardware reset occur.
3513 **/
3514static int
3515ena_attach(device_t pdev)
3516{
3517	struct ena_com_dev_get_features_ctx get_feat_ctx;
3518	struct ena_llq_configurations llq_config;
3519	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
3520	static int version_printed;
3521	struct ena_adapter *adapter;
3522	struct ena_com_dev *ena_dev = NULL;
3523	uint32_t max_num_io_queues;
3524	int msix_rid;
3525	int rid, rc;
3526
3527	adapter = device_get_softc(pdev);
3528	adapter->pdev = pdev;
3529
3530	ENA_LOCK_INIT(adapter);
3531
3532	/*
3533	 * Set up the timer service - driver is responsible for avoiding
3534	 * concurrency, as the callout won't be using any locking inside.
3535	 */
3536	callout_init(&adapter->timer_service, true);
3537	adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO;
3538	adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO;
3539	adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES;
3540	adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD;
3541
3542	if (version_printed++ == 0)
3543		device_printf(pdev, "%s\n", ena_version);
3544
3545	/* Allocate memory for ena_dev structure */
3546	ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
3547	    M_WAITOK | M_ZERO);
3548
3549	adapter->ena_dev = ena_dev;
3550	ena_dev->dmadev = pdev;
3551
3552	rid = PCIR_BAR(ENA_REG_BAR);
3553	adapter->memory = NULL;
3554	adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
3555	    &rid, RF_ACTIVE);
3556	if (unlikely(adapter->registers == NULL)) {
3557		device_printf(pdev,
3558		    "unable to allocate bus resource: registers!\n");
3559		rc = ENOMEM;
3560		goto err_dev_free;
3561	}
3562
3563	/* MSIx vector table may reside on BAR0 with registers or on BAR1. */
3564	msix_rid = pci_msix_table_bar(pdev);
3565	if (msix_rid != rid) {
3566		adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
3567		    &msix_rid, RF_ACTIVE);
3568		if (unlikely(adapter->msix == NULL)) {
3569			device_printf(pdev,
3570			    "unable to allocate bus resource: msix!\n");
3571			rc = ENOMEM;
3572			goto err_pci_free;
3573		}
3574		adapter->msix_rid = msix_rid;
3575	}
3576
3577	ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
3578	    M_WAITOK | M_ZERO);
3579
3580	/* Store register resources */
3581	((struct ena_bus*)(ena_dev->bus))->reg_bar_t =
3582	    rman_get_bustag(adapter->registers);
3583	((struct ena_bus*)(ena_dev->bus))->reg_bar_h =
3584	    rman_get_bushandle(adapter->registers);
3585
3586	if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) {
3587		device_printf(pdev, "failed to pmap registers bar\n");
3588		rc = ENXIO;
3589		goto err_bus_free;
3590	}
3591
3592	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3593
3594	/* Initially clear all the flags */
3595	ENA_FLAG_ZERO(adapter);
3596
3597	/* Device initialization */
3598	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
3599	if (unlikely(rc != 0)) {
3600		device_printf(pdev, "ENA device init failed! (err: %d)\n", rc);
3601		rc = ENXIO;
3602		goto err_bus_free;
3603	}
3604
3605	set_default_llq_configurations(&llq_config);
3606
3607#if defined(__arm__) || defined(__aarch64__)
3608	/*
3609	 * Force LLQ disable, as the driver is not supporting WC enablement
3610	 * on the ARM architecture. Using LLQ without WC would affect
3611	 * performance in a negative way.
3612	 */
3613	ena_dev->supported_features &= ~(1 << ENA_ADMIN_LLQ);
3614#endif
3615	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
3616	     &llq_config);
3617	if (unlikely(rc != 0)) {
3618		device_printf(pdev, "failed to set placement policy\n");
3619		goto err_com_free;
3620	}
3621
3622	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3623		adapter->disable_meta_caching =
3624		    !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
3625		    BIT(ENA_ADMIN_DISABLE_META_CACHING));
3626
3627	adapter->keep_alive_timestamp = getsbinuptime();
3628
3629	adapter->tx_offload_cap = get_feat_ctx.offload.tx;
3630
3631	memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
3632	    ETHER_ADDR_LEN);
3633
3634	calc_queue_ctx.pdev = pdev;
3635	calc_queue_ctx.ena_dev = ena_dev;
3636	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
3637
3638	/* Calculate initial and maximum IO queue number and size */
3639	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
3640	    &get_feat_ctx);
3641	rc = ena_calc_io_queue_size(&calc_queue_ctx);
3642	if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
3643		rc = EFAULT;
3644		goto err_com_free;
3645	}
3646
3647	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
3648	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
3649	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
3650	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
3651	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
3652	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
3653
3654	adapter->max_num_io_queues = max_num_io_queues;
3655
3656	adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;
3657
3658	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
3659
3660	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3661
3662	/* set up dma tags for rx and tx buffers */
3663	rc = ena_setup_tx_dma_tag(adapter);
3664	if (unlikely(rc != 0)) {
3665		device_printf(pdev, "Failed to create TX DMA tag\n");
3666		goto err_com_free;
3667	}
3668
3669	rc = ena_setup_rx_dma_tag(adapter);
3670	if (unlikely(rc != 0)) {
3671		device_printf(pdev, "Failed to create RX DMA tag\n");
3672		goto err_tx_tag_free;
3673	}
3674
3675	/*
3676	 * The amount of requested MSIX vectors is equal to
3677	 * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant
3678	 * number of admin queue interrupts. The former is initially determined
3679	 * by HW capabilities (see `ena_calc_max_io_queue_num())` but may not be
3680	 * achieved if there are not enough system resources. By default, the
3681	 * number of effectively used IO queues is the same but later on it can
3682	 * be limited by the user using sysctl interface.
3683	 */
3684	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3685	if (unlikely(rc != 0)) {
3686		device_printf(pdev,
3687		    "Failed to enable and set the admin interrupts\n");
3688		goto err_io_free;
3689	}
3690	/* By default all of allocated MSIX vectors are actively used */
3691	adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3692
3693	/* initialize rings basic information */
3694	ena_init_io_rings(adapter);
3695
3696	/* setup network interface */
3697	rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
3698	if (unlikely(rc != 0)) {
3699		device_printf(pdev, "Error with network interface setup\n");
3700		goto err_msix_free;
3701	}
3702
3703	/* Initialize reset task queue */
3704	TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
3705	adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
3706	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
3707	taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET,
3708	    "%s rstq", device_get_nameunit(adapter->pdev));
3709
3710	/* Initialize statistics */
3711	ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
3712	    sizeof(struct ena_stats_dev));
3713	ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
3714	    sizeof(struct ena_hw_stats));
3715	ena_sysctl_add_nodes(adapter);
3716
3717#ifdef DEV_NETMAP
3718	rc = ena_netmap_attach(adapter);
3719	if (rc != 0) {
3720		device_printf(pdev, "netmap attach failed: %d\n", rc);
3721		goto err_detach;
3722	}
3723#endif /* DEV_NETMAP */
3724
3725	/* Tell the stack that the interface is not active */
3726	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
3727	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3728
3729	return (0);
3730
3731#ifdef DEV_NETMAP
3732err_detach:
3733	ether_ifdetach(adapter->ifp);
3734#endif /* DEV_NETMAP */
3735err_msix_free:
3736	ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
3737	ena_free_mgmnt_irq(adapter);
3738	ena_disable_msix(adapter);
3739err_io_free:
3740	ena_free_all_io_rings_resources(adapter);
3741	ena_free_rx_dma_tag(adapter);
3742err_tx_tag_free:
3743	ena_free_tx_dma_tag(adapter);
3744err_com_free:
3745	ena_com_admin_destroy(ena_dev);
3746	ena_com_delete_host_info(ena_dev);
3747	ena_com_mmio_reg_read_request_destroy(ena_dev);
3748err_bus_free:
3749	free(ena_dev->bus, M_DEVBUF);
3750err_pci_free:
3751	ena_free_pci_resources(adapter);
3752err_dev_free:
3753	free(ena_dev, M_DEVBUF);
3754
3755	return (rc);
3756}
3757
3758/**
3759 * ena_detach - Device Removal Routine
3760 * @pdev: device information struct
3761 *
3762 * ena_detach is called by the device subsystem to alert the driver
3763 * that it should release a PCI device.
3764 **/
3765static int
3766ena_detach(device_t pdev)
3767{
3768	struct ena_adapter *adapter = device_get_softc(pdev);
3769	struct ena_com_dev *ena_dev = adapter->ena_dev;
3770	int rc;
3771
3772	/* Make sure VLANS are not using driver */
3773	if (adapter->ifp->if_vlantrunk != NULL) {
3774		device_printf(adapter->pdev ,"VLAN is in use, detach first\n");
3775		return (EBUSY);
3776	}
3777
3778	ether_ifdetach(adapter->ifp);
3779
3780	/* Stop timer service */
3781	ENA_LOCK_LOCK(adapter);
3782	callout_drain(&adapter->timer_service);
3783	ENA_LOCK_UNLOCK(adapter);
3784
3785	/* Release reset task */
3786	while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
3787		taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
3788	taskqueue_free(adapter->reset_tq);
3789
3790	ENA_LOCK_LOCK(adapter);
3791	ena_down(adapter);
3792	ena_destroy_device(adapter, true);
3793	ENA_LOCK_UNLOCK(adapter);
3794
3795#ifdef DEV_NETMAP
3796	netmap_detach(adapter->ifp);
3797#endif /* DEV_NETMAP */
3798
3799	ena_free_counters((counter_u64_t *)&adapter->hw_stats,
3800	    sizeof(struct ena_hw_stats));
3801	ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3802	    sizeof(struct ena_stats_dev));
3803
3804	rc = ena_free_rx_dma_tag(adapter);
3805	if (unlikely(rc != 0))
3806		device_printf(adapter->pdev,
3807		    "Unmapped RX DMA tag associations\n");
3808
3809	rc = ena_free_tx_dma_tag(adapter);
3810	if (unlikely(rc != 0))
3811		device_printf(adapter->pdev,
3812		    "Unmapped TX DMA tag associations\n");
3813
3814	ena_free_irqs(adapter);
3815
3816	ena_free_pci_resources(adapter);
3817
3818	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
3819		ena_com_rss_destroy(ena_dev);
3820
3821	ena_com_delete_host_info(ena_dev);
3822
3823	ENA_LOCK_DESTROY(adapter);
3824
3825	if_free(adapter->ifp);
3826
3827	if (ena_dev->bus != NULL)
3828		free(ena_dev->bus, M_DEVBUF);
3829
3830	if (ena_dev != NULL)
3831		free(ena_dev, M_DEVBUF);
3832
3833	return (bus_generic_detach(pdev));
3834}
3835
3836/******************************************************************************
3837 ******************************** AENQ Handlers *******************************
3838 *****************************************************************************/
3839/**
3840 * ena_update_on_link_change:
3841 * Notify the network interface about the change in link status
3842 **/
3843static void
3844ena_update_on_link_change(void *adapter_data,
3845    struct ena_admin_aenq_entry *aenq_e)
3846{
3847	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3848	struct ena_admin_aenq_link_change_desc *aenq_desc;
3849	int status;
3850	if_t ifp;
3851
3852	aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3853	ifp = adapter->ifp;
3854	status = aenq_desc->flags &
3855	    ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3856
3857	if (status != 0) {
3858		device_printf(adapter->pdev, "link is UP\n");
3859		ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3860		if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
3861			if_link_state_change(ifp, LINK_STATE_UP);
3862	} else {
3863		device_printf(adapter->pdev, "link is DOWN\n");
3864		if_link_state_change(ifp, LINK_STATE_DOWN);
3865		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3866	}
3867}
3868
3869static void ena_notification(void *adapter_data,
3870    struct ena_admin_aenq_entry *aenq_e)
3871{
3872	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3873	struct ena_admin_ena_hw_hints *hints;
3874
3875	ENA_WARN(NULL, aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3876	    "Invalid group(%x) expected %x\n",	aenq_e->aenq_common_desc.group,
3877	    ENA_ADMIN_NOTIFICATION);
3878
3879	switch (aenq_e->aenq_common_desc.syndrome) {
3880	case ENA_ADMIN_UPDATE_HINTS:
3881		hints =
3882		    (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
3883		ena_update_hints(adapter, hints);
3884		break;
3885	default:
3886		device_printf(adapter->pdev,
3887		    "Invalid aenq notification link state %d\n",
3888		    aenq_e->aenq_common_desc.syndrome);
3889	}
3890}
3891
3892/**
3893 * This handler will called for unknown event group or unimplemented handlers
3894 **/
3895static void
3896unimplemented_aenq_handler(void *adapter_data,
3897    struct ena_admin_aenq_entry *aenq_e)
3898{
3899	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3900
3901	device_printf(adapter->pdev,
3902	    "Unknown event was received or event with unimplemented handler\n");
3903}
3904
3905static struct ena_aenq_handlers aenq_handlers = {
3906    .handlers = {
3907	    [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3908	    [ENA_ADMIN_NOTIFICATION] = ena_notification,
3909	    [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3910    },
3911    .unimplemented_handler = unimplemented_aenq_handler
3912};
3913
3914/*********************************************************************
3915 *  FreeBSD Device Interface Entry Points
3916 *********************************************************************/
3917
3918static device_method_t ena_methods[] = {
3919    /* Device interface */
3920    DEVMETHOD(device_probe, ena_probe),
3921    DEVMETHOD(device_attach, ena_attach),
3922    DEVMETHOD(device_detach, ena_detach),
3923    DEVMETHOD_END
3924};
3925
3926static driver_t ena_driver = {
3927    "ena", ena_methods, sizeof(struct ena_adapter),
3928};
3929
3930devclass_t ena_devclass;
3931DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
3932MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
3933    sizeof(ena_vendor_info_array[0]), nitems(ena_vendor_info_array) - 1);
3934MODULE_DEPEND(ena, pci, 1, 1, 1);
3935MODULE_DEPEND(ena, ether, 1, 1, 1);
3936#ifdef DEV_NETMAP
3937MODULE_DEPEND(ena, netmap, 1, 1, 1);
3938#endif /* DEV_NETMAP */
3939
3940/*********************************************************************/
3941