1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 *
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD$");
32
33#include "opt_rss.h"
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/bus.h>
38#include <sys/endian.h>
39#include <sys/kernel.h>
40#include <sys/kthread.h>
41#include <sys/malloc.h>
42#include <sys/mbuf.h>
43#include <sys/module.h>
44#include <sys/rman.h>
45#include <sys/smp.h>
46#include <sys/socket.h>
47#include <sys/sockio.h>
48#include <sys/sysctl.h>
49#include <sys/taskqueue.h>
50#include <sys/time.h>
51#include <sys/eventhandler.h>
52
53#include <machine/bus.h>
54#include <machine/resource.h>
55#include <machine/in_cksum.h>
56
57#include <net/bpf.h>
58#include <net/ethernet.h>
59#include <net/if.h>
60#include <net/if_var.h>
61#include <net/if_arp.h>
62#include <net/if_dl.h>
63#include <net/if_media.h>
64#include <net/if_types.h>
65#include <net/if_vlan_var.h>
66#ifdef RSS
67#include <net/rss_config.h>
68#endif
69
70#include <netinet/in_systm.h>
71#include <netinet/in.h>
72#include <netinet/if_ether.h>
73#include <netinet/ip.h>
74#include <netinet/ip6.h>
75#include <netinet/tcp.h>
76#include <netinet/udp.h>
77
78#include <dev/pci/pcivar.h>
79#include <dev/pci/pcireg.h>
80
81#include <vm/vm.h>
82#include <vm/pmap.h>
83
84#include "ena_datapath.h"
85#include "ena.h"
86#include "ena_sysctl.h"
87
88#ifdef DEV_NETMAP
89#include "ena_netmap.h"
90#endif /* DEV_NETMAP */
91
92/*********************************************************
93 *  Function prototypes
94 *********************************************************/
95static int	ena_probe(device_t);
96static void	ena_intr_msix_mgmnt(void *);
97static void	ena_free_pci_resources(struct ena_adapter *);
98static int	ena_change_mtu(if_t, int);
99static inline void ena_alloc_counters(counter_u64_t *, int);
100static inline void ena_free_counters(counter_u64_t *, int);
101static inline void ena_reset_counters(counter_u64_t *, int);
102static void	ena_init_io_rings_common(struct ena_adapter *,
103    struct ena_ring *, uint16_t);
104static void	ena_init_io_rings_basic(struct ena_adapter *);
105static void	ena_init_io_rings_advanced(struct ena_adapter *);
106static void	ena_init_io_rings(struct ena_adapter *);
107static void	ena_free_io_ring_resources(struct ena_adapter *, unsigned int);
108static void	ena_free_all_io_rings_resources(struct ena_adapter *);
109static int	ena_setup_tx_dma_tag(struct ena_adapter *);
110static int	ena_free_tx_dma_tag(struct ena_adapter *);
111static int	ena_setup_rx_dma_tag(struct ena_adapter *);
112static int	ena_free_rx_dma_tag(struct ena_adapter *);
113static void	ena_release_all_tx_dmamap(struct ena_ring *);
114static int	ena_setup_tx_resources(struct ena_adapter *, int);
115static void	ena_free_tx_resources(struct ena_adapter *, int);
116static int	ena_setup_all_tx_resources(struct ena_adapter *);
117static void	ena_free_all_tx_resources(struct ena_adapter *);
118static int	ena_setup_rx_resources(struct ena_adapter *, unsigned int);
119static void	ena_free_rx_resources(struct ena_adapter *, unsigned int);
120static int	ena_setup_all_rx_resources(struct ena_adapter *);
121static void	ena_free_all_rx_resources(struct ena_adapter *);
122static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *,
123    struct ena_rx_buffer *);
124static void	ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *,
125    struct ena_rx_buffer *);
126static void	ena_free_rx_bufs(struct ena_adapter *, unsigned int);
127static void	ena_refill_all_rx_bufs(struct ena_adapter *);
128static void	ena_free_all_rx_bufs(struct ena_adapter *);
129static void	ena_free_tx_bufs(struct ena_adapter *, unsigned int);
130static void	ena_free_all_tx_bufs(struct ena_adapter *);
131static void	ena_destroy_all_tx_queues(struct ena_adapter *);
132static void	ena_destroy_all_rx_queues(struct ena_adapter *);
133static void	ena_destroy_all_io_queues(struct ena_adapter *);
134static int	ena_create_io_queues(struct ena_adapter *);
135static int	ena_handle_msix(void *);
136static int	ena_enable_msix(struct ena_adapter *);
137static void	ena_setup_mgmnt_intr(struct ena_adapter *);
138static int	ena_setup_io_intr(struct ena_adapter *);
139static int	ena_request_mgmnt_irq(struct ena_adapter *);
140static int	ena_request_io_irq(struct ena_adapter *);
141static void	ena_free_mgmnt_irq(struct ena_adapter *);
142static void	ena_free_io_irq(struct ena_adapter *);
143static void	ena_free_irqs(struct ena_adapter*);
144static void	ena_disable_msix(struct ena_adapter *);
145static void	ena_unmask_all_io_irqs(struct ena_adapter *);
146static int	ena_rss_configure(struct ena_adapter *);
147static int	ena_up_complete(struct ena_adapter *);
148static uint64_t	ena_get_counter(if_t, ift_counter);
149static int	ena_media_change(if_t);
150static void	ena_media_status(if_t, struct ifmediareq *);
151static void	ena_init(void *);
152static int	ena_ioctl(if_t, u_long, caddr_t);
153static int	ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
154static void	ena_update_host_info(struct ena_admin_host_info *, if_t);
155static void	ena_update_hwassist(struct ena_adapter *);
156static int	ena_setup_ifnet(device_t, struct ena_adapter *,
157    struct ena_com_dev_get_features_ctx *);
158static int	ena_enable_wc(struct resource *);
159static int	ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
160    struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *);
161static uint32_t	ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
162    struct ena_com_dev_get_features_ctx *);
163static int	ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
164static int	ena_rss_init_default(struct ena_adapter *);
165static void	ena_rss_init_default_deferred(void *);
166static void	ena_config_host_info(struct ena_com_dev *, device_t);
167static int	ena_attach(device_t);
168static int	ena_detach(device_t);
169static int	ena_device_init(struct ena_adapter *, device_t,
170    struct ena_com_dev_get_features_ctx *, int *);
171static int	ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *);
172static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *);
173static void	unimplemented_aenq_handler(void *,
174    struct ena_admin_aenq_entry *);
175static int	ena_copy_eni_metrics(struct ena_adapter *);
176static void	ena_timer_service(void *);
177
178static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION;
179
180static ena_vendor_info_t ena_vendor_info_array[] = {
181    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0},
182    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF_RSERV0, 0},
183    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0},
184    { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF_RSERV0, 0},
185    /* Last entry */
186    { 0, 0, 0 }
187};
188
189/*
190 * Contains pointers to event handlers, e.g. link state chage.
191 */
192static struct ena_aenq_handlers aenq_handlers;
193
194void
195ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error)
196{
197	if (error != 0)
198		return;
199	*(bus_addr_t *) arg = segs[0].ds_addr;
200}
201
202int
203ena_dma_alloc(device_t dmadev, bus_size_t size,
204    ena_mem_handle_t *dma, int mapflags, bus_size_t alignment)
205{
206	struct ena_adapter* adapter = device_get_softc(dmadev);
207	uint32_t maxsize;
208	uint64_t dma_space_addr;
209	int error;
210
211	maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
212
213	dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width);
214	if (unlikely(dma_space_addr == 0))
215		dma_space_addr = BUS_SPACE_MAXADDR;
216
217	error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */
218	    alignment, 0,     /* alignment, bounds 		*/
219	    dma_space_addr,   /* lowaddr of exclusion window	*/
220	    BUS_SPACE_MAXADDR,/* highaddr of exclusion window	*/
221	    NULL, NULL,	      /* filter, filterarg 		*/
222	    maxsize,	      /* maxsize 			*/
223	    1,		      /* nsegments 			*/
224	    maxsize,	      /* maxsegsize 			*/
225	    BUS_DMA_ALLOCNOW, /* flags 				*/
226	    NULL,	      /* lockfunc 			*/
227	    NULL,	      /* lockarg 			*/
228	    &dma->tag);
229	if (unlikely(error != 0)) {
230		ena_trace(NULL, ENA_ALERT, "bus_dma_tag_create failed: %d\n", error);
231		goto fail_tag;
232	}
233
234	error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr,
235	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map);
236	if (unlikely(error != 0)) {
237		ena_trace(NULL, ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n",
238		    (uintmax_t)size, error);
239		goto fail_map_create;
240	}
241
242	dma->paddr = 0;
243	error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr,
244	    size, ena_dmamap_callback, &dma->paddr, mapflags);
245	if (unlikely((error != 0) || (dma->paddr == 0))) {
246		ena_trace(NULL, ENA_ALERT, ": bus_dmamap_load failed: %d\n", error);
247		goto fail_map_load;
248	}
249
250	bus_dmamap_sync(dma->tag, dma->map,
251	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
252
253	return (0);
254
255fail_map_load:
256	bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
257fail_map_create:
258	bus_dma_tag_destroy(dma->tag);
259fail_tag:
260	dma->tag = NULL;
261	dma->vaddr = NULL;
262	dma->paddr = 0;
263
264	return (error);
265}
266
267/*
268 * This function should generate unique key for the whole driver.
269 * If the key was already genereated in the previous call (for example
270 * for another adapter), then it should be returned instead.
271 */
272void
273ena_rss_key_fill(void *key, size_t size)
274{
275	static bool key_generated;
276	static uint8_t default_key[ENA_HASH_KEY_SIZE];
277
278	KASSERT(size <= ENA_HASH_KEY_SIZE, ("Requested more bytes than ENA RSS key can hold"));
279
280	if (!key_generated) {
281		arc4random_buf(default_key, ENA_HASH_KEY_SIZE);
282		key_generated = true;
283	}
284
285	memcpy(key, default_key, size);
286}
287
288static void
289ena_free_pci_resources(struct ena_adapter *adapter)
290{
291	device_t pdev = adapter->pdev;
292
293	if (adapter->memory != NULL) {
294		bus_release_resource(pdev, SYS_RES_MEMORY,
295		    PCIR_BAR(ENA_MEM_BAR), adapter->memory);
296	}
297
298	if (adapter->registers != NULL) {
299		bus_release_resource(pdev, SYS_RES_MEMORY,
300		    PCIR_BAR(ENA_REG_BAR), adapter->registers);
301	}
302
303	if (adapter->msix != NULL) {
304		bus_release_resource(pdev, SYS_RES_MEMORY,
305		    adapter->msix_rid, adapter->msix);
306	}
307}
308
309static int
310ena_probe(device_t dev)
311{
312	ena_vendor_info_t *ent;
313	char		adapter_name[60];
314	uint16_t	pci_vendor_id = 0;
315	uint16_t	pci_device_id = 0;
316
317	pci_vendor_id = pci_get_vendor(dev);
318	pci_device_id = pci_get_device(dev);
319
320	ent = ena_vendor_info_array;
321	while (ent->vendor_id != 0) {
322		if ((pci_vendor_id == ent->vendor_id) &&
323		    (pci_device_id == ent->device_id)) {
324			ena_trace(NULL, ENA_DBG, "vendor=%x device=%x\n",
325			    pci_vendor_id, pci_device_id);
326
327			sprintf(adapter_name, DEVICE_DESC);
328			device_set_desc_copy(dev, adapter_name);
329			return (BUS_PROBE_DEFAULT);
330		}
331
332		ent++;
333
334	}
335
336	return (ENXIO);
337}
338
339static int
340ena_change_mtu(if_t ifp, int new_mtu)
341{
342	struct ena_adapter *adapter = if_getsoftc(ifp);
343	int rc;
344
345	if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) {
346		device_printf(adapter->pdev, "Invalid MTU setting. "
347		    "new_mtu: %d max mtu: %d min mtu: %d\n",
348		    new_mtu, adapter->max_mtu, ENA_MIN_MTU);
349		return (EINVAL);
350	}
351
352	rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
353	if (likely(rc == 0)) {
354		ena_trace(NULL, ENA_DBG, "set MTU to %d\n", new_mtu);
355		if_setmtu(ifp, new_mtu);
356	} else {
357		device_printf(adapter->pdev, "Failed to set MTU to %d\n",
358		    new_mtu);
359	}
360
361	return (rc);
362}
363
364static inline void
365ena_alloc_counters(counter_u64_t *begin, int size)
366{
367	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
368
369	for (; begin < end; ++begin)
370		*begin = counter_u64_alloc(M_WAITOK);
371}
372
373static inline void
374ena_free_counters(counter_u64_t *begin, int size)
375{
376	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
377
378	for (; begin < end; ++begin)
379		counter_u64_free(*begin);
380}
381
382static inline void
383ena_reset_counters(counter_u64_t *begin, int size)
384{
385	counter_u64_t *end = (counter_u64_t *)((char *)begin + size);
386
387	for (; begin < end; ++begin)
388		counter_u64_zero(*begin);
389}
390
391static void
392ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring,
393    uint16_t qid)
394{
395
396	ring->qid = qid;
397	ring->adapter = adapter;
398	ring->ena_dev = adapter->ena_dev;
399	ring->first_interrupt = false;
400	ring->no_interrupt_event_cnt = 0;
401}
402
403static void
404ena_init_io_rings_basic(struct ena_adapter *adapter)
405{
406	struct ena_com_dev *ena_dev;
407	struct ena_ring *txr, *rxr;
408	struct ena_que *que;
409	int i;
410
411	ena_dev = adapter->ena_dev;
412
413	for (i = 0; i < adapter->num_io_queues; i++) {
414		txr = &adapter->tx_ring[i];
415		rxr = &adapter->rx_ring[i];
416
417		/* TX/RX common ring state */
418		ena_init_io_rings_common(adapter, txr, i);
419		ena_init_io_rings_common(adapter, rxr, i);
420
421		/* TX specific ring state */
422		txr->tx_max_header_size = ena_dev->tx_max_header_size;
423		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
424
425		que = &adapter->que[i];
426		que->adapter = adapter;
427		que->id = i;
428		que->tx_ring = txr;
429		que->rx_ring = rxr;
430
431		txr->que = que;
432		rxr->que = que;
433
434		rxr->empty_rx_queue = 0;
435		rxr->rx_mbuf_sz = ena_mbuf_sz;
436	}
437}
438
439static void
440ena_init_io_rings_advanced(struct ena_adapter *adapter)
441{
442	struct ena_ring *txr, *rxr;
443	int i;
444
445	for (i = 0; i < adapter->num_io_queues; i++) {
446		txr = &adapter->tx_ring[i];
447		rxr = &adapter->rx_ring[i];
448
449		/* Allocate a buf ring */
450		txr->buf_ring_size = adapter->buf_ring_size;
451		txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF,
452		    M_WAITOK, &txr->ring_mtx);
453
454		/* Allocate Tx statistics. */
455		ena_alloc_counters((counter_u64_t *)&txr->tx_stats,
456		    sizeof(txr->tx_stats));
457
458		/* Allocate Rx statistics. */
459		ena_alloc_counters((counter_u64_t *)&rxr->rx_stats,
460		    sizeof(rxr->rx_stats));
461
462		/* Initialize locks */
463		snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)",
464		    device_get_nameunit(adapter->pdev), i);
465		snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)",
466		    device_get_nameunit(adapter->pdev), i);
467
468		mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF);
469	}
470}
471
472static void
473ena_init_io_rings(struct ena_adapter *adapter)
474{
475	/*
476	 * IO rings initialization can be divided into the 2 steps:
477	 *   1. Initialize variables and fields with initial values and copy
478	 *      them from adapter/ena_dev (basic)
479	 *   2. Allocate mutex, counters and buf_ring (advanced)
480	 */
481	ena_init_io_rings_basic(adapter);
482	ena_init_io_rings_advanced(adapter);
483}
484
485static void
486ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid)
487{
488	struct ena_ring *txr = &adapter->tx_ring[qid];
489	struct ena_ring *rxr = &adapter->rx_ring[qid];
490
491	ena_free_counters((counter_u64_t *)&txr->tx_stats,
492	    sizeof(txr->tx_stats));
493	ena_free_counters((counter_u64_t *)&rxr->rx_stats,
494	    sizeof(rxr->rx_stats));
495
496	ENA_RING_MTX_LOCK(txr);
497	drbr_free(txr->br, M_DEVBUF);
498	ENA_RING_MTX_UNLOCK(txr);
499
500	mtx_destroy(&txr->ring_mtx);
501}
502
503static void
504ena_free_all_io_rings_resources(struct ena_adapter *adapter)
505{
506	int i;
507
508	for (i = 0; i < adapter->num_io_queues; i++)
509		ena_free_io_ring_resources(adapter, i);
510
511}
512
513static int
514ena_setup_tx_dma_tag(struct ena_adapter *adapter)
515{
516	int ret;
517
518	/* Create DMA tag for Tx buffers */
519	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev),
520	    1, 0,				  /* alignment, bounds 	     */
521	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
522	    BUS_SPACE_MAXADDR, 			  /* highaddr of excl window */
523	    NULL, NULL,				  /* filter, filterarg 	     */
524	    ENA_TSO_MAXSIZE,			  /* maxsize 		     */
525	    adapter->max_tx_sgl_size - 1,	  /* nsegments 		     */
526	    ENA_TSO_MAXSIZE,			  /* maxsegsize 	     */
527	    0,					  /* flags 		     */
528	    NULL,				  /* lockfunc 		     */
529	    NULL,				  /* lockfuncarg 	     */
530	    &adapter->tx_buf_tag);
531
532	return (ret);
533}
534
535static int
536ena_free_tx_dma_tag(struct ena_adapter *adapter)
537{
538	int ret;
539
540	ret = bus_dma_tag_destroy(adapter->tx_buf_tag);
541
542	if (likely(ret == 0))
543		adapter->tx_buf_tag = NULL;
544
545	return (ret);
546}
547
548static int
549ena_setup_rx_dma_tag(struct ena_adapter *adapter)
550{
551	int ret;
552
553	/* Create DMA tag for Rx buffers*/
554	ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent   */
555	    1, 0,				  /* alignment, bounds 	     */
556	    ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window  */
557	    BUS_SPACE_MAXADDR, 			  /* highaddr of excl window */
558	    NULL, NULL,				  /* filter, filterarg 	     */
559	    ena_mbuf_sz,			  /* maxsize 		     */
560	    adapter->max_rx_sgl_size,		  /* nsegments 		     */
561	    ena_mbuf_sz,			  /* maxsegsize 	     */
562	    0,					  /* flags 		     */
563	    NULL,				  /* lockfunc 		     */
564	    NULL,				  /* lockarg 		     */
565	    &adapter->rx_buf_tag);
566
567	return (ret);
568}
569
570static int
571ena_free_rx_dma_tag(struct ena_adapter *adapter)
572{
573	int ret;
574
575	ret = bus_dma_tag_destroy(adapter->rx_buf_tag);
576
577	if (likely(ret == 0))
578		adapter->rx_buf_tag = NULL;
579
580	return (ret);
581}
582
583static void
584ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
585{
586	struct ena_adapter *adapter = tx_ring->adapter;
587	struct ena_tx_buffer *tx_info;
588	bus_dma_tag_t tx_tag = adapter->tx_buf_tag;;
589	int i;
590#ifdef DEV_NETMAP
591	struct ena_netmap_tx_info *nm_info;
592	int j;
593#endif /* DEV_NETMAP */
594
595	for (i = 0; i < tx_ring->ring_size; ++i) {
596		tx_info = &tx_ring->tx_buffer_info[i];
597#ifdef DEV_NETMAP
598		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
599			nm_info = &tx_info->nm_info;
600			for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) {
601				if (nm_info->map_seg[j] != NULL) {
602					bus_dmamap_destroy(tx_tag,
603					    nm_info->map_seg[j]);
604					nm_info->map_seg[j] = NULL;
605				}
606			}
607		}
608#endif /* DEV_NETMAP */
609		if (tx_info->dmamap != NULL) {
610			bus_dmamap_destroy(tx_tag, tx_info->dmamap);
611			tx_info->dmamap = NULL;
612		}
613	}
614}
615
616/**
617 * ena_setup_tx_resources - allocate Tx resources (Descriptors)
618 * @adapter: network interface device structure
619 * @qid: queue index
620 *
621 * Returns 0 on success, otherwise on failure.
622 **/
623static int
624ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
625{
626	struct ena_que *que = &adapter->que[qid];
627	struct ena_ring *tx_ring = que->tx_ring;
628	int size, i, err;
629#ifdef DEV_NETMAP
630	bus_dmamap_t *map;
631	int j;
632
633	ena_netmap_reset_tx_ring(adapter, qid);
634#endif /* DEV_NETMAP */
635
636	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
637
638	tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
639	if (unlikely(tx_ring->tx_buffer_info == NULL))
640		return (ENOMEM);
641
642	size = sizeof(uint16_t) * tx_ring->ring_size;
643	tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
644	if (unlikely(tx_ring->free_tx_ids == NULL))
645		goto err_buf_info_free;
646
647	size = tx_ring->tx_max_header_size;
648	tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF,
649	    M_NOWAIT | M_ZERO);
650	if (unlikely(tx_ring->push_buf_intermediate_buf == NULL))
651		goto err_tx_ids_free;
652
653	/* Req id stack for TX OOO completions */
654	for (i = 0; i < tx_ring->ring_size; i++)
655		tx_ring->free_tx_ids[i] = i;
656
657	/* Reset TX statistics. */
658	ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats,
659	    sizeof(tx_ring->tx_stats));
660
661	tx_ring->next_to_use = 0;
662	tx_ring->next_to_clean = 0;
663	tx_ring->acum_pkts = 0;
664
665	/* Make sure that drbr is empty */
666	ENA_RING_MTX_LOCK(tx_ring);
667	drbr_flush(adapter->ifp, tx_ring->br);
668	ENA_RING_MTX_UNLOCK(tx_ring);
669
670	/* ... and create the buffer DMA maps */
671	for (i = 0; i < tx_ring->ring_size; i++) {
672		err = bus_dmamap_create(adapter->tx_buf_tag, 0,
673		    &tx_ring->tx_buffer_info[i].dmamap);
674		if (unlikely(err != 0)) {
675			ena_trace(NULL, ENA_ALERT,
676			    "Unable to create Tx DMA map for buffer %d\n",
677			    i);
678			goto err_map_release;
679		}
680
681#ifdef DEV_NETMAP
682		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
683			map = tx_ring->tx_buffer_info[i].nm_info.map_seg;
684			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
685				err = bus_dmamap_create(adapter->tx_buf_tag, 0,
686				    &map[j]);
687				if (unlikely(err != 0)) {
688					ena_trace(NULL, ENA_ALERT, "Unable to create "
689					    "Tx DMA for buffer %d %d\n", i, j);
690					goto err_map_release;
691				}
692			}
693		}
694#endif /* DEV_NETMAP */
695	}
696
697	/* Allocate taskqueues */
698	TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring);
699	tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT,
700	    taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
701	if (unlikely(tx_ring->enqueue_tq == NULL)) {
702		ena_trace(NULL, ENA_ALERT,
703		    "Unable to create taskqueue for enqueue task\n");
704		i = tx_ring->ring_size;
705		goto err_map_release;
706	}
707
708	tx_ring->running = true;
709
710	taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET,
711	    "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu);
712
713	return (0);
714
715err_map_release:
716	ena_release_all_tx_dmamap(tx_ring);
717err_tx_ids_free:
718	free(tx_ring->free_tx_ids, M_DEVBUF);
719	tx_ring->free_tx_ids = NULL;
720err_buf_info_free:
721	free(tx_ring->tx_buffer_info, M_DEVBUF);
722	tx_ring->tx_buffer_info = NULL;
723
724	return (ENOMEM);
725}
726
727/**
728 * ena_free_tx_resources - Free Tx Resources per Queue
729 * @adapter: network interface device structure
730 * @qid: queue index
731 *
732 * Free all transmit software resources
733 **/
734static void
735ena_free_tx_resources(struct ena_adapter *adapter, int qid)
736{
737	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
738#ifdef DEV_NETMAP
739	struct ena_netmap_tx_info *nm_info;
740	int j;
741#endif /* DEV_NETMAP */
742
743	while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task,
744	    NULL))
745		taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
746
747	taskqueue_free(tx_ring->enqueue_tq);
748
749	ENA_RING_MTX_LOCK(tx_ring);
750	/* Flush buffer ring, */
751	drbr_flush(adapter->ifp, tx_ring->br);
752
753	/* Free buffer DMA maps, */
754	for (int i = 0; i < tx_ring->ring_size; i++) {
755		bus_dmamap_sync(adapter->tx_buf_tag,
756		    tx_ring->tx_buffer_info[i].dmamap, BUS_DMASYNC_POSTWRITE);
757		bus_dmamap_unload(adapter->tx_buf_tag,
758		    tx_ring->tx_buffer_info[i].dmamap);
759		bus_dmamap_destroy(adapter->tx_buf_tag,
760		    tx_ring->tx_buffer_info[i].dmamap);
761
762#ifdef DEV_NETMAP
763		if (adapter->ifp->if_capenable & IFCAP_NETMAP) {
764			nm_info = &tx_ring->tx_buffer_info[i].nm_info;
765			for (j = 0; j < ENA_PKT_MAX_BUFS; j++) {
766				if (nm_info->socket_buf_idx[j] != 0) {
767					bus_dmamap_sync(adapter->tx_buf_tag,
768					    nm_info->map_seg[j],
769					    BUS_DMASYNC_POSTWRITE);
770					ena_netmap_unload(adapter,
771					    nm_info->map_seg[j]);
772				}
773				bus_dmamap_destroy(adapter->tx_buf_tag,
774				    nm_info->map_seg[j]);
775				nm_info->socket_buf_idx[j] = 0;
776			}
777		}
778#endif /* DEV_NETMAP */
779
780		m_freem(tx_ring->tx_buffer_info[i].mbuf);
781		tx_ring->tx_buffer_info[i].mbuf = NULL;
782	}
783	ENA_RING_MTX_UNLOCK(tx_ring);
784
785	/* And free allocated memory. */
786	free(tx_ring->tx_buffer_info, M_DEVBUF);
787	tx_ring->tx_buffer_info = NULL;
788
789	free(tx_ring->free_tx_ids, M_DEVBUF);
790	tx_ring->free_tx_ids = NULL;
791
792	free(tx_ring->push_buf_intermediate_buf, M_DEVBUF);
793	tx_ring->push_buf_intermediate_buf = NULL;
794}
795
796/**
797 * ena_setup_all_tx_resources - allocate all queues Tx resources
798 * @adapter: network interface device structure
799 *
800 * Returns 0 on success, otherwise on failure.
801 **/
802static int
803ena_setup_all_tx_resources(struct ena_adapter *adapter)
804{
805	int i, rc;
806
807	for (i = 0; i < adapter->num_io_queues; i++) {
808		rc = ena_setup_tx_resources(adapter, i);
809		if (rc != 0) {
810			device_printf(adapter->pdev,
811			    "Allocation for Tx Queue %u failed\n", i);
812			goto err_setup_tx;
813		}
814	}
815
816	return (0);
817
818err_setup_tx:
819	/* Rewind the index freeing the rings as we go */
820	while (i--)
821		ena_free_tx_resources(adapter, i);
822	return (rc);
823}
824
825/**
826 * ena_free_all_tx_resources - Free Tx Resources for All Queues
827 * @adapter: network interface device structure
828 *
829 * Free all transmit software resources
830 **/
831static void
832ena_free_all_tx_resources(struct ena_adapter *adapter)
833{
834	int i;
835
836	for (i = 0; i < adapter->num_io_queues; i++)
837		ena_free_tx_resources(adapter, i);
838}
839
840/**
841 * ena_setup_rx_resources - allocate Rx resources (Descriptors)
842 * @adapter: network interface device structure
843 * @qid: queue index
844 *
845 * Returns 0 on success, otherwise on failure.
846 **/
847static int
848ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid)
849{
850	struct ena_que *que = &adapter->que[qid];
851	struct ena_ring *rx_ring = que->rx_ring;
852	int size, err, i;
853
854	size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size;
855
856#ifdef DEV_NETMAP
857	ena_netmap_reset_rx_ring(adapter, qid);
858	rx_ring->initialized = false;
859#endif /* DEV_NETMAP */
860
861	/*
862	 * Alloc extra element so in rx path
863	 * we can always prefetch rx_info + 1
864	 */
865	size += sizeof(struct ena_rx_buffer);
866
867	rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
868
869	size = sizeof(uint16_t) * rx_ring->ring_size;
870	rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK);
871
872	for (i = 0; i < rx_ring->ring_size; i++)
873		rx_ring->free_rx_ids[i] = i;
874
875	/* Reset RX statistics. */
876	ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats,
877	    sizeof(rx_ring->rx_stats));
878
879	rx_ring->next_to_clean = 0;
880	rx_ring->next_to_use = 0;
881
882	/* ... and create the buffer DMA maps */
883	for (i = 0; i < rx_ring->ring_size; i++) {
884		err = bus_dmamap_create(adapter->rx_buf_tag, 0,
885		    &(rx_ring->rx_buffer_info[i].map));
886		if (err != 0) {
887			ena_trace(NULL, ENA_ALERT,
888			    "Unable to create Rx DMA map for buffer %d\n", i);
889			goto err_buf_info_unmap;
890		}
891	}
892
893	/* Create LRO for the ring */
894	if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) {
895		int err = tcp_lro_init(&rx_ring->lro);
896		if (err != 0) {
897			device_printf(adapter->pdev,
898			    "LRO[%d] Initialization failed!\n", qid);
899		} else {
900			ena_trace(NULL, ENA_INFO,
901			    "RX Soft LRO[%d] Initialized\n", qid);
902			rx_ring->lro.ifp = adapter->ifp;
903		}
904	}
905
906	return (0);
907
908err_buf_info_unmap:
909	while (i--) {
910		bus_dmamap_destroy(adapter->rx_buf_tag,
911		    rx_ring->rx_buffer_info[i].map);
912	}
913
914	free(rx_ring->free_rx_ids, M_DEVBUF);
915	rx_ring->free_rx_ids = NULL;
916	free(rx_ring->rx_buffer_info, M_DEVBUF);
917	rx_ring->rx_buffer_info = NULL;
918	return (ENOMEM);
919}
920
921/**
922 * ena_free_rx_resources - Free Rx Resources
923 * @adapter: network interface device structure
924 * @qid: queue index
925 *
926 * Free all receive software resources
927 **/
928static void
929ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid)
930{
931	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
932
933	/* Free buffer DMA maps, */
934	for (int i = 0; i < rx_ring->ring_size; i++) {
935		bus_dmamap_sync(adapter->rx_buf_tag,
936		    rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD);
937		m_freem(rx_ring->rx_buffer_info[i].mbuf);
938		rx_ring->rx_buffer_info[i].mbuf = NULL;
939		bus_dmamap_unload(adapter->rx_buf_tag,
940		    rx_ring->rx_buffer_info[i].map);
941		bus_dmamap_destroy(adapter->rx_buf_tag,
942		    rx_ring->rx_buffer_info[i].map);
943	}
944
945	/* free LRO resources, */
946	tcp_lro_free(&rx_ring->lro);
947
948	/* free allocated memory */
949	free(rx_ring->rx_buffer_info, M_DEVBUF);
950	rx_ring->rx_buffer_info = NULL;
951
952	free(rx_ring->free_rx_ids, M_DEVBUF);
953	rx_ring->free_rx_ids = NULL;
954}
955
956/**
957 * ena_setup_all_rx_resources - allocate all queues Rx resources
958 * @adapter: network interface device structure
959 *
960 * Returns 0 on success, otherwise on failure.
961 **/
962static int
963ena_setup_all_rx_resources(struct ena_adapter *adapter)
964{
965	int i, rc = 0;
966
967	for (i = 0; i < adapter->num_io_queues; i++) {
968		rc = ena_setup_rx_resources(adapter, i);
969		if (rc != 0) {
970			device_printf(adapter->pdev,
971			    "Allocation for Rx Queue %u failed\n", i);
972			goto err_setup_rx;
973		}
974	}
975	return (0);
976
977err_setup_rx:
978	/* rewind the index freeing the rings as we go */
979	while (i--)
980		ena_free_rx_resources(adapter, i);
981	return (rc);
982}
983
984/**
985 * ena_free_all_rx_resources - Free Rx resources for all queues
986 * @adapter: network interface device structure
987 *
988 * Free all receive software resources
989 **/
990static void
991ena_free_all_rx_resources(struct ena_adapter *adapter)
992{
993	int i;
994
995	for (i = 0; i < adapter->num_io_queues; i++)
996		ena_free_rx_resources(adapter, i);
997}
998
999static inline int
1000ena_alloc_rx_mbuf(struct ena_adapter *adapter,
1001    struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info)
1002{
1003	struct ena_com_buf *ena_buf;
1004	bus_dma_segment_t segs[1];
1005	int nsegs, error;
1006	int mlen;
1007
1008	/* if previous allocated frag is not used */
1009	if (unlikely(rx_info->mbuf != NULL))
1010		return (0);
1011
1012	/* Get mbuf using UMA allocator */
1013	rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
1014	    rx_ring->rx_mbuf_sz);
1015
1016	if (unlikely(rx_info->mbuf == NULL)) {
1017		counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1);
1018		rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1019		if (unlikely(rx_info->mbuf == NULL)) {
1020			counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1);
1021			return (ENOMEM);
1022		}
1023		mlen = MCLBYTES;
1024	} else {
1025		mlen = rx_ring->rx_mbuf_sz;
1026	}
1027	/* Set mbuf length*/
1028	rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen;
1029
1030	/* Map packets for DMA */
1031	ena_trace(NULL, ENA_DBG | ENA_RSC | ENA_RXPTH,
1032	    "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n",
1033	    adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len);
1034	error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map,
1035	    rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
1036	if (unlikely((error != 0) || (nsegs != 1))) {
1037		ena_trace(NULL, ENA_WARNING, "failed to map mbuf, error: %d, "
1038		    "nsegs: %d\n", error, nsegs);
1039		counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1);
1040		goto exit;
1041
1042	}
1043
1044	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD);
1045
1046	ena_buf = &rx_info->ena_buf;
1047	ena_buf->paddr = segs[0].ds_addr;
1048	ena_buf->len = mlen;
1049
1050	ena_trace(NULL, ENA_DBG | ENA_RSC | ENA_RXPTH,
1051	    "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n",
1052	    rx_info->mbuf, rx_info,ena_buf->len, (uintmax_t)ena_buf->paddr);
1053
1054	return (0);
1055
1056exit:
1057	m_freem(rx_info->mbuf);
1058	rx_info->mbuf = NULL;
1059	return (EFAULT);
1060}
1061
1062static void
1063ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring,
1064    struct ena_rx_buffer *rx_info)
1065{
1066
1067	if (rx_info->mbuf == NULL) {
1068		ena_trace(NULL, ENA_WARNING, "Trying to free unallocated buffer\n");
1069		return;
1070	}
1071
1072	bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map,
1073	    BUS_DMASYNC_POSTREAD);
1074	bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map);
1075	m_freem(rx_info->mbuf);
1076	rx_info->mbuf = NULL;
1077}
1078
1079/**
1080 * ena_refill_rx_bufs - Refills ring with descriptors
1081 * @rx_ring: the ring which we want to feed with free descriptors
1082 * @num: number of descriptors to refill
1083 * Refills the ring with newly allocated DMA-mapped mbufs for receiving
1084 **/
1085int
1086ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
1087{
1088	struct ena_adapter *adapter = rx_ring->adapter;
1089	uint16_t next_to_use, req_id;
1090	uint32_t i;
1091	int rc;
1092
1093	ena_trace(NULL, ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d\n",
1094	    rx_ring->qid);
1095
1096	next_to_use = rx_ring->next_to_use;
1097
1098	for (i = 0; i < num; i++) {
1099		struct ena_rx_buffer *rx_info;
1100
1101		ena_trace(NULL, ENA_DBG | ENA_RXPTH | ENA_RSC,
1102		    "RX buffer - next to use: %d\n", next_to_use);
1103
1104		req_id = rx_ring->free_rx_ids[next_to_use];
1105		rx_info = &rx_ring->rx_buffer_info[req_id];
1106#ifdef DEV_NETMAP
1107		if (ena_rx_ring_in_netmap(adapter, rx_ring->qid))
1108			rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info);
1109		else
1110#endif /* DEV_NETMAP */
1111			rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
1112		if (unlikely(rc != 0)) {
1113			ena_trace(NULL, ENA_WARNING,
1114			    "failed to alloc buffer for rx queue %d\n",
1115			    rx_ring->qid);
1116			break;
1117		}
1118		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1119		    &rx_info->ena_buf, req_id);
1120		if (unlikely(rc != 0)) {
1121			ena_trace(NULL, ENA_WARNING,
1122			    "failed to add buffer for rx queue %d\n",
1123			    rx_ring->qid);
1124			break;
1125		}
1126		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1127		    rx_ring->ring_size);
1128	}
1129
1130	if (unlikely(i < num)) {
1131		counter_u64_add(rx_ring->rx_stats.refil_partial, 1);
1132		ena_trace(NULL, ENA_WARNING,
1133		     "refilled rx qid %d with only %d mbufs (from %d)\n",
1134		     rx_ring->qid, i, num);
1135	}
1136
1137	if (likely(i != 0))
1138		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1139
1140	rx_ring->next_to_use = next_to_use;
1141	return (i);
1142}
1143
1144int
1145ena_update_buf_ring_size(struct ena_adapter *adapter,
1146    uint32_t new_buf_ring_size)
1147{
1148	uint32_t old_buf_ring_size;
1149	int rc = 0;
1150	bool dev_was_up;
1151
1152	ENA_LOCK_LOCK(adapter);
1153
1154	old_buf_ring_size = adapter->buf_ring_size;
1155	adapter->buf_ring_size = new_buf_ring_size;
1156
1157	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1158	ena_down(adapter);
1159
1160	/* Reconfigure buf ring for all Tx rings. */
1161	ena_free_all_io_rings_resources(adapter);
1162	ena_init_io_rings_advanced(adapter);
1163	if (dev_was_up) {
1164		/*
1165		 * If ena_up() fails, it's not because of recent buf_ring size
1166		 * changes. Because of that, we just want to revert old drbr
1167		 * value and trigger the reset because something else had to
1168		 * go wrong.
1169		 */
1170		rc = ena_up(adapter);
1171		if (unlikely(rc != 0)) {
1172			device_printf(adapter->pdev,
1173			    "Failed to configure device after setting new drbr size: %u. Reverting old value: %u and triggering the reset\n",
1174			    new_buf_ring_size, old_buf_ring_size);
1175
1176			/* Revert old size and trigger the reset */
1177			adapter->buf_ring_size = old_buf_ring_size;
1178			ena_free_all_io_rings_resources(adapter);
1179			ena_init_io_rings_advanced(adapter);
1180
1181			ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
1182			    adapter);
1183			ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
1184
1185		}
1186	}
1187
1188	ENA_LOCK_UNLOCK(adapter);
1189
1190	return (rc);
1191}
1192
1193int
1194ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
1195    uint32_t new_rx_size)
1196{
1197	uint32_t old_tx_size, old_rx_size;
1198	int rc = 0;
1199	bool dev_was_up;
1200
1201	ENA_LOCK_LOCK(adapter);
1202
1203	old_tx_size = adapter->requested_tx_ring_size;
1204	old_rx_size = adapter->requested_rx_ring_size;
1205	adapter->requested_tx_ring_size = new_tx_size;
1206	adapter->requested_rx_ring_size = new_rx_size;
1207
1208	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1209	ena_down(adapter);
1210
1211	/* Configure queues with new size. */
1212	ena_init_io_rings_basic(adapter);
1213	if (dev_was_up) {
1214		rc = ena_up(adapter);
1215		if (unlikely(rc != 0)) {
1216			device_printf(adapter->pdev,
1217			    "Failed to configure device with the new sizes - Tx: %u Rx: %u. Reverting old values - Tx: %u Rx: %u\n",
1218			    new_tx_size, new_rx_size, old_tx_size, old_rx_size);
1219
1220			/* Revert old size. */
1221			adapter->requested_tx_ring_size = old_tx_size;
1222			adapter->requested_rx_ring_size = old_rx_size;
1223			ena_init_io_rings_basic(adapter);
1224
1225			/* And try again. */
1226			rc = ena_up(adapter);
1227			if (unlikely(rc != 0)) {
1228				device_printf(adapter->pdev,
1229				    "Failed to revert old queue sizes. Triggering device reset.\n");
1230				/*
1231				 * If we've failed again, something had to go
1232				 * wrong. After reset, the device should try to
1233				 * go up
1234				 */
1235				ENA_FLAG_SET_ATOMIC(
1236				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1237				ena_trigger_reset(adapter,
1238				    ENA_REGS_RESET_OS_TRIGGER);
1239			}
1240		}
1241	}
1242
1243	ENA_LOCK_UNLOCK(adapter);
1244
1245	return (rc);
1246}
1247
1248static void
1249ena_update_io_rings(struct ena_adapter *adapter, uint32_t num)
1250{
1251	ena_free_all_io_rings_resources(adapter);
1252	/* Force indirection table to be reinitialized */
1253	ena_com_rss_destroy(adapter->ena_dev);
1254
1255	adapter->num_io_queues = num;
1256	ena_init_io_rings(adapter);
1257}
1258
1259/* Caller should sanitize new_num */
1260int
1261ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
1262{
1263	uint32_t old_num;
1264	int rc = 0;
1265	bool dev_was_up;
1266
1267	ENA_LOCK_LOCK(adapter);
1268
1269	dev_was_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
1270	old_num = adapter->num_io_queues;
1271	ena_down(adapter);
1272
1273	ena_update_io_rings(adapter, new_num);
1274
1275	if (dev_was_up) {
1276		rc = ena_up(adapter);
1277		if (unlikely(rc != 0)) {
1278			device_printf(adapter->pdev,
1279			    "Failed to configure device with %u IO queues. "
1280			    "Reverting to previous value: %u\n",
1281			    new_num, old_num);
1282
1283			ena_update_io_rings(adapter, old_num);
1284
1285			rc = ena_up(adapter);
1286			if (unlikely(rc != 0)) {
1287				device_printf(adapter->pdev,
1288				    "Failed to revert to previous setup IO "
1289				    "queues. Triggering device reset.\n");
1290				ENA_FLAG_SET_ATOMIC(
1291				    ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
1292				ena_trigger_reset(adapter,
1293				    ENA_REGS_RESET_OS_TRIGGER);
1294			}
1295		}
1296	}
1297
1298	ENA_LOCK_UNLOCK(adapter);
1299
1300	return (rc);
1301}
1302
1303static void
1304ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid)
1305{
1306	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1307	unsigned int i;
1308
1309	for (i = 0; i < rx_ring->ring_size; i++) {
1310		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1311
1312		if (rx_info->mbuf != NULL)
1313			ena_free_rx_mbuf(adapter, rx_ring, rx_info);
1314#ifdef DEV_NETMAP
1315		if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) &&
1316		    (adapter->ifp->if_capenable & IFCAP_NETMAP)) {
1317			if (rx_info->netmap_buf_idx != 0)
1318				ena_netmap_free_rx_slot(adapter, rx_ring,
1319				    rx_info);
1320		}
1321#endif /* DEV_NETMAP */
1322	}
1323}
1324
1325/**
1326 * ena_refill_all_rx_bufs - allocate all queues Rx buffers
1327 * @adapter: network interface device structure
1328 *
1329 */
1330static void
1331ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1332{
1333	struct ena_ring *rx_ring;
1334	int i, rc, bufs_num;
1335
1336	for (i = 0; i < adapter->num_io_queues; i++) {
1337		rx_ring = &adapter->rx_ring[i];
1338		bufs_num = rx_ring->ring_size - 1;
1339		rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1340		if (unlikely(rc != bufs_num))
1341			ena_trace(NULL, ENA_WARNING, "refilling Queue %d failed. "
1342			    "Allocated %d buffers from: %d\n", i, rc, bufs_num);
1343#ifdef DEV_NETMAP
1344		rx_ring->initialized = true;
1345#endif /* DEV_NETMAP */
1346	}
1347}
1348
1349static void
1350ena_free_all_rx_bufs(struct ena_adapter *adapter)
1351{
1352	int i;
1353
1354	for (i = 0; i < adapter->num_io_queues; i++)
1355		ena_free_rx_bufs(adapter, i);
1356}
1357
1358/**
1359 * ena_free_tx_bufs - Free Tx Buffers per Queue
1360 * @adapter: network interface device structure
1361 * @qid: queue index
1362 **/
1363static void
1364ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid)
1365{
1366	bool print_once = true;
1367	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
1368
1369	ENA_RING_MTX_LOCK(tx_ring);
1370	for (int i = 0; i < tx_ring->ring_size; i++) {
1371		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1372
1373		if (tx_info->mbuf == NULL)
1374			continue;
1375
1376		if (print_once) {
1377			device_printf(adapter->pdev,
1378			    "free uncompleted tx mbuf qid %d idx 0x%x\n",
1379			    qid, i);
1380			print_once = false;
1381		} else {
1382			ena_trace(NULL, ENA_DBG,
1383			    "free uncompleted tx mbuf qid %d idx 0x%x\n",
1384			     qid, i);
1385		}
1386
1387		bus_dmamap_sync(adapter->tx_buf_tag, tx_info->dmamap,
1388		    BUS_DMASYNC_POSTWRITE);
1389		bus_dmamap_unload(adapter->tx_buf_tag, tx_info->dmamap);
1390
1391		m_free(tx_info->mbuf);
1392		tx_info->mbuf = NULL;
1393	}
1394	ENA_RING_MTX_UNLOCK(tx_ring);
1395}
1396
1397static void
1398ena_free_all_tx_bufs(struct ena_adapter *adapter)
1399{
1400
1401	for (int i = 0; i < adapter->num_io_queues; i++)
1402		ena_free_tx_bufs(adapter, i);
1403}
1404
1405static void
1406ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1407{
1408	uint16_t ena_qid;
1409	int i;
1410
1411	for (i = 0; i < adapter->num_io_queues; i++) {
1412		ena_qid = ENA_IO_TXQ_IDX(i);
1413		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1414	}
1415}
1416
1417static void
1418ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1419{
1420	uint16_t ena_qid;
1421	int i;
1422
1423	for (i = 0; i < adapter->num_io_queues; i++) {
1424		ena_qid = ENA_IO_RXQ_IDX(i);
1425		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1426	}
1427}
1428
1429static void
1430ena_destroy_all_io_queues(struct ena_adapter *adapter)
1431{
1432	struct ena_que *queue;
1433	int i;
1434
1435	for (i = 0; i < adapter->num_io_queues; i++) {
1436		queue = &adapter->que[i];
1437		while (taskqueue_cancel(queue->cleanup_tq,
1438		    &queue->cleanup_task, NULL))
1439			taskqueue_drain(queue->cleanup_tq,
1440			    &queue->cleanup_task);
1441		taskqueue_free(queue->cleanup_tq);
1442	}
1443
1444	ena_destroy_all_tx_queues(adapter);
1445	ena_destroy_all_rx_queues(adapter);
1446}
1447
1448static int
1449ena_create_io_queues(struct ena_adapter *adapter)
1450{
1451	struct ena_com_dev *ena_dev = adapter->ena_dev;
1452	struct ena_com_create_io_ctx ctx;
1453	struct ena_ring *ring;
1454	struct ena_que *queue;
1455	uint16_t ena_qid;
1456	uint32_t msix_vector;
1457	int rc, i;
1458
1459	/* Create TX queues */
1460	for (i = 0; i < adapter->num_io_queues; i++) {
1461		msix_vector = ENA_IO_IRQ_IDX(i);
1462		ena_qid = ENA_IO_TXQ_IDX(i);
1463		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1464		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1465		ctx.queue_size = adapter->requested_tx_ring_size;
1466		ctx.msix_vector = msix_vector;
1467		ctx.qid = ena_qid;
1468		rc = ena_com_create_io_queue(ena_dev, &ctx);
1469		if (rc != 0) {
1470			device_printf(adapter->pdev,
1471			    "Failed to create io TX queue #%d rc: %d\n", i, rc);
1472			goto err_tx;
1473		}
1474		ring = &adapter->tx_ring[i];
1475		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1476		    &ring->ena_com_io_sq,
1477		    &ring->ena_com_io_cq);
1478		if (rc != 0) {
1479			device_printf(adapter->pdev,
1480			    "Failed to get TX queue handlers. TX queue num"
1481			    " %d rc: %d\n", i, rc);
1482			ena_com_destroy_io_queue(ena_dev, ena_qid);
1483			goto err_tx;
1484		}
1485	}
1486
1487	/* Create RX queues */
1488	for (i = 0; i < adapter->num_io_queues; i++) {
1489		msix_vector = ENA_IO_IRQ_IDX(i);
1490		ena_qid = ENA_IO_RXQ_IDX(i);
1491		ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1492		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1493		ctx.queue_size = adapter->requested_rx_ring_size;
1494		ctx.msix_vector = msix_vector;
1495		ctx.qid = ena_qid;
1496		rc = ena_com_create_io_queue(ena_dev, &ctx);
1497		if (unlikely(rc != 0)) {
1498			device_printf(adapter->pdev,
1499			    "Failed to create io RX queue[%d] rc: %d\n", i, rc);
1500			goto err_rx;
1501		}
1502
1503		ring = &adapter->rx_ring[i];
1504		rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1505		    &ring->ena_com_io_sq,
1506		    &ring->ena_com_io_cq);
1507		if (unlikely(rc != 0)) {
1508			device_printf(adapter->pdev,
1509			    "Failed to get RX queue handlers. RX queue num"
1510			    " %d rc: %d\n", i, rc);
1511			ena_com_destroy_io_queue(ena_dev, ena_qid);
1512			goto err_rx;
1513		}
1514	}
1515
1516	for (i = 0; i < adapter->num_io_queues; i++) {
1517		queue = &adapter->que[i];
1518
1519		TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
1520		queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
1521		    M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
1522
1523		taskqueue_start_threads(&queue->cleanup_tq, 1, PI_NET,
1524		    "%s queue %d cleanup",
1525		    device_get_nameunit(adapter->pdev), i);
1526	}
1527
1528	return (0);
1529
1530err_rx:
1531	while (i--)
1532		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
1533	i = adapter->num_io_queues;
1534err_tx:
1535	while (i--)
1536		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1537
1538	return (ENXIO);
1539}
1540
1541/*********************************************************************
1542 *
1543 *  MSIX & Interrupt Service routine
1544 *
1545 **********************************************************************/
1546
1547/**
1548 * ena_handle_msix - MSIX Interrupt Handler for admin/async queue
1549 * @arg: interrupt number
1550 **/
1551static void
1552ena_intr_msix_mgmnt(void *arg)
1553{
1554	struct ena_adapter *adapter = (struct ena_adapter *)arg;
1555
1556	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1557	if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)))
1558		ena_com_aenq_intr_handler(adapter->ena_dev, arg);
1559}
1560
1561/**
1562 * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx
1563 * @arg: queue
1564 **/
1565static int
1566ena_handle_msix(void *arg)
1567{
1568	struct ena_que *queue = arg;
1569	struct ena_adapter *adapter = queue->adapter;
1570	if_t ifp = adapter->ifp;
1571
1572	if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
1573		return (FILTER_STRAY);
1574
1575	taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
1576
1577	return (FILTER_HANDLED);
1578}
1579
1580static int
1581ena_enable_msix(struct ena_adapter *adapter)
1582{
1583	device_t dev = adapter->pdev;
1584	int msix_vecs, msix_req;
1585	int i, rc = 0;
1586
1587	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
1588		device_printf(dev, "Error, MSI-X is already enabled\n");
1589		return (EINVAL);
1590	}
1591
1592	/* Reserved the max msix vectors we might need */
1593	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
1594
1595	adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry),
1596	    M_DEVBUF, M_WAITOK | M_ZERO);
1597
1598	ena_trace(NULL, ENA_DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs);
1599
1600	for (i = 0; i < msix_vecs; i++) {
1601		adapter->msix_entries[i].entry = i;
1602		/* Vectors must start from 1 */
1603		adapter->msix_entries[i].vector = i + 1;
1604	}
1605
1606	msix_req = msix_vecs;
1607	rc = pci_alloc_msix(dev, &msix_vecs);
1608	if (unlikely(rc != 0)) {
1609		device_printf(dev,
1610		    "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc);
1611
1612		rc = ENOSPC;
1613		goto err_msix_free;
1614	}
1615
1616	if (msix_vecs != msix_req) {
1617		if (msix_vecs == ENA_ADMIN_MSIX_VEC) {
1618			device_printf(dev,
1619			    "Not enough number of MSI-x allocated: %d\n",
1620			    msix_vecs);
1621			pci_release_msi(dev);
1622			rc = ENOSPC;
1623			goto err_msix_free;
1624		}
1625		device_printf(dev, "Enable only %d MSI-x (out of %d), reduce "
1626		    "the number of queues\n", msix_vecs, msix_req);
1627	}
1628
1629	adapter->msix_vecs = msix_vecs;
1630	ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
1631
1632	return (0);
1633
1634err_msix_free:
1635	free(adapter->msix_entries, M_DEVBUF);
1636	adapter->msix_entries = NULL;
1637
1638	return (rc);
1639}
1640
1641static void
1642ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1643{
1644
1645	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1646	    ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1647	    device_get_nameunit(adapter->pdev));
1648	/*
1649	 * Handler is NULL on purpose, it will be set
1650	 * when mgmnt interrupt is acquired
1651	 */
1652	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL;
1653	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1654	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1655	    adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
1656}
1657
1658static int
1659ena_setup_io_intr(struct ena_adapter *adapter)
1660{
1661	static int last_bind_cpu = -1;
1662	int irq_idx;
1663
1664	if (adapter->msix_entries == NULL)
1665		return (EINVAL);
1666
1667	for (int i = 0; i < adapter->num_io_queues; i++) {
1668		irq_idx = ENA_IO_IRQ_IDX(i);
1669
1670		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1671		    "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i);
1672		adapter->irq_tbl[irq_idx].handler = ena_handle_msix;
1673		adapter->irq_tbl[irq_idx].data = &adapter->que[i];
1674		adapter->irq_tbl[irq_idx].vector =
1675		    adapter->msix_entries[irq_idx].vector;
1676		ena_trace(NULL, ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n",
1677		    adapter->msix_entries[irq_idx].vector);
1678
1679		/*
1680		 * We want to bind rings to the corresponding cpu
1681		 * using something similar to the RSS round-robin technique.
1682		 */
1683		if (unlikely(last_bind_cpu < 0))
1684			last_bind_cpu = CPU_FIRST();
1685		adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu =
1686		    last_bind_cpu;
1687		last_bind_cpu = CPU_NEXT(last_bind_cpu);
1688	}
1689
1690	return (0);
1691}
1692
1693static int
1694ena_request_mgmnt_irq(struct ena_adapter *adapter)
1695{
1696	struct ena_irq *irq;
1697	unsigned long flags;
1698	int rc, rcc;
1699
1700	flags = RF_ACTIVE | RF_SHAREABLE;
1701
1702	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1703	irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1704	    &irq->vector, flags);
1705
1706	if (unlikely(irq->res == NULL)) {
1707		device_printf(adapter->pdev, "could not allocate "
1708		    "irq vector: %d\n", irq->vector);
1709		return (ENXIO);
1710	}
1711
1712	rc = bus_setup_intr(adapter->pdev, irq->res,
1713	    INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt,
1714	    irq->data, &irq->cookie);
1715	if (unlikely(rc != 0)) {
1716		device_printf(adapter->pdev, "failed to register "
1717		    "interrupt handler for irq %ju: %d\n",
1718		    rman_get_start(irq->res), rc);
1719		goto err_res_free;
1720	}
1721	irq->requested = true;
1722
1723	return (rc);
1724
1725err_res_free:
1726	ena_trace(NULL, ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n",
1727	    irq->vector);
1728	rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1729	    irq->vector, irq->res);
1730	if (unlikely(rcc != 0))
1731		device_printf(adapter->pdev, "dev has no parent while "
1732		    "releasing res for irq: %d\n", irq->vector);
1733	irq->res = NULL;
1734
1735	return (rc);
1736}
1737
1738static int
1739ena_request_io_irq(struct ena_adapter *adapter)
1740{
1741	struct ena_irq *irq;
1742	unsigned long flags = 0;
1743	int rc = 0, i, rcc;
1744
1745	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) {
1746		device_printf(adapter->pdev,
1747		    "failed to request I/O IRQ: MSI-X is not enabled\n");
1748		return (EINVAL);
1749	} else {
1750		flags = RF_ACTIVE | RF_SHAREABLE;
1751	}
1752
1753	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1754		irq = &adapter->irq_tbl[i];
1755
1756		if (unlikely(irq->requested))
1757			continue;
1758
1759		irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ,
1760		    &irq->vector, flags);
1761		if (unlikely(irq->res == NULL)) {
1762			rc = ENOMEM;
1763			device_printf(adapter->pdev, "could not allocate "
1764			    "irq vector: %d\n", irq->vector);
1765			goto err;
1766		}
1767
1768		rc = bus_setup_intr(adapter->pdev, irq->res,
1769		    INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL,
1770		    irq->data, &irq->cookie);
1771		 if (unlikely(rc != 0)) {
1772			device_printf(adapter->pdev, "failed to register "
1773			    "interrupt handler for irq %ju: %d\n",
1774			    rman_get_start(irq->res), rc);
1775			goto err;
1776		}
1777		irq->requested = true;
1778
1779		ena_trace(NULL, ENA_INFO, "queue %d - cpu %d\n",
1780		    i - ENA_IO_IRQ_FIRST_IDX, irq->cpu);
1781	}
1782
1783	return (rc);
1784
1785err:
1786
1787	for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) {
1788		irq = &adapter->irq_tbl[i];
1789		rcc = 0;
1790
1791		/* Once we entered err: section and irq->requested is true we
1792		   free both intr and resources */
1793		if (irq->requested)
1794			rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1795		if (unlikely(rcc != 0))
1796			device_printf(adapter->pdev, "could not release"
1797			    " irq: %d, error: %d\n", irq->vector, rcc);
1798
1799		/* If we entred err: section without irq->requested set we know
1800		   it was bus_alloc_resource_any() that needs cleanup, provided
1801		   res is not NULL. In case res is NULL no work in needed in
1802		   this iteration */
1803		rcc = 0;
1804		if (irq->res != NULL) {
1805			rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1806			    irq->vector, irq->res);
1807		}
1808		if (unlikely(rcc != 0))
1809			device_printf(adapter->pdev, "dev has no parent while "
1810			    "releasing res for irq: %d\n", irq->vector);
1811		irq->requested = false;
1812		irq->res = NULL;
1813	}
1814
1815	return (rc);
1816}
1817
1818static void
1819ena_free_mgmnt_irq(struct ena_adapter *adapter)
1820{
1821	struct ena_irq *irq;
1822	int rc;
1823
1824	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1825	if (irq->requested) {
1826		ena_trace(NULL, ENA_INFO | ENA_ADMQ, "tear down irq: %d\n",
1827		    irq->vector);
1828		rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie);
1829		if (unlikely(rc != 0))
1830			device_printf(adapter->pdev, "failed to tear "
1831			    "down irq: %d\n", irq->vector);
1832		irq->requested = 0;
1833	}
1834
1835	if (irq->res != NULL) {
1836		ena_trace(NULL, ENA_INFO | ENA_ADMQ, "release resource irq: %d\n",
1837		    irq->vector);
1838		rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1839		    irq->vector, irq->res);
1840		irq->res = NULL;
1841		if (unlikely(rc != 0))
1842			device_printf(adapter->pdev, "dev has no parent while "
1843			    "releasing res for irq: %d\n", irq->vector);
1844	}
1845}
1846
1847static void
1848ena_free_io_irq(struct ena_adapter *adapter)
1849{
1850	struct ena_irq *irq;
1851	int rc;
1852
1853	for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1854		irq = &adapter->irq_tbl[i];
1855		if (irq->requested) {
1856			ena_trace(NULL, ENA_INFO | ENA_IOQ, "tear down irq: %d\n",
1857			    irq->vector);
1858			rc = bus_teardown_intr(adapter->pdev, irq->res,
1859			    irq->cookie);
1860			if (unlikely(rc != 0)) {
1861				device_printf(adapter->pdev, "failed to tear "
1862				    "down irq: %d\n", irq->vector);
1863			}
1864			irq->requested = 0;
1865		}
1866
1867		if (irq->res != NULL) {
1868			ena_trace(NULL, ENA_INFO | ENA_IOQ, "release resource irq: %d\n",
1869			    irq->vector);
1870			rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ,
1871			    irq->vector, irq->res);
1872			irq->res = NULL;
1873			if (unlikely(rc != 0)) {
1874				device_printf(adapter->pdev, "dev has no parent"
1875				    " while releasing res for irq: %d\n",
1876				    irq->vector);
1877			}
1878		}
1879	}
1880}
1881
1882static void
1883ena_free_irqs(struct ena_adapter* adapter)
1884{
1885
1886	ena_free_io_irq(adapter);
1887	ena_free_mgmnt_irq(adapter);
1888	ena_disable_msix(adapter);
1889}
1890
1891static void
1892ena_disable_msix(struct ena_adapter *adapter)
1893{
1894
1895	if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) {
1896		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter);
1897		pci_release_msi(adapter->pdev);
1898	}
1899
1900	adapter->msix_vecs = 0;
1901	if (adapter->msix_entries != NULL)
1902		free(adapter->msix_entries, M_DEVBUF);
1903	adapter->msix_entries = NULL;
1904}
1905
1906static void
1907ena_unmask_all_io_irqs(struct ena_adapter *adapter)
1908{
1909	struct ena_com_io_cq* io_cq;
1910	struct ena_eth_io_intr_reg intr_reg;
1911	uint16_t ena_qid;
1912	int i;
1913
1914	/* Unmask interrupts for all queues */
1915	for (i = 0; i < adapter->num_io_queues; i++) {
1916		ena_qid = ENA_IO_TXQ_IDX(i);
1917		io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
1918		ena_com_update_intr_reg(&intr_reg, 0, 0, true);
1919		ena_com_unmask_intr(io_cq, &intr_reg);
1920	}
1921}
1922
1923/* Configure the Rx forwarding */
1924static int
1925ena_rss_configure(struct ena_adapter *adapter)
1926{
1927	struct ena_com_dev *ena_dev = adapter->ena_dev;
1928	int rc;
1929
1930	/* In case the RSS table was destroyed */
1931	if (!ena_dev->rss.tbl_log_size) {
1932		rc = ena_rss_init_default(adapter);
1933		if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
1934			device_printf(adapter->pdev,
1935			    "WARNING: RSS was not properly re-initialized,"
1936			    " it will affect bandwidth\n");
1937			ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
1938			return (rc);
1939		}
1940	}
1941
1942	/* Set indirect table */
1943	rc = ena_com_indirect_table_set(ena_dev);
1944	if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1945		return (rc);
1946
1947	/* Configure hash function (if supported) */
1948	rc = ena_com_set_hash_function(ena_dev);
1949	if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1950		return (rc);
1951
1952	/* Configure hash inputs (if supported) */
1953	rc = ena_com_set_hash_ctrl(ena_dev);
1954	if (unlikely((rc != 0) && (rc != EOPNOTSUPP)))
1955		return (rc);
1956
1957	return (0);
1958}
1959
1960static int
1961ena_up_complete(struct ena_adapter *adapter)
1962{
1963	int rc;
1964
1965	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) {
1966		rc = ena_rss_configure(adapter);
1967		if (rc != 0) {
1968			device_printf(adapter->pdev,
1969			    "Failed to configure RSS\n");
1970			return (rc);
1971		}
1972	}
1973
1974	rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu);
1975	if (unlikely(rc != 0))
1976		return (rc);
1977
1978	ena_refill_all_rx_bufs(adapter);
1979	ena_reset_counters((counter_u64_t *)&adapter->hw_stats,
1980	    sizeof(adapter->hw_stats));
1981
1982	return (0);
1983}
1984
1985static void
1986set_io_rings_size(struct ena_adapter *adapter, int new_tx_size,
1987    int new_rx_size)
1988{
1989	int i;
1990
1991	for (i = 0; i < adapter->num_io_queues; i++) {
1992		adapter->tx_ring[i].ring_size = new_tx_size;
1993		adapter->rx_ring[i].ring_size = new_rx_size;
1994	}
1995}
1996
1997static int
1998create_queues_with_size_backoff(struct ena_adapter *adapter)
1999{
2000	int rc;
2001	uint32_t cur_rx_ring_size, cur_tx_ring_size;
2002	uint32_t new_rx_ring_size, new_tx_ring_size;
2003
2004	/*
2005	 * Current queue sizes might be set to smaller than the requested
2006	 * ones due to past queue allocation failures.
2007	 */
2008	set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2009	    adapter->requested_rx_ring_size);
2010
2011	while (1) {
2012		/* Allocate transmit descriptors */
2013		rc = ena_setup_all_tx_resources(adapter);
2014		if (unlikely(rc != 0)) {
2015			ena_trace(NULL, ENA_ALERT, "err_setup_tx\n");
2016			goto err_setup_tx;
2017		}
2018
2019		/* Allocate receive descriptors */
2020		rc = ena_setup_all_rx_resources(adapter);
2021		if (unlikely(rc != 0)) {
2022			ena_trace(NULL, ENA_ALERT, "err_setup_rx\n");
2023			goto err_setup_rx;
2024		}
2025
2026		/* Create IO queues for Rx & Tx */
2027		rc = ena_create_io_queues(adapter);
2028		if (unlikely(rc != 0)) {
2029			ena_trace(NULL, ENA_ALERT,
2030			    "create IO queues failed\n");
2031			goto err_io_que;
2032		}
2033
2034		return (0);
2035
2036err_io_que:
2037		ena_free_all_rx_resources(adapter);
2038err_setup_rx:
2039		ena_free_all_tx_resources(adapter);
2040err_setup_tx:
2041		/*
2042		 * Lower the ring size if ENOMEM. Otherwise, return the
2043		 * error straightaway.
2044		 */
2045		if (unlikely(rc != ENOMEM)) {
2046			ena_trace(NULL, ENA_ALERT,
2047			    "Queue creation failed with error code: %d\n", rc);
2048			return (rc);
2049		}
2050
2051		cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2052		cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2053
2054		device_printf(adapter->pdev,
2055		    "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2056		    cur_tx_ring_size, cur_rx_ring_size);
2057
2058		new_tx_ring_size = cur_tx_ring_size;
2059		new_rx_ring_size = cur_rx_ring_size;
2060
2061		/*
2062		 * Decrease the size of a larger queue, or decrease both if they are
2063		 * the same size.
2064		 */
2065		if (cur_rx_ring_size <= cur_tx_ring_size)
2066			new_tx_ring_size = cur_tx_ring_size / 2;
2067		if (cur_rx_ring_size >= cur_tx_ring_size)
2068			new_rx_ring_size = cur_rx_ring_size / 2;
2069
2070		if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2071		    new_rx_ring_size < ENA_MIN_RING_SIZE) {
2072			device_printf(adapter->pdev,
2073			    "Queue creation failed with the smallest possible queue size"
2074			    "of %d for both queues. Not retrying with smaller queues\n",
2075			    ENA_MIN_RING_SIZE);
2076			return (rc);
2077		}
2078
2079		set_io_rings_size(adapter, new_tx_ring_size, new_rx_ring_size);
2080	}
2081}
2082
2083int
2084ena_up(struct ena_adapter *adapter)
2085{
2086	int rc = 0;
2087
2088	if (unlikely(device_is_attached(adapter->pdev) == 0)) {
2089		device_printf(adapter->pdev, "device is not attached!\n");
2090		return (ENXIO);
2091	}
2092
2093	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2094		return (0);
2095
2096	device_printf(adapter->pdev, "device is going UP\n");
2097
2098	/* setup interrupts for IO queues */
2099	rc = ena_setup_io_intr(adapter);
2100	if (unlikely(rc != 0)) {
2101		ena_trace(NULL, ENA_ALERT, "error setting up IO interrupt\n");
2102		goto error;
2103	}
2104	rc = ena_request_io_irq(adapter);
2105	if (unlikely(rc != 0)) {
2106		ena_trace(NULL, ENA_ALERT, "err_req_irq\n");
2107		goto error;
2108	}
2109
2110	device_printf(adapter->pdev,
2111	    "Creating %u IO queues. Rx queue size: %d, Tx queue size: %d, "
2112	    "LLQ is %s\n",
2113	    adapter->num_io_queues,
2114	    adapter->requested_rx_ring_size,
2115	    adapter->requested_tx_ring_size,
2116	    (adapter->ena_dev->tx_mem_queue_type ==
2117	        ENA_ADMIN_PLACEMENT_POLICY_DEV) ?  "ENABLED" : "DISABLED");
2118
2119	rc = create_queues_with_size_backoff(adapter);
2120	if (unlikely(rc != 0)) {
2121		ena_trace(NULL, ENA_ALERT,
2122		    "error creating queues with size backoff\n");
2123		goto err_create_queues_with_backoff;
2124	}
2125
2126	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
2127		if_link_state_change(adapter->ifp, LINK_STATE_UP);
2128
2129	rc = ena_up_complete(adapter);
2130	if (unlikely(rc != 0))
2131		goto err_up_complete;
2132
2133	counter_u64_add(adapter->dev_stats.interface_up, 1);
2134
2135	ena_update_hwassist(adapter);
2136
2137	if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING,
2138		IFF_DRV_OACTIVE);
2139
2140	/* Activate timer service only if the device is running.
2141		* If this flag is not set, it means that the driver is being
2142		* reset and timer service will be activated afterwards.
2143		*/
2144	if (ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) {
2145		callout_reset_sbt(&adapter->timer_service, SBT_1S,
2146			SBT_1S, ena_timer_service, (void *)adapter, 0);
2147	}
2148
2149	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter);
2150
2151	ena_unmask_all_io_irqs(adapter);
2152
2153	return (0);
2154
2155err_up_complete:
2156	ena_destroy_all_io_queues(adapter);
2157	ena_free_all_rx_resources(adapter);
2158	ena_free_all_tx_resources(adapter);
2159err_create_queues_with_backoff:
2160	ena_free_io_irq(adapter);
2161error:
2162	return (rc);
2163}
2164
2165static uint64_t
2166ena_get_counter(if_t ifp, ift_counter cnt)
2167{
2168	struct ena_adapter *adapter;
2169	struct ena_hw_stats *stats;
2170
2171	adapter = if_getsoftc(ifp);
2172	stats = &adapter->hw_stats;
2173
2174	switch (cnt) {
2175	case IFCOUNTER_IPACKETS:
2176		return (counter_u64_fetch(stats->rx_packets));
2177	case IFCOUNTER_OPACKETS:
2178		return (counter_u64_fetch(stats->tx_packets));
2179	case IFCOUNTER_IBYTES:
2180		return (counter_u64_fetch(stats->rx_bytes));
2181	case IFCOUNTER_OBYTES:
2182		return (counter_u64_fetch(stats->tx_bytes));
2183	case IFCOUNTER_IQDROPS:
2184		return (counter_u64_fetch(stats->rx_drops));
2185	case IFCOUNTER_OQDROPS:
2186		return (counter_u64_fetch(stats->tx_drops));
2187	default:
2188		return (if_get_counter_default(ifp, cnt));
2189	}
2190}
2191
2192static int
2193ena_media_change(if_t ifp)
2194{
2195	/* Media Change is not supported by firmware */
2196	return (0);
2197}
2198
2199static void
2200ena_media_status(if_t ifp, struct ifmediareq *ifmr)
2201{
2202	struct ena_adapter *adapter = if_getsoftc(ifp);
2203	ena_trace(NULL, ENA_DBG, "enter\n");
2204
2205	ENA_LOCK_LOCK(adapter);
2206
2207	ifmr->ifm_status = IFM_AVALID;
2208	ifmr->ifm_active = IFM_ETHER;
2209
2210	if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) {
2211		ENA_LOCK_UNLOCK(adapter);
2212		ena_trace(NULL, ENA_INFO, "Link is down\n");
2213		return;
2214	}
2215
2216	ifmr->ifm_status |= IFM_ACTIVE;
2217	ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX;
2218
2219	ENA_LOCK_UNLOCK(adapter);
2220}
2221
2222static void
2223ena_init(void *arg)
2224{
2225	struct ena_adapter *adapter = (struct ena_adapter *)arg;
2226
2227	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) {
2228		ENA_LOCK_LOCK(adapter);
2229		ena_up(adapter);
2230		ENA_LOCK_UNLOCK(adapter);
2231	}
2232}
2233
2234static int
2235ena_ioctl(if_t ifp, u_long command, caddr_t data)
2236{
2237	struct ena_adapter *adapter;
2238	struct ifreq *ifr;
2239	int rc;
2240
2241	adapter = ifp->if_softc;
2242	ifr = (struct ifreq *)data;
2243
2244	/*
2245	 * Acquiring lock to prevent from running up and down routines parallel.
2246	 */
2247	rc = 0;
2248	switch (command) {
2249	case SIOCSIFMTU:
2250		if (ifp->if_mtu == ifr->ifr_mtu)
2251			break;
2252		ENA_LOCK_LOCK(adapter);
2253		ena_down(adapter);
2254
2255		ena_change_mtu(ifp, ifr->ifr_mtu);
2256
2257		rc = ena_up(adapter);
2258		ENA_LOCK_UNLOCK(adapter);
2259		break;
2260
2261	case SIOCSIFFLAGS:
2262		if ((ifp->if_flags & IFF_UP) != 0) {
2263			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2264				if ((ifp->if_flags & (IFF_PROMISC |
2265				    IFF_ALLMULTI)) != 0) {
2266					device_printf(adapter->pdev,
2267					    "ioctl promisc/allmulti\n");
2268				}
2269			} else {
2270				ENA_LOCK_LOCK(adapter);
2271				rc = ena_up(adapter);
2272				ENA_LOCK_UNLOCK(adapter);
2273			}
2274		} else {
2275			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
2276				ENA_LOCK_LOCK(adapter);
2277				ena_down(adapter);
2278				ENA_LOCK_UNLOCK(adapter);
2279			}
2280		}
2281		break;
2282
2283	case SIOCADDMULTI:
2284	case SIOCDELMULTI:
2285		break;
2286
2287	case SIOCSIFMEDIA:
2288	case SIOCGIFMEDIA:
2289		rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
2290		break;
2291
2292	case SIOCSIFCAP:
2293		{
2294			int reinit = 0;
2295
2296			if (ifr->ifr_reqcap != ifp->if_capenable) {
2297				ifp->if_capenable = ifr->ifr_reqcap;
2298				reinit = 1;
2299			}
2300
2301			if ((reinit != 0) &&
2302			    ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) {
2303				ENA_LOCK_LOCK(adapter);
2304				ena_down(adapter);
2305				rc = ena_up(adapter);
2306				ENA_LOCK_UNLOCK(adapter);
2307			}
2308		}
2309
2310		break;
2311	default:
2312		rc = ether_ioctl(ifp, command, data);
2313		break;
2314	}
2315
2316	return (rc);
2317}
2318
2319static int
2320ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat)
2321{
2322	int caps = 0;
2323
2324	if ((feat->offload.tx &
2325	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2326	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK |
2327	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0)
2328		caps |= IFCAP_TXCSUM;
2329
2330	if ((feat->offload.tx &
2331	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK |
2332	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0)
2333		caps |= IFCAP_TXCSUM_IPV6;
2334
2335	if ((feat->offload.tx &
2336	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0)
2337		caps |= IFCAP_TSO4;
2338
2339	if ((feat->offload.tx &
2340	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0)
2341		caps |= IFCAP_TSO6;
2342
2343	if ((feat->offload.rx_supported &
2344	    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK |
2345	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0)
2346		caps |= IFCAP_RXCSUM;
2347
2348	if ((feat->offload.rx_supported &
2349	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0)
2350		caps |= IFCAP_RXCSUM_IPV6;
2351
2352	caps |= IFCAP_LRO | IFCAP_JUMBO_MTU;
2353
2354	return (caps);
2355}
2356
2357static void
2358ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp)
2359{
2360
2361	host_info->supported_network_features[0] =
2362	    (uint32_t)if_getcapabilities(ifp);
2363}
2364
2365static void
2366ena_update_hwassist(struct ena_adapter *adapter)
2367{
2368	if_t ifp = adapter->ifp;
2369	uint32_t feat = adapter->tx_offload_cap;
2370	int cap = if_getcapenable(ifp);
2371	int flags = 0;
2372
2373	if_clearhwassist(ifp);
2374
2375	if ((cap & IFCAP_TXCSUM) != 0) {
2376		if ((feat &
2377		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0)
2378			flags |= CSUM_IP;
2379		if ((feat &
2380		    (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK |
2381		    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0)
2382			flags |= CSUM_IP_UDP | CSUM_IP_TCP;
2383	}
2384
2385	if ((cap & IFCAP_TXCSUM_IPV6) != 0)
2386		flags |= CSUM_IP6_UDP | CSUM_IP6_TCP;
2387
2388	if ((cap & IFCAP_TSO4) != 0)
2389		flags |= CSUM_IP_TSO;
2390
2391	if ((cap & IFCAP_TSO6) != 0)
2392		flags |= CSUM_IP6_TSO;
2393
2394	if_sethwassistbits(ifp, flags, 0);
2395}
2396
2397static int
2398ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
2399    struct ena_com_dev_get_features_ctx *feat)
2400{
2401	if_t ifp;
2402	int caps = 0;
2403
2404	ifp = adapter->ifp = if_gethandle(IFT_ETHER);
2405	if (unlikely(ifp == NULL)) {
2406		ena_trace(NULL, ENA_ALERT, "can not allocate ifnet structure\n");
2407		return (ENXIO);
2408	}
2409	if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
2410	if_setdev(ifp, pdev);
2411	if_setsoftc(ifp, adapter);
2412
2413	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
2414	if_setinitfn(ifp, ena_init);
2415	if_settransmitfn(ifp, ena_mq_start);
2416	if_setqflushfn(ifp, ena_qflush);
2417	if_setioctlfn(ifp, ena_ioctl);
2418	if_setgetcounterfn(ifp, ena_get_counter);
2419
2420	if_setsendqlen(ifp, adapter->requested_tx_ring_size);
2421	if_setsendqready(ifp);
2422	if_setmtu(ifp, ETHERMTU);
2423	if_setbaudrate(ifp, 0);
2424	/* Zeroize capabilities... */
2425	if_setcapabilities(ifp, 0);
2426	if_setcapenable(ifp, 0);
2427	/* check hardware support */
2428	caps = ena_get_dev_offloads(feat);
2429	/* ... and set them */
2430	if_setcapabilitiesbit(ifp, caps, 0);
2431
2432	/* TSO parameters */
2433	ifp->if_hw_tsomax = ENA_TSO_MAXSIZE -
2434	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
2435	ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1;
2436	ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE;
2437
2438	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
2439	if_setcapenable(ifp, if_getcapabilities(ifp));
2440
2441	/*
2442	 * Specify the media types supported by this adapter and register
2443	 * callbacks to update media and link information
2444	 */
2445	ifmedia_init(&adapter->media, IFM_IMASK,
2446	    ena_media_change, ena_media_status);
2447	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2448	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2449
2450	ether_ifattach(ifp, adapter->mac_addr);
2451
2452	return (0);
2453}
2454
2455void
2456ena_down(struct ena_adapter *adapter)
2457{
2458	int rc;
2459
2460	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
2461		return;
2462
2463	device_printf(adapter->pdev, "device is going DOWN\n");
2464
2465	callout_drain(&adapter->timer_service);
2466
2467	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter);
2468	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE,
2469		IFF_DRV_RUNNING);
2470
2471	ena_free_io_irq(adapter);
2472
2473	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) {
2474		rc = ena_com_dev_reset(adapter->ena_dev,
2475			adapter->reset_reason);
2476		if (unlikely(rc != 0))
2477			device_printf(adapter->pdev,
2478				"Device reset failed\n");
2479	}
2480
2481	ena_destroy_all_io_queues(adapter);
2482
2483	ena_free_all_tx_bufs(adapter);
2484	ena_free_all_rx_bufs(adapter);
2485	ena_free_all_tx_resources(adapter);
2486	ena_free_all_rx_resources(adapter);
2487
2488	counter_u64_add(adapter->dev_stats.interface_down, 1);
2489}
2490
2491static uint32_t
2492ena_calc_max_io_queue_num(device_t pdev, struct ena_com_dev *ena_dev,
2493    struct ena_com_dev_get_features_ctx *get_feat_ctx)
2494{
2495	uint32_t io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
2496
2497	/* Regular queues capabilities */
2498	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2499		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2500		    &get_feat_ctx->max_queue_ext.max_queue_ext;
2501		io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num,
2502			max_queue_ext->max_rx_cq_num);
2503
2504		io_tx_sq_num = max_queue_ext->max_tx_sq_num;
2505		io_tx_cq_num = max_queue_ext->max_tx_cq_num;
2506	} else {
2507		struct ena_admin_queue_feature_desc *max_queues =
2508		    &get_feat_ctx->max_queues;
2509		io_tx_sq_num = max_queues->max_sq_num;
2510		io_tx_cq_num = max_queues->max_cq_num;
2511		io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num);
2512	}
2513
2514	/* In case of LLQ use the llq fields for the tx SQ/CQ */
2515	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
2516		io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
2517
2518	max_num_io_queues = min_t(uint32_t, mp_ncpus, ENA_MAX_NUM_IO_QUEUES);
2519	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_rx_num);
2520	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_sq_num);
2521	max_num_io_queues = min_t(uint32_t, max_num_io_queues, io_tx_cq_num);
2522	/* 1 IRQ for for mgmnt and 1 IRQ for each TX/RX pair */
2523	max_num_io_queues = min_t(uint32_t, max_num_io_queues,
2524	    pci_msix_count(pdev) - 1);
2525
2526	return (max_num_io_queues);
2527}
2528
2529static int
2530ena_enable_wc(struct resource *res)
2531{
2532#if defined(__i386) || defined(__amd64) || defined(__aarch64__)
2533	vm_offset_t va;
2534	vm_size_t len;
2535
2536	va = (vm_offset_t)rman_get_virtual(res);
2537	len = rman_get_size(res);
2538#if defined(__i386) || defined(__amd64)
2539	int rc;
2540	rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING);
2541	if (unlikely(rc != 0))
2542		ena_trace(NULL, ENA_ALERT, "pmap_change_attr failed, %d\n", rc);
2543
2544	return (rc);
2545#else /* defined(__aarch64__) */
2546	vm_paddr_t pa;
2547	pa = rman_get_start(res);
2548	pmap_kenter(va, len, pa, VM_MEMATTR_WRITE_COMBINING);
2549
2550	return (0);
2551#endif /* defined(__i386) || defined(__amd64) */
2552#endif
2553	return (EOPNOTSUPP);
2554}
2555
2556static int
2557ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev,
2558    struct ena_admin_feature_llq_desc *llq,
2559    struct ena_llq_configurations *llq_default_configurations)
2560{
2561	struct ena_adapter *adapter = device_get_softc(pdev);
2562	int rc, rid;
2563	uint32_t llq_feature_mask;
2564
2565	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
2566	if (!(ena_dev->supported_features & llq_feature_mask)) {
2567		device_printf(pdev,
2568		    "LLQ is not supported. Fallback to host mode policy.\n");
2569		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2570		return (0);
2571	}
2572
2573	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
2574	if (unlikely(rc != 0)) {
2575		device_printf(pdev, "Failed to configure the device mode. "
2576		    "Fallback to host mode policy.\n");
2577		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2578		return (0);
2579	}
2580
2581	/* Nothing to config, exit */
2582	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
2583		return (0);
2584
2585	/* Try to allocate resources for LLQ bar */
2586	rid = PCIR_BAR(ENA_MEM_BAR);
2587	adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
2588	    &rid, RF_ACTIVE);
2589	if (unlikely(adapter->memory == NULL)) {
2590		device_printf(pdev, "unable to allocate LLQ bar resource. "
2591		    "Fallback to host mode policy.\n");
2592		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2593		return (0);
2594	}
2595
2596	/* Enable write combining for better LLQ performance */
2597	rc = ena_enable_wc(adapter->memory);
2598	if (unlikely(rc != 0)) {
2599		device_printf(pdev, "failed to enable write combining.\n");
2600		return (rc);
2601	}
2602
2603	/*
2604	 * Save virtual address of the device's memory region
2605	 * for the ena_com layer.
2606	 */
2607	ena_dev->mem_bar = rman_get_virtual(adapter->memory);
2608
2609	return (0);
2610}
2611
2612static inline
2613void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
2614{
2615	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
2616	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
2617	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
2618	llq_config->llq_num_decs_before_header =
2619	    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
2620	llq_config->llq_ring_entry_size_value = 128;
2621}
2622
2623static int
2624ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
2625{
2626	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
2627	struct ena_com_dev *ena_dev = ctx->ena_dev;
2628	uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE;
2629	uint32_t rx_queue_size = ENA_DEFAULT_RING_SIZE;
2630	uint32_t max_tx_queue_size;
2631	uint32_t max_rx_queue_size;
2632
2633	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2634		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
2635		    &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
2636		max_rx_queue_size = min_t(uint32_t,
2637		    max_queue_ext->max_rx_cq_depth,
2638		    max_queue_ext->max_rx_sq_depth);
2639		max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
2640
2641		if (ena_dev->tx_mem_queue_type ==
2642		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
2643			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2644			    llq->max_llq_depth);
2645		else
2646			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2647			    max_queue_ext->max_tx_sq_depth);
2648
2649		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2650		    max_queue_ext->max_per_packet_tx_descs);
2651		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2652		    max_queue_ext->max_per_packet_rx_descs);
2653	} else {
2654		struct ena_admin_queue_feature_desc *max_queues =
2655		    &ctx->get_feat_ctx->max_queues;
2656		max_rx_queue_size = min_t(uint32_t,
2657		    max_queues->max_cq_depth,
2658		    max_queues->max_sq_depth);
2659		max_tx_queue_size = max_queues->max_cq_depth;
2660
2661		if (ena_dev->tx_mem_queue_type ==
2662		    ENA_ADMIN_PLACEMENT_POLICY_DEV)
2663			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2664			    llq->max_llq_depth);
2665		else
2666			max_tx_queue_size = min_t(uint32_t, max_tx_queue_size,
2667			    max_queues->max_sq_depth);
2668
2669		ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2670		    max_queues->max_packet_tx_descs);
2671		ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS,
2672		    max_queues->max_packet_rx_descs);
2673	}
2674
2675	/* round down to the nearest power of 2 */
2676	max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
2677	max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
2678
2679	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
2680	    max_tx_queue_size);
2681	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
2682	    max_rx_queue_size);
2683
2684	tx_queue_size = 1 << (flsl(tx_queue_size) - 1);
2685	rx_queue_size = 1 << (flsl(rx_queue_size) - 1);
2686
2687	ctx->max_tx_queue_size = max_tx_queue_size;
2688	ctx->max_rx_queue_size = max_rx_queue_size;
2689	ctx->tx_queue_size = tx_queue_size;
2690	ctx->rx_queue_size = rx_queue_size;
2691
2692	return (0);
2693}
2694
2695static int
2696ena_rss_init_default(struct ena_adapter *adapter)
2697{
2698	struct ena_com_dev *ena_dev = adapter->ena_dev;
2699	device_t dev = adapter->pdev;
2700	int qid, rc, i;
2701
2702	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
2703	if (unlikely(rc != 0)) {
2704		device_printf(dev, "Cannot init indirect table\n");
2705		return (rc);
2706	}
2707
2708	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
2709		qid = i % adapter->num_io_queues;
2710		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
2711		    ENA_IO_RXQ_IDX(qid));
2712		if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2713			device_printf(dev, "Cannot fill indirect table\n");
2714			goto err_rss_destroy;
2715		}
2716	}
2717
2718#ifdef RSS
2719	uint8_t rss_algo = rss_gethashalgo();
2720	if (rss_algo == RSS_HASH_TOEPLITZ) {
2721		uint8_t hash_key[RSS_KEYSIZE];
2722
2723		rss_getkey(hash_key);
2724		rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
2725		    hash_key, RSS_KEYSIZE, 0xFFFFFFFF);
2726	} else
2727#endif
2728	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
2729	    ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
2730	if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2731		device_printf(dev, "Cannot fill hash function\n");
2732		goto err_rss_destroy;
2733	}
2734
2735	rc = ena_com_set_default_hash_ctrl(ena_dev);
2736	if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) {
2737		device_printf(dev, "Cannot fill hash control\n");
2738		goto err_rss_destroy;
2739	}
2740
2741	return (0);
2742
2743err_rss_destroy:
2744	ena_com_rss_destroy(ena_dev);
2745	return (rc);
2746}
2747
2748static void
2749ena_rss_init_default_deferred(void *arg)
2750{
2751	struct ena_adapter *adapter;
2752	devclass_t dc;
2753	int max;
2754	int rc;
2755
2756	dc = devclass_find("ena");
2757	if (unlikely(dc == NULL)) {
2758		ena_trace(NULL, ENA_ALERT, "No devclass ena\n");
2759		return;
2760	}
2761
2762	max = devclass_get_maxunit(dc);
2763	while (max-- >= 0) {
2764		adapter = devclass_get_softc(dc, max);
2765		if (adapter != NULL) {
2766			rc = ena_rss_init_default(adapter);
2767			ENA_FLAG_SET_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
2768			if (unlikely(rc != 0)) {
2769				device_printf(adapter->pdev,
2770				    "WARNING: RSS was not properly initialized,"
2771				    " it will affect bandwidth\n");
2772				ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter);
2773			}
2774		}
2775	}
2776}
2777SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL);
2778
2779static void
2780ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev)
2781{
2782	struct ena_admin_host_info *host_info;
2783	uintptr_t rid;
2784	int rc;
2785
2786	/* Allocate only the host info */
2787	rc = ena_com_allocate_host_info(ena_dev);
2788	if (unlikely(rc != 0)) {
2789		ena_trace(NULL, ENA_ALERT, "Cannot allocate host info\n");
2790		return;
2791	}
2792
2793	host_info = ena_dev->host_attr.host_info;
2794
2795	if (pci_get_id(dev, PCI_ID_RID, &rid) == 0)
2796		host_info->bdf = rid;
2797	host_info->os_type = ENA_ADMIN_OS_FREEBSD;
2798	host_info->kernel_ver = osreldate;
2799
2800	sprintf(host_info->kernel_ver_str, "%d", osreldate);
2801	host_info->os_dist = 0;
2802	strncpy(host_info->os_dist_str, osrelease,
2803	    sizeof(host_info->os_dist_str) - 1);
2804
2805	host_info->driver_version =
2806		(DRV_MODULE_VER_MAJOR) |
2807		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2808		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
2809	host_info->num_cpus = mp_ncpus;
2810	host_info->driver_supported_features =
2811	    ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;
2812
2813	rc = ena_com_set_host_attributes(ena_dev);
2814	if (unlikely(rc != 0)) {
2815		if (rc == EOPNOTSUPP)
2816			ena_trace(NULL, ENA_WARNING, "Cannot set host attributes\n");
2817		else
2818			ena_trace(NULL, ENA_ALERT, "Cannot set host attributes\n");
2819
2820		goto err;
2821	}
2822
2823	return;
2824
2825err:
2826	ena_com_delete_host_info(ena_dev);
2827}
2828
2829static int
2830ena_device_init(struct ena_adapter *adapter, device_t pdev,
2831    struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active)
2832{
2833	struct ena_com_dev* ena_dev = adapter->ena_dev;
2834	bool readless_supported;
2835	uint32_t aenq_groups;
2836	int dma_width;
2837	int rc;
2838
2839	rc = ena_com_mmio_reg_read_request_init(ena_dev);
2840	if (unlikely(rc != 0)) {
2841		device_printf(pdev, "failed to init mmio read less\n");
2842		return (rc);
2843	}
2844
2845	/*
2846	 * The PCIe configuration space revision id indicate if mmio reg
2847	 * read is disabled
2848	 */
2849	readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ);
2850	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2851
2852	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2853	if (unlikely(rc != 0)) {
2854		device_printf(pdev, "Can not reset device\n");
2855		goto err_mmio_read_less;
2856	}
2857
2858	rc = ena_com_validate_version(ena_dev);
2859	if (unlikely(rc != 0)) {
2860		device_printf(pdev, "device version is too low\n");
2861		goto err_mmio_read_less;
2862	}
2863
2864	dma_width = ena_com_get_dma_width(ena_dev);
2865	if (unlikely(dma_width < 0)) {
2866		device_printf(pdev, "Invalid dma width value %d", dma_width);
2867		rc = dma_width;
2868		goto err_mmio_read_less;
2869	}
2870	adapter->dma_width = dma_width;
2871
2872	/* ENA admin level init */
2873	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
2874	if (unlikely(rc != 0)) {
2875		device_printf(pdev,
2876		    "Can not initialize ena admin queue with device\n");
2877		goto err_mmio_read_less;
2878	}
2879
2880	/*
2881	 * To enable the msix interrupts the driver needs to know the number
2882	 * of queues. So the driver uses polling mode to retrieve this
2883	 * information
2884	 */
2885	ena_com_set_admin_polling_mode(ena_dev, true);
2886
2887	ena_config_host_info(ena_dev, pdev);
2888
2889	/* Get Device Attributes */
2890	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2891	if (unlikely(rc != 0)) {
2892		device_printf(pdev,
2893		    "Cannot get attribute for ena device rc: %d\n", rc);
2894		goto err_admin_init;
2895	}
2896
2897	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2898	    BIT(ENA_ADMIN_FATAL_ERROR) |
2899	    BIT(ENA_ADMIN_WARNING) |
2900	    BIT(ENA_ADMIN_NOTIFICATION) |
2901	    BIT(ENA_ADMIN_KEEP_ALIVE);
2902
2903	aenq_groups &= get_feat_ctx->aenq.supported_groups;
2904	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2905	if (unlikely(rc != 0)) {
2906		device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc);
2907		goto err_admin_init;
2908	}
2909
2910	*wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2911
2912	return (0);
2913
2914err_admin_init:
2915	ena_com_delete_host_info(ena_dev);
2916	ena_com_admin_destroy(ena_dev);
2917err_mmio_read_less:
2918	ena_com_mmio_reg_read_request_destroy(ena_dev);
2919
2920	return (rc);
2921}
2922
2923static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
2924{
2925	struct ena_com_dev *ena_dev = adapter->ena_dev;
2926	int rc;
2927
2928	rc = ena_enable_msix(adapter);
2929	if (unlikely(rc != 0)) {
2930		device_printf(adapter->pdev, "Error with MSI-X enablement\n");
2931		return (rc);
2932	}
2933
2934	ena_setup_mgmnt_intr(adapter);
2935
2936	rc = ena_request_mgmnt_irq(adapter);
2937	if (unlikely(rc != 0)) {
2938		device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n");
2939		goto err_disable_msix;
2940	}
2941
2942	ena_com_set_admin_polling_mode(ena_dev, false);
2943
2944	ena_com_admin_aenq_enable(ena_dev);
2945
2946	return (0);
2947
2948err_disable_msix:
2949	ena_disable_msix(adapter);
2950
2951	return (rc);
2952}
2953
2954/* Function called on ENA_ADMIN_KEEP_ALIVE event */
2955static void ena_keep_alive_wd(void *adapter_data,
2956    struct ena_admin_aenq_entry *aenq_e)
2957{
2958	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
2959	struct ena_admin_aenq_keep_alive_desc *desc;
2960	sbintime_t stime;
2961	uint64_t rx_drops;
2962	uint64_t tx_drops;
2963
2964	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
2965
2966	rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low;
2967	tx_drops = ((uint64_t)desc->tx_drops_high << 32) | desc->tx_drops_low;
2968	counter_u64_zero(adapter->hw_stats.rx_drops);
2969	counter_u64_add(adapter->hw_stats.rx_drops, rx_drops);
2970	counter_u64_zero(adapter->hw_stats.tx_drops);
2971	counter_u64_add(adapter->hw_stats.tx_drops, tx_drops);
2972
2973	stime = getsbinuptime();
2974	atomic_store_rel_64(&adapter->keep_alive_timestamp, stime);
2975}
2976
2977/* Check for keep alive expiration */
2978static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2979{
2980	sbintime_t timestamp, time;
2981
2982	if (adapter->wd_active == 0)
2983		return;
2984
2985	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2986		return;
2987
2988	timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp);
2989	time = getsbinuptime() - timestamp;
2990	if (unlikely(time > adapter->keep_alive_timeout)) {
2991		device_printf(adapter->pdev,
2992		    "Keep alive watchdog timeout.\n");
2993		counter_u64_add(adapter->dev_stats.wd_expired, 1);
2994		ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
2995	}
2996}
2997
2998/* Check if admin queue is enabled */
2999static void check_for_admin_com_state(struct ena_adapter *adapter)
3000{
3001	if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) ==
3002	    false)) {
3003		device_printf(adapter->pdev,
3004		    "ENA admin queue is not in running state!\n");
3005		counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
3006		ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
3007	}
3008}
3009
3010static int
3011check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3012    struct ena_ring *rx_ring)
3013{
3014	if (likely(rx_ring->first_interrupt))
3015		return (0);
3016
3017	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3018		return (0);
3019
3020	rx_ring->no_interrupt_event_cnt++;
3021
3022	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3023		device_printf(adapter->pdev, "Potential MSIX issue on Rx side "
3024		    "Queue = %d. Reset the device\n", rx_ring->qid);
3025		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3026		return (EIO);
3027	}
3028
3029	return (0);
3030}
3031
3032static int
3033check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3034    struct ena_ring *tx_ring)
3035{
3036	struct bintime curtime, time;
3037	struct ena_tx_buffer *tx_buf;
3038	sbintime_t time_offset;
3039	uint32_t missed_tx = 0;
3040	int i, rc = 0;
3041
3042	getbinuptime(&curtime);
3043
3044	for (i = 0; i < tx_ring->ring_size; i++) {
3045		tx_buf = &tx_ring->tx_buffer_info[i];
3046
3047		if (bintime_isset(&tx_buf->timestamp) == 0)
3048			continue;
3049
3050		time = curtime;
3051		bintime_sub(&time, &tx_buf->timestamp);
3052		time_offset = bttosbt(time);
3053
3054		if (unlikely(!tx_ring->first_interrupt &&
3055		    time_offset > 2 * adapter->missing_tx_timeout)) {
3056			/*
3057			 * If after graceful period interrupt is still not
3058			 * received, we schedule a reset.
3059			 */
3060			device_printf(adapter->pdev,
3061			    "Potential MSIX issue on Tx side Queue = %d. "
3062			    "Reset the device\n", tx_ring->qid);
3063			ena_trigger_reset(adapter,
3064			    ENA_REGS_RESET_MISS_INTERRUPT);
3065			return (EIO);
3066		}
3067
3068		/* Check again if packet is still waiting */
3069		if (unlikely(time_offset > adapter->missing_tx_timeout)) {
3070
3071			if (!tx_buf->print_once)
3072				ena_trace(NULL, ENA_WARNING, "Found a Tx that wasn't "
3073				    "completed on time, qid %d, index %d.\n",
3074				    tx_ring->qid, i);
3075
3076			tx_buf->print_once = true;
3077			missed_tx++;
3078		}
3079	}
3080
3081	if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
3082		device_printf(adapter->pdev,
3083		    "The number of lost tx completion is above the threshold "
3084		    "(%d > %d). Reset the device\n",
3085		    missed_tx, adapter->missing_tx_threshold);
3086		ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
3087		rc = EIO;
3088	}
3089
3090	counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx);
3091
3092	return (rc);
3093}
3094
3095/*
3096 * Check for TX which were not completed on time.
3097 * Timeout is defined by "missing_tx_timeout".
3098 * Reset will be performed if number of incompleted
3099 * transactions exceeds "missing_tx_threshold".
3100 */
3101static void
3102check_for_missing_completions(struct ena_adapter *adapter)
3103{
3104	struct ena_ring *tx_ring;
3105	struct ena_ring *rx_ring;
3106	int i, budget, rc;
3107
3108	/* Make sure the driver doesn't turn the device in other process */
3109	rmb();
3110
3111	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3112		return;
3113
3114	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3115		return;
3116
3117	if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3118		return;
3119
3120	budget = adapter->missing_tx_max_queues;
3121
3122	for (i = adapter->next_monitored_tx_qid; i < adapter->num_io_queues; i++) {
3123		tx_ring = &adapter->tx_ring[i];
3124		rx_ring = &adapter->rx_ring[i];
3125
3126		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3127		if (unlikely(rc != 0))
3128			return;
3129
3130		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
3131		if (unlikely(rc != 0))
3132			return;
3133
3134		budget--;
3135		if (budget == 0) {
3136			i++;
3137			break;
3138		}
3139	}
3140
3141	adapter->next_monitored_tx_qid = i % adapter->num_io_queues;
3142}
3143
3144/* trigger rx cleanup after 2 consecutive detections */
3145#define EMPTY_RX_REFILL 2
3146/* For the rare case where the device runs out of Rx descriptors and the
3147 * msix handler failed to refill new Rx descriptors (due to a lack of memory
3148 * for example).
3149 * This case will lead to a deadlock:
3150 * The device won't send interrupts since all the new Rx packets will be dropped
3151 * The msix handler won't allocate new Rx descriptors so the device won't be
3152 * able to send new packets.
3153 *
3154 * When such a situation is detected - execute rx cleanup task in another thread
3155 */
3156static void
3157check_for_empty_rx_ring(struct ena_adapter *adapter)
3158{
3159	struct ena_ring *rx_ring;
3160	int i, refill_required;
3161
3162	if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3163		return;
3164
3165	if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))
3166		return;
3167
3168	for (i = 0; i < adapter->num_io_queues; i++) {
3169		rx_ring = &adapter->rx_ring[i];
3170
3171		refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3172		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3173			rx_ring->empty_rx_queue++;
3174
3175			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL)	{
3176				counter_u64_add(rx_ring->rx_stats.empty_rx_ring,
3177				    1);
3178
3179				device_printf(adapter->pdev,
3180				    "trigger refill for ring %d\n", i);
3181
3182				taskqueue_enqueue(rx_ring->que->cleanup_tq,
3183				    &rx_ring->que->cleanup_task);
3184				rx_ring->empty_rx_queue = 0;
3185			}
3186		} else {
3187			rx_ring->empty_rx_queue = 0;
3188		}
3189	}
3190}
3191
3192static void ena_update_hints(struct ena_adapter *adapter,
3193			     struct ena_admin_ena_hw_hints *hints)
3194{
3195	struct ena_com_dev *ena_dev = adapter->ena_dev;
3196
3197	if (hints->admin_completion_tx_timeout)
3198		ena_dev->admin_queue.completion_timeout =
3199		    hints->admin_completion_tx_timeout * 1000;
3200
3201	if (hints->mmio_read_timeout)
3202		/* convert to usec */
3203		ena_dev->mmio_read.reg_read_to =
3204		    hints->mmio_read_timeout * 1000;
3205
3206	if (hints->missed_tx_completion_count_threshold_to_reset)
3207		adapter->missing_tx_threshold =
3208		    hints->missed_tx_completion_count_threshold_to_reset;
3209
3210	if (hints->missing_tx_completion_timeout) {
3211		if (hints->missing_tx_completion_timeout ==
3212		     ENA_HW_HINTS_NO_TIMEOUT)
3213			adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3214		else
3215			adapter->missing_tx_timeout =
3216			    SBT_1MS * hints->missing_tx_completion_timeout;
3217	}
3218
3219	if (hints->driver_watchdog_timeout) {
3220		if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3221			adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3222		else
3223			adapter->keep_alive_timeout =
3224			    SBT_1MS * hints->driver_watchdog_timeout;
3225	}
3226}
3227
3228/**
3229 * ena_copy_eni_metrics - Get and copy ENI metrics from the HW.
3230 * @adapter: ENA device adapter
3231 *
3232 * Returns 0 on success, EOPNOTSUPP if current HW doesn't support those metrics
3233 * and other error codes on failure.
3234 *
3235 * This function can possibly cause a race with other calls to the admin queue.
3236 * Because of that, the caller should either lock this function or make sure
3237 * that there is no race in the current context.
3238 */
3239static int
3240ena_copy_eni_metrics(struct ena_adapter *adapter)
3241{
3242	static bool print_once = true;
3243	int rc;
3244
3245	rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_metrics);
3246
3247	if (rc != 0) {
3248		if (rc == ENA_COM_UNSUPPORTED) {
3249			if (print_once) {
3250				device_printf(adapter->pdev,
3251				    "Retrieving ENI metrics is not supported.\n");
3252				print_once = false;
3253			} else {
3254				ena_trace(NULL, ENA_DBG,
3255				    "Retrieving ENI metrics is not supported.\n");
3256			}
3257		} else {
3258			device_printf(adapter->pdev,
3259			    "Failed to get ENI metrics: %d\n", rc);
3260		}
3261	}
3262
3263	return (rc);
3264}
3265
3266static void
3267ena_timer_service(void *data)
3268{
3269	struct ena_adapter *adapter = (struct ena_adapter *)data;
3270	struct ena_admin_host_info *host_info =
3271	    adapter->ena_dev->host_attr.host_info;
3272
3273	check_for_missing_keep_alive(adapter);
3274
3275	check_for_admin_com_state(adapter);
3276
3277	check_for_missing_completions(adapter);
3278
3279	check_for_empty_rx_ring(adapter);
3280
3281	/*
3282	 * User controller update of the ENI metrics.
3283	 * If the delay was set to 0, then the stats shouldn't be updated at
3284	 * all.
3285	 * Otherwise, wait 'eni_metrics_sample_interval' seconds, before
3286	 * updating stats.
3287	 * As timer service is executed every second, it's enough to increment
3288	 * appropriate counter each time the timer service is executed.
3289	 */
3290	if ((adapter->eni_metrics_sample_interval != 0) &&
3291	    (++adapter->eni_metrics_sample_interval_cnt >=
3292	     adapter->eni_metrics_sample_interval)) {
3293		/*
3294		 * There is no race with other admin queue calls, as:
3295		 *   - Timer service runs after interface is up, so all
3296		 *     configuration calls to the admin queue are finished.
3297		 *   - After interface is up, the driver doesn't use (at least
3298		 *     for now) other functions writing to the admin queue.
3299		 *
3300		 * It may change in the future, so in that situation, the lock
3301		 * will be needed. ENA_LOCK_*() cannot be used for that purpose,
3302		 * as callout ena_timer_service is protected by them. It could
3303		 * lead to the deadlock if callout_drain() would hold the lock
3304		 * before ena_copy_eni_metrics() was executed. It's advised to
3305		 * use separate lock in that situation which will be used only
3306		 * for the admin queue.
3307		 */
3308		(void)ena_copy_eni_metrics(adapter);
3309		adapter->eni_metrics_sample_interval_cnt = 0;
3310	}
3311
3312
3313	if (host_info != NULL)
3314		ena_update_host_info(host_info, adapter->ifp);
3315
3316	if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3317		device_printf(adapter->pdev, "Trigger reset is on\n");
3318		taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task);
3319		return;
3320	}
3321
3322	/*
3323	 * Schedule another timeout one second from now.
3324	 */
3325	callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0);
3326}
3327
3328void
3329ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3330{
3331	if_t ifp = adapter->ifp;
3332	struct ena_com_dev *ena_dev = adapter->ena_dev;
3333	bool dev_up;
3334
3335	if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))
3336		return;
3337
3338	if_link_state_change(ifp, LINK_STATE_DOWN);
3339
3340	callout_drain(&adapter->timer_service);
3341
3342	dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter);
3343	if (dev_up)
3344		ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3345
3346	if (!graceful)
3347		ena_com_set_admin_running_state(ena_dev, false);
3348
3349	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter))
3350		ena_down(adapter);
3351
3352	/*
3353	 * Stop the device from sending AENQ events (if the device was up, and
3354	 * the trigger reset was on, ena_down already performs device reset)
3355	 */
3356	if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up))
3357		ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3358
3359	ena_free_mgmnt_irq(adapter);
3360
3361	ena_disable_msix(adapter);
3362
3363	/*
3364	 * IO rings resources should be freed because `ena_restore_device()`
3365	 * calls (not directly) `ena_enable_msix()`, which re-allocates MSIX
3366	 * vectors. The amount of MSIX vectors after destroy-restore may be
3367	 * different than before. Therefore, IO rings resources should be
3368	 * established from scratch each time.
3369	 */
3370	ena_free_all_io_rings_resources(adapter);
3371
3372	ena_com_abort_admin_commands(ena_dev);
3373
3374	ena_com_wait_for_abort_completion(ena_dev);
3375
3376	ena_com_admin_destroy(ena_dev);
3377
3378	ena_com_mmio_reg_read_request_destroy(ena_dev);
3379
3380	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3381
3382	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
3383	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3384}
3385
3386static int
3387ena_device_validate_params(struct ena_adapter *adapter,
3388    struct ena_com_dev_get_features_ctx *get_feat_ctx)
3389{
3390
3391	if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr,
3392	    ETHER_ADDR_LEN) != 0) {
3393		device_printf(adapter->pdev,
3394		    "Error, mac address are different\n");
3395		return (EINVAL);
3396	}
3397
3398	if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) {
3399		device_printf(adapter->pdev,
3400		    "Error, device max mtu is smaller than ifp MTU\n");
3401		return (EINVAL);
3402	}
3403
3404	return 0;
3405}
3406
3407int
3408ena_restore_device(struct ena_adapter *adapter)
3409{
3410	struct ena_com_dev_get_features_ctx get_feat_ctx;
3411	struct ena_com_dev *ena_dev = adapter->ena_dev;
3412	if_t ifp = adapter->ifp;
3413	device_t dev = adapter->pdev;
3414	int wd_active;
3415	int rc;
3416
3417	ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3418
3419	rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active);
3420	if (rc != 0) {
3421		device_printf(dev, "Cannot initialize device\n");
3422		goto err;
3423	}
3424	/*
3425	 * Only enable WD if it was enabled before reset, so it won't override
3426	 * value set by the user by the sysctl.
3427	 */
3428	if (adapter->wd_active != 0)
3429		adapter->wd_active = wd_active;
3430
3431	rc = ena_device_validate_params(adapter, &get_feat_ctx);
3432	if (rc != 0) {
3433		device_printf(dev, "Validation of device parameters failed\n");
3434		goto err_device_destroy;
3435	}
3436
3437	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3438	/* Make sure we don't have a race with AENQ Links state handler */
3439	if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter))
3440		if_link_state_change(ifp, LINK_STATE_UP);
3441
3442	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3443	if (rc != 0) {
3444		device_printf(dev, "Enable MSI-X failed\n");
3445		goto err_device_destroy;
3446	}
3447
3448	/*
3449	 * Effective value of used MSIX vectors should be the same as before
3450	 * `ena_destroy_device()`, if possible, or closest to it if less vectors
3451	 * are available.
3452	 */
3453	if ((adapter->msix_vecs - ENA_ADMIN_MSIX_VEC) < adapter->num_io_queues)
3454		adapter->num_io_queues =
3455		    adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3456
3457	/* Re-initialize rings basic information */
3458	ena_init_io_rings(adapter);
3459
3460	/* If the interface was up before the reset bring it up */
3461	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
3462		rc = ena_up(adapter);
3463		if (rc != 0) {
3464			device_printf(dev, "Failed to create I/O queues\n");
3465			goto err_disable_msix;
3466		}
3467	}
3468
3469	/* Indicate that device is running again and ready to work */
3470	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3471
3472	if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) {
3473		/*
3474		 * As the AENQ handlers weren't executed during reset because
3475		 * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the
3476		 * timestamp must be updated again That will prevent next reset
3477		 * caused by missing keep alive.
3478		 */
3479		adapter->keep_alive_timestamp = getsbinuptime();
3480		callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S,
3481		    ena_timer_service, (void *)adapter, 0);
3482	}
3483	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter);
3484
3485	device_printf(dev,
3486	    "Device reset completed successfully, Driver info: %s\n", ena_version);
3487
3488	return (rc);
3489
3490err_disable_msix:
3491	ena_free_mgmnt_irq(adapter);
3492	ena_disable_msix(adapter);
3493err_device_destroy:
3494	ena_com_abort_admin_commands(ena_dev);
3495	ena_com_wait_for_abort_completion(ena_dev);
3496	ena_com_admin_destroy(ena_dev);
3497	ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3498	ena_com_mmio_reg_read_request_destroy(ena_dev);
3499err:
3500	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3501	ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter);
3502	device_printf(dev, "Reset attempt failed. Can not reset the device\n");
3503
3504	return (rc);
3505}
3506
3507static void
3508ena_reset_task(void *arg, int pending)
3509{
3510	struct ena_adapter *adapter = (struct ena_adapter *)arg;
3511
3512	if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
3513		device_printf(adapter->pdev,
3514		    "device reset scheduled but trigger_reset is off\n");
3515		return;
3516	}
3517
3518	ENA_LOCK_LOCK(adapter);
3519	ena_destroy_device(adapter, false);
3520	ena_restore_device(adapter);
3521	ENA_LOCK_UNLOCK(adapter);
3522}
3523
3524/**
3525 * ena_attach - Device Initialization Routine
3526 * @pdev: device information struct
3527 *
3528 * Returns 0 on success, otherwise on failure.
3529 *
3530 * ena_attach initializes an adapter identified by a device structure.
3531 * The OS initialization, configuring of the adapter private structure,
3532 * and a hardware reset occur.
3533 **/
3534static int
3535ena_attach(device_t pdev)
3536{
3537	struct ena_com_dev_get_features_ctx get_feat_ctx;
3538	struct ena_llq_configurations llq_config;
3539	struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
3540	static int version_printed;
3541	struct ena_adapter *adapter;
3542	struct ena_com_dev *ena_dev = NULL;
3543	uint32_t max_num_io_queues;
3544	int msix_rid;
3545	int rid, rc;
3546
3547	adapter = device_get_softc(pdev);
3548	adapter->pdev = pdev;
3549
3550	ENA_LOCK_INIT(adapter);
3551
3552	/*
3553	 * Set up the timer service - driver is responsible for avoiding
3554	 * concurrency, as the callout won't be using any locking inside.
3555	 */
3556	callout_init(&adapter->timer_service, true);
3557	adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO;
3558	adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO;
3559	adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES;
3560	adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD;
3561
3562	if (version_printed++ == 0)
3563		device_printf(pdev, "%s\n", ena_version);
3564
3565	/* Allocate memory for ena_dev structure */
3566	ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF,
3567	    M_WAITOK | M_ZERO);
3568
3569	adapter->ena_dev = ena_dev;
3570	ena_dev->dmadev = pdev;
3571
3572	rid = PCIR_BAR(ENA_REG_BAR);
3573	adapter->memory = NULL;
3574	adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
3575	    &rid, RF_ACTIVE);
3576	if (unlikely(adapter->registers == NULL)) {
3577		device_printf(pdev,
3578		    "unable to allocate bus resource: registers!\n");
3579		rc = ENOMEM;
3580		goto err_dev_free;
3581	}
3582
3583	/* MSIx vector table may reside on BAR0 with registers or on BAR1. */
3584	msix_rid = pci_msix_table_bar(pdev);
3585	if (msix_rid != rid) {
3586		adapter->msix = bus_alloc_resource_any(pdev, SYS_RES_MEMORY,
3587		    &msix_rid, RF_ACTIVE);
3588		if (unlikely(adapter->msix == NULL)) {
3589			device_printf(pdev,
3590			    "unable to allocate bus resource: msix!\n");
3591			rc = ENOMEM;
3592			goto err_pci_free;
3593		}
3594		adapter->msix_rid = msix_rid;
3595	}
3596
3597	ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF,
3598	    M_WAITOK | M_ZERO);
3599
3600	/* Store register resources */
3601	((struct ena_bus*)(ena_dev->bus))->reg_bar_t =
3602	    rman_get_bustag(adapter->registers);
3603	((struct ena_bus*)(ena_dev->bus))->reg_bar_h =
3604	    rman_get_bushandle(adapter->registers);
3605
3606	if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) {
3607		device_printf(pdev, "failed to pmap registers bar\n");
3608		rc = ENXIO;
3609		goto err_bus_free;
3610	}
3611
3612	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3613
3614	/* Initially clear all the flags */
3615	ENA_FLAG_ZERO(adapter);
3616
3617	/* Device initialization */
3618	rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active);
3619	if (unlikely(rc != 0)) {
3620		device_printf(pdev, "ENA device init failed! (err: %d)\n", rc);
3621		rc = ENXIO;
3622		goto err_bus_free;
3623	}
3624
3625	set_default_llq_configurations(&llq_config);
3626
3627	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
3628	     &llq_config);
3629	if (unlikely(rc != 0)) {
3630		device_printf(pdev, "failed to set placement policy\n");
3631		goto err_com_free;
3632	}
3633
3634	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3635		adapter->disable_meta_caching =
3636		    !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
3637		    BIT(ENA_ADMIN_DISABLE_META_CACHING));
3638
3639	adapter->keep_alive_timestamp = getsbinuptime();
3640
3641	adapter->tx_offload_cap = get_feat_ctx.offload.tx;
3642
3643	memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr,
3644	    ETHER_ADDR_LEN);
3645
3646	calc_queue_ctx.pdev = pdev;
3647	calc_queue_ctx.ena_dev = ena_dev;
3648	calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
3649
3650	/* Calculate initial and maximum IO queue number and size */
3651	max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
3652	    &get_feat_ctx);
3653	rc = ena_calc_io_queue_size(&calc_queue_ctx);
3654	if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
3655		rc = EFAULT;
3656		goto err_com_free;
3657	}
3658
3659	adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
3660	adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
3661	adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
3662	adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
3663	adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
3664	adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
3665
3666	adapter->max_num_io_queues = max_num_io_queues;
3667
3668	adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE;
3669
3670	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
3671
3672	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3673
3674	/* set up dma tags for rx and tx buffers */
3675	rc = ena_setup_tx_dma_tag(adapter);
3676	if (unlikely(rc != 0)) {
3677		device_printf(pdev, "Failed to create TX DMA tag\n");
3678		goto err_com_free;
3679	}
3680
3681	rc = ena_setup_rx_dma_tag(adapter);
3682	if (unlikely(rc != 0)) {
3683		device_printf(pdev, "Failed to create RX DMA tag\n");
3684		goto err_tx_tag_free;
3685	}
3686
3687	/*
3688	 * The amount of requested MSIX vectors is equal to
3689	 * adapter::max_num_io_queues (see `ena_enable_msix()`), plus a constant
3690	 * number of admin queue interrupts. The former is initially determined
3691	 * by HW capabilities (see `ena_calc_max_io_queue_num())` but may not be
3692	 * achieved if there are not enough system resources. By default, the
3693	 * number of effectively used IO queues is the same but later on it can
3694	 * be limited by the user using sysctl interface.
3695	 */
3696	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3697	if (unlikely(rc != 0)) {
3698		device_printf(pdev,
3699		    "Failed to enable and set the admin interrupts\n");
3700		goto err_io_free;
3701	}
3702	/* By default all of allocated MSIX vectors are actively used */
3703	adapter->num_io_queues = adapter->msix_vecs - ENA_ADMIN_MSIX_VEC;
3704
3705	/* initialize rings basic information */
3706	ena_init_io_rings(adapter);
3707
3708	/* setup network interface */
3709	rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
3710	if (unlikely(rc != 0)) {
3711		device_printf(pdev, "Error with network interface setup\n");
3712		goto err_msix_free;
3713	}
3714
3715	/* Initialize reset task queue */
3716	TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
3717	adapter->reset_tq = taskqueue_create("ena_reset_enqueue",
3718	    M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq);
3719	taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET,
3720	    "%s rstq", device_get_nameunit(adapter->pdev));
3721
3722	/* Initialize statistics */
3723	ena_alloc_counters((counter_u64_t *)&adapter->dev_stats,
3724	    sizeof(struct ena_stats_dev));
3725	ena_alloc_counters((counter_u64_t *)&adapter->hw_stats,
3726	    sizeof(struct ena_hw_stats));
3727	ena_sysctl_add_nodes(adapter);
3728
3729#ifdef DEV_NETMAP
3730	rc = ena_netmap_attach(adapter);
3731	if (rc != 0) {
3732		device_printf(pdev, "netmap attach failed: %d\n", rc);
3733		goto err_detach;
3734	}
3735#endif /* DEV_NETMAP */
3736
3737	/* Tell the stack that the interface is not active */
3738	if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
3739	ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter);
3740
3741	return (0);
3742
3743#ifdef DEV_NETMAP
3744err_detach:
3745	ether_ifdetach(adapter->ifp);
3746#endif /* DEV_NETMAP */
3747err_msix_free:
3748	ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR);
3749	ena_free_mgmnt_irq(adapter);
3750	ena_disable_msix(adapter);
3751err_io_free:
3752	ena_free_all_io_rings_resources(adapter);
3753	ena_free_rx_dma_tag(adapter);
3754err_tx_tag_free:
3755	ena_free_tx_dma_tag(adapter);
3756err_com_free:
3757	ena_com_admin_destroy(ena_dev);
3758	ena_com_delete_host_info(ena_dev);
3759	ena_com_mmio_reg_read_request_destroy(ena_dev);
3760err_bus_free:
3761	free(ena_dev->bus, M_DEVBUF);
3762err_pci_free:
3763	ena_free_pci_resources(adapter);
3764err_dev_free:
3765	free(ena_dev, M_DEVBUF);
3766
3767	return (rc);
3768}
3769
3770/**
3771 * ena_detach - Device Removal Routine
3772 * @pdev: device information struct
3773 *
3774 * ena_detach is called by the device subsystem to alert the driver
3775 * that it should release a PCI device.
3776 **/
3777static int
3778ena_detach(device_t pdev)
3779{
3780	struct ena_adapter *adapter = device_get_softc(pdev);
3781	struct ena_com_dev *ena_dev = adapter->ena_dev;
3782	int rc;
3783
3784	/* Make sure VLANS are not using driver */
3785	if (adapter->ifp->if_vlantrunk != NULL) {
3786		device_printf(adapter->pdev ,"VLAN is in use, detach first\n");
3787		return (EBUSY);
3788	}
3789
3790	ether_ifdetach(adapter->ifp);
3791
3792	/* Stop timer service */
3793	ENA_LOCK_LOCK(adapter);
3794	callout_drain(&adapter->timer_service);
3795	ENA_LOCK_UNLOCK(adapter);
3796
3797	/* Release reset task */
3798	while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL))
3799		taskqueue_drain(adapter->reset_tq, &adapter->reset_task);
3800	taskqueue_free(adapter->reset_tq);
3801
3802	ENA_LOCK_LOCK(adapter);
3803	ena_down(adapter);
3804	ena_destroy_device(adapter, true);
3805	ENA_LOCK_UNLOCK(adapter);
3806
3807#ifdef DEV_NETMAP
3808	netmap_detach(adapter->ifp);
3809#endif /* DEV_NETMAP */
3810
3811	ena_free_counters((counter_u64_t *)&adapter->hw_stats,
3812	    sizeof(struct ena_hw_stats));
3813	ena_free_counters((counter_u64_t *)&adapter->dev_stats,
3814	    sizeof(struct ena_stats_dev));
3815
3816	rc = ena_free_rx_dma_tag(adapter);
3817	if (unlikely(rc != 0))
3818		device_printf(adapter->pdev,
3819		    "Unmapped RX DMA tag associations\n");
3820
3821	rc = ena_free_tx_dma_tag(adapter);
3822	if (unlikely(rc != 0))
3823		device_printf(adapter->pdev,
3824		    "Unmapped TX DMA tag associations\n");
3825
3826	ena_free_irqs(adapter);
3827
3828	ena_free_pci_resources(adapter);
3829
3830	if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)))
3831		ena_com_rss_destroy(ena_dev);
3832
3833	ena_com_delete_host_info(ena_dev);
3834
3835	ENA_LOCK_DESTROY(adapter);
3836
3837	if_free(adapter->ifp);
3838
3839	if (ena_dev->bus != NULL)
3840		free(ena_dev->bus, M_DEVBUF);
3841
3842	if (ena_dev != NULL)
3843		free(ena_dev, M_DEVBUF);
3844
3845	return (bus_generic_detach(pdev));
3846}
3847
3848/******************************************************************************
3849 ******************************** AENQ Handlers *******************************
3850 *****************************************************************************/
3851/**
3852 * ena_update_on_link_change:
3853 * Notify the network interface about the change in link status
3854 **/
3855static void
3856ena_update_on_link_change(void *adapter_data,
3857    struct ena_admin_aenq_entry *aenq_e)
3858{
3859	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3860	struct ena_admin_aenq_link_change_desc *aenq_desc;
3861	int status;
3862	if_t ifp;
3863
3864	aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
3865	ifp = adapter->ifp;
3866	status = aenq_desc->flags &
3867	    ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3868
3869	if (status != 0) {
3870		device_printf(adapter->pdev, "link is UP\n");
3871		ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3872		if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter))
3873			if_link_state_change(ifp, LINK_STATE_UP);
3874	} else {
3875		device_printf(adapter->pdev, "link is DOWN\n");
3876		if_link_state_change(ifp, LINK_STATE_DOWN);
3877		ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter);
3878	}
3879}
3880
3881static void ena_notification(void *adapter_data,
3882    struct ena_admin_aenq_entry *aenq_e)
3883{
3884	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3885	struct ena_admin_ena_hw_hints *hints;
3886
3887	ENA_WARN(NULL, aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3888	    "Invalid group(%x) expected %x\n",	aenq_e->aenq_common_desc.group,
3889	    ENA_ADMIN_NOTIFICATION);
3890
3891	switch (aenq_e->aenq_common_desc.syndrome) {
3892	case ENA_ADMIN_UPDATE_HINTS:
3893		hints =
3894		    (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4);
3895		ena_update_hints(adapter, hints);
3896		break;
3897	default:
3898		device_printf(adapter->pdev,
3899		    "Invalid aenq notification link state %d\n",
3900		    aenq_e->aenq_common_desc.syndrome);
3901	}
3902}
3903
3904/**
3905 * This handler will called for unknown event group or unimplemented handlers
3906 **/
3907static void
3908unimplemented_aenq_handler(void *adapter_data,
3909    struct ena_admin_aenq_entry *aenq_e)
3910{
3911	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3912
3913	device_printf(adapter->pdev,
3914	    "Unknown event was received or event with unimplemented handler\n");
3915}
3916
3917static struct ena_aenq_handlers aenq_handlers = {
3918    .handlers = {
3919	    [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3920	    [ENA_ADMIN_NOTIFICATION] = ena_notification,
3921	    [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3922    },
3923    .unimplemented_handler = unimplemented_aenq_handler
3924};
3925
3926/*********************************************************************
3927 *  FreeBSD Device Interface Entry Points
3928 *********************************************************************/
3929
3930static device_method_t ena_methods[] = {
3931    /* Device interface */
3932    DEVMETHOD(device_probe, ena_probe),
3933    DEVMETHOD(device_attach, ena_attach),
3934    DEVMETHOD(device_detach, ena_detach),
3935    DEVMETHOD_END
3936};
3937
3938static driver_t ena_driver = {
3939    "ena", ena_methods, sizeof(struct ena_adapter),
3940};
3941
3942devclass_t ena_devclass;
3943DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0);
3944MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array,
3945    nitems(ena_vendor_info_array) - 1);
3946MODULE_DEPEND(ena, pci, 1, 1, 1);
3947MODULE_DEPEND(ena, ether, 1, 1, 1);
3948#ifdef DEV_NETMAP
3949MODULE_DEPEND(ena, netmap, 1, 1, 1);
3950#endif /* DEV_NETMAP */
3951
3952/*********************************************************************/
3953