/*	$OpenBSD: if_bnxt.c,v 1.51 2024/06/26 01:40:49 jsg Exp $	*/
/*-
 * Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016 Broadcom, All Rights Reserved.
 * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2018 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/stdint.h>
#include <sys/sockio.h>
#include <sys/atomic.h>
#include <sys/intrmap.h>

#include <machine/bus.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_bnxtreg.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/route.h>
#include <net/toeplitz.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>

#define BNXT_HWRM_BAR		0x10
#define BNXT_DOORBELL_BAR	0x18

#define BNXT_MAX_QUEUES		8

#define BNXT_CP_RING_ID_BASE	0
#define BNXT_RX_RING_ID_BASE	(BNXT_MAX_QUEUES + 1)
#define BNXT_AG_RING_ID_BASE	((BNXT_MAX_QUEUES * 2) + 1)
#define BNXT_TX_RING_ID_BASE	((BNXT_MAX_QUEUES * 3) + 1)
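/*
 * for illustration: with BNXT_MAX_QUEUES at 8, queue i uses cp ring id
 * i + 1 (id 0 is the async-only cp ring), rx ring id 9 + i, ag ring id
 * 17 + i and tx ring id 25 + i; each id selects its own 0x80-byte slot
 * in the doorbell BAR (see the "id * 0x80" doorbell assignments below).
 */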

#define BNXT_MAX_MTU		9500
#define BNXT_AG_BUFFER_SIZE	8192

#define BNXT_CP_PAGES		4

#define BNXT_MAX_TX_SEGS	31
#define BNXT_TX_SLOTS(bs)	((bs)->bs_map->dm_nsegs + 1)
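/* slots used per packet: one tx_bd per DMA segment plus the tx_bd_long_hi */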

#define BNXT_HWRM_SHORT_REQ_LEN	sizeof(struct hwrm_short_input)

#define BNXT_HWRM_LOCK_INIT(_sc, _name)	\
	mtx_init_flags(&(_sc)->sc_lock, IPL_NET, _name, 0)
#define BNXT_HWRM_LOCK(_sc)		mtx_enter(&(_sc)->sc_lock)
#define BNXT_HWRM_UNLOCK(_sc)		mtx_leave(&(_sc)->sc_lock)
#define BNXT_HWRM_LOCK_DESTROY(_sc)	/* nothing */
#define BNXT_HWRM_LOCK_ASSERT(_sc)	MUTEX_ASSERT_LOCKED(&(_sc)->sc_lock)

#define BNXT_FLAG_VF            0x0001
#define BNXT_FLAG_NPAR          0x0002
#define BNXT_FLAG_WOL_CAP       0x0004
#define BNXT_FLAG_SHORT_CMD     0x0008
#define BNXT_FLAG_MSIX          0x0010

/* NVRam stuff has a five minute timeout */
#define BNXT_NVM_TIMEO	(5 * 60 * 1000)

#define NEXT_CP_CONS_V(_ring, _cons, _v_bit)		\
do {							\
	if (++(_cons) == (_ring)->ring_size)		\
		((_cons) = 0, (_v_bit) = !(_v_bit));	\
} while (0)

struct bnxt_ring {
	uint64_t		paddr;
	uint64_t		doorbell;
	caddr_t			vaddr;
	uint32_t		ring_size;
	uint16_t		id;
	uint16_t		phys_id;
};

struct bnxt_cp_ring {
	struct bnxt_ring	ring;
	void			*irq;
	struct bnxt_softc	*softc;
	uint32_t		cons;
	int			v_bit;
	uint32_t		commit_cons;
	int			commit_v_bit;
	struct ctx_hw_stats	*stats;
	uint32_t		stats_ctx_id;
	struct bnxt_dmamem	*ring_mem;
};

struct bnxt_grp_info {
	uint32_t		grp_id;
	uint16_t		stats_ctx;
	uint16_t		rx_ring_id;
	uint16_t		cp_ring_id;
	uint16_t		ag_ring_id;
};

struct bnxt_vnic_info {
	uint16_t		id;
	uint16_t		def_ring_grp;
	uint16_t		cos_rule;
	uint16_t		lb_rule;
	uint16_t		mru;

	uint32_t		flags;
#define BNXT_VNIC_FLAG_DEFAULT		0x01
#define BNXT_VNIC_FLAG_BD_STALL		0x02
#define BNXT_VNIC_FLAG_VLAN_STRIP	0x04

	uint64_t		filter_id;
	uint32_t		flow_id;

	uint16_t		rss_id;
};

struct bnxt_slot {
	bus_dmamap_t		bs_map;
	struct mbuf		*bs_m;
};

struct bnxt_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	caddr_t			bdm_kva;
};
#define BNXT_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BNXT_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BNXT_DMA_DVA(_bdm)	((u_int64_t)(_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BNXT_DMA_KVA(_bdm)	((void *)(_bdm)->bdm_kva)

struct bnxt_rx_queue {
	struct bnxt_softc	*rx_softc;
	struct ifiqueue		*rx_ifiq;
	struct bnxt_dmamem	*rx_ring_mem;	/* rx and ag */
	struct bnxt_ring	rx_ring;
	struct bnxt_ring	rx_ag_ring;
	struct if_rxring	rxr[2];
	struct bnxt_slot	*rx_slots;
	struct bnxt_slot	*rx_ag_slots;
	int			rx_prod;
	int			rx_cons;
	int			rx_ag_prod;
	int			rx_ag_cons;
	struct timeout		rx_refill;
};

struct bnxt_tx_queue {
	struct bnxt_softc	*tx_softc;
	struct ifqueue		*tx_ifq;
	struct bnxt_dmamem	*tx_ring_mem;
	struct bnxt_ring	tx_ring;
	struct bnxt_slot	*tx_slots;
	int			tx_prod;
	int			tx_cons;
	int			tx_ring_prod;
	int			tx_ring_cons;
};

struct bnxt_queue {
	char			q_name[8];
	int			q_index;
	void			*q_ihc;
	struct bnxt_softc	*q_sc;
	struct bnxt_cp_ring	q_cp;
	struct bnxt_rx_queue	q_rx;
	struct bnxt_tx_queue	q_tx;
	struct bnxt_grp_info	q_rg;
};

struct bnxt_softc {
	struct device		sc_dev;
	struct arpcom		sc_ac;
	struct ifmedia		sc_media;

	struct mutex		sc_lock;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;
	bus_dma_tag_t		sc_dmat;

	bus_space_tag_t		sc_hwrm_t;
	bus_space_handle_t	sc_hwrm_h;
	bus_size_t		sc_hwrm_s;

	struct bnxt_dmamem	*sc_cmd_resp;
	uint16_t		sc_cmd_seq;
	uint16_t		sc_max_req_len;
	uint32_t		sc_cmd_timeo;
	uint32_t		sc_flags;

	bus_space_tag_t		sc_db_t;
	bus_space_handle_t	sc_db_h;
	bus_size_t		sc_db_s;

	void			*sc_ih;

	int			sc_hwrm_ver;
	int			sc_tx_queue_id;

	struct bnxt_vnic_info	sc_vnic;
	struct bnxt_dmamem	*sc_stats_ctx_mem;
	struct bnxt_dmamem	*sc_rx_cfg;

	struct bnxt_cp_ring	sc_cp_ring;

	int			sc_nqueues;
	struct intrmap		*sc_intrmap;
	struct bnxt_queue	sc_queues[BNXT_MAX_QUEUES];
};
#define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)

const struct pci_matchid bnxt_devices[] = {
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57301 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57302 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57304 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57311 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57312 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57314 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57402 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57404 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57406 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57407 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57412 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57414 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57416_SFP },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417 },
	{ PCI_VENDOR_BROADCOM,	PCI_PRODUCT_BROADCOM_BCM57417_SFP }
};

int		bnxt_match(struct device *, void *, void *);
void		bnxt_attach(struct device *, struct device *, void *);

void		bnxt_up(struct bnxt_softc *);
void		bnxt_down(struct bnxt_softc *);
void		bnxt_iff(struct bnxt_softc *);
int		bnxt_ioctl(struct ifnet *, u_long, caddr_t);
int		bnxt_rxrinfo(struct bnxt_softc *, struct if_rxrinfo *);
void		bnxt_start(struct ifqueue *);
int		bnxt_admin_intr(void *);
int		bnxt_intr(void *);
void		bnxt_watchdog(struct ifnet *);
void		bnxt_media_status(struct ifnet *, struct ifmediareq *);
int		bnxt_media_change(struct ifnet *);
int		bnxt_media_autonegotiate(struct bnxt_softc *);

struct cmpl_base *bnxt_cpr_next_cmpl(struct bnxt_softc *, struct bnxt_cp_ring *);
void		bnxt_cpr_commit(struct bnxt_softc *, struct bnxt_cp_ring *);
void		bnxt_cpr_rollback(struct bnxt_softc *, struct bnxt_cp_ring *);

void		bnxt_mark_cpr_invalid(struct bnxt_cp_ring *);
void		bnxt_write_cp_doorbell(struct bnxt_softc *, struct bnxt_ring *,
		    int);
void		bnxt_write_cp_doorbell_index(struct bnxt_softc *,
		    struct bnxt_ring *, uint32_t, int);
void		bnxt_write_rx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
		    int);
void		bnxt_write_tx_doorbell(struct bnxt_softc *, struct bnxt_ring *,
		    int);

int		bnxt_rx_fill(struct bnxt_queue *);
u_int		bnxt_rx_fill_slots(struct bnxt_softc *, struct bnxt_ring *, void *,
		    struct bnxt_slot *, uint *, int, uint16_t, u_int);
void		bnxt_refill(void *);
int		bnxt_rx(struct bnxt_softc *, struct bnxt_rx_queue *,
		    struct bnxt_cp_ring *, struct mbuf_list *, int *, int *,
		    struct cmpl_base *);

void		bnxt_txeof(struct bnxt_softc *, struct bnxt_tx_queue *, int *,
		    struct cmpl_base *);

int		bnxt_set_cp_ring_aggint(struct bnxt_softc *, struct bnxt_cp_ring *);

int		_hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
int		hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
void		bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
int		bnxt_hwrm_err_map(uint16_t err);

/* HWRM Function Prototypes */
int		bnxt_hwrm_ring_alloc(struct bnxt_softc *, uint8_t,
		    struct bnxt_ring *, uint16_t, uint32_t, int);
int		bnxt_hwrm_ring_free(struct bnxt_softc *, uint8_t,
		    struct bnxt_ring *);
int		bnxt_hwrm_ver_get(struct bnxt_softc *);
int		bnxt_hwrm_queue_qportcfg(struct bnxt_softc *);
int		bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *);
int		bnxt_hwrm_func_qcaps(struct bnxt_softc *);
int		bnxt_hwrm_func_qcfg(struct bnxt_softc *);
int		bnxt_hwrm_func_reset(struct bnxt_softc *);
int		bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *, uint16_t *);
int		bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *, uint16_t *);
int		bnxt_hwrm_vnic_cfg(struct bnxt_softc *,
		    struct bnxt_vnic_info *);
int		bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *,
		    struct bnxt_vnic_info *vnic);
int		bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *,
		    struct bnxt_cp_ring *, uint64_t);
int		bnxt_hwrm_stat_ctx_free(struct bnxt_softc *,
		    struct bnxt_cp_ring *);
int		bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *,
		    struct bnxt_grp_info *);
int		bnxt_hwrm_ring_grp_free(struct bnxt_softc *,
		    struct bnxt_grp_info *);
int		bnxt_hwrm_vnic_alloc(struct bnxt_softc *,
		    struct bnxt_vnic_info *);
int		bnxt_hwrm_vnic_free(struct bnxt_softc *,
		    struct bnxt_vnic_info *);
int		bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *,
		    uint32_t, uint32_t, uint64_t, uint32_t);
int		bnxt_hwrm_set_filter(struct bnxt_softc *,
		    struct bnxt_vnic_info *);
int		bnxt_hwrm_free_filter(struct bnxt_softc *,
		    struct bnxt_vnic_info *);
int		bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *,
		    struct bnxt_vnic_info *, uint32_t, daddr_t, daddr_t);
int		bnxt_cfg_async_cr(struct bnxt_softc *, struct bnxt_cp_ring *);
int		bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *, uint16_t *,
		    uint16_t *, uint32_t *, uint32_t *, uint32_t *, uint32_t *);
int		bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *,
		    struct ifmediareq *);
int		bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *);
int		bnxt_get_sffpage(struct bnxt_softc *, struct if_sffpage *);

/* not used yet: */
#if 0
int bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown);

int bnxt_hwrm_port_qstats(struct bnxt_softc *softc);


int bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc);
void bnxt_validate_hw_lro_settings(struct bnxt_softc *softc);
int bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
    uint8_t *selfreset);
int bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type,
    uint8_t *selfreset);
int bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year,
    uint8_t *month, uint8_t *day, uint8_t *hour, uint8_t *minute,
    uint8_t *second, uint16_t *millisecond, uint16_t *zone);
int bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year,
    uint8_t month, uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
    uint16_t millisecond, uint16_t zone);

#endif


const struct cfattach bnxt_ca = {
	sizeof(struct bnxt_softc), bnxt_match, bnxt_attach
};

struct cfdriver bnxt_cd = {
	NULL, "bnxt", DV_IFNET
};

struct bnxt_dmamem *
bnxt_dmamem_alloc(struct bnxt_softc *sc, size_t size)
{
	struct bnxt_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->bdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->bdm_seg, nsegs, size, &m->bdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->bdm_map, m->bdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
bdmfree:
	free(m, M_DEVBUF, sizeof *m);

	return (NULL);
}

void
bnxt_dmamem_free(struct bnxt_softc *sc, struct bnxt_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->bdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->bdm_kva, m->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->bdm_map);
	free(m, M_DEVBUF, sizeof *m);
}

int
bnxt_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, bnxt_devices, nitems(bnxt_devices)));
}

void
bnxt_attach(struct device *parent, struct device *self, void *aux)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct pci_attach_args *pa = aux;
	struct bnxt_cp_ring *cpr;
	pci_intr_handle_t ih;
	const char *intrstr;
	u_int memtype;
	int i;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_HWRM_BAR);
	if (pci_mapreg_map(pa, BNXT_HWRM_BAR, memtype, 0, &sc->sc_hwrm_t,
	    &sc->sc_hwrm_h, NULL, &sc->sc_hwrm_s, 0)) {
		printf(": failed to map hwrm\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BNXT_DOORBELL_BAR);
	if (pci_mapreg_map(pa, BNXT_DOORBELL_BAR, memtype, 0, &sc->sc_db_t,
	    &sc->sc_db_h, NULL, &sc->sc_db_s, 0)) {
		printf(": failed to map doorbell\n");
		goto unmap_1;
	}

	BNXT_HWRM_LOCK_INIT(sc, DEVNAME(sc));
	sc->sc_cmd_resp = bnxt_dmamem_alloc(sc, PAGE_SIZE);
	if (sc->sc_cmd_resp == NULL) {
		printf(": failed to allocate command response buffer\n");
		goto unmap_2;
	}

	if (bnxt_hwrm_ver_get(sc) != 0) {
		printf(": failed to query version info\n");
		goto free_resp;
	}

	if (bnxt_hwrm_nvm_get_dev_info(sc, NULL, NULL, NULL, NULL, NULL, NULL)
	    != 0) {
		printf(": failed to get nvram info\n");
		goto free_resp;
	}

	if (bnxt_hwrm_func_drv_rgtr(sc) != 0) {
		printf(": failed to register driver with firmware\n");
		goto free_resp;
	}

	if (bnxt_hwrm_func_rgtr_async_events(sc) != 0) {
		printf(": failed to register async events\n");
		goto free_resp;
	}

	if (bnxt_hwrm_func_qcaps(sc) != 0) {
		printf(": failed to get queue capabilities\n");
		goto free_resp;
	}

	/*
	 * devices advertise msi support, but there's no way to tell a
	 * completion queue to use msi mode, only legacy or msi-x.
	 */
	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
		int nmsix;

		sc->sc_flags |= BNXT_FLAG_MSIX;
		intrstr = pci_intr_string(sc->sc_pc, ih);

		nmsix = pci_intr_msix_count(pa);
		if (nmsix > 1) {
			sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
			    IPL_NET | IPL_MPSAFE, bnxt_admin_intr, sc, DEVNAME(sc));
			sc->sc_intrmap = intrmap_create(&sc->sc_dev,
			    nmsix - 1, BNXT_MAX_QUEUES, INTRMAP_POWEROF2);
			sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
			KASSERT(sc->sc_nqueues > 0);
			KASSERT(powerof2(sc->sc_nqueues));
		} else {
			sc->sc_ih = pci_intr_establish(sc->sc_pc, ih,
			    IPL_NET | IPL_MPSAFE, bnxt_intr, &sc->sc_queues[0],
			    DEVNAME(sc));
			sc->sc_nqueues = 1;
		}
	} else if (pci_intr_map(pa, &ih) == 0) {
		intrstr = pci_intr_string(sc->sc_pc, ih);
		sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_NET | IPL_MPSAFE,
		    bnxt_intr, &sc->sc_queues[0], DEVNAME(sc));
		sc->sc_nqueues = 1;
	} else {
		printf(": unable to map interrupt\n");
		goto free_resp;
	}
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto deintr;
	}
	printf("%s, %d queues, address %s\n", intrstr, sc->sc_nqueues,
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	if (bnxt_hwrm_func_qcfg(sc) != 0) {
		printf("%s: failed to query function config\n", DEVNAME(sc));
		goto deintr;
	}

	if (bnxt_hwrm_queue_qportcfg(sc) != 0) {
		printf("%s: failed to query port config\n", DEVNAME(sc));
		goto deintr;
	}

	if (bnxt_hwrm_func_reset(sc) != 0) {
		printf("%s: reset failed\n", DEVNAME(sc));
		goto deintr;
	}

	if (sc->sc_intrmap == NULL)
		cpr = &sc->sc_queues[0].q_cp;
	else
		cpr = &sc->sc_cp_ring;

	cpr->stats_ctx_id = HWRM_NA_SIGNATURE;
	cpr->ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	cpr->softc = sc;
	cpr->ring.id = 0;
	cpr->ring.doorbell = cpr->ring.id * 0x80;
	cpr->ring.ring_size = (PAGE_SIZE * BNXT_CP_PAGES) /
	    sizeof(struct cmpl_base);
	cpr->ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE *
	    BNXT_CP_PAGES);
	if (cpr->ring_mem == NULL) {
		printf("%s: failed to allocate completion queue memory\n",
		    DEVNAME(sc));
		goto deintr;
	}
	cpr->ring.vaddr = BNXT_DMA_KVA(cpr->ring_mem);
	cpr->ring.paddr = BNXT_DMA_DVA(cpr->ring_mem);
	cpr->cons = UINT32_MAX;
	cpr->v_bit = 1;
	bnxt_mark_cpr_invalid(cpr);
	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
	    &cpr->ring, (uint16_t)HWRM_NA_SIGNATURE,
	    HWRM_NA_SIGNATURE, 1) != 0) {
		printf("%s: failed to allocate completion queue\n",
		    DEVNAME(sc));
		goto free_cp_mem;
	}
	if (bnxt_cfg_async_cr(sc, cpr) != 0) {
		printf("%s: failed to set async completion ring\n",
		    DEVNAME(sc));
		goto free_cp_mem;
	}
	bnxt_write_cp_doorbell(sc, &cpr->ring, 1);

	if (bnxt_set_cp_ring_aggint(sc, cpr) != 0) {
		printf("%s: failed to set interrupt aggregation\n",
		    DEVNAME(sc));
		goto free_cp_mem;
	}

	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = bnxt_ioctl;
	ifp->if_qstart = bnxt_start;
	ifp->if_watchdog = bnxt_watchdog;
	ifp->if_hardmtu = BNXT_MAX_MTU;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv6 |
	    IFCAP_CSUM_TCPv6;
	ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif
	ifq_init_maxlen(&ifp->if_snd, 1024);	/* ? */

	ifmedia_init(&sc->sc_media, IFM_IMASK, bnxt_media_change,
	    bnxt_media_status);

	if_attach(ifp);
	ether_ifattach(ifp);

	if_attach_iqueues(ifp, sc->sc_nqueues);
	if_attach_queues(ifp, sc->sc_nqueues);
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct ifiqueue *ifiq = ifp->if_iqs[i];
		struct ifqueue *ifq = ifp->if_ifqs[i];
		struct bnxt_queue *bq = &sc->sc_queues[i];
		struct bnxt_cp_ring *cp = &bq->q_cp;
		struct bnxt_rx_queue *rx = &bq->q_rx;
		struct bnxt_tx_queue *tx = &bq->q_tx;

		bq->q_index = i;
		bq->q_sc = sc;

		rx->rx_softc = sc;
		rx->rx_ifiq = ifiq;
		timeout_set(&rx->rx_refill, bnxt_refill, bq);
		ifiq->ifiq_softc = rx;

		tx->tx_softc = sc;
		tx->tx_ifq = ifq;
		ifq->ifq_softc = tx;

		if (sc->sc_intrmap != NULL) {
			cp->stats_ctx_id = HWRM_NA_SIGNATURE;
			cp->ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
			cp->ring.id = i + 1;	/* first cp ring is async only */
			cp->softc = sc;
			cp->ring.doorbell = bq->q_cp.ring.id * 0x80;
			cp->ring.ring_size = (PAGE_SIZE * BNXT_CP_PAGES) /
			    sizeof(struct cmpl_base);
			if (pci_intr_map_msix(pa, i + 1, &ih) != 0) {
				printf("%s: unable to map queue interrupt %d\n",
				    DEVNAME(sc), i);
				goto intrdisestablish;
			}
			snprintf(bq->q_name, sizeof(bq->q_name), "%s:%d",
			    DEVNAME(sc), i);
			bq->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
			    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
			    bnxt_intr, bq, bq->q_name);
			if (bq->q_ihc == NULL) {
				printf("%s: unable to establish interrupt %d\n",
				    DEVNAME(sc), i);
				goto intrdisestablish;
			}
		}
	}

	bnxt_media_autonegotiate(sc);
	bnxt_hwrm_port_phy_qcfg(sc, NULL);
	return;

intrdisestablish:
	for (i = 0; i < sc->sc_nqueues; i++) {
		struct bnxt_queue *bq = &sc->sc_queues[i];
		if (bq->q_ihc == NULL)
			continue;
		pci_intr_disestablish(sc->sc_pc, bq->q_ihc);
		bq->q_ihc = NULL;
	}
free_cp_mem:
	bnxt_dmamem_free(sc, cpr->ring_mem);
deintr:
	if (sc->sc_intrmap != NULL) {
		intrmap_destroy(sc->sc_intrmap);
		sc->sc_intrmap = NULL;
	}
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	sc->sc_ih = NULL;
free_resp:
	bnxt_dmamem_free(sc, sc->sc_cmd_resp);
unmap_2:
	bus_space_unmap(sc->sc_db_t, sc->sc_db_h, sc->sc_db_s);
	sc->sc_db_s = 0;
unmap_1:
	bus_space_unmap(sc->sc_hwrm_t, sc->sc_hwrm_h, sc->sc_hwrm_s);
	sc->sc_hwrm_s = 0;
}

void
bnxt_free_slots(struct bnxt_softc *sc, struct bnxt_slot *slots, int allocated,
    int total)
{
	struct bnxt_slot *bs;

	int i = allocated;
	while (i-- > 0) {
		bs = &slots[i];
		bus_dmamap_destroy(sc->sc_dmat, bs->bs_map);
		if (bs->bs_m != NULL)
			m_freem(bs->bs_m);
	}
	free(slots, M_DEVBUF, total * sizeof(*bs));
}

int
bnxt_set_cp_ring_aggint(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input aggint;

	/*
	 * set interrupt aggregation parameters for around 10k interrupts
	 * per second.  the timers are in units of 80ns, and the counters
	 * are based on the minimum rx ring size of 32.
	 */
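	/*
	 * worked example (assuming the 80ns tick implied by the arithmetic
	 * below): the dma timers are (1000000000 / 20000) / 80 = 625 ticks
	 * = 50us, and int_lat_tmr_max is (1000000000 / 10000) / 80 = 1250
	 * ticks = 100us, i.e. at most one interrupt every 100us, which is
	 * the ~10k interrupts per second mentioned above.
	 */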
	memset(&aggint, 0, sizeof(aggint));
	bnxt_hwrm_cmd_hdr_init(sc, &aggint,
	    HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
	aggint.ring_id = htole16(cpr->ring.phys_id);
	aggint.num_cmpl_dma_aggr = htole16(32);
	aggint.num_cmpl_dma_aggr_during_int = aggint.num_cmpl_dma_aggr;
	aggint.cmpl_aggr_dma_tmr = htole16((1000000000 / 20000) / 80);
	aggint.cmpl_aggr_dma_tmr_during_int = aggint.cmpl_aggr_dma_tmr;
	aggint.int_lat_tmr_min = htole16((1000000000 / 20000) / 80);
	aggint.int_lat_tmr_max = htole16((1000000000 / 10000) / 80);
	aggint.num_cmpl_aggr_int = htole16(16);
	return (hwrm_send_message(sc, &aggint, sizeof(aggint)));
}

int
bnxt_queue_up(struct bnxt_softc *sc, struct bnxt_queue *bq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct bnxt_cp_ring *cp = &bq->q_cp;
	struct bnxt_rx_queue *rx = &bq->q_rx;
	struct bnxt_tx_queue *tx = &bq->q_tx;
	struct bnxt_grp_info *rg = &bq->q_rg;
	struct bnxt_slot *bs;
	int i;

	tx->tx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE);
	if (tx->tx_ring_mem == NULL) {
		printf("%s: failed to allocate tx ring %d\n", DEVNAME(sc), bq->q_index);
		return ENOMEM;
	}

	rx->rx_ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
	if (rx->rx_ring_mem == NULL) {
		printf("%s: failed to allocate rx ring %d\n", DEVNAME(sc), bq->q_index);
		goto free_tx;
	}

	/* completion ring is already allocated if we're not using an intrmap */
	if (sc->sc_intrmap != NULL) {
		cp->ring_mem = bnxt_dmamem_alloc(sc, PAGE_SIZE * BNXT_CP_PAGES);
		if (cp->ring_mem == NULL) {
			printf("%s: failed to allocate completion ring %d mem\n",
			    DEVNAME(sc), bq->q_index);
			goto free_rx;
		}
		cp->ring.vaddr = BNXT_DMA_KVA(cp->ring_mem);
		cp->ring.paddr = BNXT_DMA_DVA(cp->ring_mem);
		cp->cons = UINT32_MAX;
		cp->v_bit = 1;
		bnxt_mark_cpr_invalid(cp);

		if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &cp->ring, (uint16_t)HWRM_NA_SIGNATURE,
		    HWRM_NA_SIGNATURE, 1) != 0) {
			printf("%s: failed to allocate completion queue %d\n",
			    DEVNAME(sc), bq->q_index);
			goto free_rx;
		}

		if (bnxt_set_cp_ring_aggint(sc, cp) != 0) {
			printf("%s: failed to set interrupt %d aggregation\n",
			    DEVNAME(sc), bq->q_index);
			goto free_rx;
		}
		bnxt_write_cp_doorbell(sc, &cp->ring, 1);
	}

	if (bnxt_hwrm_stat_ctx_alloc(sc, &bq->q_cp,
	    BNXT_DMA_DVA(sc->sc_stats_ctx_mem) +
	    (bq->q_index * sizeof(struct ctx_hw_stats))) != 0) {
		printf("%s: failed to set up stats context\n", DEVNAME(sc));
		goto free_rx;
	}

	tx->tx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	tx->tx_ring.id = BNXT_TX_RING_ID_BASE + bq->q_index;
	tx->tx_ring.doorbell = tx->tx_ring.id * 0x80;
	tx->tx_ring.ring_size = PAGE_SIZE / sizeof(struct tx_bd_short);
	tx->tx_ring.vaddr = BNXT_DMA_KVA(tx->tx_ring_mem);
	tx->tx_ring.paddr = BNXT_DMA_DVA(tx->tx_ring_mem);
	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
	    &tx->tx_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
		printf("%s: failed to set up tx ring\n",
		    DEVNAME(sc));
		goto dealloc_stats;
	}
	bnxt_write_tx_doorbell(sc, &tx->tx_ring, 0);

	rx->rx_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	rx->rx_ring.id = BNXT_RX_RING_ID_BASE + bq->q_index;
	rx->rx_ring.doorbell = rx->rx_ring.id * 0x80;
	rx->rx_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
	rx->rx_ring.vaddr = BNXT_DMA_KVA(rx->rx_ring_mem);
	rx->rx_ring.paddr = BNXT_DMA_DVA(rx->rx_ring_mem);
	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
		printf("%s: failed to set up rx ring\n",
		    DEVNAME(sc));
		goto dealloc_tx;
	}
	bnxt_write_rx_doorbell(sc, &rx->rx_ring, 0);

	rx->rx_ag_ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
	rx->rx_ag_ring.id = BNXT_AG_RING_ID_BASE + bq->q_index;
	rx->rx_ag_ring.doorbell = rx->rx_ag_ring.id * 0x80;
	rx->rx_ag_ring.ring_size = PAGE_SIZE / sizeof(struct rx_prod_pkt_bd);
	rx->rx_ag_ring.vaddr = BNXT_DMA_KVA(rx->rx_ring_mem) + PAGE_SIZE;
	rx->rx_ag_ring.paddr = BNXT_DMA_DVA(rx->rx_ring_mem) + PAGE_SIZE;
	if (bnxt_hwrm_ring_alloc(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ag_ring, cp->ring.phys_id, HWRM_NA_SIGNATURE, 1) != 0) {
		printf("%s: failed to set up rx ag ring\n",
		    DEVNAME(sc));
		goto dealloc_rx;
	}
	bnxt_write_rx_doorbell(sc, &rx->rx_ag_ring, 0);

	rg->grp_id = HWRM_NA_SIGNATURE;
	rg->stats_ctx = cp->stats_ctx_id;
	rg->rx_ring_id = rx->rx_ring.phys_id;
	rg->ag_ring_id = rx->rx_ag_ring.phys_id;
	rg->cp_ring_id = cp->ring.phys_id;
	if (bnxt_hwrm_ring_grp_alloc(sc, rg) != 0) {
		printf("%s: failed to allocate ring group\n",
		    DEVNAME(sc));
		goto dealloc_ag;
	}

	rx->rx_slots = mallocarray(sizeof(*bs), rx->rx_ring.ring_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (rx->rx_slots == NULL) {
		printf("%s: failed to allocate rx slots\n", DEVNAME(sc));
		goto dealloc_ring_group;
	}

	for (i = 0; i < rx->rx_ring.ring_size; i++) {
		bs = &rx->rx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bs->bs_map) != 0) {
			printf("%s: failed to allocate rx dma maps\n",
			    DEVNAME(sc));
			goto destroy_rx_slots;
		}
	}

	rx->rx_ag_slots = mallocarray(sizeof(*bs), rx->rx_ag_ring.ring_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (rx->rx_ag_slots == NULL) {
		printf("%s: failed to allocate rx ag slots\n", DEVNAME(sc));
		goto destroy_rx_slots;
	}

	for (i = 0; i < rx->rx_ag_ring.ring_size; i++) {
		bs = &rx->rx_ag_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, BNXT_AG_BUFFER_SIZE, 1,
		    BNXT_AG_BUFFER_SIZE, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &bs->bs_map) != 0) {
			printf("%s: failed to allocate rx ag dma maps\n",
			    DEVNAME(sc));
			goto destroy_rx_ag_slots;
		}
	}

	tx->tx_slots = mallocarray(sizeof(*bs), tx->tx_ring.ring_size,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	if (tx->tx_slots == NULL) {
		printf("%s: failed to allocate tx slots\n", DEVNAME(sc));
		goto destroy_rx_ag_slots;
	}

	for (i = 0; i < tx->tx_ring.ring_size; i++) {
		bs = &tx->tx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, MAXMCLBYTES, BNXT_MAX_TX_SEGS,
		    BNXT_MAX_MTU, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &bs->bs_map) != 0) {
			printf("%s: failed to allocate tx dma maps\n",
			    DEVNAME(sc));
			goto destroy_tx_slots;
		}
	}

	/*
	 * initially, the rx ring must be filled at least some distance beyond
	 * the current consumer index, as it looks like the firmware assumes the
	 * ring is full on creation, but doesn't prefetch the whole thing.
	 * once the whole ring has been used once, we should be able to back off
	 * to 2 or so slots, but we currently don't have a way of doing that.
	 */
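	/*
	 * hence the high watermark of ring_size - 1 on both rx rings below:
	 * the fill code keeps the rings as close to full as it can.
	 */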
	if_rxr_init(&rx->rxr[0], 32, rx->rx_ring.ring_size - 1);
	if_rxr_init(&rx->rxr[1], 32, rx->rx_ag_ring.ring_size - 1);
	rx->rx_prod = 0;
	rx->rx_cons = 0;
	rx->rx_ag_prod = 0;
	rx->rx_ag_cons = 0;
	bnxt_rx_fill(bq);

	tx->tx_cons = 0;
	tx->tx_prod = 0;
	tx->tx_ring_cons = 0;
	tx->tx_ring_prod = 0;
	ifq_clr_oactive(ifp->if_ifqs[bq->q_index]);
	ifq_restart(ifp->if_ifqs[bq->q_index]);
	return 0;

destroy_tx_slots:
	bnxt_free_slots(sc, tx->tx_slots, i, tx->tx_ring.ring_size);
	tx->tx_slots = NULL;

	i = rx->rx_ag_ring.ring_size;
destroy_rx_ag_slots:
	bnxt_free_slots(sc, rx->rx_ag_slots, i, rx->rx_ag_ring.ring_size);
	rx->rx_ag_slots = NULL;

	i = rx->rx_ring.ring_size;
destroy_rx_slots:
	bnxt_free_slots(sc, rx->rx_slots, i, rx->rx_ring.ring_size);
	rx->rx_slots = NULL;
dealloc_ring_group:
	bnxt_hwrm_ring_grp_free(sc, &bq->q_rg);
dealloc_ag:
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ag_ring);
dealloc_tx:
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
	    &tx->tx_ring);
dealloc_rx:
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ring);
dealloc_stats:
	bnxt_hwrm_stat_ctx_free(sc, cp);
free_rx:
	bnxt_dmamem_free(sc, rx->rx_ring_mem);
	rx->rx_ring_mem = NULL;
free_tx:
	bnxt_dmamem_free(sc, tx->tx_ring_mem);
	tx->tx_ring_mem = NULL;
	return ENOMEM;
}

void
bnxt_queue_down(struct bnxt_softc *sc, struct bnxt_queue *bq)
{
	struct bnxt_cp_ring *cp = &bq->q_cp;
	struct bnxt_rx_queue *rx = &bq->q_rx;
	struct bnxt_tx_queue *tx = &bq->q_tx;

	bnxt_free_slots(sc, tx->tx_slots, tx->tx_ring.ring_size,
	    tx->tx_ring.ring_size);
	tx->tx_slots = NULL;

	bnxt_free_slots(sc, rx->rx_ag_slots, rx->rx_ag_ring.ring_size,
	    rx->rx_ag_ring.ring_size);
	rx->rx_ag_slots = NULL;

	bnxt_free_slots(sc, rx->rx_slots, rx->rx_ring.ring_size,
	    rx->rx_ring.ring_size);
	rx->rx_slots = NULL;

	bnxt_hwrm_ring_grp_free(sc, &bq->q_rg);
	bnxt_hwrm_stat_ctx_free(sc, &bq->q_cp);

	/* may need to wait for 500ms here before we can free the rings */

	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
	    &tx->tx_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ag_ring);
	bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
	    &rx->rx_ring);

	/* if no intrmap, leave cp ring in place for async events */
	if (sc->sc_intrmap != NULL) {
		bnxt_hwrm_ring_free(sc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
		    &cp->ring);

		bnxt_dmamem_free(sc, cp->ring_mem);
		cp->ring_mem = NULL;
	}

	bnxt_dmamem_free(sc, rx->rx_ring_mem);
	rx->rx_ring_mem = NULL;

	bnxt_dmamem_free(sc, tx->tx_ring_mem);
	tx->tx_ring_mem = NULL;
}

void
bnxt_up(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	sc->sc_stats_ctx_mem = bnxt_dmamem_alloc(sc,
	    sizeof(struct ctx_hw_stats) * sc->sc_nqueues);
	if (sc->sc_stats_ctx_mem == NULL) {
		printf("%s: failed to allocate stats contexts\n", DEVNAME(sc));
		return;
	}

	sc->sc_rx_cfg = bnxt_dmamem_alloc(sc, PAGE_SIZE * 2);
	if (sc->sc_rx_cfg == NULL) {
		printf("%s: failed to allocate rx config buffer\n",
		    DEVNAME(sc));
		goto free_stats;
	}

	for (i = 0; i < sc->sc_nqueues; i++) {
		if (bnxt_queue_up(sc, &sc->sc_queues[i]) != 0) {
			goto down_queues;
		}
	}

	sc->sc_vnic.rss_id = (uint16_t)HWRM_NA_SIGNATURE;
	if (bnxt_hwrm_vnic_ctx_alloc(sc, &sc->sc_vnic.rss_id) != 0) {
		printf("%s: failed to allocate vnic rss context\n",
		    DEVNAME(sc));
		goto down_all_queues;
	}

	sc->sc_vnic.id = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.def_ring_grp = sc->sc_queues[0].q_rg.grp_id;
	sc->sc_vnic.mru = BNXT_MAX_MTU;
	sc->sc_vnic.cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	sc->sc_vnic.flags = BNXT_VNIC_FLAG_DEFAULT |
	    BNXT_VNIC_FLAG_VLAN_STRIP;
	if (bnxt_hwrm_vnic_alloc(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to allocate vnic\n", DEVNAME(sc));
		goto dealloc_vnic_ctx;
	}

	if (bnxt_hwrm_vnic_cfg(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to configure vnic\n", DEVNAME(sc));
		goto dealloc_vnic;
	}

	if (bnxt_hwrm_vnic_cfg_placement(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to configure vnic placement mode\n",
		    DEVNAME(sc));
		goto dealloc_vnic;
	}

	sc->sc_vnic.filter_id = -1;
	if (bnxt_hwrm_set_filter(sc, &sc->sc_vnic) != 0) {
		printf("%s: failed to set vnic filter\n", DEVNAME(sc));
		goto dealloc_vnic;
	}

	if (sc->sc_nqueues > 1) {
		uint16_t *rss_table = (BNXT_DMA_KVA(sc->sc_rx_cfg) + PAGE_SIZE);
		uint8_t *hash_key = (uint8_t *)(rss_table + HW_HASH_INDEX_SIZE);

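		/*
		 * spread the ring groups round-robin across the redirection
		 * table: with 4 queues, entry i points at the ring group of
		 * queue i % 4, so hashed flows land on all queues evenly.
		 */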
		for (i = 0; i < HW_HASH_INDEX_SIZE; i++) {
			struct bnxt_queue *bq;

			bq = &sc->sc_queues[i % sc->sc_nqueues];
			rss_table[i] = htole16(bq->q_rg.grp_id);
		}
		stoeplitz_to_key(hash_key, HW_HASH_KEY_SIZE);

		if (bnxt_hwrm_vnic_rss_cfg(sc, &sc->sc_vnic,
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
		    HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6,
		    BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE,
		    BNXT_DMA_DVA(sc->sc_rx_cfg) + PAGE_SIZE +
		    (HW_HASH_INDEX_SIZE * sizeof(uint16_t))) != 0) {
			printf("%s: failed to set RSS config\n", DEVNAME(sc));
			goto dealloc_vnic;
		}
	}

	bnxt_iff(sc);
	SET(ifp->if_flags, IFF_RUNNING);

	return;

dealloc_vnic:
	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
dealloc_vnic_ctx:
	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);

down_all_queues:
	i = sc->sc_nqueues;
down_queues:
	while (i-- > 0)
		bnxt_queue_down(sc, &sc->sc_queues[i]);

	bnxt_dmamem_free(sc, sc->sc_rx_cfg);
	sc->sc_rx_cfg = NULL;
free_stats:
	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
	sc->sc_stats_ctx_mem = NULL;
}

void
bnxt_down(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	CLR(ifp->if_flags, IFF_RUNNING);

	intr_barrier(sc->sc_ih);

	for (i = 0; i < sc->sc_nqueues; i++) {
		ifq_clr_oactive(ifp->if_ifqs[i]);
		ifq_barrier(ifp->if_ifqs[i]);

		timeout_del_barrier(&sc->sc_queues[i].q_rx.rx_refill);

		if (sc->sc_intrmap != NULL)
			intr_barrier(sc->sc_queues[i].q_ihc);
	}

	bnxt_hwrm_free_filter(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_free(sc, &sc->sc_vnic);
	bnxt_hwrm_vnic_ctx_free(sc, &sc->sc_vnic.rss_id);

	for (i = 0; i < sc->sc_nqueues; i++)
		bnxt_queue_down(sc, &sc->sc_queues[i]);

	bnxt_dmamem_free(sc, sc->sc_rx_cfg);
	sc->sc_rx_cfg = NULL;

	bnxt_dmamem_free(sc, sc->sc_stats_ctx_mem);
	sc->sc_stats_ctx_mem = NULL;
}

void
bnxt_iff(struct bnxt_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	char *mc_list;
	uint32_t rx_mask, mc_count;

	rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST
	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST
	    | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN;

	mc_list = BNXT_DMA_KVA(sc->sc_rx_cfg);
	mc_count = 0;

	if (ifp->if_flags & IFF_PROMISC) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	} else if ((sc->sc_ac.ac_multirangecnt > 0) ||
	    (sc->sc_ac.ac_multicnt > (PAGE_SIZE / ETHER_ADDR_LEN))) {
		SET(ifp->if_flags, IFF_ALLMULTI);
		rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	} else {
		CLR(ifp->if_flags, IFF_ALLMULTI);
		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
		while (enm != NULL) {
			memcpy(mc_list, enm->enm_addrlo, ETHER_ADDR_LEN);
			mc_list += ETHER_ADDR_LEN;
			mc_count++;

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	bnxt_hwrm_cfa_l2_set_rx_mask(sc, sc->sc_vnic.id, rx_mask,
	    BNXT_DMA_DVA(sc->sc_rx_cfg), mc_count);
}

int
bnxt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bnxt_softc	*sc = (struct bnxt_softc *)ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	int			s, error = 0;

	s = splnet();
	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				bnxt_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				bnxt_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = bnxt_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		error = bnxt_get_sffpage(sc, (struct if_sffpage *)data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
	}

	if (error == ENETRESET) {
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING))
			bnxt_iff(sc);
		error = 0;
	}

	splx(s);

	return (error);
}

int
bnxt_rxrinfo(struct bnxt_softc *sc, struct if_rxrinfo *ifri)
{
	struct if_rxring_info *ifr;
	int i;
	int error;

	ifr = mallocarray(sc->sc_nqueues * 2, sizeof(*ifr), M_TEMP,
	    M_WAITOK | M_ZERO | M_CANFAIL);
	if (ifr == NULL)
		return (ENOMEM);

	for (i = 0; i < sc->sc_nqueues; i++) {
		ifr[(i * 2)].ifr_size = MCLBYTES;
		ifr[(i * 2)].ifr_info = sc->sc_queues[i].q_rx.rxr[0];

		ifr[(i * 2) + 1].ifr_size = BNXT_AG_BUFFER_SIZE;
		ifr[(i * 2) + 1].ifr_info = sc->sc_queues[i].q_rx.rxr[1];
	}

	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues * 2, ifr);
	free(ifr, M_TEMP, sc->sc_nqueues * 2 * sizeof(*ifr));

	return (error);
}

int
bnxt_load_mbuf(struct bnxt_softc *sc, struct bnxt_slot *bs, struct mbuf *m)
{
	switch (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;

	case EFBIG:
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;

	default:
		return (1);
	}

	bs->bs_m = m;
	return (0);
}

void
bnxt_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct tx_bd_short *txring;
	struct tx_bd_long_hi *txhi;
	struct bnxt_tx_queue *tx = ifq->ifq_softc;
	struct bnxt_softc *sc = tx->tx_softc;
	struct bnxt_slot *bs;
	struct ether_extracted ext;
	bus_dmamap_t map;
	struct mbuf *m;
	u_int idx, free, used, laststart;
	uint16_t txflags, lflags;
	int i, slen;

	txring = (struct tx_bd_short *)BNXT_DMA_KVA(tx->tx_ring_mem);

	idx = tx->tx_ring_prod;
	free = tx->tx_ring_cons;
	if (free <= idx)
		free += tx->tx_ring.ring_size;
	free -= idx;
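	/*
	 * free is now (cons - prod) mod ring_size: the number of
	 * descriptors we can write before catching the consumer, with an
	 * empty ring (cons == prod) counting as fully free.
	 */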

	used = 0;

	for (;;) {
		/* +1 for tx_bd_long_hi */
		if (used + BNXT_MAX_TX_SEGS + 1 > free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		bs = &tx->tx_slots[tx->tx_prod];
		if (bnxt_load_mbuf(sc, bs, m) != 0) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		map = bs->bs_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		used += BNXT_TX_SLOTS(bs);

		/* first segment */
		laststart = idx;
		txring[idx].len = htole16(map->dm_segs[0].ds_len);
		txring[idx].opaque = tx->tx_prod;
		txring[idx].addr = htole64(map->dm_segs[0].ds_addr);
		if (m->m_pkthdr.csum_flags & M_TCP_TSO)
			slen = m->m_pkthdr.ph_mss;
		else
			slen = map->dm_mapsize;

		if (slen < 512)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT512;
		else if (slen < 1024)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT1K;
		else if (slen < 2048)
			txflags = TX_BD_LONG_FLAGS_LHINT_LT2K;
		else
			txflags = TX_BD_LONG_FLAGS_LHINT_GTE2K;
		txflags |= TX_BD_LONG_TYPE_TX_BD_LONG |
		    TX_BD_LONG_FLAGS_NO_CMPL;
		txflags |= (BNXT_TX_SLOTS(bs) << TX_BD_LONG_FLAGS_BD_CNT_SFT) &
		    TX_BD_LONG_FLAGS_BD_CNT_MASK;
		if (map->dm_nsegs == 1)
			txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
		txring[idx].flags_type = htole16(txflags);

		idx++;
		if (idx == tx->tx_ring.ring_size)
			idx = 0;

		/* long tx descriptor */
		txhi = (struct tx_bd_long_hi *)&txring[idx];
		memset(txhi, 0, sizeof(*txhi));

		lflags = 0;
		if (m->m_pkthdr.csum_flags & M_TCP_TSO) {
			uint16_t hdrsize;
			uint32_t outlen;
			uint32_t paylen;

			ether_extract_headers(m, &ext);
			if (ext.tcp && m->m_pkthdr.ph_mss > 0) {
				lflags |= TX_BD_LONG_LFLAGS_LSO;
				hdrsize = sizeof(*ext.eh);
				if (ext.ip4 || ext.ip6)
					hdrsize += ext.iphlen;
				else
					tcpstat_inc(tcps_outbadtso);

				hdrsize += ext.tcphlen;
				txhi->hdr_size = htole16(hdrsize / 2);

				outlen = m->m_pkthdr.ph_mss;
				txhi->mss = htole32(outlen);

				paylen = m->m_pkthdr.len - hdrsize;
				tcpstat_add(tcps_outpkttso,
				    (paylen + outlen + 1) / outlen);
			} else {
				tcpstat_inc(tcps_outbadtso);
			}
		} else {
			if (m->m_pkthdr.csum_flags & (M_UDP_CSUM_OUT |
			    M_TCP_CSUM_OUT))
				lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
				lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
		}
		txhi->lflags = htole16(lflags);

#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			txhi->cfa_meta = htole32(m->m_pkthdr.ether_vtag |
			    TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 |
			    TX_BD_LONG_CFA_META_KEY_VLAN_TAG);
		}
#endif

		idx++;
		if (idx == tx->tx_ring.ring_size)
			idx = 0;

		/* remaining segments */
		txflags = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		for (i = 1; i < map->dm_nsegs; i++) {
			if (i == map->dm_nsegs - 1)
				txflags |= TX_BD_SHORT_FLAGS_PACKET_END;
			txring[idx].flags_type = htole16(txflags);

			txring[idx].len =
			    htole16(bs->bs_map->dm_segs[i].ds_len);
			txring[idx].opaque = tx->tx_prod;
			txring[idx].addr =
			    htole64(bs->bs_map->dm_segs[i].ds_addr);

			idx++;
			if (idx == tx->tx_ring.ring_size)
				idx = 0;
		}

		if (++tx->tx_prod >= tx->tx_ring.ring_size)
			tx->tx_prod = 0;
	}

	/* unset NO_CMPL on the first bd of the last packet */
	if (used != 0) {
		txring[laststart].flags_type &=
		    ~htole16(TX_BD_SHORT_FLAGS_NO_CMPL);
	}

	bnxt_write_tx_doorbell(sc, &tx->tx_ring, idx);
	tx->tx_ring_prod = idx;
}

void
bnxt_handle_async_event(struct bnxt_softc *sc, struct cmpl_base *cmpl)
{
	struct hwrm_async_event_cmpl *ae = (struct hwrm_async_event_cmpl *)cmpl;
	uint16_t type = le16toh(ae->event_id);

	switch (type) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		bnxt_hwrm_port_phy_qcfg(sc, NULL);
		break;

	default:
		printf("%s: unexpected async event %x\n", DEVNAME(sc), type);
		break;
	}
}

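/*
 * peek at the next completion: a descriptor is only valid once its V bit
 * matches the phase (v_bit) we expect for the current pass around the
 * ring, since the producer flips the sense of the bit on each wrap.
 */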
struct cmpl_base *
bnxt_cpr_next_cmpl(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	struct cmpl_base *cmpl;
	uint32_t cons;
	int v_bit;

	cons = cpr->cons + 1;
	v_bit = cpr->v_bit;
	if (cons == cpr->ring.ring_size) {
		cons = 0;
		v_bit = !v_bit;
	}
	cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons];

	if ((!!(cmpl->info3_v & htole32(CMPL_BASE_V))) != (!!v_bit))
		return (NULL);

	cpr->cons = cons;
	cpr->v_bit = v_bit;
	return (cmpl);
}

void
bnxt_cpr_commit(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	cpr->commit_cons = cpr->cons;
	cpr->commit_v_bit = cpr->v_bit;
}

void
bnxt_cpr_rollback(struct bnxt_softc *sc, struct bnxt_cp_ring *cpr)
{
	cpr->cons = cpr->commit_cons;
	cpr->v_bit = cpr->commit_v_bit;
}

int
bnxt_admin_intr(void *xsc)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)xsc;
	struct bnxt_cp_ring *cpr = &sc->sc_cp_ring;
	struct cmpl_base *cmpl;
	uint16_t type;

	bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
	cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	while (cmpl != NULL) {
		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(sc, cmpl);
			break;
		default:
			printf("%s: unexpected completion type %u\n",
			    DEVNAME(sc), type);
		}

		bnxt_cpr_commit(sc, cpr);
		cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	}

	bnxt_write_cp_doorbell_index(sc, &cpr->ring,
	    (cpr->commit_cons+1) % cpr->ring.ring_size, 1);
	return (1);
}

int
bnxt_intr(void *xq)
{
	struct bnxt_queue *q = (struct bnxt_queue *)xq;
	struct bnxt_softc *sc = q->q_sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct bnxt_cp_ring *cpr = &q->q_cp;
	struct bnxt_rx_queue *rx = &q->q_rx;
	struct bnxt_tx_queue *tx = &q->q_tx;
	struct cmpl_base *cmpl;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	uint16_t type;
	int rxfree, txfree, agfree, rv, rollback;

	bnxt_write_cp_doorbell(sc, &cpr->ring, 0);
	rxfree = 0;
	txfree = 0;
	agfree = 0;
	rv = -1;
	cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	while (cmpl != NULL) {
		type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK;
		rollback = 0;
		switch (type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_handle_async_event(sc, cmpl);
			break;
		case CMPL_BASE_TYPE_RX_L2:
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				rollback = bnxt_rx(sc, rx, cpr, &ml, &rxfree,
				    &agfree, cmpl);
			break;
		case CMPL_BASE_TYPE_TX_L2:
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				bnxt_txeof(sc, tx, &txfree, cmpl);
			break;
		default:
			printf("%s: unexpected completion type %u\n",
			    DEVNAME(sc), type);
		}

		if (rollback) {
			bnxt_cpr_rollback(sc, cpr);
			break;
		}
		rv = 1;
		bnxt_cpr_commit(sc, cpr);
		cmpl = bnxt_cpr_next_cmpl(sc, cpr);
	}

	/*
	 * comments in bnxtreg.h suggest we should be writing cpr->cons here,
	 * but writing cpr->cons + 1 makes it stop interrupting.
	 */
	bnxt_write_cp_doorbell_index(sc, &cpr->ring,
	    (cpr->commit_cons+1) % cpr->ring.ring_size, 1);

	if (rxfree != 0) {
		rx->rx_cons += rxfree;
		if (rx->rx_cons >= rx->rx_ring.ring_size)
			rx->rx_cons -= rx->rx_ring.ring_size;

		rx->rx_ag_cons += agfree;
		if (rx->rx_ag_cons >= rx->rx_ag_ring.ring_size)
			rx->rx_ag_cons -= rx->rx_ag_ring.ring_size;

		if_rxr_put(&rx->rxr[0], rxfree);
		if_rxr_put(&rx->rxr[1], agfree);

		if (ifiq_input(rx->rx_ifiq, &ml)) {
			if_rxr_livelocked(&rx->rxr[0]);
			if_rxr_livelocked(&rx->rxr[1]);
		}

		bnxt_rx_fill(q);
		if ((rx->rx_cons == rx->rx_prod) ||
		    (rx->rx_ag_cons == rx->rx_ag_prod))
			timeout_add(&rx->rx_refill, 0);
	}
	if (txfree != 0) {
		if (ifq_is_oactive(tx->tx_ifq))
			ifq_restart(tx->tx_ifq);
	}
	return (rv);
}

void
bnxt_watchdog(struct ifnet *ifp)
{
}

void
bnxt_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
	bnxt_hwrm_port_phy_qcfg(sc, ifmr);
}

uint64_t
bnxt_get_media_type(uint64_t speed, int phy_type)
{
	switch (phy_type) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_T;
		case IF_Gbps(10):
			return IFM_10G_SFP_CU;
		case IF_Gbps(25):
			return IFM_25G_CR;
		case IF_Gbps(40):
			return IFM_40G_CR4;
		case IF_Gbps(50):
			return IFM_50G_CR2;
		case IF_Gbps(100):
			return IFM_100G_CR4;
		}
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_LX;
		case IF_Gbps(10):
			return IFM_10G_LR;
		case IF_Gbps(25):
			return IFM_25G_LR;
		case IF_Gbps(40):
			return IFM_40G_LR4;
		case IF_Gbps(100):
			return IFM_100G_LR4;
		}
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_SX;
		case IF_Gbps(10):
			return IFM_10G_SR;
		case IF_Gbps(25):
			return IFM_25G_SR;
		case IF_Gbps(40):
			return IFM_40G_SR4;
		case IF_Gbps(100):
			return IFM_100G_SR4;
		}
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_ER;
		case IF_Gbps(25):
			return IFM_25G_ER;
		}
		/* missing IFM_40G_ER4, IFM_100G_ER4 */
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_KR;
		case IF_Gbps(20):
			return IFM_20G_KR2;
		case IF_Gbps(25):
			return IFM_25G_KR;
		case IF_Gbps(40):
			return IFM_40G_KR4;
		case IF_Gbps(50):
			return IFM_50G_KR2;
		case IF_Gbps(100):
			return IFM_100G_KR4;
		}
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_KX;
		case IF_Mbps(2500):
			return IFM_2500_KX;
		case IF_Gbps(10):
			return IFM_10G_KX4;
		}
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
		switch (speed) {
		case IF_Mbps(10):
			return IFM_10_T;
		case IF_Mbps(100):
			return IFM_100_TX;
		case IF_Gbps(1):
			return IFM_1000_T;
		case IF_Mbps(2500):
			return IFM_2500_T;
		case IF_Gbps(10):
			return IFM_10G_T;
		}
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
		switch (speed) {
		case IF_Gbps(1):
			return IFM_1000_SGMII;
		}
		break;

	case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
		switch (speed) {
		case IF_Gbps(10):
			return IFM_10G_AOC;
		case IF_Gbps(25):
			return IFM_25G_AOC;
		case IF_Gbps(40):
			return IFM_40G_AOC;
		case IF_Gbps(100):
			return IFM_100G_AOC;
		}
		break;
	}

	return 0;
}
1829
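/*
 * Map an interface speed to its HWRM support_speeds bit and, if the
 * PHY reports that speed as supported, add the given media type to
 * the ifmedia list.
 */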
void
bnxt_add_media_type(struct bnxt_softc *sc, int supported_speeds,
    uint64_t speed, uint64_t ifmt)
{
	int speed_bit = 0;

	switch (speed) {
	case IF_Gbps(1):
		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB;
		break;
	case IF_Gbps(2):
		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB;
		break;
	case IF_Mbps(2500):
		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB;
		break;
	case IF_Gbps(10):
		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB;
		break;
	case IF_Gbps(20):
		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB;
		break;
	case IF_Gbps(25):
		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB;
		break;
	case IF_Gbps(40):
		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB;
		break;
	case IF_Gbps(50):
		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB;
		break;
	case IF_Gbps(100):
		speed_bit = HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB;
		break;
	}
	if (supported_speeds & speed_bit)
		ifmedia_add(&sc->sc_media, IFM_ETHER | ifmt, 0, NULL);
}

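/*
 * Query the port PHY configuration from firmware.  This updates the
 * interface baudrate and link state, rebuilds the ifmedia list from
 * the reported supported speeds, and fills in ifmr when the caller
 * is servicing a media status request.
 */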
int
bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc, struct ifmediareq *ifmr)
{
	struct ifnet *ifp = &softc->sc_ac.ac_if;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int link_state = LINK_STATE_DOWN;
	uint64_t speeds[] = {
		IF_Gbps(1), IF_Gbps(2), IF_Mbps(2500), IF_Gbps(10), IF_Gbps(20),
		IF_Gbps(25), IF_Gbps(40), IF_Gbps(50), IF_Gbps(100)
	};
	uint64_t media_type;
	int duplex;
	int rc = 0;
	int i;

	BNXT_HWRM_LOCK(softc);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);

	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc) {
		printf("%s: failed to query port phy config\n", DEVNAME(softc));
		goto exit;
	}

	if (softc->sc_hwrm_ver > 0x10800)
		duplex = resp->duplex_state;
	else
		duplex = resp->duplex_cfg;

	if (resp->link == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
		if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
			link_state = LINK_STATE_HALF_DUPLEX;
		else
			link_state = LINK_STATE_FULL_DUPLEX;

		switch (resp->link_speed) {
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB:
			ifp->if_baudrate = IF_Mbps(10);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
			ifp->if_baudrate = IF_Mbps(100);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
			ifp->if_baudrate = IF_Gbps(1);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
			ifp->if_baudrate = IF_Gbps(2);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
			ifp->if_baudrate = IF_Mbps(2500);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
			ifp->if_baudrate = IF_Gbps(10);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
			ifp->if_baudrate = IF_Gbps(20);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
			ifp->if_baudrate = IF_Gbps(25);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
			ifp->if_baudrate = IF_Gbps(40);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
			ifp->if_baudrate = IF_Gbps(50);
			break;
		case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
			ifp->if_baudrate = IF_Gbps(100);
			break;
		}
	}

	ifmedia_delete_instance(&softc->sc_media, IFM_INST_ANY);
	for (i = 0; i < nitems(speeds); i++) {
		media_type = bnxt_get_media_type(speeds[i], resp->phy_type);
		if (media_type != 0)
			bnxt_add_media_type(softc, resp->support_speeds,
			    speeds[i], media_type);
	}
	ifmedia_add(&softc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&softc->sc_media, IFM_ETHER|IFM_AUTO);

	if (ifmr != NULL) {
		ifmr->ifm_status = IFM_AVALID;
		if (LINK_STATE_IS_UP(ifp->if_link_state)) {
			ifmr->ifm_status |= IFM_ACTIVE;
			ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX)
				ifmr->ifm_active |= IFM_ETH_TXPAUSE;
			if (resp->pause & HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX)
				ifmr->ifm_active |= IFM_ETH_RXPAUSE;
			if (duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF)
				ifmr->ifm_active |= IFM_HDX;
			else
				ifmr->ifm_active |= IFM_FDX;

			media_type = bnxt_get_media_type(ifp->if_baudrate,
			    resp->phy_type);
			if (media_type != 0)
				ifmr->ifm_active |= media_type;
		}
	}

exit:
	BNXT_HWRM_UNLOCK(softc);

	if (rc == 0 && (link_state != ifp->if_link_state)) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}

	return rc;
}

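/*
 * Handle a media change request: map the selected ifmedia subtype to
 * a forced HWRM link speed, or restart autonegotiation across all
 * speeds when no fixed-speed medium was selected.
 */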
int
bnxt_media_change(struct ifnet *ifp)
{
	struct bnxt_softc *sc = (struct bnxt_softc *)ifp->if_softc;
	struct hwrm_port_phy_cfg_input req = {0};
	uint64_t link_speed;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return EINVAL;

	if (sc->sc_flags & BNXT_FLAG_NPAR)
		return ENODEV;

	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);

	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
	case IFM_100G_CR4:
	case IFM_100G_SR4:
	case IFM_100G_KR4:
	case IFM_100G_LR4:
	case IFM_100G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB;
		break;

	case IFM_50G_CR2:
	case IFM_50G_KR2:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB;
		break;

	case IFM_40G_CR4:
	case IFM_40G_SR4:
	case IFM_40G_LR4:
	case IFM_40G_KR4:
	case IFM_40G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB;
		break;

	case IFM_25G_CR:
	case IFM_25G_KR:
	case IFM_25G_SR:
	case IFM_25G_LR:
	case IFM_25G_ER:
	case IFM_25G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB;
		break;

	case IFM_10G_LR:
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_10G_T:
	case IFM_10G_SFP_CU:
	case IFM_10G_LRM:
	case IFM_10G_KX4:
	case IFM_10G_KR:
	case IFM_10G_CR1:
	case IFM_10G_ER:
	case IFM_10G_AOC:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB;
		break;

	case IFM_2500_SX:
	case IFM_2500_KX:
	case IFM_2500_T:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB;
		break;

	case IFM_1000_T:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_CX:
	case IFM_1000_KX:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB;
		break;

	case IFM_100_TX:
		link_speed = HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB;
		break;

	default:
		link_speed = 0;
	}

	req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
	req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
	if (link_speed == 0) {
		req.auto_mode |=
		    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		req.flags |=
		    htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
		req.enables |=
		    htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE);
	} else {
		req.force_link_speed = htole16(link_speed);
		req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
	}
	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);

	return hwrm_send_message(sc, &req, sizeof(req));
}

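/*
 * Ask firmware to renegotiate the link, advertising all supported
 * speeds and both duplex settings.
 */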
int
bnxt_media_autonegotiate(struct bnxt_softc *sc)
{
	struct hwrm_port_phy_cfg_input req = {0};

	if (sc->sc_flags & BNXT_FLAG_NPAR)
		return ENODEV;

	bnxt_hwrm_cmd_hdr_init(sc, &req, HWRM_PORT_PHY_CFG);
	req.auto_mode |= HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
	req.auto_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
	req.enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE |
	    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX);
	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
	req.flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);

	return hwrm_send_message(sc, &req, sizeof(req));
}

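/*
 * Completion ring entries carry a valid bit whose sense flips each
 * time the ring wraps.  Initialize every entry's valid bit to the
 * inverse of the current phase so stale entries are never mistaken
 * for new completions.
 */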
void
bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
{
	struct cmpl_base *cmp = (void *)cpr->ring.vaddr;
	int i;

	for (i = 0; i < cpr->ring.ring_size; i++)
		cmp[i].info3_v = !cpr->v_bit;
}

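/*
 * Doorbell writes.  Each ring has a 32-bit doorbell register in the
 * doorbell BAR; the barriers order descriptor updates ahead of the
 * doorbell write so the chip never sees a half-updated ring.
 */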
void
bnxt_write_cp_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring,
    int enable)
{
	uint32_t val = CMPL_DOORBELL_KEY_CMPL;

	if (enable == 0)
		val |= CMPL_DOORBELL_MASK;

	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
}

void
bnxt_write_cp_doorbell_index(struct bnxt_softc *sc, struct bnxt_ring *ring,
    uint32_t index, int enable)
{
	uint32_t val = CMPL_DOORBELL_KEY_CMPL | CMPL_DOORBELL_IDX_VALID |
	    (index & CMPL_DOORBELL_IDX_MASK);

	if (enable == 0)
		val |= CMPL_DOORBELL_MASK;

	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, 0, sc->sc_db_s,
	    BUS_SPACE_BARRIER_WRITE);
}

void
bnxt_write_rx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
{
	uint32_t val = RX_DOORBELL_KEY_RX | index;

	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));

	/* second write isn't necessary on all hardware */
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
}

void
bnxt_write_tx_doorbell(struct bnxt_softc *sc, struct bnxt_ring *ring, int index)
{
	uint32_t val = TX_DOORBELL_KEY_TX | index;

	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));

	/* second write isn't necessary on all hardware */
	bus_space_barrier(sc->sc_db_t, sc->sc_db_h, ring->doorbell, 4,
	    BUS_SPACE_BARRIER_WRITE);
	bus_space_write_4(sc->sc_db_t, sc->sc_db_h, ring->doorbell,
	    htole32(val));
}

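/*
 * Post up to nslots rx buffers of bufsize bytes, advancing the
 * producer index and ringing the rx doorbell once at the end.
 * Returns the number of slots that could not be filled.
 */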
u_int
bnxt_rx_fill_slots(struct bnxt_softc *sc, struct bnxt_ring *ring,
    void *ring_mem, struct bnxt_slot *slots, uint *prod, int bufsize,
    uint16_t bdtype, u_int nslots)
{
	struct rx_prod_pkt_bd *rxring;
	struct bnxt_slot *bs;
	struct mbuf *m;
	uint p, fills;

	rxring = (struct rx_prod_pkt_bd *)ring_mem;
	p = *prod;
	for (fills = 0; fills < nslots; fills++) {
		bs = &slots[p];
		m = MCLGETL(NULL, M_DONTWAIT, bufsize);
		if (m == NULL)
			break;

		m->m_len = m->m_pkthdr.len = bufsize;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, bs->bs_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}
		bs->bs_m = m;

		rxring[p].flags_type = htole16(bdtype);
		rxring[p].len = htole16(bufsize);
		rxring[p].opaque = p;
		rxring[p].addr = htole64(bs->bs_map->dm_segs[0].ds_addr);

		if (++p >= ring->ring_size)
			p = 0;
	}

	if (fills != 0)
		bnxt_write_rx_doorbell(sc, ring, p);
	*prod = p;

	return (nslots - fills);
}

int
bnxt_rx_fill(struct bnxt_queue *q)
{
	struct bnxt_rx_queue *rx = &q->q_rx;
	struct bnxt_softc *sc = q->q_sc;
	u_int slots;
	int rv = 0;

	slots = if_rxr_get(&rx->rxr[0], rx->rx_ring.ring_size);
	if (slots > 0) {
		slots = bnxt_rx_fill_slots(sc, &rx->rx_ring,
		    BNXT_DMA_KVA(rx->rx_ring_mem), rx->rx_slots,
		    &rx->rx_prod, MCLBYTES,
		    RX_PROD_PKT_BD_TYPE_RX_PROD_PKT, slots);
		if_rxr_put(&rx->rxr[0], slots);
	} else
		rv = 1;

	slots = if_rxr_get(&rx->rxr[1], rx->rx_ag_ring.ring_size);
	if (slots > 0) {
		slots = bnxt_rx_fill_slots(sc, &rx->rx_ag_ring,
		    BNXT_DMA_KVA(rx->rx_ring_mem) + PAGE_SIZE,
		    rx->rx_ag_slots, &rx->rx_ag_prod,
		    BNXT_AG_BUFFER_SIZE,
		    RX_PROD_AGG_BD_TYPE_RX_PROD_AGG, slots);
		if_rxr_put(&rx->rxr[1], slots);
	} else
		rv = 1;

	return (rv);
}

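/*
 * Timeout handler used when an rx ring has run completely dry:
 * retry the refill until at least one buffer could be posted.
 */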
void
bnxt_refill(void *xq)
{
	struct bnxt_queue *q = xq;
	struct bnxt_rx_queue *rx = &q->q_rx;

	bnxt_rx_fill(q);

	if (rx->rx_cons == rx->rx_prod)
		timeout_add(&rx->rx_refill, 1);
}

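/*
 * Process one rx packet completion.  A completion is split across
 * two ring entries, plus a third aggregation buffer entry for
 * packets that spill past the jumbo threshold.  Returns nonzero if
 * the remaining entries have not been written by the chip yet.
 */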
int
bnxt_rx(struct bnxt_softc *sc, struct bnxt_rx_queue *rx,
    struct bnxt_cp_ring *cpr, struct mbuf_list *ml, int *slots, int *agslots,
    struct cmpl_base *cmpl)
{
	struct mbuf *m, *am;
	struct bnxt_slot *bs;
	struct rx_pkt_cmpl *rxlo = (struct rx_pkt_cmpl *)cmpl;
	struct rx_pkt_cmpl_hi *rxhi;
	struct rx_abuf_cmpl *ag;
	uint32_t flags;
	uint16_t errors;

	/* second part of the rx completion */
	rxhi = (struct rx_pkt_cmpl_hi *)bnxt_cpr_next_cmpl(sc, cpr);
	if (rxhi == NULL) {
		return (1);
	}

	/* packets over 2k in size use an aggregation buffer completion too */
	ag = NULL;
	if ((rxlo->agg_bufs_v1 >> RX_PKT_CMPL_AGG_BUFS_SFT) != 0) {
		ag = (struct rx_abuf_cmpl *)bnxt_cpr_next_cmpl(sc, cpr);
		if (ag == NULL) {
			return (1);
		}
	}

	bs = &rx->rx_slots[rxlo->opaque];
	bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0, bs->bs_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, bs->bs_map);

	m = bs->bs_m;
	bs->bs_m = NULL;
	m->m_pkthdr.len = m->m_len = letoh16(rxlo->len);
	(*slots)++;

	/* checksum flags */
	flags = lemtoh32(&rxhi->flags2);
	errors = lemtoh16(&rxhi->errors_v2);
	if ((flags & RX_PKT_CMPL_FLAGS2_IP_CS_CALC) != 0 &&
	    (errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR) == 0)
		m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	if ((flags & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) != 0 &&
	    (errors & RX_PKT_CMPL_ERRORS_L4_CS_ERROR) == 0)
		m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
		    M_UDP_CSUM_IN_OK;

#if NVLAN > 0
	if ((flags & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) ==
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
		m->m_pkthdr.ether_vtag = lemtoh16(&rxhi->metadata);
		m->m_flags |= M_VLANTAG;
	}
#endif

	if (lemtoh16(&rxlo->flags_type) & RX_PKT_CMPL_FLAGS_RSS_VALID) {
		m->m_pkthdr.ph_flowid = lemtoh32(&rxlo->rss_hash);
		m->m_pkthdr.csum_flags |= M_FLOWID;
	}

	if (ag != NULL) {
		bs = &rx->rx_ag_slots[ag->opaque];
		bus_dmamap_sync(sc->sc_dmat, bs->bs_map, 0,
		    bs->bs_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, bs->bs_map);

		am = bs->bs_m;
		bs->bs_m = NULL;
		am->m_len = letoh16(ag->len);
		m->m_next = am;
		m->m_pkthdr.len += am->m_len;
		(*agslots)++;
	}

	ml_enqueue(ml, m);
	return (0);
}

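/*
 * Reclaim transmitted packets up to and including the slot named by
 * the completion's opaque field, unloading DMA maps and adding the
 * freed descriptors to *txfree.
 */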
void
bnxt_txeof(struct bnxt_softc *sc, struct bnxt_tx_queue *tx, int *txfree,
    struct cmpl_base *cmpl)
{
	struct tx_cmpl *txcmpl = (struct tx_cmpl *)cmpl;
	struct bnxt_slot *bs;
	bus_dmamap_t map;
	u_int idx, segs, last;

	idx = tx->tx_ring_cons;
	last = tx->tx_cons;
	do {
		bs = &tx->tx_slots[tx->tx_cons];
		map = bs->bs_map;

		segs = BNXT_TX_SLOTS(bs);
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(bs->bs_m);
		bs->bs_m = NULL;

		idx += segs;
		(*txfree) += segs;
		if (idx >= tx->tx_ring.ring_size)
			idx -= tx->tx_ring.ring_size;

		last = tx->tx_cons;
		if (++tx->tx_cons >= tx->tx_ring.ring_size)
			tx->tx_cons = 0;

	} while (last != txcmpl->opaque);
	tx->tx_ring_cons = idx;
}

/* bnxt_hwrm.c */

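/* Map an HWRM error code onto an errno; unknown codes become EIO. */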
int
bnxt_hwrm_err_map(uint16_t err)
{
	switch (err) {
	case HWRM_ERR_CODE_SUCCESS:
		return 0;
	case HWRM_ERR_CODE_INVALID_PARAMS:
	case HWRM_ERR_CODE_INVALID_FLAGS:
	case HWRM_ERR_CODE_INVALID_ENABLES:
		return EINVAL;
	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
		return EACCES;
	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
		return ENOMEM;
	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
		return ENOSYS;
	case HWRM_ERR_CODE_FAIL:
		return EIO;
	case HWRM_ERR_CODE_HWRM_ERROR:
	case HWRM_ERR_CODE_UNKNOWN_ERR:
	default:
		return EIO;
	}
}

void
bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
    uint16_t req_type)
{
	struct input *req = request;

	req->req_type = htole16(req_type);
	req->cmpl_ring = 0xffff;
	req->target_id = 0xffff;
	req->resp_addr = htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));
}

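/*
 * Send one HWRM request with the HWRM lock held: write the request
 * into the command BAR (or into DMA memory described by a short-form
 * request when firmware requires the short command format), ring the
 * channel doorbell, then poll the DMA response buffer until the
 * response length and the trailing valid byte appear or the command
 * times out.
 */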
int
_hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
{
	struct input *req = msg;
	struct hwrm_err_output *resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
	uint32_t *data = msg;
	int i;
	uint8_t *valid;
	uint16_t err;
	uint16_t max_req_len = HWRM_MAX_REQ_LEN;
	struct hwrm_short_input short_input = {0};

	/* TODO: DMASYNC in here. */
	req->seq_id = htole16(softc->sc_cmd_seq++);
	memset(resp, 0, PAGE_SIZE);

	if (softc->sc_flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = BNXT_DMA_KVA(softc->sc_cmd_resp);

		memcpy(short_cmd_req, req, msg_len);
		memset((uint8_t *)short_cmd_req + msg_len, 0,
		    softc->sc_max_req_len - msg_len);

		short_input.req_type = req->req_type;
		short_input.signature =
		    htole16(HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
		short_input.size = htole16(msg_len);
		short_input.req_addr =
		    htole64(BNXT_DMA_DVA(softc->sc_cmd_resp));

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		membar_sync();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h, i,
		    *data);
		data++;
	}

	/* Clear to the end of the request buffer */
	for (i = msg_len; i < max_req_len; i += 4)
		bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h,
		    i, 0);

	/* Ring channel doorbell */
	bus_space_write_4(softc->sc_hwrm_t, softc->sc_hwrm_h, 0x100,
	    htole32(1));

	/* Check if response len is updated */
	for (i = 0; i < softc->sc_cmd_timeo; i++) {
		if (resp->resp_len && resp->resp_len <= 4096)
			break;
		DELAY(1000);
	}
	if (i >= softc->sc_cmd_timeo) {
		printf("%s: timeout sending %s: (timeout: %u) seq: %d\n",
		    DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
		    softc->sc_cmd_timeo,
		    le16toh(req->seq_id));
		return ETIMEDOUT;
	}
	/* Last byte of resp contains the valid key */
	valid = (uint8_t *)resp + resp->resp_len - 1;
	for (i = 0; i < softc->sc_cmd_timeo; i++) {
		if (*valid == HWRM_RESP_VALID_KEY)
			break;
		DELAY(1000);
	}
	if (i >= softc->sc_cmd_timeo) {
		printf("%s: timeout sending %s: "
		    "(timeout: %u) msg {0x%x 0x%x} len:%d v: %d\n",
		    DEVNAME(softc), GET_HWRM_REQ_TYPE(req->req_type),
		    softc->sc_cmd_timeo, le16toh(req->req_type),
		    le16toh(req->seq_id), msg_len,
		    *valid);
		return ETIMEDOUT;
	}

	err = le16toh(resp->error_code);
	if (err) {
		/* HWRM_ERR_CODE_FAIL is a "normal" error, don't log */
		if (err != HWRM_ERR_CODE_FAIL) {
			printf("%s: %s command returned %s error.\n",
			    DEVNAME(softc),
			    GET_HWRM_REQ_TYPE(req->req_type),
			    GET_HWRM_ERROR_CODE(err));
		}
		return bnxt_hwrm_err_map(err);
	}

	return 0;
}

int
hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
{
	int rc;

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, msg, msg_len);
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

int
bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
{
	struct hwrm_queue_qportcfg_input req = {0};
	struct hwrm_queue_qportcfg_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = EINVAL;
		goto qportcfg_exit;
	}

	softc->sc_tx_queue_id = resp->queue_id0;

qportcfg_exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

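/*
 * Query the firmware and HWRM interface versions, take the maximum
 * request length and default command timeout from what firmware
 * reports, and note whether the short command format is required.
 */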
int
bnxt_hwrm_ver_get(struct bnxt_softc *softc)
{
	struct hwrm_ver_get_input	req = {0};
	struct hwrm_ver_get_output	*resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int				rc;
#if 0
	const char nastr[] = "<not installed>";
	const char naver[] = "<N/A>";
#endif
	uint32_t dev_caps_cfg;

	softc->sc_max_req_len = HWRM_MAX_REQ_LEN;
	softc->sc_cmd_timeo = 1000;
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	printf(": fw ver %d.%d.%d, ", resp->hwrm_fw_maj, resp->hwrm_fw_min,
	    resp->hwrm_fw_bld);

	softc->sc_hwrm_ver = (resp->hwrm_intf_maj << 16) |
	    (resp->hwrm_intf_min << 8) | resp->hwrm_intf_upd;
#if 0
	snprintf(softc->ver_info->hwrm_if_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
	    resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
	softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj;
	softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min;
	softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd;
	snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
	    resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
	    BNXT_VERSTR_SIZE);
	strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
	    BNXT_NAME_SIZE);

	if (resp->mgmt_fw_maj == 0 && resp->mgmt_fw_min == 0 &&
	    resp->mgmt_fw_bld == 0) {
		strlcpy(softc->ver_info->mgmt_fw_ver, naver, BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->mgmt_fw_maj, resp->mgmt_fw_min,
		    resp->mgmt_fw_bld);
		strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
		    BNXT_NAME_SIZE);
	}
	if (resp->netctrl_fw_maj == 0 && resp->netctrl_fw_min == 0 &&
	    resp->netctrl_fw_bld == 0) {
		strlcpy(softc->ver_info->netctrl_fw_ver, naver,
		    BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->netctrl_fw_name, nastr,
		    BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->netctrl_fw_maj, resp->netctrl_fw_min,
		    resp->netctrl_fw_bld);
		strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
		    BNXT_NAME_SIZE);
	}
	if (resp->roce_fw_maj == 0 && resp->roce_fw_min == 0 &&
	    resp->roce_fw_bld == 0) {
		strlcpy(softc->ver_info->roce_fw_ver, naver, BNXT_VERSTR_SIZE);
		strlcpy(softc->ver_info->roce_fw_name, nastr, BNXT_NAME_SIZE);
	}
	else {
		snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
		    "%d.%d.%d", resp->roce_fw_maj, resp->roce_fw_min,
		    resp->roce_fw_bld);
		strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
		    BNXT_NAME_SIZE);
	}
	softc->ver_info->chip_num = le16toh(resp->chip_num);
	softc->ver_info->chip_rev = resp->chip_rev;
	softc->ver_info->chip_metal = resp->chip_metal;
	softc->ver_info->chip_bond_id = resp->chip_bond_id;
	softc->ver_info->chip_type = resp->chip_platform_type;
#endif

	if (resp->max_req_win_len)
		softc->sc_max_req_len = le16toh(resp->max_req_win_len);
	if (resp->def_req_timeout)
		softc->sc_cmd_timeo = le16toh(resp->def_req_timeout);

	dev_caps_cfg = le32toh(resp->dev_caps_cfg);
	if ((dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		softc->sc_flags |= BNXT_FLAG_SHORT_CMD;

fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

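/* Register this driver instance and its version with the firmware. */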
int
bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
{
	struct hwrm_func_drv_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);

	req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
	    HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE);
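	/*
	 * No OpenBSD os_type appears to be defined in the HWRM
	 * interface, so register as FreeBSD.
	 */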
	req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);

	req.ver_maj = 6;
	req.ver_min = 4;
	req.ver_upd = 0;

	return hwrm_send_message(softc, &req, sizeof(req));
}

#if 0

int
bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_UNRGTR);
	if (shutdown)
		req.flags |= htole32(
		    HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN);
	return hwrm_send_message(softc, &req, sizeof(req));
}

#endif

int
bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	/* struct bnxt_func_info *func = &softc->func; */

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
	req.fid = htole16(0xffff);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	if (resp->flags &
	    htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED))
		softc->sc_flags |= BNXT_FLAG_WOL_CAP;

	memcpy(softc->sc_ac.ac_enaddr, resp->mac_address, ETHER_ADDR_LEN);
	/*
	func->fw_fid = le16toh(resp->fid);
	memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
	func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
	func->max_cp_rings = le16toh(resp->max_cmpl_rings);
	func->max_tx_rings = le16toh(resp->max_tx_rings);
	func->max_rx_rings = le16toh(resp->max_rx_rings);
	func->max_hw_ring_grps = le32toh(resp->max_hw_ring_grps);
	if (!func->max_hw_ring_grps)
		func->max_hw_ring_grps = func->max_tx_rings;
	func->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
	func->max_vnics = le16toh(resp->max_vnics);
	func->max_stat_ctxs = le16toh(resp->max_stat_ctx);
	if (BNXT_PF(softc)) {
		struct bnxt_pf_info *pf = &softc->pf;

		pf->port_id = le16toh(resp->port_id);
		pf->first_vf_id = le16toh(resp->first_vf_id);
		pf->max_vfs = le16toh(resp->max_vfs);
		pf->max_encap_records = le32toh(resp->max_encap_records);
		pf->max_decap_records = le32toh(resp->max_decap_records);
		pf->max_tx_em_flows = le32toh(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32toh(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32toh(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32toh(resp->max_rx_wm_flows);
	}
	if (!_is_valid_ether_addr(func->mac_addr)) {
		device_printf(softc->dev, "Invalid ethernet address, generating random locally administered address\n");
		get_random_ether_addr(func->mac_addr);
	}
	*/

fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

int
bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
{
	struct hwrm_func_qcfg_input req = {0};
	/* struct hwrm_func_qcfg_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg; */
	int rc;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
	req.fid = htole16(0xffff);
	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	/*
	fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
	fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
	fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
	fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
	*/
fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

int
bnxt_hwrm_func_reset(struct bnxt_softc *softc)
{
	struct hwrm_func_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESET);
	req.enables = 0;

	return hwrm_send_message(softc, &req, sizeof(req));
}

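/*
 * Configure jumbo buffer placement for a vnic: packets larger than
 * the MCLBYTES threshold spill over into aggregation ring buffers.
 */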
int
bnxt_hwrm_vnic_cfg_placement(struct bnxt_softc *softc,
    struct bnxt_vnic_info *vnic)
{
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);

	req.flags = htole32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
	req.enables = htole32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
	req.vnic_id = htole16(vnic->id);
	req.jumbo_thresh = htole16(MCLBYTES);

	return hwrm_send_message(softc, &req, sizeof(req));
}

int
bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
	struct hwrm_vnic_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_CFG);

	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->flags & BNXT_VNIC_FLAG_BD_STALL)
		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
		req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	req.enables = htole32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
	    HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
	    HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	req.vnic_id = htole16(vnic->id);
	req.dflt_ring_grp = htole16(vnic->def_ring_grp);
	req.rss_rule = htole16(vnic->rss_id);
	req.cos_rule = htole16(vnic->cos_rule);
	req.lb_rule = htole16(vnic->lb_rule);
	req.mru = htole16(vnic->mru);

	return hwrm_send_message(softc, &req, sizeof(req));
}

int
bnxt_hwrm_vnic_alloc(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int rc;

	if (vnic->id != (uint16_t)HWRM_NA_SIGNATURE) {
		printf("%s: attempt to re-allocate vnic %04x\n",
		    DEVNAME(softc), vnic->id);
		return EINVAL;
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_ALLOC);

	if (vnic->flags & BNXT_VNIC_FLAG_DEFAULT)
		req.flags = htole32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	vnic->id = le32toh(resp->vnic_id);

fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

int
bnxt_hwrm_vnic_free(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
	struct hwrm_vnic_free_input req = {0};
	int rc;

	if (vnic->id == (uint16_t)HWRM_NA_SIGNATURE) {
		printf("%s: attempt to deallocate vnic %04x\n",
		    DEVNAME(softc), vnic->id);
		return (EINVAL);
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_FREE);
	req.vnic_id = htole16(vnic->id);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc == 0)
		vnic->id = (uint16_t)HWRM_NA_SIGNATURE;
	BNXT_HWRM_UNLOCK(softc);

	return (rc);
}

int
bnxt_hwrm_vnic_ctx_alloc(struct bnxt_softc *softc, uint16_t *ctx_id)
{
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int rc;

	if (*ctx_id != (uint16_t)HWRM_NA_SIGNATURE) {
		printf("%s: attempt to re-allocate vnic ctx %04x\n",
		    DEVNAME(softc), *ctx_id);
		return EINVAL;
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	*ctx_id = letoh16(resp->rss_cos_lb_ctx_id);

fail:
	BNXT_HWRM_UNLOCK(softc);
	return (rc);
}

int
bnxt_hwrm_vnic_ctx_free(struct bnxt_softc *softc, uint16_t *ctx_id)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
	int rc;

	if (*ctx_id == (uint16_t)HWRM_NA_SIGNATURE) {
		printf("%s: attempt to deallocate vnic ctx %04x\n",
		    DEVNAME(softc), *ctx_id);
		return (EINVAL);
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE);
	req.rss_cos_lb_ctx_id = htole32(*ctx_id);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc == 0)
		*ctx_id = (uint16_t)HWRM_NA_SIGNATURE;
	BNXT_HWRM_UNLOCK(softc);
	return (rc);
}

int
bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
{
	struct hwrm_ring_grp_alloc_input req = {0};
	struct hwrm_ring_grp_alloc_output *resp;
	int rc = 0;

	if (grp->grp_id != HWRM_NA_SIGNATURE) {
		printf("%s: attempt to re-allocate ring group %04x\n",
		    DEVNAME(softc), grp->grp_id);
		return EINVAL;
	}

	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_ALLOC);
	req.cr = htole16(grp->cp_ring_id);
	req.rr = htole16(grp->rx_ring_id);
	req.ar = htole16(grp->ag_ring_id);
	req.sc = htole16(grp->stats_ctx);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	grp->grp_id = letoh32(resp->ring_group_id);

fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

int
bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
{
	struct hwrm_ring_grp_free_input req = {0};
	int rc = 0;

	if (grp->grp_id == HWRM_NA_SIGNATURE) {
		printf("%s: attempt to free ring group %04x\n",
		    DEVNAME(softc), grp->grp_id);
		return EINVAL;
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE);
	req.ring_group_id = htole32(grp->grp_id);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc == 0)
		grp->grp_id = HWRM_NA_SIGNATURE;

	BNXT_HWRM_UNLOCK(softc);
	return (rc);
}

/*
 * Ring allocation message to the firmware
 */
int
bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
    struct bnxt_ring *ring, uint16_t cmpl_ring_id, uint32_t stat_ctx_id,
    int irq)
{
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output *resp;
	int rc;

	if (ring->phys_id != (uint16_t)HWRM_NA_SIGNATURE) {
		printf("%s: attempt to re-allocate ring %04x\n",
		    DEVNAME(softc), ring->phys_id);
		return EINVAL;
	}

	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_ALLOC);
	req.enables = htole32(0);
	req.fbo = htole32(0);

	if (stat_ctx_id != HWRM_NA_SIGNATURE) {
		req.enables |= htole32(
		    HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		req.stat_ctx_id = htole32(stat_ctx_id);
	}
	req.ring_type = type;
	req.page_tbl_addr = htole64(ring->paddr);
	req.length = htole32(ring->ring_size);
	req.logical_id = htole16(ring->id);
	req.cmpl_ring_id = htole16(cmpl_ring_id);
	req.queue_id = htole16(softc->sc_tx_queue_id);
	req.int_mode = (softc->sc_flags & BNXT_FLAG_MSIX) ?
	    HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX :
	    HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY;
	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	ring->phys_id = le16toh(resp->ring_id);

fail:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

int
bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint8_t type,
    struct bnxt_ring *ring)
{
	struct hwrm_ring_free_input req = {0};
	int rc;

	if (ring->phys_id == (uint16_t)HWRM_NA_SIGNATURE) {
		printf("%s: attempt to deallocate ring %04x\n",
		    DEVNAME(softc), ring->phys_id);
		return (EINVAL);
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_FREE);
	req.ring_type = type;
	req.ring_id = htole16(ring->phys_id);
	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	ring->phys_id = (uint16_t)HWRM_NA_SIGNATURE;
fail:
	BNXT_HWRM_UNLOCK(softc);
	return (rc);
}

int
bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
    uint64_t paddr)
{
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp;
	int rc = 0;

	if (cpr->stats_ctx_id != HWRM_NA_SIGNATURE) {
		printf("%s: attempt to re-allocate stats ctx %08x\n",
		    DEVNAME(softc), cpr->stats_ctx_id);
		return EINVAL;
	}

	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_ALLOC);

	req.update_period_ms = htole32(1000);
	req.stats_dma_addr = htole64(paddr);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	cpr->stats_ctx_id = le32toh(resp->stat_ctx_id);

fail:
	BNXT_HWRM_UNLOCK(softc);

	return rc;
}

int
bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
{
	struct hwrm_stat_ctx_free_input req = {0};
	int rc = 0;

	if (cpr->stats_ctx_id == HWRM_NA_SIGNATURE) {
		printf("%s: attempt to free stats ctx %08x\n",
		    DEVNAME(softc), cpr->stats_ctx_id);
		return EINVAL;
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_STAT_CTX_FREE);
	req.stat_ctx_id = htole32(cpr->stats_ctx_id);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	BNXT_HWRM_UNLOCK(softc);

	if (rc == 0)
		cpr->stats_ctx_id = HWRM_NA_SIGNATURE;

	return (rc);
}

#if 0

int
bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
{
	struct hwrm_port_qstats_input req = {0};
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS);

	req.port_id = htole16(softc->pf.port_id);
	req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats.idi_paddr);
	req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats.idi_paddr);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	BNXT_HWRM_UNLOCK(softc);

	return rc;
}

#endif

int
bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt_softc *softc,
    uint32_t vnic_id, uint32_t rx_mask, uint64_t mc_addr, uint32_t mc_count)
{
	struct hwrm_cfa_l2_set_rx_mask_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_SET_RX_MASK);

	req.vnic_id = htole32(vnic_id);
	req.mask = htole32(rx_mask);
	req.mc_tbl_addr = htole64(mc_addr);
	req.num_mc_entries = htole32(mc_count);
	return hwrm_send_message(softc, &req, sizeof(req));
}

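/*
 * Install the unicast MAC filter that steers received packets to the
 * given vnic.
 */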
int
bnxt_hwrm_set_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
	struct hwrm_cfa_l2_filter_alloc_input	req = {0};
	struct hwrm_cfa_l2_filter_alloc_output	*resp;
	uint32_t enables = 0;
	int rc = 0;

	if (vnic->filter_id != -1) {
		printf("%s: attempt to re-allocate l2 ctx filter\n",
		    DEVNAME(softc));
		return EINVAL;
	}

	resp = BNXT_DMA_KVA(softc->sc_cmd_resp);
	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_ALLOC);

	req.flags = htole32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
	enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR
	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK
	    | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.enables = htole32(enables);
	req.dst_id = htole16(vnic->id);
	memcpy(req.l2_addr, softc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
	memset(&req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask));

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto fail;

	vnic->filter_id = le64toh(resp->l2_filter_id);
	vnic->flow_id = le64toh(resp->flow_id);

fail:
	BNXT_HWRM_UNLOCK(softc);
	return (rc);
}

int
bnxt_hwrm_free_filter(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
	struct hwrm_cfa_l2_filter_free_input req = {0};
	int rc = 0;

	if (vnic->filter_id == -1) {
		printf("%s: attempt to deallocate filter %llx\n",
		    DEVNAME(softc), vnic->filter_id);
		return (EINVAL);
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_CFA_L2_FILTER_FREE);
	req.l2_filter_id = htole64(vnic->filter_id);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc == 0)
		vnic->filter_id = -1;
	BNXT_HWRM_UNLOCK(softc);

	return (rc);
}

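/*
 * Program the RSS hash types, indirection table and hash key for a
 * vnic's RSS context.
 */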
int
bnxt_hwrm_vnic_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
    uint32_t hash_type, daddr_t rss_table, daddr_t rss_key)
{
	struct hwrm_vnic_rss_cfg_input	req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);

	req.hash_type = htole32(hash_type);
	req.ring_grp_tbl_addr = htole64(rss_table);
	req.hash_key_tbl_addr = htole64(rss_key);
	req.rss_ctx_idx = htole16(vnic->rss_id);

	return hwrm_send_message(softc, &req, sizeof(req));
}

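/*
 * Point firmware async event notifications at the given completion
 * ring.  The VF branch is kept for reference but the PF path is
 * currently hardcoded.
 */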
int
bnxt_cfg_async_cr(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr)
{
	int rc = 0;

	if (1 /* BNXT_PF(softc) */) {
		struct hwrm_func_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_CFG);

		req.fid = htole16(0xffff);
		req.enables =
		    htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = htole16(cpr->ring.phys_id);

		rc = hwrm_send_message(softc, &req, sizeof(req));
	} else {
		struct hwrm_func_vf_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_VF_CFG);

		req.enables = htole32(
		    HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = htole16(cpr->ring.phys_id);

		rc = hwrm_send_message(softc, &req, sizeof(req));
	}
	return rc;
}

#if 0

void
bnxt_validate_hw_lro_settings(struct bnxt_softc *softc)
{
	softc->hw_lro.enable = min(softc->hw_lro.enable, 1);

	softc->hw_lro.is_mode_gro = min(softc->hw_lro.is_mode_gro, 1);

	softc->hw_lro.max_agg_segs = min(softc->hw_lro.max_agg_segs,
	    HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX);

	softc->hw_lro.max_aggs = min(softc->hw_lro.max_aggs,
	    HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);

	softc->hw_lro.min_agg_len = min(softc->hw_lro.min_agg_len,
	    BNXT_MAX_MTU);
}

int
bnxt_hwrm_vnic_tpa_cfg(struct bnxt_softc *softc)
{
	struct hwrm_vnic_tpa_cfg_input req = {0};
	uint32_t flags;

	if (softc->vnic_info.id == (uint16_t)HWRM_NA_SIGNATURE) {
		return 0;
	}

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_TPA_CFG);

	if (softc->hw_lro.enable) {
		flags = HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
		    HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
		    HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
		    HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ;

		if (softc->hw_lro.is_mode_gro)
			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO;
		else
			flags |= HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE;

		req.flags = htole32(flags);

		req.enables = htole32(
		    HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
		    HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
		    HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);

		req.max_agg_segs = htole16(softc->hw_lro.max_agg_segs);
		req.max_aggs = htole16(softc->hw_lro.max_aggs);
		req.min_agg_len = htole32(softc->hw_lro.min_agg_len);
	}

	req.vnic_id = htole16(softc->vnic_info.id);

	return hwrm_send_message(softc, &req, sizeof(req));
}

int
bnxt_hwrm_fw_reset(struct bnxt_softc *softc, uint8_t processor,
    uint8_t *selfreset)
{
	struct hwrm_fw_reset_input req = {0};
	struct hwrm_fw_reset_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;

	MPASS(selfreset);

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_RESET);
	req.embedded_proc_type = processor;
	req.selfrst_status = *selfreset;

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto exit;
	*selfreset = resp->selfrst_status;

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

int
bnxt_hwrm_fw_qstatus(struct bnxt_softc *softc, uint8_t type, uint8_t *selfreset)
{
	struct hwrm_fw_qstatus_input req = {0};
	struct hwrm_fw_qstatus_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;

	MPASS(selfreset);

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_QSTATUS);
	req.embedded_proc_type = type;

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto exit;
	*selfreset = resp->selfrst_status;

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

#endif

int
bnxt_hwrm_nvm_get_dev_info(struct bnxt_softc *softc, uint16_t *mfg_id,
    uint16_t *device_id, uint32_t *sector_size, uint32_t *nvram_size,
    uint32_t *reserved_size, uint32_t *available_size)
{
	struct hwrm_nvm_get_dev_info_input req = {0};
	struct hwrm_nvm_get_dev_info_output *resp =
	    BNXT_DMA_KVA(softc->sc_cmd_resp);
	int rc;
	uint32_t old_timeo;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_NVM_GET_DEV_INFO);

	BNXT_HWRM_LOCK(softc);
	old_timeo = softc->sc_cmd_timeo;
	softc->sc_cmd_timeo = BNXT_NVM_TIMEO;
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	softc->sc_cmd_timeo = old_timeo;
	if (rc)
		goto exit;

	if (mfg_id)
		*mfg_id = le16toh(resp->manufacturer_id);
	if (device_id)
		*device_id = le16toh(resp->device_id);
	if (sector_size)
		*sector_size = le32toh(resp->sector_size);
	if (nvram_size)
		*nvram_size = le32toh(resp->nvram_size);
	if (reserved_size)
		*reserved_size = le32toh(resp->reserved_size);
	if (available_size)
		*available_size = le32toh(resp->available_size);

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

#if 0

int
bnxt_hwrm_fw_get_time(struct bnxt_softc *softc, uint16_t *year, uint8_t *month,
    uint8_t *day, uint8_t *hour, uint8_t *minute, uint8_t *second,
    uint16_t *millisecond, uint16_t *zone)
{
	struct hwrm_fw_get_time_input req = {0};
	struct hwrm_fw_get_time_output *resp =
	    (void *)softc->hwrm_cmd_resp.idi_vaddr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_GET_TIME);

	BNXT_HWRM_LOCK(softc);
	rc = _hwrm_send_message(softc, &req, sizeof(req));
	if (rc)
		goto exit;

	if (year)
		*year = le16toh(resp->year);
	if (month)
		*month = resp->month;
	if (day)
		*day = resp->day;
	if (hour)
		*hour = resp->hour;
	if (minute)
		*minute = resp->minute;
	if (second)
		*second = resp->second;
	if (millisecond)
		*millisecond = le16toh(resp->millisecond);
	if (zone)
		*zone = le16toh(resp->zone);

exit:
	BNXT_HWRM_UNLOCK(softc);
	return rc;
}

int
bnxt_hwrm_fw_set_time(struct bnxt_softc *softc, uint16_t year, uint8_t month,
    uint8_t day, uint8_t hour, uint8_t minute, uint8_t second,
    uint16_t millisecond, uint16_t zone)
{
	struct hwrm_fw_set_time_input req = {0};

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FW_SET_TIME);

	req.year = htole16(year);
	req.month = month;
	req.day = day;
	req.hour = hour;
	req.minute = minute;
	req.second = second;
	req.millisecond = htole16(millisecond);
	req.zone = htole16(zone);
	return hwrm_send_message(softc, &req, sizeof(req));
}

#endif

void
_bnxt_hwrm_set_async_event_bit(struct hwrm_func_drv_rgtr_input *req, int bit)
{
	req->async_event_fwd[bit / 32] |= (1 << (bit % 32));
}

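/*
 * Register for async event notifications: each event id sets one
 * bit in the request's async_event_fwd bitmap.
 */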
int
bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc)
{
	struct hwrm_func_drv_rgtr_input req = {0};
	int events[] = {
		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
		HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE
	};
	int i;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);

	req.enables =
	    htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);

	for (i = 0; i < nitems(events); i++)
		_bnxt_hwrm_set_async_event_bit(&req, events[i]);

	return hwrm_send_message(softc, &req, sizeof(req));
}

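/*
 * Read a full 256 byte SFP/QSFP module page over i2c, one
 * response-buffer-sized chunk at a time, for the SIOCGIFSFFPAGE
 * ioctl.
 */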
int
bnxt_get_sffpage(struct bnxt_softc *softc, struct if_sffpage *sff)
{
	struct hwrm_port_phy_i2c_read_input req = {0};
	struct hwrm_port_phy_i2c_read_output *out;
	int offset;

	bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_I2C_READ);
	req.i2c_slave_addr = sff->sff_addr;
	req.page_number = htole16(sff->sff_page);

	for (offset = 0; offset < 256; offset += sizeof(out->data)) {
		req.page_offset = htole16(offset);
		req.data_length = sizeof(out->data);
		req.enables = htole32(
		    HWRM_PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);

		if (hwrm_send_message(softc, &req, sizeof(req))) {
			printf("%s: failed to read i2c data\n", DEVNAME(softc));
			return 1;
		}

		out = (struct hwrm_port_phy_i2c_read_output *)
		    BNXT_DMA_KVA(softc->sc_cmd_resp);
		memcpy(sff->sff_data + offset, out->data, sizeof(out->data));
	}

	return 0;
}