/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_main.c 259142 2013-12-09 22:40:22Z np $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};
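
/*
 * The nexus char device created in t4_attach() appears as /dev/t4nex<unit>
 * (or /dev/t5nex<unit>) and is driven with the ioctls in t4_ioctl.h by the
 * companion cxgbetool utility.  An illustrative invocation (syntax from
 * cxgbetool, shown here only as an example):
 *
 *	cxgbetool t4nex0 reg 0x19428		(register read via t4_ioctl)
 */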

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
static SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
static SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

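/*
 * All of these are fetched with TUNABLE_INT/TUNABLE_STR and so are set from
 * the loader environment.  An illustrative /boot/loader.conf fragment
 * (example values, not recommendations):
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 */
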
/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

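/*
 * The timer and packet-count indices above select entries in the SGE's
 * interrupt holdoff timer and counter arrays; a pktc_idx of -1 leaves
 * packet-count based holdoff disabled.  Both can also be changed per port at
 * runtime through the holdoff_tmr_idx/holdoff_pktc_idx sysctls (see
 * sysctl_holdoff_tmr_idx() and sysctl_holdoff_pktc_idx() below).
 */
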
/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

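/*
 * For example, hw.cxgbe.interrupt_types="4" restricts the driver to MSI-X
 * (bit 2), while "3" allows MSI or INTx but not MSI-X.
 * cfg_itype_and_nqueues() below tries the allowed types in MSI-X, MSI, INTx
 * order.
 */
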
/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
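
/*
 * E.g. hw.cxgbe.config_file="flash" tells the driver to use the
 * configuration file stored in the card's flash rather than the default one
 * bundled with the firmware KLD.
 */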

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff
};
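
/*
 * These flags tell update_mac_settings() which parts of the port's MAC
 * configuration to sync to the hardware.  Callers OR together whatever needs
 * updating, e.g. update_mac_settings(pi, XGMAC_PROMISC | XGMAC_ALLMULTI)
 * when the interface's promiscuous/allmulti state changes (see
 * cxgbe_ioctl() below).
 */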

static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 x 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
#ifdef notyet
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5408,  "Chelsio T520-CX"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5413,  "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
	}

	sc->traceq = -1;
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}
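
	/*
	 * Recovery mode is requested by setting the hw.cxgbe.sos tunable to a
	 * non-zero value (e.g. hw.cxgbe.sos="1" in loader.conf).  The driver
	 * then stops here, with only the nexus char device available for
	 * debugging via cxgbetool.
	 */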

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		pi->linkdnrc = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;
	sc->flags |= iaq.intr_flags;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}


static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)
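
/*
 * Everything in T4_CAP is enabled by default (T4_CAP_ENABLE) and can be
 * toggled at runtime with ifconfig(8); the SIOCSIFCAP case in cxgbe_ioctl()
 * handles the transitions, e.g. "ifconfig cxgbe0 -txcsum" also drops TSO4
 * because the driver requires tx checksumming for TSO.
 */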

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(pi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (pi->flags & PORT_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(pi,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_init_synchronized(pi);
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(pi, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_txq *txq;
	int i;
	struct mbuf *m;

	/* queues do not exist if !PORT_INIT_DONE. */
	if (pi->flags & PORT_INIT_DONE) {
		for_each_txq(pi, i, txq) {
			TXQ_LOCK(txq);
			m_freem(txq->m);
			txq->m = NULL;
			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
				m_freem(m);
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia_entry *cur = pi->media.ifm_cur;
	int speed = pi->link_cfg.speed;
	int data = (pi->port_type << 8) | pi->mod_type;

	if (cur->ifm_data != data) {
		build_medialist(pi);
		cur = pi->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}

static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}

static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}

/*
 * Verify that the memory range specified by the addr/len pair is valid and lies
 * entirely within a single region (EDCx or MCx).
 */
static int
validate_mem_range(struct adapter *sc, uint32_t addr, int len)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (addr & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Enabled memories */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (em & F_EDRAM0_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EDRAM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EXT_MEM_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}

	return (EFAULT);
}

static int
fwmtype_to_hwmtype(int mtype)
{

	switch (mtype) {
	case FW_MEMTYPE_EDC0:
		return (MEM_EDC0);
	case FW_MEMTYPE_EDC1:
		return (MEM_EDC1);
	case FW_MEMTYPE_EXTMEM:
		return (MEM_MC0);
	case FW_MEMTYPE_EXTMEM1:
		return (MEM_MC1);
	default:
		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
	}
}

/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (fwmtype_to_hwmtype(mtype)) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}

static void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
	const struct memwin *mw;

	if (is_t4(sc)) {
		KASSERT(win >= 0 && win < nitems(t4_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t4_memwin[win];
	} else {
		KASSERT(win >= 0 && win < nitems(t5_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t5_memwin[win];
	}

	if (base != NULL)
		*base = mw->base;
	if (aperture != NULL)
		*aperture = mw->aperture;
}

/*
 * Positions the memory window such that it can be used to access the specified
 * address in the chip's address space.  The return value is the offset of addr
 * from the start of the window.
 */
static uint32_t
position_memwin(struct adapter *sc, int n, uint32_t addr)
{
	uint32_t start, pf;
	uint32_t reg;

	KASSERT(n >= 0 && n <= 3,
	    ("%s: invalid window %d.", __func__, n));
	KASSERT((addr & 3) == 0,
	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));

	if (is_t4(sc)) {
		pf = 0;
		start = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		start = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);

	t4_write_reg(sc, reg, start | pf);
	t4_read_reg(sc, reg);

	return (addr - start);
}
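
/*
 * Illustrative use of a memory window to read a 4B word at chip address
 * "addr" (a sketch of the pattern read_card_mem() relies on, not a drop-in
 * snippet):
 *
 *	uint32_t base, aperture, off, val;
 *
 *	memwin_info(sc, 2, &base, &aperture);
 *	off = position_memwin(sc, 2, addr);
 *	val = t4_read_reg(sc, base + off);	(valid while off < aperture)
 */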

static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nrxq10g, nrxq1g, n;
	int nofldrxq10g = 0, nofldrxq1g = 0;

	bzero(iaq, sizeof(*iaq));

	iaq->ntxq10g = t4_ntxq10g;
	iaq->ntxq1g = t4_ntxq1g;
	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		iaq->nofldtxq10g = t4_nofldtxq10g;
		iaq->nofldtxq1g = t4_nofldtxq1g;
		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
	}
#endif

	for (itype = INTR_MSIX; itype; itype >>= 1) {

		if ((itype & t4_intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;
restart:
		if (navail == 0)
			continue;

		iaq->intr_type = itype;
		iaq->intr_flags = 0;

		/*
		 * Best option: an interrupt vector for errors, one for the
		 * firmware event queue, and one each for each rxq (NIC as well
		 * as offload).
		 */
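		/*
		 * E.g. (hypothetical counts): two 10G ports with nrxq10g = 8
		 * and nofldrxq10g = 2 would ask for T4_EXTRA_INTR + 2 * (8 + 2)
		 * vectors here, falling through to the cheaper layouts below
		 * if that many are not available.
		 */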
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
			iaq->intr_flags |= INTR_DIRECT;
			goto allocate;
		}

		/*
		 * Second best option: an interrupt vector for errors, one for
		 * the firmware event queue, and one each for either NIC or
		 * offload rxq's.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq)))
			goto allocate;

		/*
		 * Next best option: an interrupt vector for errors, one for the
		 * firmware event queue, and at least one per port.  At this
		 * point we know we'll have to downsize nrxq or nofldrxq to fit
		 * what's available to us.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g + n1g;
		if (iaq->nirq <= navail) {
			int leftover = navail - iaq->nirq;

			if (n10g > 0) {
				int target = max(nrxq10g, nofldrxq10g);

				n = 1;
				while (n < target && leftover >= n10g) {
					leftover -= n10g;
					iaq->nirq += n10g;
					n++;
				}
				iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
			}

			if (n1g > 0) {
				int target = max(nrxq1g, nofldrxq1g);

				n = 1;
				while (n < target && leftover >= n1g) {
					leftover -= n1g;
					iaq->nirq += n1g;
					n++;
				}
				iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
			}

			if (itype != INTR_MSI || powerof2(iaq->nirq))
				goto allocate;
		}

		/*
		 * Least desirable option: one interrupt vector for everything.
		 */
		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
#ifdef TCP_OFFLOAD
		if (is_offload(sc))
			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif

allocate:
		navail = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &navail);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &navail);

		if (rc == 0) {
			if (navail == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate (it's in navail).
			 */
			device_printf(sc->dev, "fewer vectors than requested, "
			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
			    itype, iaq->nirq, navail);
			pci_release_msi(sc->dev);
			goto restart;
		}

1854		device_printf(sc->dev,
1855		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1856		    rc, itype, iaq->nirq, navail);
1857	}
1858
1859	device_printf(sc->dev,
1860	    "failed to find a usable interrupt type.  "
1861	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
1862	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1863
1864	return (ENXIO);
1865}
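
/*
 * Note: itype >>= 1 walks the allowed interrupt types in order of preference
 * (MSI-X, then MSI, then INTx).  MSI can only be allocated in power-of-2
 * vector counts, hence the powerof2() checks on every candidate nirq above.
 */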
1866
1867#define FW_VERSION(chip) ( \
1868    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
1869    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
1870    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
1871    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
1872#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
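
/*
 * Example expansion: FW_VERSION(T4) token-pastes the chip prefix onto each
 * component, yielding V_FW_HDR_FW_VER_MAJOR(T4FW_VERSION_MAJOR) | ... |
 * V_FW_HDR_FW_VER_BUILD(T4FW_VERSION_BUILD), i.e. the bundled firmware
 * version packed into a single 32-bit word; FW_INTFVER(T4, NIC) likewise
 * becomes T4FW_HDR_INTFVER_NIC.
 */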
1873
1874struct fw_info {
1875	uint8_t chip;
1876	char *kld_name;
1877	char *fw_mod_name;
1878	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
1879} fw_info[] = {
1880	{
1881		.chip = CHELSIO_T4,
1882		.kld_name = "t4fw_cfg",
1883		.fw_mod_name = "t4fw",
1884		.fw_hdr = {
1885			.chip = FW_HDR_CHIP_T4,
1886			.fw_ver = htobe32_const(FW_VERSION(T4)),
1887			.intfver_nic = FW_INTFVER(T4, NIC),
1888			.intfver_vnic = FW_INTFVER(T4, VNIC),
1889			.intfver_ofld = FW_INTFVER(T4, OFLD),
1890			.intfver_ri = FW_INTFVER(T4, RI),
1891			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
1892			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
1893			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
1894			.intfver_fcoe = FW_INTFVER(T4, FCOE),
1895		},
1896	}, {
1897		.chip = CHELSIO_T5,
1898		.kld_name = "t5fw_cfg",
1899		.fw_mod_name = "t5fw",
1900		.fw_hdr = {
1901			.chip = FW_HDR_CHIP_T5,
1902			.fw_ver = htobe32_const(FW_VERSION(T5)),
1903			.intfver_nic = FW_INTFVER(T5, NIC),
1904			.intfver_vnic = FW_INTFVER(T5, VNIC),
1905			.intfver_ofld = FW_INTFVER(T5, OFLD),
1906			.intfver_ri = FW_INTFVER(T5, RI),
1907			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
1908			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
1909			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
1910			.intfver_fcoe = FW_INTFVER(T5, FCOE),
1911		},
1912	}
1913};
1914
1915static struct fw_info *
1916find_fw_info(int chip)
1917{
1918	int i;
1919
1920	for (i = 0; i < nitems(fw_info); i++) {
1921		if (fw_info[i].chip == chip)
1922			return (&fw_info[i]);
1923	}
1924	return (NULL);
1925}
1926
1927/*
1928 * Is the given firmware API compatible with the one the driver was compiled
1929 * with?
1930 */
1931static int
1932fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1933{
1934
1935	/* short circuit if it's the exact same firmware version */
1936	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1937		return (1);
1938
1939	/*
1940	 * XXX: Is this too conservative?  Perhaps I should limit this to the
1941	 * features that are supported in the driver.
1942	 */
1943#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1944	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1945	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1946	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1947		return (1);
1948#undef SAME_INTF
1949
1950	return (0);
1951}
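
/*
 * Example (hypothetical versions): firmware 1.8.4.0 on the card and 1.9.12.0
 * driver headers still count as compatible here provided every intfver_*
 * field matches; the exact chip/version comparison above is only a fast path.
 */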
1952
1953/*
1954 * The firmware in the KLD is usable, but should it be installed?  This routine
1955 * prints the reason for its decision whenever it indicates that the KLD
1956 * firmware should be installed.
1957 */
1958static int
1959should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1960{
1961	const char *reason;
1962
1963	if (!card_fw_usable) {
1964		reason = "incompatible or unusable";
1965		goto install;
1966	}
1967
1968	if (k > c) {
1969		reason = "older than the version bundled with this driver";
1970		goto install;
1971	}
1972
1973	if (t4_fw_install == 2 && k != c) {
1974		reason = "different than the version bundled with this driver";
1975		goto install;
1976	}
1977
1978	return (0);
1979
1980install:
1981	if (t4_fw_install == 0) {
1982		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1983		    "but the driver is prohibited from installing a different "
1984		    "firmware on the card.\n",
1985		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1986		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1987
1988		return (0);
1989	}
1990
1991	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1992	    "installing firmware %u.%u.%u.%u on card.\n",
1993	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1994	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1995	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1996	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1997
1998	return (1);
1999}
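
/*
 * In short (k = KLD firmware version, c = card firmware version): install if
 * the card's firmware is unusable, if k > c, or if t4_fw_install is 2 and
 * k != c; t4_fw_install == 0 vetoes any install and merely logs a complaint.
 */
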
2000/*
2001 * Establish contact with the firmware, determine whether we are the master
2002 * driver, and whether we are responsible for chip initialization.
2003 */
2004static int
2005prep_firmware(struct adapter *sc)
2006{
2007	const struct firmware *fw = NULL, *default_cfg;
2008	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2009	enum dev_state state;
2010	struct fw_info *fw_info;
2011	struct fw_hdr *card_fw;		/* fw on the card */
2012	const struct fw_hdr *kld_fw;	/* fw in the KLD */
2013	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
2014					   against */
2015
2016	/* Contact firmware. */
2017	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2018	if (rc < 0 || state == DEV_STATE_ERR) {
2019		rc = -rc;
2020		device_printf(sc->dev,
2021		    "failed to connect to the firmware: %d, %d.\n", rc, state);
2022		return (rc);
2023	}
2024	pf = rc;
2025	if (pf == sc->mbox)
2026		sc->flags |= MASTER_PF;
2027	else if (state == DEV_STATE_UNINIT) {
2028		/*
2029		 * We didn't get to be the master so we definitely won't be
2030		 * configuring the chip.  It's a bug if someone else hasn't
2031		 * configured it already.
2032		 */
2033		device_printf(sc->dev, "couldn't be master(%d), "
2034		    "device not already initialized either(%d).\n", rc, state);
2035		return (EDOOFUS);
2036	}
2037
2038	/* This is the firmware whose headers the driver was compiled against */
2039	fw_info = find_fw_info(chip_id(sc));
2040	if (fw_info == NULL) {
2041		device_printf(sc->dev,
2042		    "unable to look up firmware information for chip %d.\n",
2043		    chip_id(sc));
2044		return (EINVAL);
2045	}
2046	drv_fw = &fw_info->fw_hdr;
2047
2048	/*
2049	 * The firmware KLD contains many modules.  The KLD name is also the
2050	 * name of the module that contains the default config file.
2051	 */
2052	default_cfg = firmware_get(fw_info->kld_name);
2053
2054	/* Read the header of the firmware on the card */
2055	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2056	rc = -t4_read_flash(sc, FLASH_FW_START,
2057	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2058	if (rc == 0)
2059		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
2060	else {
2061		device_printf(sc->dev,
2062		    "Unable to read card's firmware header: %d\n", rc);
2063		card_fw_usable = 0;
2064	}
2065
2066	/* This is the firmware in the KLD */
2067	fw = firmware_get(fw_info->fw_mod_name);
2068	if (fw != NULL) {
2069		kld_fw = (const void *)fw->data;
2070		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2071	} else {
2072		kld_fw = NULL;
2073		kld_fw_usable = 0;
2074	}
2075
2076	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2077	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2078		/*
2079		 * Common case: the firmware on the card is an exact match and
2080		 * the KLD is an exact match too, or the KLD is
2081		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2082		 * here -- use cxgbetool loadfw if you want to reinstall the
2083		 * same firmware as the one on the card.
2084		 */
2085	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2086	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2087	    be32toh(card_fw->fw_ver))) {
2088
2089		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2090		if (rc != 0) {
2091			device_printf(sc->dev,
2092			    "failed to install firmware: %d\n", rc);
2093			goto done;
2094		}
2095
2096		/* Installed successfully, update the cached header too. */
2097		memcpy(card_fw, kld_fw, sizeof(*card_fw));
2098		card_fw_usable = 1;
2099		need_fw_reset = 0;	/* already reset as part of load_fw */
2100	}
2101
2102	if (!card_fw_usable) {
2103		uint32_t d, c, k;
2104
2105		d = ntohl(drv_fw->fw_ver);
2106		c = ntohl(card_fw->fw_ver);
2107		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2108
2109		device_printf(sc->dev, "Cannot find a usable firmware: "
2110		    "fw_install %d, chip state %d, "
2111		    "driver compiled with %d.%d.%d.%d, "
2112		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2113		    t4_fw_install, state,
2114		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2115		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2116		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2117		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2118		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2119		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2120		rc = EINVAL;
2121		goto done;
2122	}
2123
2124	/* We're using whatever's on the card and it's known to be good. */
2125	sc->params.fw_vers = ntohl(card_fw->fw_ver);
2126	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2127	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2128	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2129	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2130	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2131	t4_get_tp_version(sc, &sc->params.tp_vers);
2132
2133	/* Reset device */
2134	if (need_fw_reset &&
2135	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2136		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2137		if (rc != ETIMEDOUT && rc != EIO)
2138			t4_fw_bye(sc, sc->mbox);
2139		goto done;
2140	}
2141	sc->flags |= FW_OK;
2142
2143	rc = get_params__pre_init(sc);
2144	if (rc != 0)
2145		goto done; /* error message displayed already */
2146
2147	/* Partition adapter resources as specified in the config file. */
2148	if (state == DEV_STATE_UNINIT) {
2149
2150		KASSERT(sc->flags & MASTER_PF,
2151		    ("%s: trying to change chip settings when not master.",
2152		    __func__));
2153
2154		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2155		if (rc != 0)
2156			goto done;	/* error message displayed already */
2157
2158		t4_tweak_chip_settings(sc);
2159
2160		/* get basic stuff going */
2161		rc = -t4_fw_initialize(sc, sc->mbox);
2162		if (rc != 0) {
2163			device_printf(sc->dev, "fw init failed: %d.\n", rc);
2164			goto done;
2165		}
2166	} else {
2167		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2168		sc->cfcsum = 0;
2169	}
2170
2171done:
2172	free(card_fw, M_CXGBE);
2173	if (fw != NULL)
2174		firmware_put(fw, FIRMWARE_UNLOAD);
2175	if (default_cfg != NULL)
2176		firmware_put(default_cfg, FIRMWARE_UNLOAD);
2177
2178	return (rc);
2179}
2180
2181#define FW_PARAM_DEV(param) \
2182	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2183	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2184#define FW_PARAM_PFVF(param) \
2185	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2186	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
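
/*
 * Example: FW_PARAM_DEV(CF) names the device-wide "config file" parameter and
 * FW_PARAM_PFVF(L2T_START) the per-PF/VF L2 table start; these encoded values
 * are what get handed to t4_query_params/t4_set_params below.
 */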
2187
2188/*
2189 * Partition chip resources for use between various PFs, VFs, etc.
2190 */
2191static int
2192partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2193    const char *name_prefix)
2194{
2195	const struct firmware *cfg = NULL;
2196	int rc = 0;
2197	struct fw_caps_config_cmd caps;
2198	uint32_t mtype, moff, finicsum, cfcsum;
2199
2200	/*
2201	 * Figure out what configuration file to use.  Pick the default config
2202	 * file for the card if the user hasn't specified one explicitly.
2203	 */
2204	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2205	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2206		/* Card specific overrides go here. */
2207		if (pci_get_device(sc->dev) == 0x440a)
2208			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2209		if (is_fpga(sc))
2210			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2211	}
2212
2213	/*
2214	 * We need to load another module if the profile is anything except
2215	 * "default" or "flash".
2216	 */
2217	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2218	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2219		char s[32];
2220
2221		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2222		cfg = firmware_get(s);
2223		if (cfg == NULL) {
2224			if (default_cfg != NULL) {
2225				device_printf(sc->dev,
2226				    "unable to load module \"%s\" for "
2227				    "configuration profile \"%s\", will use "
2228				    "the default config file instead.\n",
2229				    s, sc->cfg_file);
2230				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2231				    "%s", DEFAULT_CF);
2232			} else {
2233				device_printf(sc->dev,
2234				    "unable to load module \"%s\" for "
2235				    "configuration profile \"%s\", will use "
2236				    "the config file on the card's flash "
2237				    "instead.\n", s, sc->cfg_file);
2238				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2239				    "%s", FLASH_CF);
2240			}
2241		}
2242	}
2243
2244	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2245	    default_cfg == NULL) {
2246		device_printf(sc->dev,
2247		    "default config file not available, will use the config "
2248		    "file on the card's flash instead.\n");
2249		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2250	}
2251
2252	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2253		u_int cflen, i, n;
2254		const uint32_t *cfdata;
2255		uint32_t param, val, addr, off, mw_base, mw_aperture;
2256
2257		KASSERT(cfg != NULL || default_cfg != NULL,
2258		    ("%s: no config to upload", __func__));
2259
2260		/*
2261		 * Ask the firmware where it wants us to upload the config file.
2262		 */
2263		param = FW_PARAM_DEV(CF);
2264		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2265		if (rc != 0) {
2266			/* No support for config file?  Shouldn't happen. */
2267			device_printf(sc->dev,
2268			    "failed to query config file location: %d.\n", rc);
2269			goto done;
2270		}
2271		mtype = G_FW_PARAMS_PARAM_Y(val);
2272		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2273
2274		/*
2275		 * XXX: sheer laziness.  We deliberately added 4 bytes of
2276		 * useless stuffing/comments at the end of the config file so
2277		 * it's ok to simply throw away the last remaining bytes when
2278		 * the config file is not an exact multiple of 4.  This also
2279		 * helps with the validate_mt_off_len check.
2280		 */
2281		if (cfg != NULL) {
2282			cflen = cfg->datasize & ~3;
2283			cfdata = cfg->data;
2284		} else {
2285			cflen = default_cfg->datasize & ~3;
2286			cfdata = default_cfg->data;
2287		}
2288
2289		if (cflen > FLASH_CFG_MAX_SIZE) {
2290			device_printf(sc->dev,
2291			    "config file too long (%d, max allowed is %d).  "
2292			    "Will try to use the config on the card, if any.\n",
2293			    cflen, FLASH_CFG_MAX_SIZE);
2294			goto use_config_on_flash;
2295		}
2296
2297		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2298		if (rc != 0) {
2299			device_printf(sc->dev,
2300			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
2301			    "Will try to use the config on the card, if any.\n",
2302			    __func__, mtype, moff, cflen, rc);
2303			goto use_config_on_flash;
2304		}
2305
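		/*
		 * The loop below pushes the config file through PCIe memory
		 * window 2, 32 bits at a time, repositioning the window
		 * whenever the remaining aperture is exhausted.
		 */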
2306		memwin_info(sc, 2, &mw_base, &mw_aperture);
2307		while (cflen) {
2308			off = position_memwin(sc, 2, addr);
2309			n = min(cflen, mw_aperture - off);
2310			for (i = 0; i < n; i += 4)
2311				t4_write_reg(sc, mw_base + off + i, *cfdata++);
2312			cflen -= n;
2313			addr += n;
2314		}
2315	} else {
2316use_config_on_flash:
2317		mtype = FW_MEMTYPE_FLASH;
2318		moff = t4_flash_cfg_addr(sc);
2319	}
2320
2321	bzero(&caps, sizeof(caps));
2322	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2323	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2324	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2325	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2326	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2327	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2328	if (rc != 0) {
2329		device_printf(sc->dev,
2330		    "failed to pre-process config file: %d "
2331		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2332		goto done;
2333	}
2334
2335	finicsum = be32toh(caps.finicsum);
2336	cfcsum = be32toh(caps.cfcsum);
2337	if (finicsum != cfcsum) {
2338		device_printf(sc->dev,
2339		    "WARNING: config file checksum mismatch: %08x %08x\n",
2340		    finicsum, cfcsum);
2341	}
2342	sc->cfcsum = cfcsum;
2343
2344#define LIMIT_CAPS(x) do { \
2345	caps.x &= htobe16(t4_##x##_allowed); \
2346	sc->x = be16toh(caps.x); \
2347} while (0)
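
/*
 * E.g. LIMIT_CAPS(toecaps) masks the firmware-reported caps.toecaps
 * (big-endian) with the administratively allowed set t4_toecaps_allowed and
 * records the surviving capabilities in sc->toecaps in host order.
 */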
2348
2349	/*
2350	 * Let the firmware know what features will (not) be used so it can tune
2351	 * things accordingly.
2352	 */
2353	LIMIT_CAPS(linkcaps);
2354	LIMIT_CAPS(niccaps);
2355	LIMIT_CAPS(toecaps);
2356	LIMIT_CAPS(rdmacaps);
2357	LIMIT_CAPS(iscsicaps);
2358	LIMIT_CAPS(fcoecaps);
2359#undef LIMIT_CAPS
2360
2361	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2362	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2363	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2364	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2365	if (rc != 0) {
2366		device_printf(sc->dev,
2367		    "failed to process config file: %d.\n", rc);
2368	}
2369done:
2370	if (cfg != NULL)
2371		firmware_put(cfg, FIRMWARE_UNLOAD);
2372	return (rc);
2373}
2374
2375/*
2376 * Retrieve parameters that are needed (or nice to have) very early.
2377 */
2378static int
2379get_params__pre_init(struct adapter *sc)
2380{
2381	int rc;
2382	uint32_t param[2], val[2];
2383	struct fw_devlog_cmd cmd;
2384	struct devlog_params *dlog = &sc->params.devlog;
2385
2386	param[0] = FW_PARAM_DEV(PORTVEC);
2387	param[1] = FW_PARAM_DEV(CCLK);
2388	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2389	if (rc != 0) {
2390		device_printf(sc->dev,
2391		    "failed to query parameters (pre_init): %d.\n", rc);
2392		return (rc);
2393	}
2394
2395	sc->params.portvec = val[0];
2396	sc->params.nports = bitcount32(val[0]);
2397	sc->params.vpd.cclk = val[1];
2398
2399	/* Read device log parameters. */
2400	bzero(&cmd, sizeof(cmd));
2401	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2402	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2403	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2404	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2405	if (rc != 0) {
2406		device_printf(sc->dev,
2407		    "failed to get devlog parameters: %d.\n", rc);
2408		bzero(dlog, sizeof (*dlog));
2409		rc = 0;	/* devlog isn't critical for device operation */
2410	} else {
2411		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2412		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2413		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2414		dlog->size = be32toh(cmd.memsize_devlog);
2415	}
2416
2417	return (rc);
2418}
2419
2420/*
2421 * Retrieve various parameters that are of interest to the driver.  The device
2422 * has been initialized by the firmware at this point.
2423 */
2424static int
2425get_params__post_init(struct adapter *sc)
2426{
2427	int rc;
2428	uint32_t param[7], val[7];
2429	struct fw_caps_config_cmd caps;
2430
2431	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2432	param[1] = FW_PARAM_PFVF(EQ_START);
2433	param[2] = FW_PARAM_PFVF(FILTER_START);
2434	param[3] = FW_PARAM_PFVF(FILTER_END);
2435	param[4] = FW_PARAM_PFVF(L2T_START);
2436	param[5] = FW_PARAM_PFVF(L2T_END);
2437	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2438	if (rc != 0) {
2439		device_printf(sc->dev,
2440		    "failed to query parameters (post_init): %d.\n", rc);
2441		return (rc);
2442	}
2443
2444	sc->sge.iq_start = val[0];
2445	sc->sge.eq_start = val[1];
2446	sc->tids.ftid_base = val[2];
2447	sc->tids.nftids = val[3] - val[2] + 1;
2448	sc->vres.l2t.start = val[4];
2449	sc->vres.l2t.size = val[5] - val[4] + 1;
2450	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2451	    ("%s: L2 table size (%u) larger than expected (%u)",
2452	    __func__, sc->vres.l2t.size, L2T_SIZE));
2453
2454	/* get capabilities */
2455	bzero(&caps, sizeof(caps));
2456	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2457	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2458	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2459	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2460	if (rc != 0) {
2461		device_printf(sc->dev,
2462		    "failed to get card capabilities: %d.\n", rc);
2463		return (rc);
2464	}
2465
2466	if (caps.toecaps) {
2467		/* query offload-related parameters */
2468		param[0] = FW_PARAM_DEV(NTID);
2469		param[1] = FW_PARAM_PFVF(SERVER_START);
2470		param[2] = FW_PARAM_PFVF(SERVER_END);
2471		param[3] = FW_PARAM_PFVF(TDDP_START);
2472		param[4] = FW_PARAM_PFVF(TDDP_END);
2473		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2474		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2475		if (rc != 0) {
2476			device_printf(sc->dev,
2477			    "failed to query TOE parameters: %d.\n", rc);
2478			return (rc);
2479		}
2480		sc->tids.ntids = val[0];
2481		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2482		sc->tids.stid_base = val[1];
2483		sc->tids.nstids = val[2] - val[1] + 1;
2484		sc->vres.ddp.start = val[3];
2485		sc->vres.ddp.size = val[4] - val[3] + 1;
2486		sc->params.ofldq_wr_cred = val[5];
2487		sc->params.offload = 1;
2488	}
2489	if (caps.rdmacaps) {
2490		param[0] = FW_PARAM_PFVF(STAG_START);
2491		param[1] = FW_PARAM_PFVF(STAG_END);
2492		param[2] = FW_PARAM_PFVF(RQ_START);
2493		param[3] = FW_PARAM_PFVF(RQ_END);
2494		param[4] = FW_PARAM_PFVF(PBL_START);
2495		param[5] = FW_PARAM_PFVF(PBL_END);
2496		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2497		if (rc != 0) {
2498			device_printf(sc->dev,
2499			    "failed to query RDMA parameters(1): %d.\n", rc);
2500			return (rc);
2501		}
2502		sc->vres.stag.start = val[0];
2503		sc->vres.stag.size = val[1] - val[0] + 1;
2504		sc->vres.rq.start = val[2];
2505		sc->vres.rq.size = val[3] - val[2] + 1;
2506		sc->vres.pbl.start = val[4];
2507		sc->vres.pbl.size = val[5] - val[4] + 1;
2508
2509		param[0] = FW_PARAM_PFVF(SQRQ_START);
2510		param[1] = FW_PARAM_PFVF(SQRQ_END);
2511		param[2] = FW_PARAM_PFVF(CQ_START);
2512		param[3] = FW_PARAM_PFVF(CQ_END);
2513		param[4] = FW_PARAM_PFVF(OCQ_START);
2514		param[5] = FW_PARAM_PFVF(OCQ_END);
2515		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2516		if (rc != 0) {
2517			device_printf(sc->dev,
2518			    "failed to query RDMA parameters(2): %d.\n", rc);
2519			return (rc);
2520		}
2521		sc->vres.qp.start = val[0];
2522		sc->vres.qp.size = val[1] - val[0] + 1;
2523		sc->vres.cq.start = val[2];
2524		sc->vres.cq.size = val[3] - val[2] + 1;
2525		sc->vres.ocq.start = val[4];
2526		sc->vres.ocq.size = val[5] - val[4] + 1;
2527	}
2528	if (caps.iscsicaps) {
2529		param[0] = FW_PARAM_PFVF(ISCSI_START);
2530		param[1] = FW_PARAM_PFVF(ISCSI_END);
2531		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2532		if (rc != 0) {
2533			device_printf(sc->dev,
2534			    "failed to query iSCSI parameters: %d.\n", rc);
2535			return (rc);
2536		}
2537		sc->vres.iscsi.start = val[0];
2538		sc->vres.iscsi.size = val[1] - val[0] + 1;
2539	}
2540
2541	/*
2542	 * We've got the params we wanted to query via the firmware.  Now grab
2543	 * some others directly from the chip.
2544	 */
2545	rc = t4_read_chip_settings(sc);
2546
2547	return (rc);
2548}
2549
2550static int
2551set_params__post_init(struct adapter *sc)
2552{
2553	uint32_t param, val;
2554
2555	/* ask for encapsulated CPLs */
2556	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2557	val = 1;
2558	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2559
2560	return (0);
2561}
2562
2563#undef FW_PARAM_PFVF
2564#undef FW_PARAM_DEV
2565
2566static void
2567t4_set_desc(struct adapter *sc)
2568{
2569	char buf[128];
2570	struct adapter_params *p = &sc->params;
2571
2572	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2573	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2574	    chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2575
2576	device_set_desc_copy(sc->dev, buf);
2577}
2578
2579static void
2580build_medialist(struct port_info *pi)
2581{
2582	struct ifmedia *media = &pi->media;
2583	int data, m;
2584
2585	PORT_LOCK(pi);
2586
2587	ifmedia_removeall(media);
2588
2589	m = IFM_ETHER | IFM_FDX;
2590	data = (pi->port_type << 8) | pi->mod_type;
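
	/*
	 * data records which (port type, module type) pair produced each
	 * media entry, e.g. (hypothetically) port_type 2 with mod_type 3
	 * encodes as (2 << 8) | 3 = 0x0203.
	 */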
2591
2592	switch (pi->port_type) {
2593	case FW_PORT_TYPE_BT_XFI:
2594		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2595		break;
2596
2597	case FW_PORT_TYPE_BT_XAUI:
2598		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2599		/* fall through */
2600
2601	case FW_PORT_TYPE_BT_SGMII:
2602		ifmedia_add(media, m | IFM_1000_T, data, NULL);
2603		ifmedia_add(media, m | IFM_100_TX, data, NULL);
2604		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2605		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2606		break;
2607
2608	case FW_PORT_TYPE_CX4:
2609		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2610		ifmedia_set(media, m | IFM_10G_CX4);
2611		break;
2612
2613	case FW_PORT_TYPE_SFP:
2614	case FW_PORT_TYPE_FIBER_XFI:
2615	case FW_PORT_TYPE_FIBER_XAUI:
2616		switch (pi->mod_type) {
2617
2618		case FW_PORT_MOD_TYPE_LR:
2619			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2620			ifmedia_set(media, m | IFM_10G_LR);
2621			break;
2622
2623		case FW_PORT_MOD_TYPE_SR:
2624			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2625			ifmedia_set(media, m | IFM_10G_SR);
2626			break;
2627
2628		case FW_PORT_MOD_TYPE_LRM:
2629			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2630			ifmedia_set(media, m | IFM_10G_LRM);
2631			break;
2632
2633		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2634		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2635			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2636			ifmedia_set(media, m | IFM_10G_TWINAX);
2637			break;
2638
2639		case FW_PORT_MOD_TYPE_NONE:
2640			m &= ~IFM_FDX;
2641			ifmedia_add(media, m | IFM_NONE, data, NULL);
2642			ifmedia_set(media, m | IFM_NONE);
2643			break;
2644
2645		case FW_PORT_MOD_TYPE_NA:
2646		case FW_PORT_MOD_TYPE_ER:
2647		default:
2648			device_printf(pi->dev,
2649			    "unknown port_type (%d), mod_type (%d)\n",
2650			    pi->port_type, pi->mod_type);
2651			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2652			ifmedia_set(media, m | IFM_UNKNOWN);
2653			break;
2654		}
2655		break;
2656
2657	case FW_PORT_TYPE_QSFP:
2658		switch (pi->mod_type) {
2659
2660		case FW_PORT_MOD_TYPE_LR:
2661			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2662			ifmedia_set(media, m | IFM_40G_LR4);
2663			break;
2664
2665		case FW_PORT_MOD_TYPE_SR:
2666			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2667			ifmedia_set(media, m | IFM_40G_SR4);
2668			break;
2669
2670		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2671		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2672			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2673			ifmedia_set(media, m | IFM_40G_CR4);
2674			break;
2675
2676		case FW_PORT_MOD_TYPE_NONE:
2677			m &= ~IFM_FDX;
2678			ifmedia_add(media, m | IFM_NONE, data, NULL);
2679			ifmedia_set(media, m | IFM_NONE);
2680			break;
2681
2682		default:
2683			device_printf(pi->dev,
2684			    "unknown port_type (%d), mod_type (%d)\n",
2685			    pi->port_type, pi->mod_type);
2686			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2687			ifmedia_set(media, m | IFM_UNKNOWN);
2688			break;
2689		}
2690		break;
2691
2692	default:
2693		device_printf(pi->dev,
2694		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2695		    pi->mod_type);
2696		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2697		ifmedia_set(media, m | IFM_UNKNOWN);
2698		break;
2699	}
2700
2701	PORT_UNLOCK(pi);
2702}
2703
2704#define FW_MAC_EXACT_CHUNK	7
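
/*
 * A single t4_alloc_mac_filt() call accepts at most FW_MAC_EXACT_CHUNK
 * exact-match addresses, so update_mac_settings() walks the multicast list in
 * chunks of 7, passing del = 1 only on the first chunk so that stale filters
 * are flushed exactly once.
 */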
2705
2706/*
2707 * Program the port's XGMAC based on parameters in ifnet.  The caller also
2708 * indicates which parameters should be programmed (the rest are left alone).
2709 */
2710static int
2711update_mac_settings(struct port_info *pi, int flags)
2712{
2713	int rc;
2714	struct ifnet *ifp = pi->ifp;
2715	struct adapter *sc = pi->adapter;
2716	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2717
2718	ASSERT_SYNCHRONIZED_OP(sc);
2719	KASSERT(flags, ("%s: not told what to update.", __func__));
2720
2721	if (flags & XGMAC_MTU)
2722		mtu = ifp->if_mtu;
2723
2724	if (flags & XGMAC_PROMISC)
2725		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2726
2727	if (flags & XGMAC_ALLMULTI)
2728		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2729
2730	if (flags & XGMAC_VLANEX)
2731		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2732
2733	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2734	    vlanex, false);
2735	if (rc) {
2736		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2737		return (rc);
2738	}
2739
2740	if (flags & XGMAC_UCADDR) {
2741		uint8_t ucaddr[ETHER_ADDR_LEN];
2742
2743		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2744		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2745		    ucaddr, true, true);
2746		if (rc < 0) {
2747			rc = -rc;
2748			if_printf(ifp, "change_mac failed: %d\n", rc);
2749			return (rc);
2750		} else {
2751			pi->xact_addr_filt = rc;
2752			rc = 0;
2753		}
2754	}
2755
2756	if (flags & XGMAC_MCADDRS) {
2757		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2758		int del = 1;
2759		uint64_t hash = 0;
2760		struct ifmultiaddr *ifma;
2761		int i = 0, j;
2762
2763		if_maddr_rlock(ifp);
2764		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2765			if (ifma->ifma_addr->sa_family != AF_LINK)
2766				continue;
2767			mcaddr[i++] =
2768			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2769
2770			if (i == FW_MAC_EXACT_CHUNK) {
2771				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2772				    del, i, mcaddr, NULL, &hash, 0);
2773				if (rc < 0) {
2774					rc = -rc;
2775					for (j = 0; j < i; j++) {
2776						if_printf(ifp,
2777						    "failed to add mc address"
2778						    " %02x:%02x:%02x:"
2779						    "%02x:%02x:%02x rc=%d\n",
2780						    mcaddr[j][0], mcaddr[j][1],
2781						    mcaddr[j][2], mcaddr[j][3],
2782						    mcaddr[j][4], mcaddr[j][5],
2783						    rc);
2784					}
2785					goto mcfail;
2786				}
2787				del = 0;
2788				i = 0;
2789			}
2790		}
2791		if (i > 0) {
2792			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2793			    del, i, mcaddr, NULL, &hash, 0);
2794			if (rc < 0) {
2795				rc = -rc;
2796				for (j = 0; j < i; j++) {
2797					if_printf(ifp,
2798					    "failed to add mc address"
2799					    " %02x:%02x:%02x:"
2800					    "%02x:%02x:%02x rc=%d\n",
2801					    mcaddr[j][0], mcaddr[j][1],
2802					    mcaddr[j][2], mcaddr[j][3],
2803					    mcaddr[j][4], mcaddr[j][5],
2804					    rc);
2805				}
2806				goto mcfail;
2807			}
2808		}
2809
2810		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2811		if (rc != 0)
2812			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
2813mcfail:
2814		if_maddr_runlock(ifp);
2815	}
2816
2817	return (rc);
2818}
2819
2820int
2821begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2822    char *wmesg)
2823{
2824	int rc, pri;
2825
2826#ifdef WITNESS
2827	/* the caller thinks it's ok to sleep, but is it really? */
2828	if (flags & SLEEP_OK)
2829		pause("t4slptst", 1);
2830#endif
2831
2832	if (flags & INTR_OK)
2833		pri = PCATCH;
2834	else
2835		pri = 0;
2836
2837	ADAPTER_LOCK(sc);
2838	for (;;) {
2839
2840		if (pi && IS_DOOMED(pi)) {
2841			rc = ENXIO;
2842			goto done;
2843		}
2844
2845		if (!IS_BUSY(sc)) {
2846			rc = 0;
2847			break;
2848		}
2849
2850		if (!(flags & SLEEP_OK)) {
2851			rc = EBUSY;
2852			goto done;
2853		}
2854
2855		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2856			rc = EINTR;
2857			goto done;
2858		}
2859	}
2860
2861	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2862	SET_BUSY(sc);
2863#ifdef INVARIANTS
2864	sc->last_op = wmesg;
2865	sc->last_op_thr = curthread;
2866#endif
2867
2868done:
2869	if (!(flags & HOLD_LOCK) || rc)
2870		ADAPTER_UNLOCK(sc);
2871
2872	return (rc);
2873}
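
/*
 * Illustrative call pattern (the wait-message string is arbitrary):
 *
 *	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4somop");
 *	if (rc)
 *		return (rc);
 *	...operate on the adapter/port...
 *	end_synchronized_op(sc, 0);
 *
 * Pass HOLD_LOCK to begin and LOCK_HELD to end to keep the adapter lock
 * held across the pair instead.
 */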
2874
2875void
2876end_synchronized_op(struct adapter *sc, int flags)
2877{
2878
2879	if (flags & LOCK_HELD)
2880		ADAPTER_LOCK_ASSERT_OWNED(sc);
2881	else
2882		ADAPTER_LOCK(sc);
2883
2884	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2885	CLR_BUSY(sc);
2886	wakeup(&sc->flags);
2887	ADAPTER_UNLOCK(sc);
2888}
2889
2890static int
2891cxgbe_init_synchronized(struct port_info *pi)
2892{
2893	struct adapter *sc = pi->adapter;
2894	struct ifnet *ifp = pi->ifp;
2895	int rc = 0;
2896
2897	ASSERT_SYNCHRONIZED_OP(sc);
2898
2899	if (isset(&sc->open_device_map, pi->port_id)) {
2900		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2901		    ("mismatch between open_device_map and if_drv_flags"));
2902		return (0);	/* already running */
2903	}
2904
2905	if (!(sc->flags & FULL_INIT_DONE) &&
2906	    ((rc = adapter_full_init(sc)) != 0))
2907		return (rc);	/* error message displayed already */
2908
2909	if (!(pi->flags & PORT_INIT_DONE) &&
2910	    ((rc = port_full_init(pi)) != 0))
2911		return (rc); /* error message displayed already */
2912
2913	rc = update_mac_settings(pi, XGMAC_ALL);
2914	if (rc)
2915		goto done;	/* error message displayed already */
2916
2917	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2918	if (rc != 0) {
2919		if_printf(ifp, "start_link failed: %d\n", rc);
2920		goto done;
2921	}
2922
2923	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2924	if (rc != 0) {
2925		if_printf(ifp, "enable_vi failed: %d\n", rc);
2926		goto done;
2927	}
2928
2929	/*
2930	 * The first iq of the first port to come up is used for tracing.
2931	 */
2932	if (sc->traceq < 0) {
2933		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
2934		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
2935		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
2936		    V_QUEUENUMBER(sc->traceq));
2937		pi->flags |= HAS_TRACEQ;
2938	}
2939
2940	/* all ok */
2941	setbit(&sc->open_device_map, pi->port_id);
2942	PORT_LOCK(pi);
2943	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2944	PORT_UNLOCK(pi);
2945
2946	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2947done:
2948	if (rc != 0)
2949		cxgbe_uninit_synchronized(pi);
2950
2951	return (rc);
2952}
2953
2954/*
2955 * Idempotent.
2956 */
2957static int
2958cxgbe_uninit_synchronized(struct port_info *pi)
2959{
2960	struct adapter *sc = pi->adapter;
2961	struct ifnet *ifp = pi->ifp;
2962	int rc;
2963
2964	ASSERT_SYNCHRONIZED_OP(sc);
2965
2966	/*
2967	 * Disable the VI so that all its data in either direction is discarded
2968	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
2969	 * tick) intact as the TP can deliver negative advice or data that it's
2970	 * holding in its RAM (for an offloaded connection) even after the VI is
2971	 * disabled.
2972	 */
2973	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2974	if (rc) {
2975		if_printf(ifp, "disable_vi failed: %d\n", rc);
2976		return (rc);
2977	}
2978
2979	clrbit(&sc->open_device_map, pi->port_id);
2980	PORT_LOCK(pi);
2981	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2982	PORT_UNLOCK(pi);
2983
2984	pi->link_cfg.link_ok = 0;
2985	pi->link_cfg.speed = 0;
2986	pi->linkdnrc = -1;
2987	t4_os_link_changed(sc, pi->port_id, 0, -1);
2988
2989	return (0);
2990}
2991
2992/*
2993 * It is ok for this function to fail midway and return right away.  t4_detach
2994 * will walk the entire sc->irq list and clean up whatever is valid.
2995 */
2996static int
2997setup_intr_handlers(struct adapter *sc)
2998{
2999	int rc, rid, p, q;
3000	char s[8];
3001	struct irq *irq;
3002	struct port_info *pi;
3003	struct sge_rxq *rxq;
3004#ifdef TCP_OFFLOAD
3005	struct sge_ofld_rxq *ofld_rxq;
3006#endif
3007
3008	/*
3009	 * Set up interrupts.
3010	 */
3011	irq = &sc->irq[0];
3012	rid = sc->intr_type == INTR_INTX ? 0 : 1;
3013	if (sc->intr_count == 1) {
3014		KASSERT(!(sc->flags & INTR_DIRECT),
3015		    ("%s: single interrupt && INTR_DIRECT?", __func__));
3016
3017		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
3018		if (rc != 0)
3019			return (rc);
3020	} else {
3021		/* Multiple interrupts. */
3022		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3023		    ("%s: too few intr.", __func__));
3024
3025		/* The first one is always error intr */
3026		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3027		if (rc != 0)
3028			return (rc);
3029		irq++;
3030		rid++;
3031
3032		/* The second one is always the firmware event queue */
3033		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
3034		    "evt");
3035		if (rc != 0)
3036			return (rc);
3037		irq++;
3038		rid++;
3039
3040		/*
3041		 * Note that if INTR_DIRECT is not set then either the NIC rx
3042		 * queues or the TOE rx queues (but not both) will be taking
3043		 * direct interrupts.
3044		 *
3045		 * There is no need to check for is_offload(sc) as nofldrxq
3046		 * will be 0 if offload is disabled.
3047		 */
3048		for_each_port(sc, p) {
3049			pi = sc->port[p];
3050
3051#ifdef TCP_OFFLOAD
3052			/*
3053			 * Skip over the NIC queues if they aren't taking direct
3054			 * interrupts.
3055			 */
3056			if (!(sc->flags & INTR_DIRECT) &&
3057			    pi->nofldrxq > pi->nrxq)
3058				goto ofld_queues;
3059#endif
3060			rxq = &sc->sge.rxq[pi->first_rxq];
3061			for (q = 0; q < pi->nrxq; q++, rxq++) {
3062				snprintf(s, sizeof(s), "%d.%d", p, q);
3063				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3064				    s);
3065				if (rc != 0)
3066					return (rc);
3067				irq++;
3068				rid++;
3069			}
3070
3071#ifdef TCP_OFFLOAD
3072			/*
3073			 * Skip over the offload queues if they aren't taking
3074			 * direct interrupts.
3075			 */
3076			if (!(sc->flags & INTR_DIRECT))
3077				continue;
3078ofld_queues:
3079			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3080			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3081				snprintf(s, sizeof(s), "%d,%d", p, q);
3082				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3083				    ofld_rxq, s);
3084				if (rc != 0)
3085					return (rc);
3086				irq++;
3087				rid++;
3088			}
3089#endif
3090		}
3091	}
3092
3093	return (0);
3094}
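
/*
 * Resulting vector layout: a single vector services everything; otherwise
 * vector 0 takes errors, vector 1 the firmware event queue, and the rest go
 * to rx queues, described as "port.queue" for NIC queues and "port,queue"
 * for offload queues.
 */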
3095
3096static int
3097adapter_full_init(struct adapter *sc)
3098{
3099	int rc, i;
3100
3101	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3102	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3103	    ("%s: FULL_INIT_DONE already", __func__));
3104
3105	/*
3106	 * Set up queues that belong to the adapter (not any particular port).
3107	 */
3108	rc = t4_setup_adapter_queues(sc);
3109	if (rc != 0)
3110		goto done;
3111
3112	for (i = 0; i < nitems(sc->tq); i++) {
3113		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3114		    taskqueue_thread_enqueue, &sc->tq[i]);
3115		if (sc->tq[i] == NULL) {
3116			device_printf(sc->dev,
3117			    "failed to allocate task queue %d\n", i);
3118			rc = ENOMEM;
3119			goto done;
3120		}
3121		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3122		    device_get_nameunit(sc->dev), i);
3123	}
3124
3125	t4_intr_enable(sc);
3126	sc->flags |= FULL_INIT_DONE;
3127done:
3128	if (rc != 0)
3129		adapter_full_uninit(sc);
3130
3131	return (rc);
3132}
3133
3134static int
3135adapter_full_uninit(struct adapter *sc)
3136{
3137	int i;
3138
3139	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3140
3141	t4_teardown_adapter_queues(sc);
3142
3143	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3144		taskqueue_free(sc->tq[i]);
3145		sc->tq[i] = NULL;
3146	}
3147
3148	sc->flags &= ~FULL_INIT_DONE;
3149
3150	return (0);
3151}
3152
3153static int
3154port_full_init(struct port_info *pi)
3155{
3156	struct adapter *sc = pi->adapter;
3157	struct ifnet *ifp = pi->ifp;
3158	uint16_t *rss;
3159	struct sge_rxq *rxq;
3160	int rc, i, j;
3161
3162	ASSERT_SYNCHRONIZED_OP(sc);
3163	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3164	    ("%s: PORT_INIT_DONE already", __func__));
3165
3166	sysctl_ctx_init(&pi->ctx);
3167	pi->flags |= PORT_SYSCTL_CTX;
3168
3169	/*
3170	 * Allocate tx/rx/fl queues for this port.
3171	 */
3172	rc = t4_setup_port_queues(pi);
3173	if (rc != 0)
3174		goto done;	/* error message displayed already */
3175
3176	/*
3177	 * Setup RSS for this port.  Save a copy of the RSS table for later use.
3178	 */
3179	rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
3180	for (i = 0; i < pi->rss_size;) {
3181		for_each_rxq(pi, j, rxq) {
3182			rss[i++] = rxq->iq.abs_id;
3183			if (i == pi->rss_size)
3184				break;
3185		}
3186	}
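
	/*
	 * Example (hypothetical sizes): with pi->rss_size = 64 and 4 rxqs the
	 * loop above stores each queue's abs_id 16 times, in round-robin
	 * order.
	 */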
3187
3188	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
3189	    pi->rss_size);
3190	if (rc != 0) {
3191		if_printf(ifp, "rss_config failed: %d\n", rc);
3192		goto done;
3193	}
3194
3195	pi->rss = rss;
3196	pi->flags |= PORT_INIT_DONE;
3197done:
3198	if (rc != 0)
3199		port_full_uninit(pi);
3200
3201	return (rc);
3202}
3203
3204/*
3205 * Idempotent.
3206 */
3207static int
3208port_full_uninit(struct port_info *pi)
3209{
3210	struct adapter *sc = pi->adapter;
3211	int i;
3212	struct sge_rxq *rxq;
3213	struct sge_txq *txq;
3214#ifdef TCP_OFFLOAD
3215	struct sge_ofld_rxq *ofld_rxq;
3216	struct sge_wrq *ofld_txq;
3217#endif
3218
3219	if (pi->flags & PORT_INIT_DONE) {
3220
3221		/* Need to quiesce queues.  XXX: ctrl queues? */
3222
3223		for_each_txq(pi, i, txq) {
3224			quiesce_eq(sc, &txq->eq);
3225		}
3226
3227#ifdef TCP_OFFLOAD
3228		for_each_ofld_txq(pi, i, ofld_txq) {
3229			quiesce_eq(sc, &ofld_txq->eq);
3230		}
3231#endif
3232
3233		for_each_rxq(pi, i, rxq) {
3234			quiesce_iq(sc, &rxq->iq);
3235			quiesce_fl(sc, &rxq->fl);
3236		}
3237
3238#ifdef TCP_OFFLOAD
3239		for_each_ofld_rxq(pi, i, ofld_rxq) {
3240			quiesce_iq(sc, &ofld_rxq->iq);
3241			quiesce_fl(sc, &ofld_rxq->fl);
3242		}
3243#endif
3244		free(pi->rss, M_CXGBE);
3245	}
3246
3247	t4_teardown_port_queues(pi);
3248	pi->flags &= ~PORT_INIT_DONE;
3249
3250	return (0);
3251}
3252
3253static void
3254quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3255{
3256	EQ_LOCK(eq);
3257	eq->flags |= EQ_DOOMED;
3258
3259	/*
3260	 * Wait for the response to a credit flush if one's
3261	 * pending.
3262	 */
3263	while (eq->flags & EQ_CRFLUSHED)
3264		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3265	EQ_UNLOCK(eq);
3266
3267	callout_drain(&eq->tx_callout);	/* XXX: iffy */
3268	pause("callout", 10);		/* Still iffy */
3269
3270	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3271}
3272
3273static void
3274quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3275{
3276	(void) sc;	/* unused */
3277
3278	/* Synchronize with the interrupt handler */
3279	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3280		pause("iqfree", 1);
3281}
3282
3283static void
3284quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3285{
3286	mtx_lock(&sc->sfl_lock);
3287	FL_LOCK(fl);
3288	fl->flags |= FL_DOOMED;
3289	FL_UNLOCK(fl);
3290	mtx_unlock(&sc->sfl_lock);
3291
3292	callout_drain(&sc->sfl_callout);
3293	KASSERT((fl->flags & FL_STARVING) == 0,
3294	    ("%s: still starving", __func__));
3295}
3296
3297static int
3298t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3299    driver_intr_t *handler, void *arg, char *name)
3300{
3301	int rc;
3302
3303	irq->rid = rid;
3304	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3305	    RF_SHAREABLE | RF_ACTIVE);
3306	if (irq->res == NULL) {
3307		device_printf(sc->dev,
3308		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3309		return (ENOMEM);
3310	}
3311
3312	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3313	    NULL, handler, arg, &irq->tag);
3314	if (rc != 0) {
3315		device_printf(sc->dev,
3316		    "failed to setup interrupt for rid %d, name %s: %d\n",
3317		    rid, name, rc);
3318	} else if (name)
3319		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3320
3321	return (rc);
3322}
3323
3324static int
3325t4_free_irq(struct adapter *sc, struct irq *irq)
3326{
3327	if (irq->tag)
3328		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3329	if (irq->res)
3330		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3331
3332	bzero(irq, sizeof(*irq));
3333
3334	return (0);
3335}
3336
3337static void
3338reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3339    unsigned int end)
3340{
3341	uint32_t *p = (uint32_t *)(buf + start);
3342
3343	for ( ; start <= end; start += sizeof(uint32_t))
3344		*p++ = t4_read_reg(sc, start);
3345}
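
/*
 * Example: reg_block_dump(sc, buf, 0x1008, 0x1108) reads
 * (0x1108 - 0x1008) / 4 + 1 = 65 registers into buf starting at offset
 * 0x1008; the buffer offset deliberately mirrors the register address.
 */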
3346
3347static void
3348t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3349{
3350	int i, n;
3351	const unsigned int *reg_ranges;
3352	static const unsigned int t4_reg_ranges[] = {
3353		0x1008, 0x1108,
3354		0x1180, 0x11b4,
3355		0x11fc, 0x123c,
3356		0x1300, 0x173c,
3357		0x1800, 0x18fc,
3358		0x3000, 0x30d8,
3359		0x30e0, 0x5924,
3360		0x5960, 0x59d4,
3361		0x5a00, 0x5af8,
3362		0x6000, 0x6098,
3363		0x6100, 0x6150,
3364		0x6200, 0x6208,
3365		0x6240, 0x6248,
3366		0x6280, 0x6338,
3367		0x6370, 0x638c,
3368		0x6400, 0x643c,
3369		0x6500, 0x6524,
3370		0x6a00, 0x6a38,
3371		0x6a60, 0x6a78,
3372		0x6b00, 0x6b84,
3373		0x6bf0, 0x6c84,
3374		0x6cf0, 0x6d84,
3375		0x6df0, 0x6e84,
3376		0x6ef0, 0x6f84,
3377		0x6ff0, 0x7084,
3378		0x70f0, 0x7184,
3379		0x71f0, 0x7284,
3380		0x72f0, 0x7384,
3381		0x73f0, 0x7450,
3382		0x7500, 0x7530,
3383		0x7600, 0x761c,
3384		0x7680, 0x76cc,
3385		0x7700, 0x7798,
3386		0x77c0, 0x77fc,
3387		0x7900, 0x79fc,
3388		0x7b00, 0x7c38,
3389		0x7d00, 0x7efc,
3390		0x8dc0, 0x8e1c,
3391		0x8e30, 0x8e78,
3392		0x8ea0, 0x8f6c,
3393		0x8fc0, 0x9074,
3394		0x90fc, 0x90fc,
3395		0x9400, 0x9458,
3396		0x9600, 0x96bc,
3397		0x9800, 0x9808,
3398		0x9820, 0x983c,
3399		0x9850, 0x9864,
3400		0x9c00, 0x9c6c,
3401		0x9c80, 0x9cec,
3402		0x9d00, 0x9d6c,
3403		0x9d80, 0x9dec,
3404		0x9e00, 0x9e6c,
3405		0x9e80, 0x9eec,
3406		0x9f00, 0x9f6c,
3407		0x9f80, 0x9fec,
3408		0xd004, 0xd03c,
3409		0xdfc0, 0xdfe0,
3410		0xe000, 0xea7c,
3411		0xf000, 0x11110,
3412		0x11118, 0x11190,
3413		0x19040, 0x1906c,
3414		0x19078, 0x19080,
3415		0x1908c, 0x19124,
3416		0x19150, 0x191b0,
3417		0x191d0, 0x191e8,
3418		0x19238, 0x1924c,
3419		0x193f8, 0x19474,
3420		0x19490, 0x194f8,
3421		0x19800, 0x19f30,
3422		0x1a000, 0x1a06c,
3423		0x1a0b0, 0x1a120,
3424		0x1a128, 0x1a138,
3425		0x1a190, 0x1a1c4,
3426		0x1a1fc, 0x1a1fc,
3427		0x1e040, 0x1e04c,
3428		0x1e284, 0x1e28c,
3429		0x1e2c0, 0x1e2c0,
3430		0x1e2e0, 0x1e2e0,
3431		0x1e300, 0x1e384,
3432		0x1e3c0, 0x1e3c8,
3433		0x1e440, 0x1e44c,
3434		0x1e684, 0x1e68c,
3435		0x1e6c0, 0x1e6c0,
3436		0x1e6e0, 0x1e6e0,
3437		0x1e700, 0x1e784,
3438		0x1e7c0, 0x1e7c8,
3439		0x1e840, 0x1e84c,
3440		0x1ea84, 0x1ea8c,
3441		0x1eac0, 0x1eac0,
3442		0x1eae0, 0x1eae0,
3443		0x1eb00, 0x1eb84,
3444		0x1ebc0, 0x1ebc8,
3445		0x1ec40, 0x1ec4c,
3446		0x1ee84, 0x1ee8c,
3447		0x1eec0, 0x1eec0,
3448		0x1eee0, 0x1eee0,
3449		0x1ef00, 0x1ef84,
3450		0x1efc0, 0x1efc8,
3451		0x1f040, 0x1f04c,
3452		0x1f284, 0x1f28c,
3453		0x1f2c0, 0x1f2c0,
3454		0x1f2e0, 0x1f2e0,
3455		0x1f300, 0x1f384,
3456		0x1f3c0, 0x1f3c8,
3457		0x1f440, 0x1f44c,
3458		0x1f684, 0x1f68c,
3459		0x1f6c0, 0x1f6c0,
3460		0x1f6e0, 0x1f6e0,
3461		0x1f700, 0x1f784,
3462		0x1f7c0, 0x1f7c8,
3463		0x1f840, 0x1f84c,
3464		0x1fa84, 0x1fa8c,
3465		0x1fac0, 0x1fac0,
3466		0x1fae0, 0x1fae0,
3467		0x1fb00, 0x1fb84,
3468		0x1fbc0, 0x1fbc8,
3469		0x1fc40, 0x1fc4c,
3470		0x1fe84, 0x1fe8c,
3471		0x1fec0, 0x1fec0,
3472		0x1fee0, 0x1fee0,
3473		0x1ff00, 0x1ff84,
3474		0x1ffc0, 0x1ffc8,
3475		0x20000, 0x2002c,
3476		0x20100, 0x2013c,
3477		0x20190, 0x201c8,
3478		0x20200, 0x20318,
3479		0x20400, 0x20528,
3480		0x20540, 0x20614,
3481		0x21000, 0x21040,
3482		0x2104c, 0x21060,
3483		0x210c0, 0x210ec,
3484		0x21200, 0x21268,
3485		0x21270, 0x21284,
3486		0x212fc, 0x21388,
3487		0x21400, 0x21404,
3488		0x21500, 0x21518,
3489		0x2152c, 0x2153c,
3490		0x21550, 0x21554,
3491		0x21600, 0x21600,
3492		0x21608, 0x21628,
3493		0x21630, 0x2163c,
3494		0x21700, 0x2171c,
3495		0x21780, 0x2178c,
3496		0x21800, 0x21c38,
3497		0x21c80, 0x21d7c,
3498		0x21e00, 0x21e04,
3499		0x22000, 0x2202c,
3500		0x22100, 0x2213c,
3501		0x22190, 0x221c8,
3502		0x22200, 0x22318,
3503		0x22400, 0x22528,
3504		0x22540, 0x22614,
3505		0x23000, 0x23040,
3506		0x2304c, 0x23060,
3507		0x230c0, 0x230ec,
3508		0x23200, 0x23268,
3509		0x23270, 0x23284,
3510		0x232fc, 0x23388,
3511		0x23400, 0x23404,
3512		0x23500, 0x23518,
3513		0x2352c, 0x2353c,
3514		0x23550, 0x23554,
3515		0x23600, 0x23600,
3516		0x23608, 0x23628,
3517		0x23630, 0x2363c,
3518		0x23700, 0x2371c,
3519		0x23780, 0x2378c,
3520		0x23800, 0x23c38,
3521		0x23c80, 0x23d7c,
3522		0x23e00, 0x23e04,
3523		0x24000, 0x2402c,
3524		0x24100, 0x2413c,
3525		0x24190, 0x241c8,
3526		0x24200, 0x24318,
3527		0x24400, 0x24528,
3528		0x24540, 0x24614,
3529		0x25000, 0x25040,
3530		0x2504c, 0x25060,
3531		0x250c0, 0x250ec,
3532		0x25200, 0x25268,
3533		0x25270, 0x25284,
3534		0x252fc, 0x25388,
3535		0x25400, 0x25404,
3536		0x25500, 0x25518,
3537		0x2552c, 0x2553c,
3538		0x25550, 0x25554,
3539		0x25600, 0x25600,
3540		0x25608, 0x25628,
3541		0x25630, 0x2563c,
3542		0x25700, 0x2571c,
3543		0x25780, 0x2578c,
3544		0x25800, 0x25c38,
3545		0x25c80, 0x25d7c,
3546		0x25e00, 0x25e04,
3547		0x26000, 0x2602c,
3548		0x26100, 0x2613c,
3549		0x26190, 0x261c8,
3550		0x26200, 0x26318,
3551		0x26400, 0x26528,
3552		0x26540, 0x26614,
3553		0x27000, 0x27040,
3554		0x2704c, 0x27060,
3555		0x270c0, 0x270ec,
3556		0x27200, 0x27268,
3557		0x27270, 0x27284,
3558		0x272fc, 0x27388,
3559		0x27400, 0x27404,
3560		0x27500, 0x27518,
3561		0x2752c, 0x2753c,
3562		0x27550, 0x27554,
3563		0x27600, 0x27600,
3564		0x27608, 0x27628,
3565		0x27630, 0x2763c,
3566		0x27700, 0x2771c,
3567		0x27780, 0x2778c,
3568		0x27800, 0x27c38,
3569		0x27c80, 0x27d7c,
3570		0x27e00, 0x27e04
3571	};
3572	static const unsigned int t5_reg_ranges[] = {
3573		0x1008, 0x1148,
3574		0x1180, 0x11b4,
3575		0x11fc, 0x123c,
3576		0x1280, 0x173c,
3577		0x1800, 0x18fc,
3578		0x3000, 0x3028,
3579		0x3060, 0x30d8,
3580		0x30e0, 0x30fc,
3581		0x3140, 0x357c,
3582		0x35a8, 0x35cc,
3583		0x35ec, 0x35ec,
3584		0x3600, 0x5624,
3585		0x56cc, 0x575c,
3586		0x580c, 0x5814,
3587		0x5890, 0x58bc,
3588		0x5940, 0x59dc,
3589		0x59fc, 0x5a18,
3590		0x5a60, 0x5a9c,
3591		0x5b94, 0x5bfc,
3592		0x6000, 0x6040,
3593		0x6058, 0x614c,
3594		0x7700, 0x7798,
3595		0x77c0, 0x78fc,
3596		0x7b00, 0x7c54,
3597		0x7d00, 0x7efc,
3598		0x8dc0, 0x8de0,
3599		0x8df8, 0x8e84,
3600		0x8ea0, 0x8f84,
3601		0x8fc0, 0x90f8,
3602		0x9400, 0x9470,
3603		0x9600, 0x96f4,
3604		0x9800, 0x9808,
3605		0x9820, 0x983c,
3606		0x9850, 0x9864,
3607		0x9c00, 0x9c6c,
3608		0x9c80, 0x9cec,
3609		0x9d00, 0x9d6c,
3610		0x9d80, 0x9dec,
3611		0x9e00, 0x9e6c,
3612		0x9e80, 0x9eec,
3613		0x9f00, 0x9f6c,
3614		0x9f80, 0xa020,
3615		0xd004, 0xd03c,
3616		0xdfc0, 0xdfe0,
3617		0xe000, 0x11088,
3618		0x1109c, 0x11110,
3619		0x11118, 0x1117c,
3620		0x11190, 0x11204,
3621		0x19040, 0x1906c,
3622		0x19078, 0x19080,
3623		0x1908c, 0x19124,
3624		0x19150, 0x191b0,
3625		0x191d0, 0x191e8,
3626		0x19238, 0x19290,
3627		0x193f8, 0x19474,
3628		0x19490, 0x194cc,
3629		0x194f0, 0x194f8,
3630		0x19c00, 0x19c60,
3631		0x19c94, 0x19e10,
3632		0x19e50, 0x19f34,
3633		0x19f40, 0x19f50,
3634		0x19f90, 0x19fe4,
3635		0x1a000, 0x1a06c,
3636		0x1a0b0, 0x1a120,
3637		0x1a128, 0x1a138,
3638		0x1a190, 0x1a1c4,
3639		0x1a1fc, 0x1a1fc,
3640		0x1e008, 0x1e00c,
3641		0x1e040, 0x1e04c,
3642		0x1e284, 0x1e290,
3643		0x1e2c0, 0x1e2c0,
3644		0x1e2e0, 0x1e2e0,
3645		0x1e300, 0x1e384,
3646		0x1e3c0, 0x1e3c8,
3647		0x1e408, 0x1e40c,
3648		0x1e440, 0x1e44c,
3649		0x1e684, 0x1e690,
3650		0x1e6c0, 0x1e6c0,
3651		0x1e6e0, 0x1e6e0,
3652		0x1e700, 0x1e784,
3653		0x1e7c0, 0x1e7c8,
3654		0x1e808, 0x1e80c,
3655		0x1e840, 0x1e84c,
3656		0x1ea84, 0x1ea90,
3657		0x1eac0, 0x1eac0,
3658		0x1eae0, 0x1eae0,
3659		0x1eb00, 0x1eb84,
3660		0x1ebc0, 0x1ebc8,
3661		0x1ec08, 0x1ec0c,
3662		0x1ec40, 0x1ec4c,
3663		0x1ee84, 0x1ee90,
3664		0x1eec0, 0x1eec0,
3665		0x1eee0, 0x1eee0,
3666		0x1ef00, 0x1ef84,
3667		0x1efc0, 0x1efc8,
3668		0x1f008, 0x1f00c,
3669		0x1f040, 0x1f04c,
3670		0x1f284, 0x1f290,
3671		0x1f2c0, 0x1f2c0,
3672		0x1f2e0, 0x1f2e0,
3673		0x1f300, 0x1f384,
3674		0x1f3c0, 0x1f3c8,
3675		0x1f408, 0x1f40c,
3676		0x1f440, 0x1f44c,
3677		0x1f684, 0x1f690,
3678		0x1f6c0, 0x1f6c0,
3679		0x1f6e0, 0x1f6e0,
3680		0x1f700, 0x1f784,
3681		0x1f7c0, 0x1f7c8,
3682		0x1f808, 0x1f80c,
3683		0x1f840, 0x1f84c,
3684		0x1fa84, 0x1fa90,
3685		0x1fac0, 0x1fac0,
3686		0x1fae0, 0x1fae0,
3687		0x1fb00, 0x1fb84,
3688		0x1fbc0, 0x1fbc8,
3689		0x1fc08, 0x1fc0c,
3690		0x1fc40, 0x1fc4c,
3691		0x1fe84, 0x1fe90,
3692		0x1fec0, 0x1fec0,
3693		0x1fee0, 0x1fee0,
3694		0x1ff00, 0x1ff84,
3695		0x1ffc0, 0x1ffc8,
3696		0x30000, 0x30030,
3697		0x30100, 0x30144,
3698		0x30190, 0x301d0,
3699		0x30200, 0x30318,
3700		0x30400, 0x3052c,
3701		0x30540, 0x3061c,
3702		0x30800, 0x30834,
3703		0x308c0, 0x30908,
3704		0x30910, 0x309ac,
3705		0x30a00, 0x30a2c,
3706		0x30a44, 0x30a50,
3707		0x30a74, 0x30c24,
3708		0x30d00, 0x30d00,
3709		0x30d08, 0x30d14,
3710		0x30d1c, 0x30d20,
3711		0x30d3c, 0x30d50,
3712		0x31200, 0x3120c,
3713		0x31220, 0x31220,
3714		0x31240, 0x31240,
3715		0x31600, 0x3160c,
3716		0x31a00, 0x31a1c,
3717		0x31e00, 0x31e20,
3718		0x31e38, 0x31e3c,
3719		0x31e80, 0x31e80,
3720		0x31e88, 0x31ea8,
3721		0x31eb0, 0x31eb4,
3722		0x31ec8, 0x31ed4,
3723		0x31fb8, 0x32004,
3724		0x32200, 0x32200,
3725		0x32208, 0x32240,
3726		0x32248, 0x32280,
3727		0x32288, 0x322c0,
3728		0x322c8, 0x322fc,
3729		0x32600, 0x32630,
3730		0x32a00, 0x32abc,
3731		0x32b00, 0x32b70,
3732		0x33000, 0x33048,
3733		0x33060, 0x3309c,
3734		0x330f0, 0x33148,
3735		0x33160, 0x3319c,
3736		0x331f0, 0x332e4,
3737		0x332f8, 0x333e4,
3738		0x333f8, 0x33448,
3739		0x33460, 0x3349c,
3740		0x334f0, 0x33548,
3741		0x33560, 0x3359c,
3742		0x335f0, 0x336e4,
3743		0x336f8, 0x337e4,
3744		0x337f8, 0x337fc,
3745		0x33814, 0x33814,
3746		0x3382c, 0x3382c,
3747		0x33880, 0x3388c,
3748		0x338e8, 0x338ec,
3749		0x33900, 0x33948,
3750		0x33960, 0x3399c,
3751		0x339f0, 0x33ae4,
3752		0x33af8, 0x33b10,
3753		0x33b28, 0x33b28,
3754		0x33b3c, 0x33b50,
3755		0x33bf0, 0x33c10,
3756		0x33c28, 0x33c28,
3757		0x33c3c, 0x33c50,
3758		0x33cf0, 0x33cfc,
3759		0x34000, 0x34030,
3760		0x34100, 0x34144,
3761		0x34190, 0x341d0,
3762		0x34200, 0x34318,
3763		0x34400, 0x3452c,
3764		0x34540, 0x3461c,
3765		0x34800, 0x34834,
3766		0x348c0, 0x34908,
3767		0x34910, 0x349ac,
3768		0x34a00, 0x34a2c,
3769		0x34a44, 0x34a50,
3770		0x34a74, 0x34c24,
3771		0x34d00, 0x34d00,
3772		0x34d08, 0x34d14,
3773		0x34d1c, 0x34d20,
3774		0x34d3c, 0x34d50,
3775		0x35200, 0x3520c,
3776		0x35220, 0x35220,
3777		0x35240, 0x35240,
3778		0x35600, 0x3560c,
3779		0x35a00, 0x35a1c,
3780		0x35e00, 0x35e20,
3781		0x35e38, 0x35e3c,
3782		0x35e80, 0x35e80,
3783		0x35e88, 0x35ea8,
3784		0x35eb0, 0x35eb4,
3785		0x35ec8, 0x35ed4,
3786		0x35fb8, 0x36004,
3787		0x36200, 0x36200,
3788		0x36208, 0x36240,
3789		0x36248, 0x36280,
3790		0x36288, 0x362c0,
3791		0x362c8, 0x362fc,
3792		0x36600, 0x36630,
3793		0x36a00, 0x36abc,
3794		0x36b00, 0x36b70,
3795		0x37000, 0x37048,
3796		0x37060, 0x3709c,
3797		0x370f0, 0x37148,
3798		0x37160, 0x3719c,
3799		0x371f0, 0x372e4,
3800		0x372f8, 0x373e4,
3801		0x373f8, 0x37448,
3802		0x37460, 0x3749c,
3803		0x374f0, 0x37548,
3804		0x37560, 0x3759c,
3805		0x375f0, 0x376e4,
3806		0x376f8, 0x377e4,
3807		0x377f8, 0x377fc,
3808		0x37814, 0x37814,
3809		0x3782c, 0x3782c,
3810		0x37880, 0x3788c,
3811		0x378e8, 0x378ec,
3812		0x37900, 0x37948,
3813		0x37960, 0x3799c,
3814		0x379f0, 0x37ae4,
3815		0x37af8, 0x37b10,
3816		0x37b28, 0x37b28,
3817		0x37b3c, 0x37b50,
3818		0x37bf0, 0x37c10,
3819		0x37c28, 0x37c28,
3820		0x37c3c, 0x37c50,
3821		0x37cf0, 0x37cfc,
3822		0x38000, 0x38030,
3823		0x38100, 0x38144,
3824		0x38190, 0x381d0,
3825		0x38200, 0x38318,
3826		0x38400, 0x3852c,
3827		0x38540, 0x3861c,
3828		0x38800, 0x38834,
3829		0x388c0, 0x38908,
3830		0x38910, 0x389ac,
3831		0x38a00, 0x38a2c,
3832		0x38a44, 0x38a50,
3833		0x38a74, 0x38c24,
3834		0x38d00, 0x38d00,
3835		0x38d08, 0x38d14,
3836		0x38d1c, 0x38d20,
3837		0x38d3c, 0x38d50,
3838		0x39200, 0x3920c,
3839		0x39220, 0x39220,
3840		0x39240, 0x39240,
3841		0x39600, 0x3960c,
3842		0x39a00, 0x39a1c,
3843		0x39e00, 0x39e20,
3844		0x39e38, 0x39e3c,
3845		0x39e80, 0x39e80,
3846		0x39e88, 0x39ea8,
3847		0x39eb0, 0x39eb4,
3848		0x39ec8, 0x39ed4,
3849		0x39fb8, 0x3a004,
3850		0x3a200, 0x3a200,
3851		0x3a208, 0x3a240,
3852		0x3a248, 0x3a280,
3853		0x3a288, 0x3a2c0,
3854		0x3a2c8, 0x3a2fc,
3855		0x3a600, 0x3a630,
3856		0x3aa00, 0x3aabc,
3857		0x3ab00, 0x3ab70,
3858		0x3b000, 0x3b048,
3859		0x3b060, 0x3b09c,
3860		0x3b0f0, 0x3b148,
3861		0x3b160, 0x3b19c,
3862		0x3b1f0, 0x3b2e4,
3863		0x3b2f8, 0x3b3e4,
3864		0x3b3f8, 0x3b448,
3865		0x3b460, 0x3b49c,
3866		0x3b4f0, 0x3b548,
3867		0x3b560, 0x3b59c,
3868		0x3b5f0, 0x3b6e4,
3869		0x3b6f8, 0x3b7e4,
3870		0x3b7f8, 0x3b7fc,
3871		0x3b814, 0x3b814,
3872		0x3b82c, 0x3b82c,
3873		0x3b880, 0x3b88c,
3874		0x3b8e8, 0x3b8ec,
3875		0x3b900, 0x3b948,
3876		0x3b960, 0x3b99c,
3877		0x3b9f0, 0x3bae4,
3878		0x3baf8, 0x3bb10,
3879		0x3bb28, 0x3bb28,
3880		0x3bb3c, 0x3bb50,
3881		0x3bbf0, 0x3bc10,
3882		0x3bc28, 0x3bc28,
3883		0x3bc3c, 0x3bc50,
3884		0x3bcf0, 0x3bcfc,
3885		0x3c000, 0x3c030,
3886		0x3c100, 0x3c144,
3887		0x3c190, 0x3c1d0,
3888		0x3c200, 0x3c318,
3889		0x3c400, 0x3c52c,
3890		0x3c540, 0x3c61c,
3891		0x3c800, 0x3c834,
3892		0x3c8c0, 0x3c908,
3893		0x3c910, 0x3c9ac,
3894		0x3ca00, 0x3ca2c,
3895		0x3ca44, 0x3ca50,
3896		0x3ca74, 0x3cc24,
3897		0x3cd00, 0x3cd00,
3898		0x3cd08, 0x3cd14,
3899		0x3cd1c, 0x3cd20,
3900		0x3cd3c, 0x3cd50,
3901		0x3d200, 0x3d20c,
3902		0x3d220, 0x3d220,
3903		0x3d240, 0x3d240,
3904		0x3d600, 0x3d60c,
3905		0x3da00, 0x3da1c,
3906		0x3de00, 0x3de20,
3907		0x3de38, 0x3de3c,
3908		0x3de80, 0x3de80,
3909		0x3de88, 0x3dea8,
3910		0x3deb0, 0x3deb4,
3911		0x3dec8, 0x3ded4,
3912		0x3dfb8, 0x3e004,
3913		0x3e200, 0x3e200,
3914		0x3e208, 0x3e240,
3915		0x3e248, 0x3e280,
3916		0x3e288, 0x3e2c0,
3917		0x3e2c8, 0x3e2fc,
3918		0x3e600, 0x3e630,
3919		0x3ea00, 0x3eabc,
3920		0x3eb00, 0x3eb70,
3921		0x3f000, 0x3f048,
3922		0x3f060, 0x3f09c,
3923		0x3f0f0, 0x3f148,
3924		0x3f160, 0x3f19c,
3925		0x3f1f0, 0x3f2e4,
3926		0x3f2f8, 0x3f3e4,
3927		0x3f3f8, 0x3f448,
3928		0x3f460, 0x3f49c,
3929		0x3f4f0, 0x3f548,
3930		0x3f560, 0x3f59c,
3931		0x3f5f0, 0x3f6e4,
3932		0x3f6f8, 0x3f7e4,
3933		0x3f7f8, 0x3f7fc,
3934		0x3f814, 0x3f814,
3935		0x3f82c, 0x3f82c,
3936		0x3f880, 0x3f88c,
3937		0x3f8e8, 0x3f8ec,
3938		0x3f900, 0x3f948,
3939		0x3f960, 0x3f99c,
3940		0x3f9f0, 0x3fae4,
3941		0x3faf8, 0x3fb10,
3942		0x3fb28, 0x3fb28,
3943		0x3fb3c, 0x3fb50,
3944		0x3fbf0, 0x3fc10,
3945		0x3fc28, 0x3fc28,
3946		0x3fc3c, 0x3fc50,
3947		0x3fcf0, 0x3fcfc,
3948		0x40000, 0x4000c,
3949		0x40040, 0x40068,
3950		0x4007c, 0x40144,
3951		0x40180, 0x4018c,
3952		0x40200, 0x40298,
3953		0x402ac, 0x4033c,
3954		0x403f8, 0x403fc,
3955		0x41304, 0x413c4,
3956		0x41400, 0x4141c,
3957		0x41480, 0x414d0,
3958		0x44000, 0x44078,
3959		0x440c0, 0x44278,
3960		0x442c0, 0x44478,
3961		0x444c0, 0x44678,
3962		0x446c0, 0x44878,
3963		0x448c0, 0x449fc,
3964		0x45000, 0x45068,
3965		0x45080, 0x45084,
3966		0x450a0, 0x450b0,
3967		0x45200, 0x45268,
3968		0x45280, 0x45284,
3969		0x452a0, 0x452b0,
3970		0x460c0, 0x460e4,
3971		0x47000, 0x4708c,
3972		0x47200, 0x47250,
3973		0x47400, 0x47420,
3974		0x47600, 0x47618,
3975		0x47800, 0x47814,
3976		0x48000, 0x4800c,
3977		0x48040, 0x48068,
3978		0x4807c, 0x48144,
3979		0x48180, 0x4818c,
3980		0x48200, 0x48298,
3981		0x482ac, 0x4833c,
3982		0x483f8, 0x483fc,
3983		0x49304, 0x493c4,
3984		0x49400, 0x4941c,
3985		0x49480, 0x494d0,
3986		0x4c000, 0x4c078,
3987		0x4c0c0, 0x4c278,
3988		0x4c2c0, 0x4c478,
3989		0x4c4c0, 0x4c678,
3990		0x4c6c0, 0x4c878,
3991		0x4c8c0, 0x4c9fc,
3992		0x4d000, 0x4d068,
3993		0x4d080, 0x4d084,
3994		0x4d0a0, 0x4d0b0,
3995		0x4d200, 0x4d268,
3996		0x4d280, 0x4d284,
3997		0x4d2a0, 0x4d2b0,
3998		0x4e0c0, 0x4e0e4,
3999		0x4f000, 0x4f08c,
4000		0x4f200, 0x4f250,
4001		0x4f400, 0x4f420,
4002		0x4f600, 0x4f618,
4003		0x4f800, 0x4f814,
4004		0x50000, 0x500cc,
4005		0x50400, 0x50400,
4006		0x50800, 0x508cc,
4007		0x50c00, 0x50c00,
4008		0x51000, 0x5101c,
4009		0x51300, 0x51308,
4010	};
4011
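	/* Pick the register dump ranges that match this generation of chip. */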
4012	if (is_t4(sc)) {
4013		reg_ranges = &t4_reg_ranges[0];
4014		n = nitems(t4_reg_ranges);
4015	} else {
4016		reg_ranges = &t5_reg_ranges[0];
4017		n = nitems(t5_reg_ranges);
4018	}
4019
4020	regs->version = chip_id(sc) | chip_rev(sc) << 10;
4021	for (i = 0; i < n; i += 2)
4022		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
4023}
4024
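/*
 * Callout that runs once a second while the interface is up.  It pulls the
 * hardware port statistics and refreshes the ifnet counters from them.
 */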
4025static void
4026cxgbe_tick(void *arg)
4027{
4028	struct port_info *pi = arg;
4029	struct ifnet *ifp = pi->ifp;
4030	struct sge_txq *txq;
4031	int i, drops;
4032	struct port_stats *s = &pi->stats;
4033
4034	PORT_LOCK(pi);
4035	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4036		PORT_UNLOCK(pi);
4037		return;	/* without scheduling another callout */
4038	}
4039
4040	t4_get_port_stats(pi->adapter, pi->tx_chan, s);
4041
4042	ifp->if_opackets = s->tx_frames - s->tx_pause;
4043	ifp->if_ipackets = s->rx_frames - s->rx_pause;
4044	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
4045	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
4046	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
4047	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
4048	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
4049	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
4050	    s->rx_trunc3;
4051
4052	drops = s->tx_drop;
4053	for_each_txq(pi, i, txq)
4054		drops += txq->br->br_drops;
4055	ifp->if_snd.ifq_drops = drops;
4056
4057	ifp->if_oerrors = s->tx_error_frames;
4058	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4059	    s->rx_fcs_err + s->rx_len_err;
4060
4061	callout_schedule(&pi->tick, hz);
4062	PORT_UNLOCK(pi);
4063}
4064
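/*
 * vlan(4) config event handler: record the parent ifnet as the cookie of a
 * VLAN interface created on top of a cxgbe port.
 */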
4065static void
4066cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4067{
4068	struct ifnet *vlan;
4069
4070	if (arg != ifp || ifp->if_type != IFT_ETHER)
4071		return;
4072
4073	vlan = VLAN_DEVAT(ifp, vid);
4074	VLAN_SETCOOKIE(vlan, ifp);
4075}
4076
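/*
 * Default handler for CPL messages with no registered handler: panic under
 * INVARIANTS, otherwise log the stray opcode and free the payload.
 */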
4077static int
4078cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4079{
4080
4081#ifdef INVARIANTS
4082	panic("%s: opcode 0x%02x on iq %p with payload %p",
4083	    __func__, rss->opcode, iq, m);
4084#else
4085	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4086	    __func__, rss->opcode, iq, m);
4087	m_freem(m);
4088#endif
4089	return (EDOOFUS);
4090}
4091
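/*
 * Install a handler for the given CPL opcode; a NULL handler reinstates the
 * default.  The update is a single atomic store.
 */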
4092int
4093t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4094{
4095	uintptr_t *loc, new;
4096
4097	if (opcode >= nitems(sc->cpl_handler))
4098		return (EINVAL);
4099
4100	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4101	loc = (uintptr_t *) &sc->cpl_handler[opcode];
4102	atomic_store_rel_ptr(loc, new);
4103
4104	return (0);
4105}
4106
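/* Default handler for async notifications; same policy as cpl_not_handled. */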
4107static int
4108an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4109{
4110
4111#ifdef INVARIANTS
4112	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4113#else
4114	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4115	    __func__, iq, ctrl);
4116#endif
4117	return (EDOOFUS);
4118}
4119
4120int
4121t4_register_an_handler(struct adapter *sc, an_handler_t h)
4122{
4123	uintptr_t *loc, new;
4124
4125	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4126	loc = (uintptr_t *) &sc->an_handler;
4127	atomic_store_rel_ptr(loc, new);
4128
4129	return (0);
4130}
4131
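/* Default handler for firmware messages of an unregistered type. */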
4132static int
4133fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4134{
4135	const struct cpl_fw6_msg *cpl =
4136	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
4137
4138#ifdef INVARIANTS
4139	panic("%s: fw_msg type %d", __func__, cpl->type);
4140#else
4141	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4142#endif
4143	return (EDOOFUS);
4144}
4145
4146int
4147t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4148{
4149	uintptr_t *loc, new;
4150
4151	if (type >= nitems(sc->fw_msg_handler))
4152		return (EINVAL);
4153
4154	/*
4155	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4156	 * handler dispatch table.  Reject any attempt to install a handler for
4157	 * this subtype.
4158	 */
4159	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4160		return (EINVAL);
4161
4162	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4163	loc = (uintptr_t *) &sc->fw_msg_handler[type];
4164	atomic_store_rel_ptr(loc, new);
4165
4166	return (0);
4167}
4168
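/*
 * Set up the dev.t4nex.X sysctl tree for the adapter: device information,
 * tunables, and (under "misc") the debug logs and statistics.
 */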
4169static int
4170t4_sysctls(struct adapter *sc)
4171{
4172	struct sysctl_ctx_list *ctx;
4173	struct sysctl_oid *oid;
4174	struct sysctl_oid_list *children, *c0;
4175	static char *caps[] = {
4176		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4177		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"	/* caps[1] niccaps */
4178		    "\6HASHFILTER\7ETHOFLD",
4179		"\20\1TOE",				/* caps[2] toecaps */
4180		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4181		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4182		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4183		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4184		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4185		    "\4PO_INITIATOR\5PO_TARGET"
4186	};
4187	static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
4188
4189	ctx = device_get_sysctl_ctx(sc->dev);
4190
4191	/*
4192	 * dev.t4nex.X.
4193	 */
4194	oid = device_get_sysctl_tree(sc->dev);
4195	c0 = children = SYSCTL_CHILDREN(oid);
4196
4197	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4198	    sc->params.nports, "# of ports");
4199
4200	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4201	    NULL, chip_rev(sc), "chip hardware revision");
4202
4203	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4204	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4205
4206	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4207	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4208
4209	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4210	    sc->cfcsum, "config file checksum");
4211
4212	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4213	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4214	    sysctl_bitfield, "A", "available doorbells");
4215
4216	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4217	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4218	    sysctl_bitfield, "A", "available link capabilities");
4219
4220	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4221	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4222	    sysctl_bitfield, "A", "available NIC capabilities");
4223
4224	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4225	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4226	    sysctl_bitfield, "A", "available TCP offload capabilities");
4227
4228	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4229	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4230	    sysctl_bitfield, "A", "available RDMA capabilities");
4231
4232	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4233	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4234	    sysctl_bitfield, "A", "available iSCSI capabilities");
4235
4236	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4237	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4238	    sysctl_bitfield, "A", "available FCoE capabilities");
4239
4240	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4241	    sc->params.vpd.cclk, "core clock frequency (in kHz)");
4242
4243	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4244	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4245	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4246	    "interrupt holdoff timer values (us)");
4247
4248	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4249	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4250	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4251	    "interrupt holdoff packet counter values");
4252
4253	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4254	    NULL, sc->tids.nftids, "number of filters");
4255
4256	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4257	    CTLFLAG_RD, sc, 0, sysctl_temperature, "A",
4258	    "chip temperature (in Celsius)");
4259
4260	t4_sge_sysctls(sc, ctx, children);
4261
4262	sc->lro_timeout = 100;
4263	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4264	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4265
4266#ifdef SBUF_DRAIN
4267	/*
4268	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4269	 */
4270	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4271	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4272	    "logs and miscellaneous information");
4273	children = SYSCTL_CHILDREN(oid);
4274
4275	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4276	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4277	    sysctl_cctrl, "A", "congestion control");
4278
4279	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4280	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4281	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4282
4283	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4284	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4285	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4286
4287	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4288	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4289	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4290
4291	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4292	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4293	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4294
4295	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4296	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4297	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4298
4299	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4300	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4301	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4302
4303	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4304	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4305	    sysctl_cim_la, "A", "CIM logic analyzer");
4306
4307	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4308	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4309	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4310
4311	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4312	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4313	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4314
4315	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4316	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4317	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4318
4319	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4320	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4321	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4322
4323	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4324	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4325	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4326
4327	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4328	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4329	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4330
4331	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4332	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4333	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4334
4335	if (is_t5(sc)) {
4336		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4337		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4338		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4339
4340		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4341		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4342		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4343	}
4344
4345	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4346	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4347	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4348
4349	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4350	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4351	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4352
4353	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4354	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4355	    sysctl_cpl_stats, "A", "CPL statistics");
4356
4357	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4358	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4359	    sysctl_ddp_stats, "A", "DDP statistics");
4360
4361	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4362	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4363	    sysctl_devlog, "A", "firmware's device log");
4364
4365	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4366	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4367	    sysctl_fcoe_stats, "A", "FCoE statistics");
4368
4369	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4370	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4371	    sysctl_hw_sched, "A", "hardware scheduler");
4372
4373	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4374	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4375	    sysctl_l2t, "A", "hardware L2 table");
4376
4377	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4378	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4379	    sysctl_lb_stats, "A", "loopback statistics");
4380
4381	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4382	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4383	    sysctl_meminfo, "A", "memory regions");
4384
4385	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4386	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4387	    sysctl_mps_tcam, "A", "MPS TCAM entries");
4388
4389	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4390	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4391	    sysctl_path_mtus, "A", "path MTUs");
4392
4393	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4394	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4395	    sysctl_pm_stats, "A", "PM statistics");
4396
4397	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4398	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4399	    sysctl_rdma_stats, "A", "RDMA statistics");
4400
4401	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4402	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4403	    sysctl_tcp_stats, "A", "TCP statistics");
4404
4405	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4406	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4407	    sysctl_tids, "A", "TID information");
4408
4409	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4410	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4411	    sysctl_tp_err_stats, "A", "TP error statistics");
4412
4413	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4414	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4415	    sysctl_tp_la, "A", "TP logic analyzer");
4416
4417	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4418	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4419	    sysctl_tx_rate, "A", "Tx rate");
4420
4421	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4422	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4423	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4424
4425	if (is_t5(sc)) {
4426		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4427		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4428		    sysctl_wcwr_stats, "A", "write combined work requests");
4429	}
4430#endif
4431
4432#ifdef TCP_OFFLOAD
4433	if (is_offload(sc)) {
4434		/*
4435		 * dev.t4nex.X.toe.
4436		 */
4437		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4438		    NULL, "TOE parameters");
4439		children = SYSCTL_CHILDREN(oid);
4440
4441		sc->tt.sndbuf = 256 * 1024;
4442		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4443		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4444
4445		sc->tt.ddp = 0;
4446		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4447		    &sc->tt.ddp, 0, "DDP allowed");
4448
4449		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4450		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4451		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4452
4453		sc->tt.ddp_thres =
4454		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4455		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4456		    &sc->tt.ddp_thres, 0, "DDP threshold");
4457
4458		sc->tt.rx_coalesce = 1;
4459		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4460		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4461	}
4462#endif
4463
4465	return (0);
4466}
4467
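/*
 * Set up the dev.cxgbe.X sysctl tree for a port, including the per-port MAC
 * statistics under dev.cxgbe.X.stats.
 */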
4468static int
4469cxgbe_sysctls(struct port_info *pi)
4470{
4471	struct sysctl_ctx_list *ctx;
4472	struct sysctl_oid *oid;
4473	struct sysctl_oid_list *children;
4474
4475	ctx = device_get_sysctl_ctx(pi->dev);
4476
4477	/*
4478	 * dev.cxgbe.X.
4479	 */
4480	oid = device_get_sysctl_tree(pi->dev);
4481	children = SYSCTL_CHILDREN(oid);
4482
4483	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4484	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4485	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4486		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4487		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4488		    "PHY temperature (in Celsius)");
4489		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4490		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4491		    "PHY firmware version");
4492	}
4493	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4494	    &pi->nrxq, 0, "# of rx queues");
4495	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4496	    &pi->ntxq, 0, "# of tx queues");
4497	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4498	    &pi->first_rxq, 0, "index of first rx queue");
4499	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4500	    &pi->first_txq, 0, "index of first tx queue");
4501
4502#ifdef TCP_OFFLOAD
4503	if (is_offload(pi->adapter)) {
4504		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4505		    &pi->nofldrxq, 0,
4506		    "# of rx queues for offloaded TCP connections");
4507		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4508		    &pi->nofldtxq, 0,
4509		    "# of tx queues for offloaded TCP connections");
4510		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4511		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4512		    "index of first TOE rx queue");
4513		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4514		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
4515		    "index of first TOE tx queue");
4516	}
4517#endif
4518
4519	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4520	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4521	    "holdoff timer index");
4522	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4523	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4524	    "holdoff packet counter index");
4525
4526	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4527	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4528	    "rx queue size");
4529	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4530	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4531	    "tx queue size");
4532
4533	/*
4534	 * dev.cxgbe.X.stats.
4535	 */
4536	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4537	    NULL, "port statistics");
4538	children = SYSCTL_CHILDREN(oid);
4539
4540#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4541	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4542	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
4543	    sysctl_handle_t4_reg64, "QU", desc)
4544
4545	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4546	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4547	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4548	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4549	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4550	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4551	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4552	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4553	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4554	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4555	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4556	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4557	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4558	    "# of tx frames in this range",
4559	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4560	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4561	    "# of tx frames in this range",
4562	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4563	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4564	    "# of tx frames in this range",
4565	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4566	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4567	    "# of tx frames in this range",
4568	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4569	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4570	    "# of tx frames in this range",
4571	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4572	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4573	    "# of tx frames in this range",
4574	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4575	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4576	    "# of tx frames in this range",
4577	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4578	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4579	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4580	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4581	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4582	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4583	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4584	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4585	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4586	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4587	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4588	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4589	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4590	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4591	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4592	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4593	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4594	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4595	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4596	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4597	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4598
4599	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4600	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4601	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4602	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4603	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4604	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4605	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4606	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4607	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4608	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4609	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4610	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4611	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4612	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4613	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4614	    "# of frames received with bad FCS",
4615	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4616	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4617	    "# of frames received with length error",
4618	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4619	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4620	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4621	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4622	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4623	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4624	    "# of rx frames in this range",
4625	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4626	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4627	    "# of rx frames in this range",
4628	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4629	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4630	    "# of rx frames in this range",
4631	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4632	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4633	    "# of rx frames in this range",
4634	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4635	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4636	    "# of rx frames in this range",
4637	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4638	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4639	    "# of rx frames in this range",
4640	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4641	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4642	    "# of rx frames in this range",
4643	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4644	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4645	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4646	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4647	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4648	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4649	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4650	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4651	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4652	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4653	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4654	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4655	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4656	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4657	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4658	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4659	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4660	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4661	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4662
4663#undef SYSCTL_ADD_T4_REG64
4664
4665#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4666	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4667	    &pi->stats.name, desc)
4668
4669		/* We get these from port_stats and they may be stale by up to 1s */
4670	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4671	    "# drops due to buffer-group 0 overflows");
4672	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4673	    "# drops due to buffer-group 1 overflows");
4674	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4675	    "# drops due to buffer-group 2 overflows");
4676	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4677	    "# drops due to buffer-group 3 overflows");
4678	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4679	    "# of buffer-group 0 truncated packets");
4680	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4681	    "# of buffer-group 1 truncated packets");
4682	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4683	    "# of buffer-group 2 truncated packets");
4684	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4685	    "# of buffer-group 3 truncated packets");
4686
4687#undef SYSCTL_ADD_T4_PORTSTAT
4688
4689	return (0);
4690}
4691
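/* Display the array of ints at arg1 (arg2 bytes long) as a string. */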
4692static int
4693sysctl_int_array(SYSCTL_HANDLER_ARGS)
4694{
4695	int rc, *i;
4696	struct sbuf sb;
4697
4698	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4699	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4700		sbuf_printf(&sb, "%d ", *i);
4701	sbuf_trim(&sb);
4702	sbuf_finish(&sb);
4703	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4704	sbuf_delete(&sb);
4705	return (rc);
4706}
4707
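/* Display the value in arg2 using the %b bit description string in arg1. */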
4708static int
4709sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4710{
4711	int rc;
4712	struct sbuf *sb;
4713
4714	rc = sysctl_wire_old_buffer(req, 0);
4715	if (rc != 0)
4716			return (rc);
4717
4718	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4719	if (sb == NULL)
4720		return (ENOMEM);
4721
4722	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4723	rc = sbuf_finish(sb);
4724	sbuf_delete(sb);
4725
4726	return (rc);
4727}
4728
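/*
 * Read the BT PHY over MDIO: arg2 == 0 reads the temperature (the divide by
 * 256 keeps the register's upper byte), anything else reads the PHY firmware
 * version.
 */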
4729static int
4730sysctl_btphy(SYSCTL_HANDLER_ARGS)
4731{
4732	struct port_info *pi = arg1;
4733	int op = arg2;
4734	struct adapter *sc = pi->adapter;
4735	u_int v;
4736	int rc;
4737
4738	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
4739	if (rc)
4740		return (rc);
4741	/* XXX: magic numbers */
4742	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
4743	    &v);
4744	end_synchronized_op(sc, 0);
4745	if (rc)
4746		return (rc);
4747	if (op == 0)
4748		v /= 256;
4749
4750	rc = sysctl_handle_int(oidp, &v, 0, req);
4751	return (rc);
4752}
4753
4754static int
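/*
 * Get/set the holdoff timer index.  A new value is written to every rx
 * queue's iq immediately, so this can be changed while the queues are in use.
 */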
4755sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
4756{
4757	struct port_info *pi = arg1;
4758	struct adapter *sc = pi->adapter;
4759	int idx, rc, i;
4760	struct sge_rxq *rxq;
4761#ifdef TCP_OFFLOAD
4762	struct sge_ofld_rxq *ofld_rxq;
4763#endif
4764	uint8_t v;
4765
4766	idx = pi->tmr_idx;
4767
4768	rc = sysctl_handle_int(oidp, &idx, 0, req);
4769	if (rc != 0 || req->newptr == NULL)
4770		return (rc);
4771
4772	if (idx < 0 || idx >= SGE_NTIMERS)
4773		return (EINVAL);
4774
4775	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4776	    "t4tmr");
4777	if (rc)
4778		return (rc);
4779
4780	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
4781	for_each_rxq(pi, i, rxq) {
4782#ifdef atomic_store_rel_8
4783		atomic_store_rel_8(&rxq->iq.intr_params, v);
4784#else
4785		rxq->iq.intr_params = v;
4786#endif
4787	}
4788#ifdef TCP_OFFLOAD
4789	for_each_ofld_rxq(pi, i, ofld_rxq) {
4790#ifdef atomic_store_rel_8
4791		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
4792#else
4793		ofld_rxq->iq.intr_params = v;
4794#endif
4795	}
4796#endif
4797	pi->tmr_idx = idx;
4798
4799	end_synchronized_op(sc, LOCK_HELD);
4800	return (0);
4801}
4802
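/*
 * Get/set the holdoff packet count index.  This is latched into the queues
 * when they are created, so it cannot change once PORT_INIT_DONE is set.
 */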
4803static int
4804sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4805{
4806	struct port_info *pi = arg1;
4807	struct adapter *sc = pi->adapter;
4808	int idx, rc;
4809
4810	idx = pi->pktc_idx;
4811
4812	rc = sysctl_handle_int(oidp, &idx, 0, req);
4813	if (rc != 0 || req->newptr == NULL)
4814		return (rc);
4815
4816	if (idx < -1 || idx >= SGE_NCOUNTERS)
4817		return (EINVAL);
4818
4819	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4820	    "t4pktc");
4821	if (rc)
4822		return (rc);
4823
4824	if (pi->flags & PORT_INIT_DONE)
4825		rc = EBUSY; /* cannot be changed once the queues are created */
4826	else
4827		pi->pktc_idx = idx;
4828
4829	end_synchronized_op(sc, LOCK_HELD);
4830	return (rc);
4831}
4832
4833static int
4834sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4835{
4836	struct port_info *pi = arg1;
4837	struct adapter *sc = pi->adapter;
4838	int qsize, rc;
4839
4840	qsize = pi->qsize_rxq;
4841
4842	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4843	if (rc != 0 || req->newptr == NULL)
4844		return (rc);
4845
4846	if (qsize < 128 || (qsize & 7))
4847		return (EINVAL);
4848
4849	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4850	    "t4rxqs");
4851	if (rc)
4852		return (rc);
4853
4854	if (pi->flags & PORT_INIT_DONE)
4855		rc = EBUSY; /* cannot be changed once the queues are created */
4856	else
4857		pi->qsize_rxq = qsize;
4858
4859	end_synchronized_op(sc, LOCK_HELD);
4860	return (rc);
4861}
4862
4863static int
4864sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4865{
4866	struct port_info *pi = arg1;
4867	struct adapter *sc = pi->adapter;
4868	int qsize, rc;
4869
4870	qsize = pi->qsize_txq;
4871
4872	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4873	if (rc != 0 || req->newptr == NULL)
4874		return (rc);
4875
4876		/* bufring size must be a power of 2 */
4877	if (qsize < 128 || !powerof2(qsize))
4878		return (EINVAL);
4879
4880	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4881	    "t4txqs");
4882	if (rc)
4883		return (rc);
4884
4885	if (pi->flags & PORT_INIT_DONE)
4886		rc = EBUSY; /* cannot be changed once the queues are created */
4887	else
4888		pi->qsize_txq = qsize;
4889
4890	end_synchronized_op(sc, LOCK_HELD);
4891	return (rc);
4892}
4893
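/* Report the current value of the 64-bit adapter register whose offset is in arg2. */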
4894static int
4895sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4896{
4897	struct adapter *sc = arg1;
4898	int reg = arg2;
4899	uint64_t val;
4900
4901	val = t4_read_reg64(sc, reg);
4902
4903	return (sysctl_handle_64(oidp, &val, 0, req));
4904}
4905
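/*
 * Query the firmware for the chip temperature sensor (a DEV_DIAG parameter)
 * and report it in Celsius; -1 means the reading is unavailable.
 */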
4906static int
4907sysctl_temperature(SYSCTL_HANDLER_ARGS)
4908{
4909	struct adapter *sc = arg1;
4910	int rc, t;
4911	uint32_t param, val;
4912
4913	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
4914	if (rc)
4915		return (rc);
4916	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4917	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4918	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
4919	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4920	end_synchronized_op(sc, 0);
4921	if (rc)
4922		return (rc);
4923
4924	/* unknown is returned as 0 but we display -1 in that case */
4925	t = val == 0 ? -1 : val;
4926
4927	rc = sysctl_handle_int(oidp, &t, 0, req);
4928	return (rc);
4929}
4930
4931#ifdef SBUF_DRAIN
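/*
 * Dump the congestion control table: the per-window additive increments for
 * each MTU along with the congestion window and decrement factor.
 */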
4932static int
4933sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4934{
4935	struct adapter *sc = arg1;
4936	struct sbuf *sb;
4937	int rc, i;
4938	uint16_t incr[NMTUS][NCCTRL_WIN];
4939	static const char *dec_fac[] = {
4940		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4941		"0.9375"
4942	};
4943
4944	rc = sysctl_wire_old_buffer(req, 0);
4945	if (rc != 0)
4946		return (rc);
4947
4948	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4949	if (sb == NULL)
4950		return (ENOMEM);
4951
4952	t4_read_cong_tbl(sc, incr);
4953
4954	for (i = 0; i < NCCTRL_WIN; ++i) {
4955		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4956		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4957		    incr[5][i], incr[6][i], incr[7][i]);
4958		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4959		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4960		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
4961		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
4962	}
4963
4964	rc = sbuf_finish(sb);
4965	sbuf_delete(sb);
4966
4967	return (rc);
4968}
4969
4970static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
4971	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
4972	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
4973	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
4974};
4975
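/*
 * Dump a single CIM queue.  arg2 selects the queue: 0 to CIM_NUM_IBQ - 1 are
 * the inbound queues, the rest are outbound.
 */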
4976static int
4977sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
4978{
4979	struct adapter *sc = arg1;
4980	struct sbuf *sb;
4981	int rc, i, n, qid = arg2;
4982	uint32_t *buf, *p;
4983	char *qtype;
4984	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4985
4986	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
4987	    ("%s: bad qid %d\n", __func__, qid));
4988
4989	if (qid < CIM_NUM_IBQ) {
4990		/* inbound queue */
4991		qtype = "IBQ";
4992		n = 4 * CIM_IBQ_SIZE;
4993		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4994		rc = t4_read_cim_ibq(sc, qid, buf, n);
4995	} else {
4996		/* outbound queue */
4997		qtype = "OBQ";
4998		qid -= CIM_NUM_IBQ;
4999		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
5000		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5001		rc = t4_read_cim_obq(sc, qid, buf, n);
5002	}
5003
5004	if (rc < 0) {
5005		rc = -rc;
5006		goto done;
5007	}
5008	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
5009
5010	rc = sysctl_wire_old_buffer(req, 0);
5011	if (rc != 0)
5012		goto done;
5013
5014	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5015	if (sb == NULL) {
5016		rc = ENOMEM;
5017		goto done;
5018	}
5019
5020		sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
5021	for (i = 0, p = buf; i < n; i += 16, p += 4)
5022		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5023		    p[2], p[3]);
5024
5025	rc = sbuf_finish(sb);
5026	sbuf_delete(sb);
5027done:
5028	free(buf, M_CXGBE);
5029	return (rc);
5030}
5031
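/*
 * Dump the CIM logic analyzer.  The layout of each entry depends on whether
 * the LA was set up to capture the PC only.
 */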
5032static int
5033sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5034{
5035	struct adapter *sc = arg1;
5036	u_int cfg;
5037	struct sbuf *sb;
5038	uint32_t *buf, *p;
5039	int rc;
5040
5041	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5042	if (rc != 0)
5043		return (rc);
5044
5045	rc = sysctl_wire_old_buffer(req, 0);
5046	if (rc != 0)
5047		return (rc);
5048
5049	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5050	if (sb == NULL)
5051		return (ENOMEM);
5052
5053	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5054	    M_ZERO | M_WAITOK);
5055
5056	rc = -t4_cim_read_la(sc, buf, NULL);
5057	if (rc != 0)
5058		goto done;
5059
5060	sbuf_printf(sb, "Status   Data      PC%s",
5061	    cfg & F_UPDBGLACAPTPCONLY ? "" :
5062	    "     LS0Stat  LS0Addr             LS0Data");
5063
5064	KASSERT((sc->params.cim_la_size & 7) == 0,
5065	    ("%s: p will walk off the end of buf", __func__));
5066
5067	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5068		if (cfg & F_UPDBGLACAPTPCONLY) {
5069			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5070			    p[6], p[7]);
5071			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5072			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5073			    p[4] & 0xff, p[5] >> 8);
5074			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5075			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5076			    p[1] & 0xf, p[2] >> 4);
5077		} else {
5078			sbuf_printf(sb,
5079			    "\n  %02x   %x%07x %x%07x %08x %08x "
5080			    "%08x%08x%08x%08x",
5081			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5082			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5083			    p[6], p[7]);
5084		}
5085	}
5086
	rc = sbuf_finish(sb);
done:
	sbuf_delete(sb);	/* also reached on error; don't leak the sbuf */
	free(buf, M_CXGBE);
5091	return (rc);
5092}
5093
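/* Dump both halves of the CIM MA logic analyzer trace. */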
5094static int
5095sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5096{
5097	struct adapter *sc = arg1;
5098	u_int i;
5099	struct sbuf *sb;
5100	uint32_t *buf, *p;
5101	int rc;
5102
5103	rc = sysctl_wire_old_buffer(req, 0);
5104	if (rc != 0)
5105		return (rc);
5106
5107	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5108	if (sb == NULL)
5109		return (ENOMEM);
5110
5111	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5112	    M_ZERO | M_WAITOK);
5113
5114	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
5115	p = buf;
5116
5117	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5118		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5119		    p[1], p[0]);
5120	}
5121
5122	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
5123	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5124		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
5125		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5126		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5127		    (p[1] >> 2) | ((p[2] & 3) << 30),
5128		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5129		    p[0] & 1);
5130	}
5131
5132	rc = sbuf_finish(sb);
5133	sbuf_delete(sb);
5134	free(buf, M_CXGBE);
5135	return (rc);
5136}
5137
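/* Dump both halves of the CIM PIF logic analyzer trace. */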
5138static int
5139sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5140{
5141	struct adapter *sc = arg1;
5142	u_int i;
5143	struct sbuf *sb;
5144	uint32_t *buf, *p;
5145	int rc;
5146
5147	rc = sysctl_wire_old_buffer(req, 0);
5148	if (rc != 0)
5149		return (rc);
5150
5151	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5152	if (sb == NULL)
5153		return (ENOMEM);
5154
5155	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5156	    M_ZERO | M_WAITOK);
5157
5158	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5159	p = buf;
5160
5161	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5162		for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5163		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5164		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5165		    p[4], p[3], p[2], p[1], p[0]);
5166	}
5167
5168	sbuf_printf(sb, "\n\nCntl ID               Data");
5169		for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5170		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5171		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5172	}
5173
5174	rc = sbuf_finish(sb);
5175	sbuf_delete(sb);
5176	free(buf, M_CXGBE);
5177	return (rc);
5178}
5179
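/*
 * Show the configuration (base, size, threshold) and the current pointers of
 * every CIM queue.
 */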
5180static int
5181sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5182{
5183	struct adapter *sc = arg1;
5184	struct sbuf *sb;
5185	int rc, i;
5186	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5187	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5188	uint16_t thres[CIM_NUM_IBQ];
5189	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5190	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5191	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5192
5193	if (is_t4(sc)) {
5194		cim_num_obq = CIM_NUM_OBQ;
5195		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5196		obq_rdaddr = A_UP_OBQ_0_REALADDR;
5197	} else {
5198		cim_num_obq = CIM_NUM_OBQ_T5;
5199		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5200		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5201	}
5202	nq = CIM_NUM_IBQ + cim_num_obq;
5203
5204	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5205	if (rc == 0)
5206		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5207	if (rc != 0)
5208		return (rc);
5209
5210	t4_read_cimq_cfg(sc, base, size, thres);
5211
5212	rc = sysctl_wire_old_buffer(req, 0);
5213	if (rc != 0)
5214		return (rc);
5215
5216	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5217	if (sb == NULL)
5218		return (ENOMEM);
5219
5220	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
5221
5222	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5223		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
5224		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5225		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5226		    G_QUEREMFLITS(p[2]) * 16);
5227	for ( ; i < nq; i++, p += 4, wr += 2)
5228		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
5229		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5230		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5231		    G_QUEREMFLITS(p[2]) * 16);
5232
5233	rc = sbuf_finish(sb);
5234	sbuf_delete(sb);
5235
5236	return (rc);
5237}
5238
5239static int
5240sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5241{
5242	struct adapter *sc = arg1;
5243	struct sbuf *sb;
5244	int rc;
5245	struct tp_cpl_stats stats;
5246
5247	rc = sysctl_wire_old_buffer(req, 0);
5248	if (rc != 0)
5249		return (rc);
5250
5251	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5252	if (sb == NULL)
5253		return (ENOMEM);
5254
5255	t4_tp_get_cpl_stats(sc, &stats);
5256
5257	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5258	    "channel 3\n");
5259	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5260		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5261	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5262		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5263
5264	rc = sbuf_finish(sb);
5265	sbuf_delete(sb);
5266
5267	return (rc);
5268}
5269
5270static int
5271sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5272{
5273	struct adapter *sc = arg1;
5274	struct sbuf *sb;
5275	int rc;
5276	struct tp_usm_stats stats;
5277
5278	rc = sysctl_wire_old_buffer(req, 0);
5279	if (rc != 0)
5280			return (rc);
5281
5282	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5283	if (sb == NULL)
5284		return (ENOMEM);
5285
5286	t4_get_usm_stats(sc, &stats);
5287
5288	sbuf_printf(sb, "Frames: %u\n", stats.frames);
5289	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5290	sbuf_printf(sb, "Drops:  %u", stats.drops);
5291
5292	rc = sbuf_finish(sb);
5293	sbuf_delete(sb);
5294
5295	return (rc);
5296}
5297
5298const char *devlog_level_strings[] = {
5299	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
5300	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
5301	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
5302	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
5303	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
5304	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
5305};
5306
5307const char *devlog_facility_strings[] = {
5308	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
5309	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
5310	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
5311	[FW_DEVLOG_FACILITY_RES]	= "RES",
5312	[FW_DEVLOG_FACILITY_HW]		= "HW",
5313	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
5314	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
5315	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
5316	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
5317	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
5318	[FW_DEVLOG_FACILITY_VI]		= "VI",
5319	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
5320	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
5321	[FW_DEVLOG_FACILITY_TM]		= "TM",
5322	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
5323	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
5324	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
5325	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
5326	[FW_DEVLOG_FACILITY_RI]		= "RI",
5327	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
5328	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
5329	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
5330	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
5331};
5332
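/*
 * Read the firmware's circular device log out of adapter memory and display
 * it starting with the oldest entry.
 */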
5333static int
5334sysctl_devlog(SYSCTL_HANDLER_ARGS)
5335{
5336	struct adapter *sc = arg1;
5337	struct devlog_params *dparams = &sc->params.devlog;
5338	struct fw_devlog_e *buf, *e;
5339	int i, j, rc, nentries, first = 0, m;
5340	struct sbuf *sb;
5341	uint64_t ftstamp = UINT64_MAX;
5342
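	/* Fall back to known defaults if the devlog parameters weren't read. */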
5343	if (dparams->start == 0) {
5344		dparams->memtype = FW_MEMTYPE_EDC0;
5345		dparams->start = 0x84000;
5346		dparams->size = 32768;
5347	}
5348
5349	nentries = dparams->size / sizeof(struct fw_devlog_e);
5350
5351	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5352	if (buf == NULL)
5353		return (ENOMEM);
5354
5355	m = fwmtype_to_hwmtype(dparams->memtype);
5356	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
5357	if (rc != 0)
5358		goto done;
5359
5360	for (i = 0; i < nentries; i++) {
5361		e = &buf[i];
5362
5363		if (e->timestamp == 0)
5364			break;	/* end */
5365
5366		e->timestamp = be64toh(e->timestamp);
5367		e->seqno = be32toh(e->seqno);
5368		for (j = 0; j < 8; j++)
5369			e->params[j] = be32toh(e->params[j]);
5370
5371		if (e->timestamp < ftstamp) {
5372			ftstamp = e->timestamp;
5373			first = i;
5374		}
5375	}
5376
5377	if (buf[first].timestamp == 0)
5378		goto done;	/* nothing in the log */
5379
5380	rc = sysctl_wire_old_buffer(req, 0);
5381	if (rc != 0)
5382		goto done;
5383
5384	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5385	if (sb == NULL) {
5386		rc = ENOMEM;
5387		goto done;
5388	}
5389	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
5390	    "Seq#", "Tstamp", "Level", "Facility", "Message");
5391
5392	i = first;
5393	do {
5394		e = &buf[i];
5395		if (e->timestamp == 0)
5396			break;	/* end */
5397
5398		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
5399		    e->seqno, e->timestamp,
5400		    (e->level < nitems(devlog_level_strings) ?
5401			devlog_level_strings[e->level] : "UNKNOWN"),
5402		    (e->facility < nitems(devlog_facility_strings) ?
5403			devlog_facility_strings[e->facility] : "UNKNOWN"));
5404		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5405		    e->params[2], e->params[3], e->params[4],
5406		    e->params[5], e->params[6], e->params[7]);
5407
5408		if (++i == nentries)
5409			i = 0;
5410	} while (i != first);
5411
5412	rc = sbuf_finish(sb);
5413	sbuf_delete(sb);
5414done:
5415	free(buf, M_CXGBE);
5416	return (rc);
5417}
5418
5419static int
5420sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5421{
5422	struct adapter *sc = arg1;
5423	struct sbuf *sb;
5424	int rc;
5425	struct tp_fcoe_stats stats[4];
5426
5427	rc = sysctl_wire_old_buffer(req, 0);
5428	if (rc != 0)
5429		return (rc);
5430
5431	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5432	if (sb == NULL)
5433		return (ENOMEM);
5434
5435	t4_get_fcoe_stats(sc, 0, &stats[0]);
5436	t4_get_fcoe_stats(sc, 1, &stats[1]);
5437	t4_get_fcoe_stats(sc, 2, &stats[2]);
5438	t4_get_fcoe_stats(sc, 3, &stats[3]);
5439
5440	sbuf_printf(sb, "                   channel 0        channel 1        "
5441	    "channel 2        channel 3\n");
5442	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5443	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5444	    stats[3].octetsDDP);
5445	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5446	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5447	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5448	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5449	    stats[3].framesDrop);
5450
5451	rc = sbuf_finish(sb);
5452	sbuf_delete(sb);
5453
5454	return (rc);
5455}
5456
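/*
 * Display the Tx scheduler setup: mode, channel map, rate, and inter-packet
 * gaps for each scheduler, plus the pace table entries.
 */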
5457static int
5458sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5459{
5460	struct adapter *sc = arg1;
5461	struct sbuf *sb;
5462	int rc, i;
5463	unsigned int map, kbps, ipg, mode;
5464	unsigned int pace_tab[NTX_SCHED];
5465
5466	rc = sysctl_wire_old_buffer(req, 0);
5467	if (rc != 0)
5468		return (rc);
5469
5470	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5471	if (sb == NULL)
5472		return (ENOMEM);
5473
5474	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5475	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5476	t4_read_pace_tbl(sc, pace_tab);
5477
5478	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
5479	    "Class IPG (0.1 ns)   Flow IPG (us)");
5480
5481	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5482		t4_get_tx_sched(sc, i, &kbps, &ipg);
5483		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
5484		    (mode & (1 << i)) ? "flow" : "class", map & 3);
5485		if (kbps)
5486			sbuf_printf(sb, "%9u     ", kbps);
5487		else
5488			sbuf_printf(sb, " disabled     ");
5489
5490		if (ipg)
5491			sbuf_printf(sb, "%13u        ", ipg);
5492		else
5493			sbuf_printf(sb, "     disabled        ");
5494
5495		if (pace_tab[i])
5496			sbuf_printf(sb, "%10u", pace_tab[i]);
5497		else
5498			sbuf_printf(sb, "  disabled");
5499	}
5500
5501	rc = sbuf_finish(sb);
5502	sbuf_delete(sb);
5503
5504	return (rc);
5505}
5506
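/* Dump the loopback port statistics, two loopback channels at a time. */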
5507static int
5508sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5509{
5510	struct adapter *sc = arg1;
5511	struct sbuf *sb;
5512	int rc, i, j;
5513	uint64_t *p0, *p1;
5514	struct lb_port_stats s[2];
5515	static const char *stat_name[] = {
5516		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5517		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5518		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
5519		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5520		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5521		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5522		"BG2FramesTrunc:", "BG3FramesTrunc:"
5523	};
5524
5525	rc = sysctl_wire_old_buffer(req, 0);
5526	if (rc != 0)
5527		return (rc);
5528
5529	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5530	if (sb == NULL)
5531		return (ENOMEM);
5532
5533	memset(s, 0, sizeof(s));
5534
5535	for (i = 0; i < 4; i += 2) {
5536		t4_get_lb_stats(sc, i, &s[0]);
5537		t4_get_lb_stats(sc, i + 1, &s[1]);
5538
5539		p0 = &s[0].octets;
5540		p1 = &s[1].octets;
5541		sbuf_printf(sb, "%s                       Loopback %u"
5542		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5543
5544		for (j = 0; j < nitems(stat_name); j++)
5545			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5546				   *p0++, *p1++);
5547	}
5548
5549	rc = sbuf_finish(sb);
5550	sbuf_delete(sb);
5551
5552	return (rc);
5553}
5554
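/* Translate the cached link-down reason code to a human-readable string. */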
5555static int
5556sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5557{
5558	int rc = 0;
5559	struct port_info *pi = arg1;
5560	struct sbuf *sb;
5561	static const char *linkdnreasons[] = {
5562		"non-specific", "remote fault", "autoneg failed", "reserved3",
5563		"PHY overheated", "unknown", "rx los", "reserved7"
5564	};
5565
5566	rc = sysctl_wire_old_buffer(req, 0);
5567	if (rc != 0)
5568			return (rc);
5569	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5570	if (sb == NULL)
5571		return (ENOMEM);
5572
5573	if (pi->linkdnrc < 0)
5574		sbuf_printf(sb, "n/a");
5575	else if (pi->linkdnrc < nitems(linkdnreasons))
5576		sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5577	else
5578		sbuf_printf(sb, "%d", pi->linkdnrc);
5579
5580	rc = sbuf_finish(sb);
5581	sbuf_delete(sb);
5582
5583	return (rc);
5584}
5585
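/*
 * Helpers for sysctl_meminfo: a sortable memory region descriptor, its
 * comparator, and a routine that pretty-prints one region.
 */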
5586struct mem_desc {
5587	unsigned int base;
5588	unsigned int limit;
5589	unsigned int idx;
5590};
5591
5592static int
5593mem_desc_cmp(const void *a, const void *b)
5594{
5595	return ((const struct mem_desc *)a)->base -
5596	       ((const struct mem_desc *)b)->base;
5597}
5598
5599static void
5600mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
5601    unsigned int to)
5602{
5603	unsigned int size;
5604
5605	size = to - from + 1;
5606	if (size == 0)
5607		return;
5608
5609	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
5610	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
5611}
5612
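/*
 * Dump the card's memory layout.  Collect the base address of every region
 * the chip exposes, sort by base, and derive each region's limit from the
 * next region's base when the hardware doesn't provide one explicitly.
 * Entries whose idx is >= nitems(region) are address-space holes or
 * disabled regions and are skipped when printing.
 */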
5613static int
5614sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5615{
5616	struct adapter *sc = arg1;
5617	struct sbuf *sb;
5618	int rc, i, n;
5619	uint32_t lo, hi, used, alloc;
5620	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5621	static const char *region[] = {
5622		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5623		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5624		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5625		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5626		"RQUDP region:", "PBL region:", "TXPBL region:",
5627		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5628		"On-chip queues:"
5629	};
5630	struct mem_desc avail[4];
5631	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
5632	struct mem_desc *md = mem;
5633
5634	rc = sysctl_wire_old_buffer(req, 0);
5635	if (rc != 0)
5636		return (rc);
5637
5638	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5639	if (sb == NULL)
5640		return (ENOMEM);
5641
5642	for (i = 0; i < nitems(mem); i++) {
5643		mem[i].limit = 0;
5644		mem[i].idx = i;
5645	}
5646
5647	/* Find and sort the populated memory ranges */
5648	i = 0;
5649	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5650	if (lo & F_EDRAM0_ENABLE) {
5651		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5652		avail[i].base = G_EDRAM0_BASE(hi) << 20;
5653		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5654		avail[i].idx = 0;
5655		i++;
5656	}
5657	if (lo & F_EDRAM1_ENABLE) {
5658		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5659		avail[i].base = G_EDRAM1_BASE(hi) << 20;
5660		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5661		avail[i].idx = 1;
5662		i++;
5663	}
5664	if (lo & F_EXT_MEM_ENABLE) {
5665		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5666		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5667		avail[i].limit = avail[i].base +
5668		    (G_EXT_MEM_SIZE(hi) << 20);
5669		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
5670		i++;
5671	}
5672	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5673		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5674		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5675		avail[i].limit = avail[i].base +
5676		    (G_EXT_MEM1_SIZE(hi) << 20);
5677		avail[i].idx = 4;
5678		i++;
5679	}
5680	if (i == 0) {                              /* no memory available */
5681		sbuf_delete(sb);	/* don't leak the sbuf on this path */
		return (0);
	}
5682	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5683
5684	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5685	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5686	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5687	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5688	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5689	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5690	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5691	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5692	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5693
5694	/* the next few have explicit upper bounds */
5695	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5696	md->limit = md->base - 1 +
5697		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5698		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5699	md++;
5700
5701	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5702	md->limit = md->base - 1 +
5703		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5704		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5705	md++;
5706
5707	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5708		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5709		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5710		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5711	} else {
5712		md->base = 0;
5713		md->idx = nitems(region);  /* hide it */
5714	}
5715	md++;
5716
5717#define ulp_region(reg) \
5718	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5719	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5720
5721	ulp_region(RX_ISCSI);
5722	ulp_region(RX_TDDP);
5723	ulp_region(TX_TPT);
5724	ulp_region(RX_STAG);
5725	ulp_region(RX_RQ);
5726	ulp_region(RX_RQUDP);
5727	ulp_region(RX_PBL);
5728	ulp_region(TX_PBL);
5729#undef ulp_region
5730
5731	md->base = 0;
5732	md->idx = nitems(region);
5733	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5734		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5735		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5736		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5737	}
5738	md++;
5739
5740	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5741	md->limit = md->base + sc->tids.ntids - 1;
5742	md++;
5743	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5744	md->limit = md->base + sc->tids.ntids - 1;
5745	md++;
5746
5747	md->base = sc->vres.ocq.start;
5748	if (sc->vres.ocq.size)
5749		md->limit = md->base + sc->vres.ocq.size - 1;
5750	else
5751		md->idx = nitems(region);  /* hide it */
5752	md++;
5753
5754	/* add any address-space holes, there can be up to 3 */
5755	for (n = 0; n < i - 1; n++)
5756		if (avail[n].limit < avail[n + 1].base)
5757			(md++)->base = avail[n].limit;
5758	if (avail[n].limit)
5759		(md++)->base = avail[n].limit;
5760
5761	n = md - mem;
5762	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5763
5764	for (lo = 0; lo < i; lo++)
5765		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5766				avail[lo].limit - 1);
5767
5768	sbuf_printf(sb, "\n");
5769	for (i = 0; i < n; i++) {
5770		if (mem[i].idx >= nitems(region))
5771			continue;                        /* skip holes */
5772		if (!mem[i].limit)
5773			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5774		mem_region_show(sb, region[mem[i].idx], mem[i].base,
5775				mem[i].limit);
5776	}
5777
5778	sbuf_printf(sb, "\n");
5779	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5780	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5781	mem_region_show(sb, "uP RAM:", lo, hi);
5782
5783	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5784	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5785	mem_region_show(sb, "uP Extmem2:", lo, hi);
5786
5787	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5788	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5789		   G_PMRXMAXPAGE(lo),
5790		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5791		   (lo & F_PMRXNUMCHN) ? 2 : 1);
5792
5793	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5794	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5795	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5796		   G_PMTXMAXPAGE(lo),
5797		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5798		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5799	sbuf_printf(sb, "%u p-structs\n",
5800		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5801
5802	for (i = 0; i < 4; i++) {
5803		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5804		if (is_t4(sc)) {
5805			used = G_USED(lo);
5806			alloc = G_ALLOC(lo);
5807		} else {
5808			used = G_T5_USED(lo);
5809			alloc = G_T5_ALLOC(lo);
5810		}
5811		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5812			   i, used, alloc);
5813	}
5814	for (i = 0; i < 4; i++) {
5815		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5816		if (is_t4(sc)) {
5817			used = G_USED(lo);
5818			alloc = G_ALLOC(lo);
5819		} else {
5820			used = G_T5_USED(lo);
5821			alloc = G_T5_ALLOC(lo);
5822		}
5823		sbuf_printf(sb,
5824			   "\nLoopback %d using %u pages out of %u allocated",
5825			   i, used, alloc);
5826	}
5827
5828	rc = sbuf_finish(sb);
5829	sbuf_delete(sb);
5830
5831	return (rc);
5832}
5833
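/*
 * Recover a value/mask pair from the (x, y) encoding of an MPS TCAM entry:
 * the mask is x | y and the value comes from y (entries with a bit set in
 * both x and y are invalid, which is what the tcamx & tcamy test in the
 * caller weeds out).  The Ethernet address is the low 48 bits of the
 * 64-bit value, hence the copy starting at byte 2 of the big-endian word.
 */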
5834static inline void
5835tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5836{
5837	*mask = x | y;
5838	y = htobe64(y);
5839	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5840}
5841
5842static int
5843sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
5844{
5845	struct adapter *sc = arg1;
5846	struct sbuf *sb;
5847	int rc, i, n;
5848
5849	rc = sysctl_wire_old_buffer(req, 0);
5850	if (rc != 0)
5851		return (rc);
5852
5853	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5854	if (sb == NULL)
5855		return (ENOMEM);
5856
5857	sbuf_printf(sb,
5858	    "Idx  Ethernet address     Mask     Vld Ports PF"
5859	    "  VF              Replication             P0 P1 P2 P3  ML");
5860	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
5861	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5862	for (i = 0; i < n; i++) {
5863		uint64_t tcamx, tcamy, mask;
5864		uint32_t cls_lo, cls_hi;
5865		uint8_t addr[ETHER_ADDR_LEN];
5866
5867		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
5868		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
5869		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
5870		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
5871
5872		if (tcamx & tcamy)
5873			continue;
5874
5875		tcamxy2valmask(tcamx, tcamy, addr, &mask);
5876		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
5877			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
5878			   addr[3], addr[4], addr[5], (uintmax_t)mask,
5879			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
5880			   G_PORTMAP(cls_hi), G_PF(cls_lo),
5881			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
5882
5883		if (cls_lo & F_REPLICATE) {
5884			struct fw_ldst_cmd ldst_cmd;
5885
5886			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
5887			ldst_cmd.op_to_addrspace =
5888			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
5889				F_FW_CMD_REQUEST | F_FW_CMD_READ |
5890				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
5891			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
5892			ldst_cmd.u.mps.fid_ctl =
5893			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
5894				V_FW_LDST_CMD_CTL(i));
5895
5896			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
5897			    "t4mps");
5898			if (rc)
5899				break;
5900			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
5901			    sizeof(ldst_cmd), &ldst_cmd);
5902			end_synchronized_op(sc, 0);
5903
5904			if (rc != 0) {
5905				sbuf_printf(sb,
5906				    " ------------ error %3u ------------", rc);
5907				rc = 0;
5908			} else {
5909				sbuf_printf(sb, " %08x %08x %08x %08x",
5910				    be32toh(ldst_cmd.u.mps.rplc127_96),
5911				    be32toh(ldst_cmd.u.mps.rplc95_64),
5912				    be32toh(ldst_cmd.u.mps.rplc63_32),
5913				    be32toh(ldst_cmd.u.mps.rplc31_0));
5914			}
5915		} else
5916			sbuf_printf(sb, "%36s", "");
5917
5918		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
5919		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
5920		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
5921	}
5922
5923	if (rc)
5924		(void) sbuf_finish(sb);
5925	else
5926		rc = sbuf_finish(sb);
5927	sbuf_delete(sb);
5928
5929	return (rc);
5930}
5931
5932static int
5933sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5934{
5935	struct adapter *sc = arg1;
5936	struct sbuf *sb;
5937	int rc;
5938	uint16_t mtus[NMTUS];
5939
5940	rc = sysctl_wire_old_buffer(req, 0);
5941	if (rc != 0)
5942		return (rc);
5943
5944	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5945	if (sb == NULL)
5946		return (ENOMEM);
5947
5948	t4_read_mtu_tbl(sc, mtus, NULL);
5949
5950	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5951	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5952	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5953	    mtus[14], mtus[15]);
5954
5955	rc = sbuf_finish(sb);
5956	sbuf_delete(sb);
5957
5958	return (rc);
5959}
5960
5961static int
5962sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5963{
5964	struct adapter *sc = arg1;
5965	struct sbuf *sb;
5966	int rc, i;
5967	uint32_t cnt[PM_NSTATS];
5968	uint64_t cyc[PM_NSTATS];
5969	static const char *rx_stats[] = {
5970		"Read:", "Write bypass:", "Write mem:", "Flush:"
5971	};
5972	static const char *tx_stats[] = {
5973		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
5974	};
5975
5976	rc = sysctl_wire_old_buffer(req, 0);
5977	if (rc != 0)
5978		return (rc);
5979
5980	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5981	if (sb == NULL)
5982		return (ENOMEM);
5983
5984	t4_pmtx_get_stats(sc, cnt, cyc);
5985	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
5986	for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
5987		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
5988		    cyc[i]);
5989
5990	t4_pmrx_get_stats(sc, cnt, cyc);
5991	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
5992	for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
5993		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
5994		    cyc[i]);
5995
5996	rc = sbuf_finish(sb);
5997	sbuf_delete(sb);
5998
5999	return (rc);
6000}
6001
6002static int
6003sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6004{
6005	struct adapter *sc = arg1;
6006	struct sbuf *sb;
6007	int rc;
6008	struct tp_rdma_stats stats;
6009
6010	rc = sysctl_wire_old_buffer(req, 0);
6011	if (rc != 0)
6012		return (rc);
6013
6014	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6015	if (sb == NULL)
6016		return (ENOMEM);
6017
6018	t4_tp_get_rdma_stats(sc, &stats);
6019	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
6020	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
6021
6022	rc = sbuf_finish(sb);
6023	sbuf_delete(sb);
6024
6025	return (rc);
6026}
6027
6028static int
6029sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6030{
6031	struct adapter *sc = arg1;
6032	struct sbuf *sb;
6033	int rc;
6034	struct tp_tcp_stats v4, v6;
6035
6036	rc = sysctl_wire_old_buffer(req, 0);
6037	if (rc != 0)
6038		return (rc);
6039
6040	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6041	if (sb == NULL)
6042		return (ENOMEM);
6043
6044	t4_tp_get_tcp_stats(sc, &v4, &v6);
6045	sbuf_printf(sb,
6046	    "                                IP                 IPv6\n");
6047	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6048	    v4.tcpOutRsts, v6.tcpOutRsts);
6049	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6050	    v4.tcpInSegs, v6.tcpInSegs);
6051	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6052	    v4.tcpOutSegs, v6.tcpOutSegs);
6053	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6054	    v4.tcpRetransSegs, v6.tcpRetransSegs);
6055
6056	rc = sbuf_finish(sb);
6057	sbuf_delete(sb);
6058
6059	return (rc);
6060}
6061
6062static int
6063sysctl_tids(SYSCTL_HANDLER_ARGS)
6064{
6065	struct adapter *sc = arg1;
6066	struct sbuf *sb;
6067	int rc;
6068	struct tid_info *t = &sc->tids;
6069
6070	rc = sysctl_wire_old_buffer(req, 0);
6071	if (rc != 0)
6072		return (rc);
6073
6074	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6075	if (sb == NULL)
6076		return (ENOMEM);
6077
6078	if (t->natids) {
6079		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
6080		    t->atids_in_use);
6081	}
6082
6083	if (t->ntids) {
6084		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6085			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
6086
6087			if (b) {
6088				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
6089				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6090				    t->ntids - 1);
6091			} else {
6092				sbuf_printf(sb, "TID range: %u-%u",
6093				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6094				    t->ntids - 1);
6095			}
6096		} else
6097			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
6098		sbuf_printf(sb, ", in use: %u\n",
6099		    atomic_load_acq_int(&t->tids_in_use));
6100	}
6101
6102	if (t->nstids) {
6103		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
6104		    t->stid_base + t->nstids - 1, t->stids_in_use);
6105	}
6106
6107	if (t->nftids) {
6108		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
6109		    t->ftid_base + t->nftids - 1);
6110	}
6111
6112	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
6113	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
6114	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
6115
6116	rc = sbuf_finish(sb);
6117	sbuf_delete(sb);
6118
6119	return (rc);
6120}
6121
6122static int
6123sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6124{
6125	struct adapter *sc = arg1;
6126	struct sbuf *sb;
6127	int rc;
6128	struct tp_err_stats stats;
6129
6130	rc = sysctl_wire_old_buffer(req, 0);
6131	if (rc != 0)
6132		return (rc);
6133
6134	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6135	if (sb == NULL)
6136		return (ENOMEM);
6137
6138	t4_tp_get_err_stats(sc, &stats);
6139
6140	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6141		      "channel 3\n");
6142	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
6143	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6144	    stats.macInErrs[3]);
6145	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
6146	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6147	    stats.hdrInErrs[3]);
6148	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
6149	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6150	    stats.tcpInErrs[3]);
6151	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
6152	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6153	    stats.tcp6InErrs[3]);
6154	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
6155	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6156	    stats.tnlCongDrops[3]);
6157	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
6158	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6159	    stats.tnlTxDrops[3]);
6160	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
6161	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6162	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6163	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
6164	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6165	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6166	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
6167	    stats.ofldNoNeigh, stats.ofldCongDefer);
6168
6169	rc = sbuf_finish(sb);
6170	sbuf_delete(sb);
6171
6172	return (rc);
6173}
6174
6175struct field_desc {
6176	const char *name;
6177	u_int start;
6178	u_int width;
6179};
6180
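/*
 * Decode a 64-bit logic-analyzer entry using a NULL-terminated table of
 * { name, start bit, width } descriptors, printing one "name: value" pair
 * per field and wrapping the output before column 79.
 */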
6181static void
6182field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6183{
6184	char buf[32];
6185	int line_size = 0;
6186
6187	while (f->name) {
6188		uint64_t mask = (1ULL << f->width) - 1;
6189		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6190		    ((uintmax_t)v >> f->start) & mask);
6191
6192		if (line_size + len >= 79) {
6193			line_size = 8;
6194			sbuf_printf(sb, "\n        ");
6195		}
6196		sbuf_printf(sb, "%s ", buf);
6197		line_size += len + 1;
6198		f++;
6199	}
6200	sbuf_printf(sb, "\n");
6201}
6202
6203static struct field_desc tp_la0[] = {
6204	{ "RcfOpCodeOut", 60, 4 },
6205	{ "State", 56, 4 },
6206	{ "WcfState", 52, 4 },
6207	{ "RcfOpcSrcOut", 50, 2 },
6208	{ "CRxError", 49, 1 },
6209	{ "ERxError", 48, 1 },
6210	{ "SanityFailed", 47, 1 },
6211	{ "SpuriousMsg", 46, 1 },
6212	{ "FlushInputMsg", 45, 1 },
6213	{ "FlushInputCpl", 44, 1 },
6214	{ "RssUpBit", 43, 1 },
6215	{ "RssFilterHit", 42, 1 },
6216	{ "Tid", 32, 10 },
6217	{ "InitTcb", 31, 1 },
6218	{ "LineNumber", 24, 7 },
6219	{ "Emsg", 23, 1 },
6220	{ "EdataOut", 22, 1 },
6221	{ "Cmsg", 21, 1 },
6222	{ "CdataOut", 20, 1 },
6223	{ "EreadPdu", 19, 1 },
6224	{ "CreadPdu", 18, 1 },
6225	{ "TunnelPkt", 17, 1 },
6226	{ "RcfPeerFin", 16, 1 },
6227	{ "RcfReasonOut", 12, 4 },
6228	{ "TxCchannel", 10, 2 },
6229	{ "RcfTxChannel", 8, 2 },
6230	{ "RxEchannel", 6, 2 },
6231	{ "RcfRxChannel", 5, 1 },
6232	{ "RcfDataOutSrdy", 4, 1 },
6233	{ "RxDvld", 3, 1 },
6234	{ "RxOoDvld", 2, 1 },
6235	{ "RxCongestion", 1, 1 },
6236	{ "TxCongestion", 0, 1 },
6237	{ NULL }
6238};
6239
6240static struct field_desc tp_la1[] = {
6241	{ "CplCmdIn", 56, 8 },
6242	{ "CplCmdOut", 48, 8 },
6243	{ "ESynOut", 47, 1 },
6244	{ "EAckOut", 46, 1 },
6245	{ "EFinOut", 45, 1 },
6246	{ "ERstOut", 44, 1 },
6247	{ "SynIn", 43, 1 },
6248	{ "AckIn", 42, 1 },
6249	{ "FinIn", 41, 1 },
6250	{ "RstIn", 40, 1 },
6251	{ "DataIn", 39, 1 },
6252	{ "DataInVld", 38, 1 },
6253	{ "PadIn", 37, 1 },
6254	{ "RxBufEmpty", 36, 1 },
6255	{ "RxDdp", 35, 1 },
6256	{ "RxFbCongestion", 34, 1 },
6257	{ "TxFbCongestion", 33, 1 },
6258	{ "TxPktSumSrdy", 32, 1 },
6259	{ "RcfUlpType", 28, 4 },
6260	{ "Eread", 27, 1 },
6261	{ "Ebypass", 26, 1 },
6262	{ "Esave", 25, 1 },
6263	{ "Static0", 24, 1 },
6264	{ "Cread", 23, 1 },
6265	{ "Cbypass", 22, 1 },
6266	{ "Csave", 21, 1 },
6267	{ "CPktOut", 20, 1 },
6268	{ "RxPagePoolFull", 18, 2 },
6269	{ "RxLpbkPkt", 17, 1 },
6270	{ "TxLpbkPkt", 16, 1 },
6271	{ "RxVfValid", 15, 1 },
6272	{ "SynLearned", 14, 1 },
6273	{ "SetDelEntry", 13, 1 },
6274	{ "SetInvEntry", 12, 1 },
6275	{ "CpcmdDvld", 11, 1 },
6276	{ "CpcmdSave", 10, 1 },
6277	{ "RxPstructsFull", 8, 2 },
6278	{ "EpcmdDvld", 7, 1 },
6279	{ "EpcmdFlush", 6, 1 },
6280	{ "EpcmdTrimPrefix", 5, 1 },
6281	{ "EpcmdTrimPostfix", 4, 1 },
6282	{ "ERssIp4Pkt", 3, 1 },
6283	{ "ERssIp6Pkt", 2, 1 },
6284	{ "ERssTcpUdpPkt", 1, 1 },
6285	{ "ERssFceFipPkt", 0, 1 },
6286	{ NULL }
6287};
6288
6289static struct field_desc tp_la2[] = {
6290	{ "CplCmdIn", 56, 8 },
6291	{ "MpsVfVld", 55, 1 },
6292	{ "MpsPf", 52, 3 },
6293	{ "MpsVf", 44, 8 },
6294	{ "SynIn", 43, 1 },
6295	{ "AckIn", 42, 1 },
6296	{ "FinIn", 41, 1 },
6297	{ "RstIn", 40, 1 },
6298	{ "DataIn", 39, 1 },
6299	{ "DataInVld", 38, 1 },
6300	{ "PadIn", 37, 1 },
6301	{ "RxBufEmpty", 36, 1 },
6302	{ "RxDdp", 35, 1 },
6303	{ "RxFbCongestion", 34, 1 },
6304	{ "TxFbCongestion", 33, 1 },
6305	{ "TxPktSumSrdy", 32, 1 },
6306	{ "RcfUlpType", 28, 4 },
6307	{ "Eread", 27, 1 },
6308	{ "Ebypass", 26, 1 },
6309	{ "Esave", 25, 1 },
6310	{ "Static0", 24, 1 },
6311	{ "Cread", 23, 1 },
6312	{ "Cbypass", 22, 1 },
6313	{ "Csave", 21, 1 },
6314	{ "CPktOut", 20, 1 },
6315	{ "RxPagePoolFull", 18, 2 },
6316	{ "RxLpbkPkt", 17, 1 },
6317	{ "TxLpbkPkt", 16, 1 },
6318	{ "RxVfValid", 15, 1 },
6319	{ "SynLearned", 14, 1 },
6320	{ "SetDelEntry", 13, 1 },
6321	{ "SetInvEntry", 12, 1 },
6322	{ "CpcmdDvld", 11, 1 },
6323	{ "CpcmdSave", 10, 1 },
6324	{ "RxPstructsFull", 8, 2 },
6325	{ "EpcmdDvld", 7, 1 },
6326	{ "EpcmdFlush", 6, 1 },
6327	{ "EpcmdTrimPrefix", 5, 1 },
6328	{ "EpcmdTrimPostfix", 4, 1 },
6329	{ "ERssIp4Pkt", 3, 1 },
6330	{ "ERssIp6Pkt", 2, 1 },
6331	{ "ERssTcpUdpPkt", 1, 1 },
6332	{ "ERssFceFipPkt", 0, 1 },
6333	{ NULL }
6334};
6335
6336static void
6337tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6338{
6339
6340	field_desc_show(sb, *p, tp_la0);
6341}
6342
6343static void
6344tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6345{
6346
6347	if (idx)
6348		sbuf_printf(sb, "\n");
6349	field_desc_show(sb, p[0], tp_la0);
6350	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6351		field_desc_show(sb, p[1], tp_la0);
6352}
6353
6354static void
6355tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6356{
6357
6358	if (idx)
6359		sbuf_printf(sb, "\n");
6360	field_desc_show(sb, p[0], tp_la0);
6361	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6362		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6363}
6364
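/*
 * The TP logic analyzer's capture mode (DBGLAMODE) determines the layout
 * of the snapshot: modes 2 and 3 store two 64-bit words per entry and are
 * decoded in pairs, while every other mode is one word per entry.
 */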
6365static int
6366sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6367{
6368	struct adapter *sc = arg1;
6369	struct sbuf *sb;
6370	uint64_t *buf, *p;
6371	int rc;
6372	u_int i, inc;
6373	void (*show_func)(struct sbuf *, uint64_t *, int);
6374
6375	rc = sysctl_wire_old_buffer(req, 0);
6376	if (rc != 0)
6377		return (rc);
6378
6379	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6380	if (sb == NULL)
6381		return (ENOMEM);
6382
6383	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6384
6385	t4_tp_read_la(sc, buf, NULL);
6386	p = buf;
6387
6388	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6389	case 2:
6390		inc = 2;
6391		show_func = tp_la_show2;
6392		break;
6393	case 3:
6394		inc = 2;
6395		show_func = tp_la_show3;
6396		break;
6397	default:
6398		inc = 1;
6399		show_func = tp_la_show;
6400	}
6401
6402	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6403		(*show_func)(sb, p, i);
6404
6405	rc = sbuf_finish(sb);
6406	sbuf_delete(sb);
6407	free(buf, M_CXGBE);
6408	return (rc);
6409}
6410
6411static int
6412sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6413{
6414	struct adapter *sc = arg1;
6415	struct sbuf *sb;
6416	int rc;
6417	u64 nrate[NCHAN], orate[NCHAN];
6418
6419	rc = sysctl_wire_old_buffer(req, 0);
6420	if (rc != 0)
6421		return (rc);
6422
6423	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6424	if (sb == NULL)
6425		return (ENOMEM);
6426
6427	t4_get_chan_txrate(sc, nrate, orate);
6428	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6429		 "channel 3\n");
6430	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6431	    nrate[0], nrate[1], nrate[2], nrate[3]);
6432	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6433	    orate[0], orate[1], orate[2], orate[3]);
6434
6435	rc = sbuf_finish(sb);
6436	sbuf_delete(sb);
6437
6438	return (rc);
6439}
6440
6441static int
6442sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6443{
6444	struct adapter *sc = arg1;
6445	struct sbuf *sb;
6446	uint32_t *buf, *p;
6447	int rc, i;
6448
6449	rc = sysctl_wire_old_buffer(req, 0);
6450	if (rc != 0)
6451		return (rc);
6452
6453	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6454	if (sb == NULL)
6455		return (ENOMEM);
6456
6457	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6458	    M_ZERO | M_WAITOK);
6459
6460	t4_ulprx_read_la(sc, buf);
6461	p = buf;
6462
6463	sbuf_printf(sb, "      Pcmd        Type   Message"
6464	    "                Data");
6465	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6466		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6467		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6468	}
6469
6470	rc = sbuf_finish(sb);
6471	sbuf_delete(sb);
6472	free(buf, M_CXGBE);
6473	return (rc);
6474}
6475
6476static int
6477sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6478{
6479	struct adapter *sc = arg1;
6480	struct sbuf *sb;
6481	int rc, v;
6482
6483	rc = sysctl_wire_old_buffer(req, 0);
6484	if (rc != 0)
6485		return (rc);
6486
6487	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6488	if (sb == NULL)
6489		return (ENOMEM);
6490
6491	v = t4_read_reg(sc, A_SGE_STAT_CFG);
6492	if (G_STATSOURCE_T5(v) == 7) {
6493		if (G_STATMODE(v) == 0) {
6494			sbuf_printf(sb, "total %d, incomplete %d",
6495			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6496			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6497		} else if (G_STATMODE(v) == 1) {
6498			sbuf_printf(sb, "total %d, data overflow %d",
6499			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6500			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6501		}
6502	}
6503	rc = sbuf_finish(sb);
6504	sbuf_delete(sb);
6505
6506	return (rc);
6507}
6508#endif
6509
6510static inline void
6511txq_start(struct ifnet *ifp, struct sge_txq *txq)
6512{
6513	struct buf_ring *br;
6514	struct mbuf *m;
6515
6516	TXQ_LOCK_ASSERT_OWNED(txq);
6517
6518	br = txq->br;
6519	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6520	if (m)
6521		t4_eth_tx(ifp, txq, m);
6522}
6523
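/*
 * Callout used to retry a stalled tx queue.  It only try-locks the EQ: if
 * the lock is busy, or the queue is stalled and can't resume yet, it
 * reschedules itself one tick later (unless the EQ is being destroyed).
 * The actual transmit is handed off to t4_tx_task() via the adapter's
 * taskqueue rather than done from callout context.
 */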
6524void
6525t4_tx_callout(void *arg)
6526{
6527	struct sge_eq *eq = arg;
6528	struct adapter *sc;
6529
6530	if (EQ_TRYLOCK(eq) == 0)
6531		goto reschedule;
6532
6533	if ((eq->flags & EQ_STALLED) && !can_resume_tx(eq)) {
6534		EQ_UNLOCK(eq);
6535reschedule:
6536		if (__predict_true(!(eq->flags & EQ_DOOMED)))
6537			callout_schedule(&eq->tx_callout, 1);
6538		return;
6539	}
6540
6541	EQ_LOCK_ASSERT_OWNED(eq);
6542
6543	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6544
6545		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6546			struct sge_txq *txq = arg;
6547			struct port_info *pi = txq->ifp->if_softc;
6548
6549			sc = pi->adapter;
6550		} else {
6551			struct sge_wrq *wrq = arg;
6552
6553			sc = wrq->adapter;
6554		}
6555
6556		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6557	}
6558
6559	EQ_UNLOCK(eq);
6560}
6561
6562void
6563t4_tx_task(void *arg, int count)
6564{
6565	struct sge_eq *eq = arg;
6566
6567	EQ_LOCK(eq);
6568	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6569		struct sge_txq *txq = arg;
6570		txq_start(txq->ifp, txq);
6571	} else {
6572		struct sge_wrq *wrq = arg;
6573		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6574	}
6575	EQ_UNLOCK(eq);
6576}
6577
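/*
 * fconf_to_mode, mode_to_fconf, and fspec_to_fconf translate between the
 * compressed filter tuple programmed into TP_VLAN_PRI_MAP (the F_* bits)
 * and the T4_FILTER_* flags used by the ioctl interface.  The 4-tuple and
 * IP version are always part of the mode; only the optional match fields
 * vary.  For example, an fconf of F_PROTOCOL | F_VLAN yields the fixed
 * tuple bits plus T4_FILTER_IP_PROTO | T4_FILTER_VLAN.
 */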
6578static uint32_t
6579fconf_to_mode(uint32_t fconf)
6580{
6581	uint32_t mode;
6582
6583	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6584	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6585
6586	if (fconf & F_FRAGMENTATION)
6587		mode |= T4_FILTER_IP_FRAGMENT;
6588
6589	if (fconf & F_MPSHITTYPE)
6590		mode |= T4_FILTER_MPS_HIT_TYPE;
6591
6592	if (fconf & F_MACMATCH)
6593		mode |= T4_FILTER_MAC_IDX;
6594
6595	if (fconf & F_ETHERTYPE)
6596		mode |= T4_FILTER_ETH_TYPE;
6597
6598	if (fconf & F_PROTOCOL)
6599		mode |= T4_FILTER_IP_PROTO;
6600
6601	if (fconf & F_TOS)
6602		mode |= T4_FILTER_IP_TOS;
6603
6604	if (fconf & F_VLAN)
6605		mode |= T4_FILTER_VLAN;
6606
6607	if (fconf & F_VNIC_ID)
6608		mode |= T4_FILTER_VNIC;
6609
6610	if (fconf & F_PORT)
6611		mode |= T4_FILTER_PORT;
6612
6613	if (fconf & F_FCOE)
6614		mode |= T4_FILTER_FCoE;
6615
6616	return (mode);
6617}
6618
6619static uint32_t
6620mode_to_fconf(uint32_t mode)
6621{
6622	uint32_t fconf = 0;
6623
6624	if (mode & T4_FILTER_IP_FRAGMENT)
6625		fconf |= F_FRAGMENTATION;
6626
6627	if (mode & T4_FILTER_MPS_HIT_TYPE)
6628		fconf |= F_MPSHITTYPE;
6629
6630	if (mode & T4_FILTER_MAC_IDX)
6631		fconf |= F_MACMATCH;
6632
6633	if (mode & T4_FILTER_ETH_TYPE)
6634		fconf |= F_ETHERTYPE;
6635
6636	if (mode & T4_FILTER_IP_PROTO)
6637		fconf |= F_PROTOCOL;
6638
6639	if (mode & T4_FILTER_IP_TOS)
6640		fconf |= F_TOS;
6641
6642	if (mode & T4_FILTER_VLAN)
6643		fconf |= F_VLAN;
6644
6645	if (mode & T4_FILTER_VNIC)
6646		fconf |= F_VNIC_ID;
6647
6648	if (mode & T4_FILTER_PORT)
6649		fconf |= F_PORT;
6650
6651	if (mode & T4_FILTER_FCoE)
6652		fconf |= F_FCOE;
6653
6654	return (fconf);
6655}
6656
6657static uint32_t
6658fspec_to_fconf(struct t4_filter_specification *fs)
6659{
6660	uint32_t fconf = 0;
6661
6662	if (fs->val.frag || fs->mask.frag)
6663		fconf |= F_FRAGMENTATION;
6664
6665	if (fs->val.matchtype || fs->mask.matchtype)
6666		fconf |= F_MPSHITTYPE;
6667
6668	if (fs->val.macidx || fs->mask.macidx)
6669		fconf |= F_MACMATCH;
6670
6671	if (fs->val.ethtype || fs->mask.ethtype)
6672		fconf |= F_ETHERTYPE;
6673
6674	if (fs->val.proto || fs->mask.proto)
6675		fconf |= F_PROTOCOL;
6676
6677	if (fs->val.tos || fs->mask.tos)
6678		fconf |= F_TOS;
6679
6680	if (fs->val.vlan_vld || fs->mask.vlan_vld)
6681		fconf |= F_VLAN;
6682
6683	if (fs->val.vnic_vld || fs->mask.vnic_vld)
6684		fconf |= F_VNIC_ID;
6685
6686	if (fs->val.iport || fs->mask.iport)
6687		fconf |= F_PORT;
6688
6689	if (fs->val.fcoe || fs->mask.fcoe)
6690		fconf |= F_FCOE;
6691
6692	return (fconf);
6693}
6694
6695static int
6696get_filter_mode(struct adapter *sc, uint32_t *mode)
6697{
6698	int rc;
6699	uint32_t fconf;
6700
6701	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6702	    "t4getfm");
6703	if (rc)
6704		return (rc);
6705
6706	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
6707	    A_TP_VLAN_PRI_MAP);
6708
6709	if (sc->params.tp.vlan_pri_map != fconf) {
6710		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6711		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
6712		    fconf);
6713		sc->params.tp.vlan_pri_map = fconf;
6714	}
6715
6716	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
6717
6718	end_synchronized_op(sc, LOCK_HELD);
6719	return (0);
6720}
6721
6722static int
6723set_filter_mode(struct adapter *sc, uint32_t mode)
6724{
6725	uint32_t fconf;
6726	int rc;
6727
6728	fconf = mode_to_fconf(mode);
6729
6730	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6731	    "t4setfm");
6732	if (rc)
6733		return (rc);
6734
6735	if (sc->tids.ftids_in_use > 0) {
6736		rc = EBUSY;
6737		goto done;
6738	}
6739
6740#ifdef TCP_OFFLOAD
6741	if (sc->offload_map) {
6742		rc = EBUSY;
6743		goto done;
6744	}
6745#endif
6746
6747#ifdef notyet
6748	rc = -t4_set_filter_mode(sc, fconf);
6749	if (rc == 0)
6750		sc->filter_mode = fconf;
6751#else
6752	rc = ENOTSUP;
6753#endif
6754
6755done:
6756	end_synchronized_op(sc, LOCK_HELD);
6757	return (rc);
6758}
6759
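/*
 * A filter's hit count lives in its TCB in card memory and is read here
 * through memory window 0: T4 keeps a 64-bit count at offset 16 of the
 * TCB, T5 a 32-bit count at offset 24, both big-endian.
 */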
6760static inline uint64_t
6761get_filter_hits(struct adapter *sc, uint32_t fid)
6762{
6763	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6764	uint64_t hits;
6765
6766	memwin_info(sc, 0, &mw_base, NULL);
6767	off = position_memwin(sc, 0,
6768	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6769	if (is_t4(sc)) {
6770		hits = t4_read_reg64(sc, mw_base + off + 16);
6771		hits = be64toh(hits);
6772	} else {
6773		hits = t4_read_reg(sc, mw_base + off + 24);
6774		hits = be32toh(hits);
6775	}
6776
6777	return (hits);
6778}
6779
6780static int
6781get_filter(struct adapter *sc, struct t4_filter *t)
6782{
6783	int i, rc, nfilters = sc->tids.nftids;
6784	struct filter_entry *f;
6785
6786	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6787	    "t4getf");
6788	if (rc)
6789		return (rc);
6790
6791	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6792	    t->idx >= nfilters) {
6793		t->idx = 0xffffffff;
6794		goto done;
6795	}
6796
6797	f = &sc->tids.ftid_tab[t->idx];
6798	for (i = t->idx; i < nfilters; i++, f++) {
6799		if (f->valid) {
6800			t->idx = i;
6801			t->l2tidx = f->l2t ? f->l2t->idx : 0;
6802			t->smtidx = f->smtidx;
6803			if (f->fs.hitcnts)
6804				t->hits = get_filter_hits(sc, t->idx);
6805			else
6806				t->hits = UINT64_MAX;
6807			t->fs = f->fs;
6808
6809			goto done;
6810		}
6811	}
6812
6813	t->idx = 0xffffffff;
6814done:
6815	end_synchronized_op(sc, LOCK_HELD);
6816	return (0);
6817}
6818
6819static int
6820set_filter(struct adapter *sc, struct t4_filter *t)
6821{
6822	unsigned int nfilters, nports;
6823	struct filter_entry *f;
6824	int i, rc;
6825
6826	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
6827	if (rc)
6828		return (rc);
6829
6830	nfilters = sc->tids.nftids;
6831	nports = sc->params.nports;
6832
6833	if (nfilters == 0) {
6834		rc = ENOTSUP;
6835		goto done;
6836	}
6837
6838	if (!(sc->flags & FULL_INIT_DONE)) {
6839		rc = EAGAIN;
6840		goto done;
6841	}
6842
6843	if (t->idx >= nfilters) {
6844		rc = EINVAL;
6845		goto done;
6846	}
6847
6848	/* Validate against the global filter mode */
6849	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
6850	    sc->params.tp.vlan_pri_map) {
6851		rc = E2BIG;
6852		goto done;
6853	}
6854
6855	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
6856		rc = EINVAL;
6857		goto done;
6858	}
6859
6860	if (t->fs.val.iport >= nports) {
6861		rc = EINVAL;
6862		goto done;
6863	}
6864
6865	/* Can't specify an iq if not steering to it */
6866	if (!t->fs.dirsteer && t->fs.iq) {
6867		rc = EINVAL;
6868		goto done;
6869	}
6870
6871	/* IPv6 filters occupy 4 consecutive slots and must be 4-aligned */
6872	if (t->fs.type == 1 &&
6873	    ((t->idx & 0x3) || t->idx + 4 > nfilters)) {
6874		rc = EINVAL;
6875		goto done;
6876	}
6877
6878	if (sc->tids.ftid_tab == NULL) {
6879		KASSERT(sc->tids.ftids_in_use == 0,
6880		    ("%s: no memory allocated but filters_in_use > 0",
6881		    __func__));
6882
6883		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
6884		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
6885		if (sc->tids.ftid_tab == NULL) {
6886			rc = ENOMEM;
6887			goto done;
6888		}
6889		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
6890	}
6891
6892	for (i = 0; i < 4; i++) {
6893		f = &sc->tids.ftid_tab[t->idx + i];
6894
6895		if (f->pending || f->valid) {
6896			rc = EBUSY;
6897			goto done;
6898		}
6899		if (f->locked) {
6900			rc = EPERM;
6901			goto done;
6902		}
6903
6904		if (t->fs.type == 0)
6905			break;
6906	}
6907
6908	f = &sc->tids.ftid_tab[t->idx];
6909	f->fs = t->fs;
6910
6911	rc = set_filter_wr(sc, t->idx);
6912done:
6913	end_synchronized_op(sc, 0);
6914
6915	if (rc == 0) {
6916		mtx_lock(&sc->tids.ftid_lock);
6917		for (;;) {
6918			if (f->pending == 0) {
6919				rc = f->valid ? 0 : EIO;
6920				break;
6921			}
6922
6923			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6924			    PCATCH, "t4setfw", 0)) {
6925				rc = EINPROGRESS;
6926				break;
6927			}
6928		}
6929		mtx_unlock(&sc->tids.ftid_lock);
6930	}
6931	return (rc);
6932}
6933
6934static int
6935del_filter(struct adapter *sc, struct t4_filter *t)
6936{
6937	unsigned int nfilters;
6938	struct filter_entry *f;
6939	int rc;
6940
6941	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
6942	if (rc)
6943		return (rc);
6944
6945	nfilters = sc->tids.nftids;
6946
6947	if (nfilters == 0) {
6948		rc = ENOTSUP;
6949		goto done;
6950	}
6951
6952	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
6953	    t->idx >= nfilters) {
6954		rc = EINVAL;
6955		goto done;
6956	}
6957
6958	if (!(sc->flags & FULL_INIT_DONE)) {
6959		rc = EAGAIN;
6960		goto done;
6961	}
6962
6963	f = &sc->tids.ftid_tab[t->idx];
6964
6965	if (f->pending) {
6966		rc = EBUSY;
6967		goto done;
6968	}
6969	if (f->locked) {
6970		rc = EPERM;
6971		goto done;
6972	}
6973
6974	if (f->valid) {
6975		t->fs = f->fs;	/* extra info for the caller */
6976		rc = del_filter_wr(sc, t->idx);
6977	}
6978
6979done:
6980	end_synchronized_op(sc, 0);
6981
6982	if (rc == 0) {
6983		mtx_lock(&sc->tids.ftid_lock);
6984		for (;;) {
6985			if (f->pending == 0) {
6986				rc = f->valid ? EIO : 0;
6987				break;
6988			}
6989
6990			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6991			    PCATCH, "t4delfw", 0)) {
6992				rc = EINPROGRESS;
6993				break;
6994			}
6995		}
6996		mtx_unlock(&sc->tids.ftid_lock);
6997	}
6998
6999	return (rc);
7000}
7001
7002static void
7003clear_filter(struct filter_entry *f)
7004{
7005	if (f->l2t)
7006		t4_l2t_release(f->l2t);
7007
7008	bzero(f, sizeof (*f));
7009}
7010
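/*
 * Build and send the FW_FILTER_WR for filter fidx.  Filters that rewrite
 * the destination MAC or VLAN need a switching L2T entry, which is
 * allocated here first.  The work request completes asynchronously; see
 * t4_filter_rpl() below.
 */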
7011static int
7012set_filter_wr(struct adapter *sc, int fidx)
7013{
7014	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7015	struct wrqe *wr;
7016	struct fw_filter_wr *fwr;
7017	unsigned int ftid;
7018
7019	ASSERT_SYNCHRONIZED_OP(sc);
7020
7021	if (f->fs.newdmac || f->fs.newvlan) {
7022		/* This filter needs an L2T entry; allocate one. */
7023		f->l2t = t4_l2t_alloc_switching(sc->l2t);
7024		if (f->l2t == NULL)
7025			return (EAGAIN);
7026		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
7027		    f->fs.dmac)) {
7028			t4_l2t_release(f->l2t);
7029			f->l2t = NULL;
7030			return (ENOMEM);
7031		}
7032	}
7033
7034	ftid = sc->tids.ftid_base + fidx;
7035
7036	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7037	if (wr == NULL)
7038		return (ENOMEM);
7039
7040	fwr = wrtod(wr);
7041	bzero(fwr, sizeof (*fwr));
7042
7043	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
7044	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
7045	fwr->tid_to_iq =
7046	    htobe32(V_FW_FILTER_WR_TID(ftid) |
7047		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
7048		V_FW_FILTER_WR_NOREPLY(0) |
7049		V_FW_FILTER_WR_IQ(f->fs.iq));
7050	fwr->del_filter_to_l2tix =
7051	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
7052		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
7053		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
7054		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
7055		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
7056		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
7057		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
7058		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
7059		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
7060		    f->fs.newvlan == VLAN_REWRITE) |
7061		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
7062		    f->fs.newvlan == VLAN_REWRITE) |
7063		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
7064		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
7065		V_FW_FILTER_WR_PRIO(f->fs.prio) |
7066		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
7067	fwr->ethtype = htobe16(f->fs.val.ethtype);
7068	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
7069	fwr->frag_to_ovlan_vldm =
7070	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
7071		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
7072		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
7073		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
7074		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
7075		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
7076	fwr->smac_sel = 0;
7077	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
7078	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
7079	fwr->maci_to_matchtypem =
7080	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
7081		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
7082		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
7083		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
7084		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
7085		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
7086		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
7087		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
7088	fwr->ptcl = f->fs.val.proto;
7089	fwr->ptclm = f->fs.mask.proto;
7090	fwr->ttyp = f->fs.val.tos;
7091	fwr->ttypm = f->fs.mask.tos;
7092	fwr->ivlan = htobe16(f->fs.val.vlan);
7093	fwr->ivlanm = htobe16(f->fs.mask.vlan);
7094	fwr->ovlan = htobe16(f->fs.val.vnic);
7095	fwr->ovlanm = htobe16(f->fs.mask.vnic);
7096	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
7097	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
7098	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
7099	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
7100	fwr->lp = htobe16(f->fs.val.dport);
7101	fwr->lpm = htobe16(f->fs.mask.dport);
7102	fwr->fp = htobe16(f->fs.val.sport);
7103	fwr->fpm = htobe16(f->fs.mask.sport);
7104	if (f->fs.newsmac)
7105		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
7106
7107	f->pending = 1;
7108	sc->tids.ftids_in_use++;
7109
7110	t4_wrq_tx(sc, wr);
7111	return (0);
7112}
7113
7114static int
7115del_filter_wr(struct adapter *sc, int fidx)
7116{
7117	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7118	struct wrqe *wr;
7119	struct fw_filter_wr *fwr;
7120	unsigned int ftid;
7121
7122	ftid = sc->tids.ftid_base + fidx;
7123
7124	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7125	if (wr == NULL)
7126		return (ENOMEM);
7127	fwr = wrtod(wr);
7128	bzero(fwr, sizeof (*fwr));
7129
7130	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7131
7132	f->pending = 1;
7133	t4_wrq_tx(sc, wr);
7134	return (0);
7135}
7136
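/*
 * Handler for the CPL_SET_TCB_RPL that completes an asynchronous filter
 * work request.  It marks the filter valid (add) or clears it (delete or
 * error) and wakes any thread sleeping in set_filter() or del_filter()
 * above.
 */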
7137int
7138t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7139{
7140	struct adapter *sc = iq->adapter;
7141	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7142	unsigned int idx = GET_TID(rpl);
7143
7144	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
7145	    rss->opcode));
7146
7147	if (idx >= sc->tids.ftid_base &&
7148	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
7149		unsigned int rc = G_COOKIE(rpl->cookie);
7150		struct filter_entry *f = &sc->tids.ftid_tab[idx];
7151
7152		mtx_lock(&sc->tids.ftid_lock);
7153		if (rc == FW_FILTER_WR_FLT_ADDED) {
7154			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7155			    __func__, idx));
7156			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7157			f->pending = 0;  /* asynchronous setup completed */
7158			f->valid = 1;
7159		} else {
7160			if (rc != FW_FILTER_WR_FLT_DELETED) {
7161				/* Add or delete failed, display an error */
7162				log(LOG_ERR,
7163				    "filter %u setup failed with error %u\n",
7164				    idx, rc);
7165			}
7166
7167			clear_filter(f);
7168			sc->tids.ftids_in_use--;
7169		}
7170		wakeup(&sc->tids.ftid_tab);
7171		mtx_unlock(&sc->tids.ftid_lock);
7172	}
7173
7174	return (0);
7175}
7176
7177static int
7178get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7179{
7180	int rc;
7181
7182	if (cntxt->cid > M_CTXTQID)
7183		return (EINVAL);
7184
7185	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7186	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7187		return (EINVAL);
7188
7189	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7190	if (rc)
7191		return (rc);
7192
7193	if (sc->flags & FW_OK) {
7194		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7195		    &cntxt->data[0]);
7196		if (rc == 0)
7197			goto done;
7198	}
7199
7200	/*
7201	 * Read via firmware failed or wasn't even attempted.  Read directly via
7202	 * the backdoor.
7203	 */
7204	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7205done:
7206	end_synchronized_op(sc, 0);
7207	return (rc);
7208}
7209
7210static int
7211load_fw(struct adapter *sc, struct t4_data *fw)
7212{
7213	int rc;
7214	uint8_t *fw_data;
7215
7216	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7217	if (rc)
7218		return (rc);
7219
7220	if (sc->flags & FULL_INIT_DONE) {
7221		rc = EBUSY;
7222		goto done;
7223	}
7224
7225	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7226	if (fw_data == NULL) {
7227		rc = ENOMEM;
7228		goto done;
7229	}
7230
7231	rc = copyin(fw->data, fw_data, fw->len);
7232	if (rc == 0)
7233		rc = -t4_load_fw(sc, fw_data, fw->len);
7234
7235	free(fw_data, M_CXGBE);
7236done:
7237	end_synchronized_op(sc, 0);
7238	return (rc);
7239}
7240
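/*
 * Copy a range of card memory out to the user's buffer one memory-window
 * aperture at a time: position the window over the current address, read
 * it out 4 bytes per register access, and copyout() each chunk.
 */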
7241static int
7242read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7243{
7244	uint32_t addr, off, remaining, i, n;
7245	uint32_t *buf, *b;
7246	uint32_t mw_base, mw_aperture;
7247	int rc;
7248	uint8_t *dst;
7249
7250	rc = validate_mem_range(sc, mr->addr, mr->len);
7251	if (rc != 0)
7252		return (rc);
7253
7254	memwin_info(sc, win, &mw_base, &mw_aperture);
7255	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7256	addr = mr->addr;
7257	remaining = mr->len;
7258	dst = (void *)mr->data;
7259
7260	while (remaining) {
7261		off = position_memwin(sc, win, addr);
7262
7263		/* number of bytes that we'll copy in the inner loop */
7264		n = min(remaining, mw_aperture - off);
7265		for (i = 0; i < n; i += 4)
7266			*b++ = t4_read_reg(sc, mw_base + off + i);
7267
7268		rc = copyout(buf, dst, n);
7269		if (rc != 0)
7270			break;
7271
7272		b = buf;
7273		dst += n;
7274		remaining -= n;
7275		addr += n;
7276	}
7277
7278	free(buf, M_CXGBE);
7279	return (rc);
7280}
7281
7282static int
7283read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7284{
7285	int rc;
7286
7287	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7288		return (EINVAL);
7289
7290	if (i2cd->len > 1) {
7291		/* XXX: need fw support for longer reads in one go */
7292		return (ENOTSUP);
7293	}
7294
7295	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7296	if (rc)
7297		return (rc);
7298	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7299	    i2cd->offset, &i2cd->data[0]);
7300	end_synchronized_op(sc, 0);
7301
7302	return (rc);
7303}
7304
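/*
 * A negative value means "parameter not specified" and always passes;
 * otherwise the value must lie within [lo, hi].  For example,
 * in_range(-1, 0, 3) and in_range(2, 0, 3) hold but in_range(4, 0, 3)
 * does not.
 */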
7305static int
7306in_range(int val, int lo, int hi)
7307{
7308
7309	return (val < 0 || (val <= hi && val >= lo));
7310}
7311
7312static int
7313set_sched_class(struct adapter *sc, struct t4_sched_params *p)
7314{
7315	int fw_subcmd, fw_type, rc;
7316
7317	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
7318	if (rc)
7319		return (rc);
7320
7321	if (!(sc->flags & FULL_INIT_DONE)) {
7322		rc = EAGAIN;
7323		goto done;
7324	}
7325
7326	/*
7327	 * Translate the cxgbetool parameters into T4 firmware parameters.  (The
7328	 * sub-command and type are in common locations.)
7329	 */
7330	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
7331		fw_subcmd = FW_SCHED_SC_CONFIG;
7332	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
7333		fw_subcmd = FW_SCHED_SC_PARAMS;
7334	else {
7335		rc = EINVAL;
7336		goto done;
7337	}
7338	if (p->type == SCHED_CLASS_TYPE_PACKET)
7339		fw_type = FW_SCHED_TYPE_PKTSCHED;
7340	else {
7341		rc = EINVAL;
7342		goto done;
7343	}
7344
7345	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
7346		/* Vet our parameters ... */
7347		if (p->u.config.minmax < 0) {
7348			rc = EINVAL;
7349			goto done;
7350		}
7351
7352		/* And pass the request to the firmware ... */
7353		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax);
7354		goto done;
7355	}
7356
7357	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
7358		int fw_level;
7359		int fw_mode;
7360		int fw_rateunit;
7361		int fw_ratemode;
7362
7363		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
7364			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
7365		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
7366			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
7367		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
7368			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
7369		else {
7370			rc = EINVAL;
7371			goto done;
7372		}
7373
7374		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
7375			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
7376		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
7377			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
7378		else {
7379			rc = EINVAL;
7380			goto done;
7381		}
7382
7383		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
7384			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
7385		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
7386			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
7387		else {
7388			rc = EINVAL;
7389			goto done;
7390		}
7391
7392		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
7393			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
7394		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
7395			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
7396		else {
7397			rc = EINVAL;
7398			goto done;
7399		}
7400
7401		/* Vet our parameters ... */
7402		if (!in_range(p->u.params.channel, 0, 3) ||
7403		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
7404		    !in_range(p->u.params.minrate, 0, 10000000) ||
7405		    !in_range(p->u.params.maxrate, 0, 10000000) ||
7406		    !in_range(p->u.params.weight, 0, 100)) {
7407			rc = ERANGE;
7408			goto done;
7409		}
7410
7411		/*
7412		 * Translate any unset parameters into the firmware's
7413		 * nomenclature and/or fail the call if the parameters
7414		 * are required ...
7415		 */
7416		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
7417		    p->u.params.channel < 0 || p->u.params.cl < 0) {
7418			rc = EINVAL;
7419			goto done;
7420		}
7421		if (p->u.params.minrate < 0)
7422			p->u.params.minrate = 0;
7423		if (p->u.params.maxrate < 0) {
7424			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7425			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7426				rc = EINVAL;
7427				goto done;
7428			} else
7429				p->u.params.maxrate = 0;
7430		}
7431		if (p->u.params.weight < 0) {
7432			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
7433				rc = EINVAL;
7434				goto done;
7435			} else
7436				p->u.params.weight = 0;
7437		}
7438		if (p->u.params.pktsize < 0) {
7439			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7440			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7441				rc = EINVAL;
7442				goto done;
7443			} else
7444				p->u.params.pktsize = 0;
7445		}
7446
7447		/* See what the firmware thinks of the request ... */
7448		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
7449		    fw_rateunit, fw_ratemode, p->u.params.channel,
7450		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
7451		    p->u.params.weight, p->u.params.pktsize);
7452		goto done;
7453	}
7454
7455	rc = EINVAL;
7456done:
7457	end_synchronized_op(sc, 0);
7458	return (rc);
7459}
7460
7461static int
7462set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
7463{
7464	struct port_info *pi = NULL;
7465	struct sge_txq *txq;
7466	uint32_t fw_mnem, fw_queue, fw_class;
7467	int i, rc;
7468
7469	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
7470	if (rc)
7471		return (rc);
7472
7473	if (!(sc->flags & FULL_INIT_DONE)) {
7474		rc = EAGAIN;
7475		goto done;
7476	}
7477
7478	if (p->port >= sc->params.nports) {
7479		rc = EINVAL;
7480		goto done;
7481	}
7482
7483	pi = sc->port[p->port];
7484	if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
7485		rc = EINVAL;
7486		goto done;
7487	}
7488
7489	/*
7490	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
7491	 * Scheduling Class in this case).
7492	 */
7493	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
7494	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
7495	fw_class = p->cl < 0 ? 0xffffffff : p->cl;
7496
7497	/*
7498	 * If p->queue is non-negative, then we're only changing the scheduling
7499	 * on a single specified TX queue.
7500	 */
7501	if (p->queue >= 0) {
7502		txq = &sc->sge.txq[pi->first_txq + p->queue];
7503		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
7504		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
7505		    &fw_class);
7506		goto done;
7507	}
7508
7509	/*
7510	 * Change the scheduling on all the TX queues for the
7511	 * interface.
7512	 */
7513	for_each_txq(pi, i, txq) {
7514		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
7515		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
7516		    &fw_class);
7517		if (rc)
7518			goto done;
7519	}
7520
7521	rc = 0;
7522done:
7523	end_synchronized_op(sc, 0);
7524	return (rc);
7525}
7526
7527int
7528t4_os_find_pci_capability(struct adapter *sc, int cap)
7529{
7530	int i;
7531
7532	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7533}
7534
7535int
7536t4_os_pci_save_state(struct adapter *sc)
7537{
7538	device_t dev;
7539	struct pci_devinfo *dinfo;
7540
7541	dev = sc->dev;
7542	dinfo = device_get_ivars(dev);
7543
7544	pci_cfg_save(dev, dinfo, 0);
7545	return (0);
7546}
7547
7548int
7549t4_os_pci_restore_state(struct adapter *sc)
7550{
7551	device_t dev;
7552	struct pci_devinfo *dinfo;
7553
7554	dev = sc->dev;
7555	dinfo = device_get_ivars(dev);
7556
7557	pci_cfg_restore(dev, dinfo);
7558	return (0);
7559}
7560
7561void
7562t4_os_portmod_changed(const struct adapter *sc, int idx)
7563{
7564	struct port_info *pi = sc->port[idx];
7565	static const char *mod_str[] = {
7566		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7567	};
7568
7569	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7570		if_printf(pi->ifp, "transceiver unplugged.\n");
7571	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7572		if_printf(pi->ifp, "unknown transceiver inserted.\n");
7573	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7574		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7575	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7576		if_printf(pi->ifp, "%s transceiver inserted.\n",
7577		    mod_str[pi->mod_type]);
7578	} else {
7579		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7580		    pi->mod_type);
7581	}
7582}
7583
7584void
7585t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7586{
7587	struct port_info *pi = sc->port[idx];
7588	struct ifnet *ifp = pi->ifp;
7589
7590	if (link_stat) {
7591		pi->linkdnrc = -1;
7592		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7593		if_link_state_change(ifp, LINK_STATE_UP);
7594	} else {
7595		if (reason >= 0)
7596			pi->linkdnrc = reason;
7597		if_link_state_change(ifp, LINK_STATE_DOWN);
7598	}
7599}
7600
7601void
7602t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7603{
7604	struct adapter *sc;
7605
7606	sx_slock(&t4_list_lock);
7607	SLIST_FOREACH(sc, &t4_list, link) {
7608		/*
7609		 * func should not make any assumptions about what state sc is
7610		 * in - the only guarantee is that sc->sc_lock is a valid lock.
7611		 */
7612		func(sc, arg);
7613	}
7614	sx_sunlock(&t4_list_lock);
7615}
7616
7617static int
7618t4_open(struct cdev *dev, int flags, int type, struct thread *td)
7619{
7620	return (0);
7621}
7622
7623static int
7624t4_close(struct cdev *dev, int flags, int type, struct thread *td)
7625{
7626	return (0);
7627}
7628
7629static int
7630t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
7631    struct thread *td)
7632{
7633	int rc;
7634	struct adapter *sc = dev->si_drv1;
7635
7636	rc = priv_check(td, PRIV_DRIVER);
7637	if (rc != 0)
7638		return (rc);
7639
7640	switch (cmd) {
7641	case CHELSIO_T4_GETREG: {
7642		struct t4_reg *edata = (struct t4_reg *)data;
7643
7644		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7645			return (EFAULT);
7646
7647		if (edata->size == 4)
7648			edata->val = t4_read_reg(sc, edata->addr);
7649		else if (edata->size == 8)
7650			edata->val = t4_read_reg64(sc, edata->addr);
7651		else
7652			return (EINVAL);
7653
7654		break;
7655	}
7656	case CHELSIO_T4_SETREG: {
7657		struct t4_reg *edata = (struct t4_reg *)data;
7658
7659		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7660			return (EFAULT);
7661
7662		if (edata->size == 4) {
7663			if (edata->val & 0xffffffff00000000)
7664				return (EINVAL);
7665			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
7666		} else if (edata->size == 8)
7667			t4_write_reg64(sc, edata->addr, edata->val);
7668		else
7669			return (EINVAL);
7670		break;
7671	}
7672	case CHELSIO_T4_REGDUMP: {
7673		struct t4_regdump *regs = (struct t4_regdump *)data;
7674		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
7675		uint8_t *buf;
7676
7677		if (regs->len < reglen) {
7678			regs->len = reglen; /* hint to the caller */
7679			return (ENOBUFS);
7680		}
7681
7682		regs->len = reglen;
7683		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
7684		t4_get_regs(sc, regs, buf);
7685		rc = copyout(buf, regs->data, reglen);
7686		free(buf, M_CXGBE);
7687		break;
7688	}
7689	case CHELSIO_T4_GET_FILTER_MODE:
7690		rc = get_filter_mode(sc, (uint32_t *)data);
7691		break;
7692	case CHELSIO_T4_SET_FILTER_MODE:
7693		rc = set_filter_mode(sc, *(uint32_t *)data);
7694		break;
7695	case CHELSIO_T4_GET_FILTER:
7696		rc = get_filter(sc, (struct t4_filter *)data);
7697		break;
7698	case CHELSIO_T4_SET_FILTER:
7699		rc = set_filter(sc, (struct t4_filter *)data);
7700		break;
7701	case CHELSIO_T4_DEL_FILTER:
7702		rc = del_filter(sc, (struct t4_filter *)data);
7703		break;
7704	case CHELSIO_T4_GET_SGE_CONTEXT:
7705		rc = get_sge_context(sc, (struct t4_sge_context *)data);
7706		break;
7707	case CHELSIO_T4_LOAD_FW:
7708		rc = load_fw(sc, (struct t4_data *)data);
7709		break;
7710	case CHELSIO_T4_GET_MEM:
7711		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
7712		break;
7713	case CHELSIO_T4_GET_I2C:
7714		rc = read_i2c(sc, (struct t4_i2c_data *)data);
7715		break;
7716	case CHELSIO_T4_CLEAR_STATS: {
7717		int i;
7718		u_int port_id = *(uint32_t *)data;
7719		struct port_info *pi;
7720
7721		if (port_id >= sc->params.nports)
7722			return (EINVAL);
7723
7724		/* MAC stats */
7725		t4_clr_port_stats(sc, port_id);
7726
7727		pi = sc->port[port_id];
7728		if (pi->flags & PORT_INIT_DONE) {
7729			struct sge_rxq *rxq;
7730			struct sge_txq *txq;
7731			struct sge_wrq *wrq;
7732
7733			for_each_rxq(pi, i, rxq) {
7734#if defined(INET) || defined(INET6)
7735				rxq->lro.lro_queued = 0;
7736				rxq->lro.lro_flushed = 0;
7737#endif
7738				rxq->rxcsum = 0;
7739				rxq->vlan_extraction = 0;
7740			}
7741
7742			for_each_txq(pi, i, txq) {
7743				txq->txcsum = 0;
7744				txq->tso_wrs = 0;
7745				txq->vlan_insertion = 0;
7746				txq->imm_wrs = 0;
7747				txq->sgl_wrs = 0;
7748				txq->txpkt_wrs = 0;
7749				txq->txpkts_wrs = 0;
7750				txq->txpkts_pkts = 0;
7751				txq->br->br_drops = 0;
7752				txq->no_dmamap = 0;
7753				txq->no_desc = 0;
7754			}
7755
7756#ifdef TCP_OFFLOAD
7757			/* nothing to clear for each ofld_rxq */
7758
7759			for_each_ofld_txq(pi, i, wrq) {
7760				wrq->tx_wrs = 0;
7761				wrq->no_desc = 0;
7762			}
7763#endif
7764			wrq = &sc->sge.ctrlq[pi->port_id];
7765			wrq->tx_wrs = 0;
7766			wrq->no_desc = 0;
7767		}
7768		break;
7769	}
7770	case CHELSIO_T4_SCHED_CLASS:
7771		rc = set_sched_class(sc, (struct t4_sched_params *)data);
7772		break;
7773	case CHELSIO_T4_SCHED_QUEUE:
7774		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
7775		break;
7776	case CHELSIO_T4_GET_TRACER:
7777		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
7778		break;
7779	case CHELSIO_T4_SET_TRACER:
7780		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
7781		break;
7782	default:
7783		rc = EINVAL;
7784	}
7785
7786	return (rc);
7787}
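
/*
 * Userland sketch (illustrative, not part of this file) of driving the
 * register ioctls above.  The device node name ("/dev/t4nex0" for the
 * first T4 adapter) and the exact t4_reg layout are assumptions here;
 * see t4_ioctl.h for the authoritative definitions.
 *
 *	struct t4_reg reg = { .addr = 0x0, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *		printf("0x%08x = 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 *
 * CHELSIO_T4_GETREG rejects addresses that are unaligned or beyond
 * sc->mmio_len with EFAULT, and sizes other than 4 or 8 with EINVAL.
 */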

#ifdef TCP_OFFLOAD
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}
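
/*
 * toe_capability is reached via the SIOCSIFCAP (IFCAP_TOE) path in
 * cxgbe_ioctl.  From userland the usual sequence is (illustrative):
 *
 *	kldload t4_tom
 *	ifconfig cxgbe0 toe
 *
 * Without t4_tom.ko loaded, t4_activate_uld(sc, ULD_TOM) returns EAGAIN
 * and the log message above is emitted.
 */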

/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}
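
/*
 * Sketch of how an upper layer driver hooks in (illustrative; the
 * function names below are placeholders, though this mirrors what
 * t4_tom does when it loads):
 *
 *	static struct uld_info tom_uld_info = {
 *		.uld_id = ULD_TOM,
 *		.activate = t4_tom_activate,
 *		.deactivate = t4_tom_deactivate,
 *	};
 *
 *	t4_register_uld(&tom_uld_info);
 *
 * Only uld_id, activate, deactivate, and refcount are consumed here;
 * activate/deactivate run later via t4_activate_uld/t4_deactivate_uld.
 */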

int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc = EAGAIN;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->activate(sc);
			if (rc == 0)
				ui->refcount++;
			goto done;
		}
	}
done:
	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc = EINVAL;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0)
				ui->refcount--;
			goto done;
		}
	}
done:
	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}
#endif

/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1)
		t4_ntxq10g = min(nc, NTXQ_10G);

	if (t4_ntxq1g < 1)
		t4_ntxq1g = min(nc, NTXQ_1G);

	if (t4_nrxq10g < 1)
		t4_nrxq10g = min(nc, NRXQ_10G);

	if (t4_nrxq1g < 1)
		t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
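
/*
 * The tunables above are normally set from loader.conf(5) before this
 * module initializes, e.g. (illustrative values):
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.qsize_rxq="1024"
 *
 * tweak_tunables only touches values still at their "unset" defaults;
 * note that qsize_rxq is clamped to at least 128 and rounded up to a
 * multiple of 8.
 */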

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		sx_init(&t4_list_lock, "T4/T5 adapters");
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
		SLIST_INIT(&t4_uld_list);
#endif
		t4_tracer_modload();
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
		t4_tracer_modunload();
#ifdef TCP_OFFLOAD
		sx_slock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_uld_list_lock);
			break;
		}
		sx_sunlock(&t4_uld_list_lock);
		sx_destroy(&t4_uld_list_lock);
#endif
		sx_slock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_list_lock);
			break;
		}
		sx_sunlock(&t4_list_lock);
		sx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}

static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);