/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_main.c 264736 2014-04-21 17:17:23Z emax $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	DEVMETHOD_END
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

static struct cdevsw t5_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */
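
/*
 * Tunables are set at boot time, e.g. in /boot/loader.conf (illustrative
 * values, not recommendations):
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.config_file="uwire"
 */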

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
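/*
 * E.g. hw.cxgbe.interrupt_types=2 restricts the driver to MSI; the default
 * of 7 lets cfg_itype_and_nqueues() try MSI-X first, then MSI, then INTx.
 */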

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
	int rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff
};
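
/*
 * These flags tell update_mac_settings() which MAC parameters to apply; e.g.
 * the SIOCSIFFLAGS handler below passes XGMAC_PROMISC | XGMAC_ALLMULTI when
 * those interface flags change.
 */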

static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
#ifdef notyet
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5408,  "Chelsio T520-CX"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5413,  "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
	}

	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		pi->linkdnrc = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;
	sc->flags |= iaq.intr_flags;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1; /* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		if (pi->ntxq > 1)
			pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
		else
			pi->rsrv_noflowq = 0;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}


static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)
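
/*
 * The capabilities in T4_CAP_ENABLE can be toggled at runtime with
 * ifconfig(8), e.g. "ifconfig cxgbe0 -txcsum" (hypothetical unit number)
 * ends up in the SIOCSIFCAP case of cxgbe_ioctl() below.
 */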

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(pi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (pi->flags & PORT_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(pi,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_init_synchronized(pi);
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(pi, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	if (m->m_flags & M_FLOWID)
		txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq))
		    + pi->rsrv_noflowq);
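	/*
	 * Worked example of the mapping above (illustrative numbers): with
	 * ntxq = 8 and rsrv_noflowq = 1, flowids are spread over txq 1..7
	 * while txq 0 stays reserved for traffic without a flowid.
	 */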
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_txq *txq;
	int i;
	struct mbuf *m;

	/* queues do not exist if !PORT_INIT_DONE. */
	if (pi->flags & PORT_INIT_DONE) {
		for_each_txq(pi, i, txq) {
			TXQ_LOCK(txq);
			m_freem(txq->m);
			txq->m = NULL;
			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
				m_freem(m);
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia_entry *cur = pi->media.ifm_cur;
	int speed = pi->link_cfg.speed;
	int data = (pi->port_type << 8) | pi->mod_type;

	if (cur->ifm_data != data) {
		build_medialist(pi);
		cur = pi->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}

static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}

static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

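	/*
	 * The window size is programmed as log2(aperture) - 10, i.e. the
	 * log2 of the aperture expressed in 1KB units.  Illustrative
	 * arithmetic (not a claim about the actual MEMWIN*_APERTURE
	 * values): a 64KB aperture is encoded as ilog2(65536) - 10 = 6.
	 */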
	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}

/*
 * Verify that the memory range specified by the addr/len pair is valid and lies
 * entirely within a single region (EDCx or MCx).
 */
static int
validate_mem_range(struct adapter *sc, uint32_t addr, int len)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (addr & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Enabled memories */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (em & F_EDRAM0_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EDRAM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EXT_MEM_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}

	return (EFAULT);
}

static int
fwmtype_to_hwmtype(int mtype)
{

	switch (mtype) {
	case FW_MEMTYPE_EDC0:
		return (MEM_EDC0);
	case FW_MEMTYPE_EDC1:
		return (MEM_EDC1);
	case FW_MEMTYPE_EXTMEM:
		return (MEM_MC0);
	case FW_MEMTYPE_EXTMEM1:
		return (MEM_MC1);
	default:
		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
	}
}

/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (fwmtype_to_hwmtype(mtype)) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}
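
/*
 * Worked example for validate_mt_off_len() above (illustrative numbers):
 * if the EDC0 BAR reports a size of 128, then mlen = 128 << 20 = 128MB, so
 * an EDC0 request with off = 0x100000 and len = 0x1000 fits and the global
 * address maddr + 0x100000 is returned in *addr.
 */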

static void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
	const struct memwin *mw;

	if (is_t4(sc)) {
		KASSERT(win >= 0 && win < nitems(t4_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t4_memwin[win];
	} else {
		KASSERT(win >= 0 && win < nitems(t5_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t5_memwin[win];
	}

	if (base != NULL)
		*base = mw->base;
	if (aperture != NULL)
		*aperture = mw->aperture;
}

/*
 * Positions the memory window such that it can be used to access the specified
 * address in the chip's address space.  The return value is the offset of addr
 * from the start of the window.
 */
static uint32_t
position_memwin(struct adapter *sc, int n, uint32_t addr)
{
	uint32_t start, pf;
	uint32_t reg;

	KASSERT(n >= 0 && n <= 3,
	    ("%s: invalid window %d.", __func__, n));
	KASSERT((addr & 3) == 0,
	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));

	if (is_t4(sc)) {
		pf = 0;
		start = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		start = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);

	t4_write_reg(sc, reg, start | pf);
	t4_read_reg(sc, reg);

	return (addr - start);
}
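
/*
 * Typical use of the two helpers above (a sketch; read_card_mem() further
 * down in this file is the real consumer):
 *
 *	memwin_info(sc, 2, &base, &aperture);
 *	off = position_memwin(sc, 2, addr);
 *	val = t4_read_reg(sc, base + off);
 */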

static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nrxq10g, nrxq1g, n;
	int nofldrxq10g = 0, nofldrxq1g = 0;

	bzero(iaq, sizeof(*iaq));

	iaq->ntxq10g = t4_ntxq10g;
	iaq->ntxq1g = t4_ntxq1g;
	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
	iaq->rsrv_noflowq = t4_rsrv_noflowq;
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		iaq->nofldtxq10g = t4_nofldtxq10g;
		iaq->nofldtxq1g = t4_nofldtxq1g;
		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
	}
#endif

	for (itype = INTR_MSIX; itype; itype >>= 1) {

		if ((itype & t4_intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;
restart:
		if (navail == 0)
			continue;

		iaq->intr_type = itype;
		iaq->intr_flags = 0;

		/*
		 * Best option: an interrupt vector for errors, one for the
		 * firmware event queue, and one each for each rxq (NIC as well
		 * as offload).
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
			iaq->intr_flags |= INTR_DIRECT;
			goto allocate;
		}
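		/*
		 * Illustrative vector budget for the option above, assuming
		 * T4_EXTRA_INTR covers the two vectors mentioned (errors +
		 * firmware event queue): a 2 x 10G adapter with nrxq10g = 8
		 * and no offload queues asks for 2 + 2 * 8 = 18 vectors.
		 */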

		/*
		 * Second best option: an interrupt vector for errors, one for
		 * the firmware event queue, and one each for either NIC or
		 * offload rxq's.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq)))
			goto allocate;

		/*
		 * Next best option: an interrupt vector for errors, one for the
		 * firmware event queue, and at least one per port.  At this
		 * point we know we'll have to downsize nrxq or nofldrxq to fit
		 * what's available to us.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g + n1g;
		if (iaq->nirq <= navail) {
			int leftover = navail - iaq->nirq;

			if (n10g > 0) {
				int target = max(nrxq10g, nofldrxq10g);

				n = 1;
				while (n < target && leftover >= n10g) {
					leftover -= n10g;
					iaq->nirq += n10g;
					n++;
				}
				iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
			}

			if (n1g > 0) {
				int target = max(nrxq1g, nofldrxq1g);

				n = 1;
				while (n < target && leftover >= n1g) {
					leftover -= n1g;
					iaq->nirq += n1g;
					n++;
				}
				iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
			}

			if (itype != INTR_MSI || powerof2(iaq->nirq))
				goto allocate;
		}

		/*
		 * Least desirable option: one interrupt vector for everything.
		 */
		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
#ifdef TCP_OFFLOAD
		if (is_offload(sc))
			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif

allocate:
		navail = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &navail);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &navail);

		if (rc == 0) {
			if (navail == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate (it's in navail).
			 */
			device_printf(sc->dev, "fewer vectors than requested, "
			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
			    itype, iaq->nirq, navail);
			pci_release_msi(sc->dev);
			goto restart;
		}

1866		device_printf(sc->dev,
1867		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1868		    rc, itype, iaq->nirq, navail);
1869	}
1870
1871	device_printf(sc->dev,
1872	    "failed to find a usable interrupt type.  "
1873	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
1874	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1875
1876	return (ENXIO);
1877}
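
/*
 * The allocate/restart dance above reduces to a simple pattern: ask the
 * PCI layer for the ideal vector count and, if it grants fewer, release
 * the allocation and retry with the smaller budget.  A minimal stand-alone
 * sketch of that loop (the function name is illustrative and not part of
 * this driver; pci_alloc_msix(9) may succeed with fewer vectors than
 * requested, which is exactly the case handled above):
 */
static int __unused
alloc_msix_with_downshift(device_t dev, int want)
{
	int rc, navail;

	for (;;) {
		navail = want;
		rc = pci_alloc_msix(dev, &navail);
		if (rc != 0)
			return (rc);		/* no MSI-X at all */
		if (navail == want)
			return (0);		/* got the full request */

		/* Fewer vectors than requested; downshift and retry. */
		pci_release_msi(dev);
		want = navail;
	}
}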
1878
1879#define FW_VERSION(chip) ( \
1880    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
1881    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
1882    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
1883    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
1884#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
1885
1886struct fw_info {
1887	uint8_t chip;
1888	char *kld_name;
1889	char *fw_mod_name;
1890	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
1891} fw_info[] = {
1892	{
1893		.chip = CHELSIO_T4,
1894		.kld_name = "t4fw_cfg",
1895		.fw_mod_name = "t4fw",
1896		.fw_hdr = {
1897			.chip = FW_HDR_CHIP_T4,
1898			.fw_ver = htobe32_const(FW_VERSION(T4)),
1899			.intfver_nic = FW_INTFVER(T4, NIC),
1900			.intfver_vnic = FW_INTFVER(T4, VNIC),
1901			.intfver_ofld = FW_INTFVER(T4, OFLD),
1902			.intfver_ri = FW_INTFVER(T4, RI),
1903			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
1904			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
1905			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
1906			.intfver_fcoe = FW_INTFVER(T4, FCOE),
1907		},
1908	}, {
1909		.chip = CHELSIO_T5,
1910		.kld_name = "t5fw_cfg",
1911		.fw_mod_name = "t5fw",
1912		.fw_hdr = {
1913			.chip = FW_HDR_CHIP_T5,
1914			.fw_ver = htobe32_const(FW_VERSION(T5)),
1915			.intfver_nic = FW_INTFVER(T5, NIC),
1916			.intfver_vnic = FW_INTFVER(T5, VNIC),
1917			.intfver_ofld = FW_INTFVER(T5, OFLD),
1918			.intfver_ri = FW_INTFVER(T5, RI),
1919			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
1920			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
1921			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
1922			.intfver_fcoe = FW_INTFVER(T5, FCOE),
1923		},
1924	}
1925};
1926
1927static struct fw_info *
1928find_fw_info(int chip)
1929{
1930	int i;
1931
1932	for (i = 0; i < nitems(fw_info); i++) {
1933		if (fw_info[i].chip == chip)
1934			return (&fw_info[i]);
1935	}
1936	return (NULL);
1937}
1938
1939/*
1940 * Is the given firmware API compatible with the one the driver was compiled
1941 * with?
1942 */
1943static int
1944fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1945{
1946
1947	/* short circuit if it's the exact same firmware version */
1948	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1949		return (1);
1950
1951	/*
1952	 * XXX: Is this too conservative?  Perhaps I should limit this to the
1953	 * features that are supported in the driver.
1954	 */
1955#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1956	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1957	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1958	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1959		return (1);
1960#undef SAME_INTF
1961
1962	return (0);
1963}
1964
1965/*
1966 * The firmware in the KLD is usable, but should it be installed?  This routine
1967 * explains itself in detail if it indicates the KLD firmware should be
1968 * installed.
1969 */
1970static int
1971should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1972{
1973	const char *reason;
1974
1975	if (!card_fw_usable) {
1976		reason = "incompatible or unusable";
1977		goto install;
1978	}
1979
1980	if (k > c) {
1981		reason = "older than the version bundled with this driver";
1982		goto install;
1983	}
1984
1985	if (t4_fw_install == 2 && k != c) {
1986		reason = "different than the version bundled with this driver";
1987		goto install;
1988	}
1989
1990	return (0);
1991
1992install:
1993	if (t4_fw_install == 0) {
1994		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1995		    "but the driver is prohibited from installing a different "
1996		    "firmware on the card.\n",
1997		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1998		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1999
2000		return (0);
2001	}
2002
2003	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2004	    "installing firmware %u.%u.%u.%u on card.\n",
2005	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2006	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2007	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2008	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2009
2010	return (1);
2011}
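
/*
 * The bare k > c and k != c comparisons above work because FW_VERSION()
 * packs the four version components into a single 32-bit word with the
 * most significant component in the highest byte, so plain unsigned
 * comparison orders versions the same way a field-by-field comparison
 * would.  A worked example, assuming the usual 8-bit fields at shifts
 * 24/16/8/0:
 *
 *	1.8.4.0   -> 0x01080400
 *	1.11.27.0 -> 0x010b1b00
 *
 * 0x010b1b00 > 0x01080400, so 1.11.27.0 correctly compares as newer than
 * 1.8.4.0 even though "11" sorts before "8" as a string.
 */
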
2012/*
2013 * Establish contact with the firmware and determine if we are the master driver
2014 * or not, and whether we are responsible for chip initialization.
2015 */
2016static int
2017prep_firmware(struct adapter *sc)
2018{
2019	const struct firmware *fw = NULL, *default_cfg;
2020	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2021	enum dev_state state;
2022	struct fw_info *fw_info;
2023	struct fw_hdr *card_fw;		/* fw on the card */
2024	const struct fw_hdr *kld_fw;	/* fw in the KLD */
2025	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
2026					   against */
2027
2028	/* Contact firmware. */
2029	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2030	if (rc < 0 || state == DEV_STATE_ERR) {
2031		rc = -rc;
2032		device_printf(sc->dev,
2033		    "failed to connect to the firmware: %d, %d.\n", rc, state);
2034		return (rc);
2035	}
2036	pf = rc;
2037	if (pf == sc->mbox)
2038		sc->flags |= MASTER_PF;
2039	else if (state == DEV_STATE_UNINIT) {
2040		/*
2041		 * We didn't get to be the master so we definitely won't be
2042		 * configuring the chip.  It's a bug if someone else hasn't
2043		 * configured it already.
2044		 */
2045		device_printf(sc->dev, "couldn't be master (%d), and the "
2046		    "device is not already initialized either (%d).\n", rc, state);
2047		return (EDOOFUS);
2048	}
2049
2050	/* This is the firmware whose headers the driver was compiled against */
2051	fw_info = find_fw_info(chip_id(sc));
2052	if (fw_info == NULL) {
2053		device_printf(sc->dev,
2054		    "unable to look up firmware information for chip %d.\n",
2055		    chip_id(sc));
2056		return (EINVAL);
2057	}
2058	drv_fw = &fw_info->fw_hdr;
2059
2060	/*
2061	 * The firmware KLD contains many modules.  The KLD name is also the
2062	 * name of the module that contains the default config file.
2063	 */
2064	default_cfg = firmware_get(fw_info->kld_name);
2065
2066	/* Read the header of the firmware on the card */
2067	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2068	rc = -t4_read_flash(sc, FLASH_FW_START,
2069	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2070	if (rc == 0)
2071		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2072	else {
2073		device_printf(sc->dev,
2074		    "Unable to read card's firmware header: %d\n", rc);
2075		card_fw_usable = 0;
2076	}
2077
2078	/* This is the firmware in the KLD */
2079	fw = firmware_get(fw_info->fw_mod_name);
2080	if (fw != NULL) {
2081		kld_fw = (const void *)fw->data;
2082		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2083	} else {
2084		kld_fw = NULL;
2085		kld_fw_usable = 0;
2086	}
2087
2088	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2089	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2090		/*
2091		 * Common case: the firmware on the card is an exact match and
2092		 * the KLD is an exact match too, or the KLD is
2093		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2094		 * here -- use cxgbetool loadfw if you want to reinstall the
2095		 * same firmware as the one on the card.
2096		 */
2097	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2098	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2099	    be32toh(card_fw->fw_ver))) {
2100
2101		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2102		if (rc != 0) {
2103			device_printf(sc->dev,
2104			    "failed to install firmware: %d\n", rc);
2105			goto done;
2106		}
2107
2108		/* Installed successfully, update the cached header too. */
2109		memcpy(card_fw, kld_fw, sizeof(*card_fw));
2110		card_fw_usable = 1;
2111		need_fw_reset = 0;	/* already reset as part of load_fw */
2112	}
2113
2114	if (!card_fw_usable) {
2115		uint32_t d, c, k;
2116
2117		d = ntohl(drv_fw->fw_ver);
2118		c = ntohl(card_fw->fw_ver);
2119		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2120
2121		device_printf(sc->dev, "Cannot find a usable firmware: "
2122		    "fw_install %d, chip state %d, "
2123		    "driver compiled with %d.%d.%d.%d, "
2124		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2125		    t4_fw_install, state,
2126		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2127		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2128		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2129		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2130		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2131		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2132		rc = EINVAL;
2133		goto done;
2134	}
2135
2136	/* We're using whatever's on the card and it's known to be good. */
2137	sc->params.fw_vers = ntohl(card_fw->fw_ver);
2138	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2139	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2140	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2141	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2142	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2143	t4_get_tp_version(sc, &sc->params.tp_vers);
2144
2145	/* Reset device */
2146	if (need_fw_reset &&
2147	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2148		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2149		if (rc != ETIMEDOUT && rc != EIO)
2150			t4_fw_bye(sc, sc->mbox);
2151		goto done;
2152	}
2153	sc->flags |= FW_OK;
2154
2155	rc = get_params__pre_init(sc);
2156	if (rc != 0)
2157		goto done; /* error message displayed already */
2158
2159	/* Partition adapter resources as specified in the config file. */
2160	if (state == DEV_STATE_UNINIT) {
2161
2162		KASSERT(sc->flags & MASTER_PF,
2163		    ("%s: trying to change chip settings when not master.",
2164		    __func__));
2165
2166		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2167		if (rc != 0)
2168			goto done;	/* error message displayed already */
2169
2170		t4_tweak_chip_settings(sc);
2171
2172		/* get basic stuff going */
2173		rc = -t4_fw_initialize(sc, sc->mbox);
2174		if (rc != 0) {
2175			device_printf(sc->dev, "fw init failed: %d.\n", rc);
2176			goto done;
2177		}
2178	} else {
2179		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2180		sc->cfcsum = 0;
2181	}
2182
2183done:
2184	free(card_fw, M_CXGBE);
2185	if (fw != NULL)
2186		firmware_put(fw, FIRMWARE_UNLOAD);
2187	if (default_cfg != NULL)
2188		firmware_put(default_cfg, FIRMWARE_UNLOAD);
2189
2190	return (rc);
2191}
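
/*
 * prep_firmware() leans on the firmware(9) interface for both the config
 * module and the firmware image: firmware_get() takes a reference on a
 * named image (loading the KLD if it isn't registered yet) and
 * firmware_put() drops it.  The general shape, in isolation (the function
 * name is illustrative; "t4fw" is the module name used above):
 */
static int __unused
with_fw_image(void)
{
	const struct firmware *img;

	img = firmware_get("t4fw");	/* ref; loads the module if absent */
	if (img == NULL)
		return (ENOENT);

	/* ... consume img->data / img->datasize here ... */

	firmware_put(img, FIRMWARE_UNLOAD);	/* drop ref, allow unload */
	return (0);
}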
2192
2193#define FW_PARAM_DEV(param) \
2194	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2195	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2196#define FW_PARAM_PFVF(param) \
2197	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2198	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2199
2200/*
2201 * Partition chip resources for use between various PFs, VFs, etc.
2202 */
2203static int
2204partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2205    const char *name_prefix)
2206{
2207	const struct firmware *cfg = NULL;
2208	int rc = 0;
2209	struct fw_caps_config_cmd caps;
2210	uint32_t mtype, moff, finicsum, cfcsum;
2211
2212	/*
2213	 * Figure out what configuration file to use.  Pick the default config
2214	 * file for the card if the user hasn't specified one explicitly.
2215	 */
2216	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2217	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2218		/* Card specific overrides go here. */
2219		if (pci_get_device(sc->dev) == 0x440a)
2220			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2221		if (is_fpga(sc))
2222			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2223	}
2224
2225	/*
2226	 * We need to load another module if the profile is anything except
2227	 * "default" or "flash".
2228	 */
2229	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2230	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2231		char s[32];
2232
2233		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2234		cfg = firmware_get(s);
2235		if (cfg == NULL) {
2236			if (default_cfg != NULL) {
2237				device_printf(sc->dev,
2238				    "unable to load module \"%s\" for "
2239				    "configuration profile \"%s\", will use "
2240				    "the default config file instead.\n",
2241				    s, sc->cfg_file);
2242				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2243				    "%s", DEFAULT_CF);
2244			} else {
2245				device_printf(sc->dev,
2246				    "unable to load module \"%s\" for "
2247				    "configuration profile \"%s\", will use "
2248				    "the config file on the card's flash "
2249				    "instead.\n", s, sc->cfg_file);
2250				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2251				    "%s", FLASH_CF);
2252			}
2253		}
2254	}
2255
2256	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2257	    default_cfg == NULL) {
2258		device_printf(sc->dev,
2259		    "default config file not available, will use the config "
2260		    "file on the card's flash instead.\n");
2261		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2262	}
2263
2264	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2265		u_int cflen, i, n;
2266		const uint32_t *cfdata;
2267		uint32_t param, val, addr, off, mw_base, mw_aperture;
2268
2269		KASSERT(cfg != NULL || default_cfg != NULL,
2270		    ("%s: no config to upload", __func__));
2271
2272		/*
2273		 * Ask the firmware where it wants us to upload the config file.
2274		 */
2275		param = FW_PARAM_DEV(CF);
2276		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2277		if (rc != 0) {
2278			/* No support for config file?  Shouldn't happen. */
2279			device_printf(sc->dev,
2280			    "failed to query config file location: %d.\n", rc);
2281			goto done;
2282		}
2283		mtype = G_FW_PARAMS_PARAM_Y(val);
2284		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2285
2286		/*
2287		 * XXX: sheer laziness.  We deliberately added 4 bytes of
2288		 * useless stuffing/comments at the end of the config file so
2289		 * it's ok to simply throw away the last remaining bytes when
2290		 * the config file is not an exact multiple of 4.  This also
2291		 * helps with the validate_mt_off_len check.
2292		 */
2293		if (cfg != NULL) {
2294			cflen = cfg->datasize & ~3;
2295			cfdata = cfg->data;
2296		} else {
2297			cflen = default_cfg->datasize & ~3;
2298			cfdata = default_cfg->data;
2299		}
2300
2301		if (cflen > FLASH_CFG_MAX_SIZE) {
2302			device_printf(sc->dev,
2303			    "config file too long (%d, max allowed is %d).  "
2304			    "Will try to use the config on the card, if any.\n",
2305			    cflen, FLASH_CFG_MAX_SIZE);
2306			goto use_config_on_flash;
2307		}
2308
2309		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2310		if (rc != 0) {
2311			device_printf(sc->dev,
2312			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
2313			    "Will try to use the config on the card, if any.\n",
2314			    __func__, mtype, moff, cflen, rc);
2315			goto use_config_on_flash;
2316		}
2317
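		/*
		 * Copy the config file out through memory window 2:
		 * position the window over the destination, write up to
		 * one aperture's worth of 32-bit words, then advance.
		 */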
2318		memwin_info(sc, 2, &mw_base, &mw_aperture);
2319		while (cflen) {
2320			off = position_memwin(sc, 2, addr);
2321			n = min(cflen, mw_aperture - off);
2322			for (i = 0; i < n; i += 4)
2323				t4_write_reg(sc, mw_base + off + i, *cfdata++);
2324			cflen -= n;
2325			addr += n;
2326		}
2327	} else {
2328use_config_on_flash:
2329		mtype = FW_MEMTYPE_FLASH;
2330		moff = t4_flash_cfg_addr(sc);
2331	}
2332
2333	bzero(&caps, sizeof(caps));
2334	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2335	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2336	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2337	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2338	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2339	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2340	if (rc != 0) {
2341		device_printf(sc->dev,
2342		    "failed to pre-process config file: %d "
2343		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2344		goto done;
2345	}
2346
2347	finicsum = be32toh(caps.finicsum);
2348	cfcsum = be32toh(caps.cfcsum);
2349	if (finicsum != cfcsum) {
2350		device_printf(sc->dev,
2351		    "WARNING: config file checksum mismatch: %08x %08x\n",
2352		    finicsum, cfcsum);
2353	}
2354	sc->cfcsum = cfcsum;
2355
2356#define LIMIT_CAPS(x) do { \
2357	caps.x &= htobe16(t4_##x##_allowed); \
2358	sc->x = be16toh(caps.x); \
2359} while (0)
2360
2361	/*
2362	 * Let the firmware know what features will (not) be used so it can tune
2363	 * things accordingly.
2364	 */
2365	LIMIT_CAPS(linkcaps);
2366	LIMIT_CAPS(niccaps);
2367	LIMIT_CAPS(toecaps);
2368	LIMIT_CAPS(rdmacaps);
2369	LIMIT_CAPS(iscsicaps);
2370	LIMIT_CAPS(fcoecaps);
2371#undef LIMIT_CAPS
2372
2373	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2374	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2375	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2376	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2377	if (rc != 0) {
2378		device_printf(sc->dev,
2379		    "failed to process config file: %d.\n", rc);
2380	}
2381done:
2382	if (cfg != NULL)
2383		firmware_put(cfg, FIRMWARE_UNLOAD);
2384	return (rc);
2385}
2386
2387/*
2388 * Retrieve parameters that are needed (or nice to have) very early.
2389 */
2390static int
2391get_params__pre_init(struct adapter *sc)
2392{
2393	int rc;
2394	uint32_t param[2], val[2];
2395	struct fw_devlog_cmd cmd;
2396	struct devlog_params *dlog = &sc->params.devlog;
2397
2398	param[0] = FW_PARAM_DEV(PORTVEC);
2399	param[1] = FW_PARAM_DEV(CCLK);
2400	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2401	if (rc != 0) {
2402		device_printf(sc->dev,
2403		    "failed to query parameters (pre_init): %d.\n", rc);
2404		return (rc);
2405	}
2406
2407	sc->params.portvec = val[0];
2408	sc->params.nports = bitcount32(val[0]);
2409	sc->params.vpd.cclk = val[1];
2410
2411	/* Read device log parameters. */
2412	bzero(&cmd, sizeof(cmd));
2413	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2414	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2415	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2416	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2417	if (rc != 0) {
2418		device_printf(sc->dev,
2419		    "failed to get devlog parameters: %d.\n", rc);
2420		bzero(dlog, sizeof (*dlog));
2421		rc = 0;	/* devlog isn't critical for device operation */
2422	} else {
2423		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2424		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
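		/* MEMADDR16 is expressed in 16-byte units, hence the shift. */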
2425		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2426		dlog->size = be32toh(cmd.memsize_devlog);
2427	}
2428
2429	return (rc);
2430}
2431
2432/*
2433 * Retrieve various parameters that are of interest to the driver.  The device
2434 * has been initialized by the firmware at this point.
2435 */
2436static int
2437get_params__post_init(struct adapter *sc)
2438{
2439	int rc;
2440	uint32_t param[7], val[7];
2441	struct fw_caps_config_cmd caps;
2442
2443	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2444	param[1] = FW_PARAM_PFVF(EQ_START);
2445	param[2] = FW_PARAM_PFVF(FILTER_START);
2446	param[3] = FW_PARAM_PFVF(FILTER_END);
2447	param[4] = FW_PARAM_PFVF(L2T_START);
2448	param[5] = FW_PARAM_PFVF(L2T_END);
2449	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2450	if (rc != 0) {
2451		device_printf(sc->dev,
2452		    "failed to query parameters (post_init): %d.\n", rc);
2453		return (rc);
2454	}
2455
2456	sc->sge.iq_start = val[0];
2457	sc->sge.eq_start = val[1];
2458	sc->tids.ftid_base = val[2];
2459	sc->tids.nftids = val[3] - val[2] + 1;
2460	sc->vres.l2t.start = val[4];
2461	sc->vres.l2t.size = val[5] - val[4] + 1;
2462	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2463	    ("%s: L2 table size (%u) larger than expected (%u)",
2464	    __func__, sc->vres.l2t.size, L2T_SIZE));
2465
2466	/* get capabilities */
2467	bzero(&caps, sizeof(caps));
2468	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2469	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2470	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2471	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2472	if (rc != 0) {
2473		device_printf(sc->dev,
2474		    "failed to get card capabilities: %d.\n", rc);
2475		return (rc);
2476	}
2477
2478	if (caps.toecaps) {
2479		/* query offload-related parameters */
2480		param[0] = FW_PARAM_DEV(NTID);
2481		param[1] = FW_PARAM_PFVF(SERVER_START);
2482		param[2] = FW_PARAM_PFVF(SERVER_END);
2483		param[3] = FW_PARAM_PFVF(TDDP_START);
2484		param[4] = FW_PARAM_PFVF(TDDP_END);
2485		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2486		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2487		if (rc != 0) {
2488			device_printf(sc->dev,
2489			    "failed to query TOE parameters: %d.\n", rc);
2490			return (rc);
2491		}
2492		sc->tids.ntids = val[0];
2493		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2494		sc->tids.stid_base = val[1];
2495		sc->tids.nstids = val[2] - val[1] + 1;
2496		sc->vres.ddp.start = val[3];
2497		sc->vres.ddp.size = val[4] - val[3] + 1;
2498		sc->params.ofldq_wr_cred = val[5];
2499		sc->params.offload = 1;
2500	}
2501	if (caps.rdmacaps) {
2502		param[0] = FW_PARAM_PFVF(STAG_START);
2503		param[1] = FW_PARAM_PFVF(STAG_END);
2504		param[2] = FW_PARAM_PFVF(RQ_START);
2505		param[3] = FW_PARAM_PFVF(RQ_END);
2506		param[4] = FW_PARAM_PFVF(PBL_START);
2507		param[5] = FW_PARAM_PFVF(PBL_END);
2508		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2509		if (rc != 0) {
2510			device_printf(sc->dev,
2511			    "failed to query RDMA parameters(1): %d.\n", rc);
2512			return (rc);
2513		}
2514		sc->vres.stag.start = val[0];
2515		sc->vres.stag.size = val[1] - val[0] + 1;
2516		sc->vres.rq.start = val[2];
2517		sc->vres.rq.size = val[3] - val[2] + 1;
2518		sc->vres.pbl.start = val[4];
2519		sc->vres.pbl.size = val[5] - val[4] + 1;
2520
2521		param[0] = FW_PARAM_PFVF(SQRQ_START);
2522		param[1] = FW_PARAM_PFVF(SQRQ_END);
2523		param[2] = FW_PARAM_PFVF(CQ_START);
2524		param[3] = FW_PARAM_PFVF(CQ_END);
2525		param[4] = FW_PARAM_PFVF(OCQ_START);
2526		param[5] = FW_PARAM_PFVF(OCQ_END);
2527		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2528		if (rc != 0) {
2529			device_printf(sc->dev,
2530			    "failed to query RDMA parameters(2): %d.\n", rc);
2531			return (rc);
2532		}
2533		sc->vres.qp.start = val[0];
2534		sc->vres.qp.size = val[1] - val[0] + 1;
2535		sc->vres.cq.start = val[2];
2536		sc->vres.cq.size = val[3] - val[2] + 1;
2537		sc->vres.ocq.start = val[4];
2538		sc->vres.ocq.size = val[5] - val[4] + 1;
2539	}
2540	if (caps.iscsicaps) {
2541		param[0] = FW_PARAM_PFVF(ISCSI_START);
2542		param[1] = FW_PARAM_PFVF(ISCSI_END);
2543		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2544		if (rc != 0) {
2545			device_printf(sc->dev,
2546			    "failed to query iSCSI parameters: %d.\n", rc);
2547			return (rc);
2548		}
2549		sc->vres.iscsi.start = val[0];
2550		sc->vres.iscsi.size = val[1] - val[0] + 1;
2551	}
2552
2553	/*
2554	 * We've got the params we wanted to query via the firmware.  Now grab
2555	 * some others directly from the chip.
2556	 */
2557	rc = t4_read_chip_settings(sc);
2558
2559	return (rc);
2560}
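
/*
 * Every block in get_params__post_init() follows the same shape: build an
 * array of FW_PARAM_DEV()/FW_PARAM_PFVF() keys, issue one t4_query_params()
 * mailbox command, and read the answers back positionally.  A minimal
 * single-parameter sketch (the function name is illustrative, not part of
 * this driver):
 */
static int __unused
query_ntids(struct adapter *sc, uint32_t *ntids)
{
	int rc;
	uint32_t param, val;

	param = FW_PARAM_DEV(NTID);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc == 0)
		*ntids = val;
	return (rc);
}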
2561
2562static int
2563set_params__post_init(struct adapter *sc)
2564{
2565	uint32_t param, val;
2566
2567	/* ask for encapsulated CPLs */
2568	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2569	val = 1;
2570	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2571
2572	return (0);
2573}
2574
2575#undef FW_PARAM_PFVF
2576#undef FW_PARAM_DEV
2577
2578static void
2579t4_set_desc(struct adapter *sc)
2580{
2581	char buf[128];
2582	struct adapter_params *p = &sc->params;
2583
2584	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2585	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2586	    chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2587
2588	device_set_desc_copy(sc->dev, buf);
2589}
2590
2591static void
2592build_medialist(struct port_info *pi)
2593{
2594	struct ifmedia *media = &pi->media;
2595	int data, m;
2596
2597	PORT_LOCK(pi);
2598
2599	ifmedia_removeall(media);
2600
2601	m = IFM_ETHER | IFM_FDX;
2602	data = (pi->port_type << 8) | pi->mod_type;
2603
2604	switch (pi->port_type) {
2605	case FW_PORT_TYPE_BT_XFI:
2606		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2607		break;
2608
2609	case FW_PORT_TYPE_BT_XAUI:
2610		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2611		/* fall through */
2612
2613	case FW_PORT_TYPE_BT_SGMII:
2614		ifmedia_add(media, m | IFM_1000_T, data, NULL);
2615		ifmedia_add(media, m | IFM_100_TX, data, NULL);
2616		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2617		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2618		break;
2619
2620	case FW_PORT_TYPE_CX4:
2621		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2622		ifmedia_set(media, m | IFM_10G_CX4);
2623		break;
2624
2625	case FW_PORT_TYPE_SFP:
2626	case FW_PORT_TYPE_FIBER_XFI:
2627	case FW_PORT_TYPE_FIBER_XAUI:
2628		switch (pi->mod_type) {
2629
2630		case FW_PORT_MOD_TYPE_LR:
2631			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2632			ifmedia_set(media, m | IFM_10G_LR);
2633			break;
2634
2635		case FW_PORT_MOD_TYPE_SR:
2636			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2637			ifmedia_set(media, m | IFM_10G_SR);
2638			break;
2639
2640		case FW_PORT_MOD_TYPE_LRM:
2641			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2642			ifmedia_set(media, m | IFM_10G_LRM);
2643			break;
2644
2645		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2646		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2647			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2648			ifmedia_set(media, m | IFM_10G_TWINAX);
2649			break;
2650
2651		case FW_PORT_MOD_TYPE_NONE:
2652			m &= ~IFM_FDX;
2653			ifmedia_add(media, m | IFM_NONE, data, NULL);
2654			ifmedia_set(media, m | IFM_NONE);
2655			break;
2656
2657		case FW_PORT_MOD_TYPE_NA:
2658		case FW_PORT_MOD_TYPE_ER:
2659		default:
2660			device_printf(pi->dev,
2661			    "unknown port_type (%d), mod_type (%d)\n",
2662			    pi->port_type, pi->mod_type);
2663			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2664			ifmedia_set(media, m | IFM_UNKNOWN);
2665			break;
2666		}
2667		break;
2668
2669	case FW_PORT_TYPE_QSFP:
2670		switch (pi->mod_type) {
2671
2672		case FW_PORT_MOD_TYPE_LR:
2673			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2674			ifmedia_set(media, m | IFM_40G_LR4);
2675			break;
2676
2677		case FW_PORT_MOD_TYPE_SR:
2678			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2679			ifmedia_set(media, m | IFM_40G_SR4);
2680			break;
2681
2682		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2683		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2684			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2685			ifmedia_set(media, m | IFM_40G_CR4);
2686			break;
2687
2688		case FW_PORT_MOD_TYPE_NONE:
2689			m &= ~IFM_FDX;
2690			ifmedia_add(media, m | IFM_NONE, data, NULL);
2691			ifmedia_set(media, m | IFM_NONE);
2692			break;
2693
2694		default:
2695			device_printf(pi->dev,
2696			    "unknown port_type (%d), mod_type (%d)\n",
2697			    pi->port_type, pi->mod_type);
2698			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2699			ifmedia_set(media, m | IFM_UNKNOWN);
2700			break;
2701		}
2702		break;
2703
2704	default:
2705		device_printf(pi->dev,
2706		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2707		    pi->mod_type);
2708		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2709		ifmedia_set(media, m | IFM_UNKNOWN);
2710		break;
2711	}
2712
2713	PORT_UNLOCK(pi);
2714}
2715
2716#define FW_MAC_EXACT_CHUNK	7
2717
2718/*
2719 * Program the port's XGMAC based on parameters in ifnet.  The caller also
2720 * indicates which parameters should be programmed (the rest are left alone).
2721 */
2722static int
2723update_mac_settings(struct port_info *pi, int flags)
2724{
2725	int rc;
2726	struct ifnet *ifp = pi->ifp;
2727	struct adapter *sc = pi->adapter;
2728	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2729
2730	ASSERT_SYNCHRONIZED_OP(sc);
2731	KASSERT(flags, ("%s: not told what to update.", __func__));
2732
2733	if (flags & XGMAC_MTU)
2734		mtu = ifp->if_mtu;
2735
2736	if (flags & XGMAC_PROMISC)
2737		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2738
2739	if (flags & XGMAC_ALLMULTI)
2740		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2741
2742	if (flags & XGMAC_VLANEX)
2743		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2744
2745	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2746	    vlanex, false);
2747	if (rc) {
2748		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2749		return (rc);
2750	}
2751
2752	if (flags & XGMAC_UCADDR) {
2753		uint8_t ucaddr[ETHER_ADDR_LEN];
2754
2755		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2756		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2757		    ucaddr, true, true);
2758		if (rc < 0) {
2759			rc = -rc;
2760			if_printf(ifp, "change_mac failed: %d\n", rc);
2761			return (rc);
2762		} else {
2763			pi->xact_addr_filt = rc;
2764			rc = 0;
2765		}
2766	}
2767
2768	if (flags & XGMAC_MCADDRS) {
2769		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2770		int del = 1;
2771		uint64_t hash = 0;
2772		struct ifmultiaddr *ifma;
2773		int i = 0, j;
2774
2775		if_maddr_rlock(ifp);
2776		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2777			if (ifma->ifma_addr->sa_family != AF_LINK)
2778				continue;
2779			mcaddr[i++] =
2780			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2781
2782			if (i == FW_MAC_EXACT_CHUNK) {
2783				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2784				    del, i, mcaddr, NULL, &hash, 0);
2785				if (rc < 0) {
2786					rc = -rc;
2787					for (j = 0; j < i; j++) {
2788						if_printf(ifp,
2789						    "failed to add mc address"
2790						    " %02x:%02x:%02x:"
2791						    "%02x:%02x:%02x rc=%d\n",
2792						    mcaddr[j][0], mcaddr[j][1],
2793						    mcaddr[j][2], mcaddr[j][3],
2794						    mcaddr[j][4], mcaddr[j][5],
2795						    rc);
2796					}
2797					goto mcfail;
2798				}
2799				del = 0;
2800				i = 0;
2801			}
2802		}
2803		if (i > 0) {
2804			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2805			    del, i, mcaddr, NULL, &hash, 0);
2806			if (rc < 0) {
2807				rc = -rc;
2808				for (j = 0; j < i; j++) {
2809					if_printf(ifp,
2810					    "failed to add mc address"
2811					    " %02x:%02x:%02x:"
2812					    "%02x:%02x:%02x rc=%d\n",
2813					    mcaddr[j][0], mcaddr[j][1],
2814					    mcaddr[j][2], mcaddr[j][3],
2815					    mcaddr[j][4], mcaddr[j][5],
2816					    rc);
2817				}
2818				goto mcfail;
2819			}
2820		}
2821
2822		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2823		if (rc != 0)
2824			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
2825mcfail:
2826		if_maddr_runlock(ifp);
2827	}
2828
2829	return (rc);
2830}
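
/*
 * A note on the XGMAC_MCADDRS path above: addresses are handed to the
 * firmware in batches of FW_MAC_EXACT_CHUNK per t4_alloc_mac_filt() call
 * (one mailbox command's worth of exact-match entries), with del set only
 * for the first batch so that the existing filter set is replaced once and
 * the remaining batches append to it.  Addresses that don't fit in the
 * exact-match filters accumulate in the 64-bit hash that is programmed at
 * the end via t4_set_addr_hash().
 */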
2831
2832int
2833begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2834    char *wmesg)
2835{
2836	int rc, pri;
2837
2838#ifdef WITNESS
2839	/* the caller thinks it's ok to sleep, but is it really? */
2840	if (flags & SLEEP_OK)
2841		pause("t4slptst", 1);
2842#endif
2843
2844	if (flags & INTR_OK)
2845		pri = PCATCH;
2846	else
2847		pri = 0;
2848
2849	ADAPTER_LOCK(sc);
2850	for (;;) {
2851
2852		if (pi && IS_DOOMED(pi)) {
2853			rc = ENXIO;
2854			goto done;
2855		}
2856
2857		if (!IS_BUSY(sc)) {
2858			rc = 0;
2859			break;
2860		}
2861
2862		if (!(flags & SLEEP_OK)) {
2863			rc = EBUSY;
2864			goto done;
2865		}
2866
2867		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2868			rc = EINTR;
2869			goto done;
2870		}
2871	}
2872
2873	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2874	SET_BUSY(sc);
2875#ifdef INVARIANTS
2876	sc->last_op = wmesg;
2877	sc->last_op_thr = curthread;
2878#endif
2879
2880done:
2881	if (!(flags & HOLD_LOCK) || rc)
2882		ADAPTER_UNLOCK(sc);
2883
2884	return (rc);
2885}
2886
2887void
2888end_synchronized_op(struct adapter *sc, int flags)
2889{
2890
2891	if (flags & LOCK_HELD)
2892		ADAPTER_LOCK_ASSERT_OWNED(sc);
2893	else
2894		ADAPTER_LOCK(sc);
2895
2896	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2897	CLR_BUSY(sc);
2898	wakeup(&sc->flags);
2899	ADAPTER_UNLOCK(sc);
2900}
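
/*
 * Typical use of the begin/end pair above, for reference (a sketch; the
 * wmesg string and function name are illustrative):
 */
static int __unused
example_synchronized_op(struct adapter *sc, struct port_info *pi)
{
	int rc;

	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4exmp");
	if (rc != 0)
		return (rc);	/* doomed port, EBUSY, or interrupted */

	/* ... work that must not race other synchronized ops ... */

	end_synchronized_op(sc, 0);
	return (rc);
}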
2901
2902static int
2903cxgbe_init_synchronized(struct port_info *pi)
2904{
2905	struct adapter *sc = pi->adapter;
2906	struct ifnet *ifp = pi->ifp;
2907	int rc = 0;
2908
2909	ASSERT_SYNCHRONIZED_OP(sc);
2910
2911	if (isset(&sc->open_device_map, pi->port_id)) {
2912		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2913		    ("mismatch between open_device_map and if_drv_flags"));
2914		return (0);	/* already running */
2915	}
2916
2917	if (!(sc->flags & FULL_INIT_DONE) &&
2918	    ((rc = adapter_full_init(sc)) != 0))
2919		return (rc);	/* error message displayed already */
2920
2921	if (!(pi->flags & PORT_INIT_DONE) &&
2922	    ((rc = port_full_init(pi)) != 0))
2923		return (rc); /* error message displayed already */
2924
2925	rc = update_mac_settings(pi, XGMAC_ALL);
2926	if (rc)
2927		goto done;	/* error message displayed already */
2928
2929	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2930	if (rc != 0) {
2931		if_printf(ifp, "start_link failed: %d\n", rc);
2932		goto done;
2933	}
2934
2935	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2936	if (rc != 0) {
2937		if_printf(ifp, "enable_vi failed: %d\n", rc);
2938		goto done;
2939	}
2940
2941	/*
2942	 * The first iq of the first port to come up is used for tracing.
2943	 */
2944	if (sc->traceq < 0) {
2945		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
2946		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
2947		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
2948		    V_QUEUENUMBER(sc->traceq));
2949		pi->flags |= HAS_TRACEQ;
2950	}
2951
2952	/* all ok */
2953	setbit(&sc->open_device_map, pi->port_id);
2954	PORT_LOCK(pi);
2955	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2956	PORT_UNLOCK(pi);
2957
2958	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2959done:
2960	if (rc != 0)
2961		cxgbe_uninit_synchronized(pi);
2962
2963	return (rc);
2964}
2965
2966/*
2967 * Idempotent.
2968 */
2969static int
2970cxgbe_uninit_synchronized(struct port_info *pi)
2971{
2972	struct adapter *sc = pi->adapter;
2973	struct ifnet *ifp = pi->ifp;
2974	int rc;
2975
2976	ASSERT_SYNCHRONIZED_OP(sc);
2977
2978	/*
2979	 * Disable the VI so that all its data in either direction is discarded
2980	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
2981	 * tick) intact as the TP can deliver negative advice or data that it's
2982	 * holding in its RAM (for an offloaded connection) even after the VI is
2983	 * disabled.
2984	 */
2985	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2986	if (rc) {
2987		if_printf(ifp, "disable_vi failed: %d\n", rc);
2988		return (rc);
2989	}
2990
2991	clrbit(&sc->open_device_map, pi->port_id);
2992	PORT_LOCK(pi);
2993	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2994	PORT_UNLOCK(pi);
2995
2996	pi->link_cfg.link_ok = 0;
2997	pi->link_cfg.speed = 0;
2998	pi->linkdnrc = -1;
2999	t4_os_link_changed(sc, pi->port_id, 0, -1);
3000
3001	return (0);
3002}
3003
3004/*
3005 * It is ok for this function to fail midway and return right away.  t4_detach
3006 * will walk the entire sc->irq list and clean up whatever is valid.
3007 */
3008static int
3009setup_intr_handlers(struct adapter *sc)
3010{
3011	int rc, rid, p, q;
3012	char s[8];
3013	struct irq *irq;
3014	struct port_info *pi;
3015	struct sge_rxq *rxq;
3016#ifdef TCP_OFFLOAD
3017	struct sge_ofld_rxq *ofld_rxq;
3018#endif
3019
3020	/*
3021	 * Setup interrupts.
3022	 */
3023	irq = &sc->irq[0];
3024	rid = sc->intr_type == INTR_INTX ? 0 : 1;
3025	if (sc->intr_count == 1) {
3026		KASSERT(!(sc->flags & INTR_DIRECT),
3027		    ("%s: single interrupt && INTR_DIRECT?", __func__));
3028
3029		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
3030		if (rc != 0)
3031			return (rc);
3032	} else {
3033		/* Multiple interrupts. */
3034		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3035		    ("%s: too few intr.", __func__));
3036
3037		/* The first one is always error intr */
3038		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3039		if (rc != 0)
3040			return (rc);
3041		irq++;
3042		rid++;
3043
3044		/* The second one is always the firmware event queue */
3045		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
3046		    "evt");
3047		if (rc != 0)
3048			return (rc);
3049		irq++;
3050		rid++;
3051
3052		/*
3053		 * Note that if INTR_DIRECT is not set then either the NIC rx
3054		 * queues or (exclusive or) the TOE rx queues will be taking
3055		 * direct interrupts.
3056		 *
3057		 * There is no need to check for is_offload(sc) as nofldrxq
3058		 * will be 0 if offload is disabled.
3059		 */
3060		for_each_port(sc, p) {
3061			pi = sc->port[p];
3062
3063#ifdef TCP_OFFLOAD
3064			/*
3065			 * Skip over the NIC queues if they aren't taking direct
3066			 * interrupts.
3067			 */
3068			if (!(sc->flags & INTR_DIRECT) &&
3069			    pi->nofldrxq > pi->nrxq)
3070				goto ofld_queues;
3071#endif
3072			rxq = &sc->sge.rxq[pi->first_rxq];
3073			for (q = 0; q < pi->nrxq; q++, rxq++) {
3074				snprintf(s, sizeof(s), "%d.%d", p, q);
3075				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3076				    s);
3077				if (rc != 0)
3078					return (rc);
3079				irq++;
3080				rid++;
3081			}
3082
3083#ifdef TCP_OFFLOAD
3084			/*
3085			 * Skip over the offload queues if they aren't taking
3086			 * direct interrupts.
3087			 */
3088			if (!(sc->flags & INTR_DIRECT))
3089				continue;
3090ofld_queues:
3091			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3092			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3093				snprintf(s, sizeof(s), "%d,%d", p, q);
3094				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3095				    ofld_rxq, s);
3096				if (rc != 0)
3097					return (rc);
3098				irq++;
3099				rid++;
3100			}
3101#endif
3102		}
3103	}
3104
3105	return (0);
3106}
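
/*
 * Concretely: with two 10G ports, nrxq = 4 per port, and offload disabled,
 * the loop above lays out 2 + 2 * 4 = 10 vectors as
 *
 *	vector 0:	error interrupt ("err")
 *	vector 1:	firmware event queue ("evt")
 *	vectors 2-5:	port 0 rx queues ("0.0" .. "0.3")
 *	vectors 6-9:	port 1 rx queues ("1.0" .. "1.3")
 *
 * which matches the T4_EXTRA_INTR + n10g * nrxq10g budget computed in
 * cfg_itype_and_nqueues().
 */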
3107
3108static int
3109adapter_full_init(struct adapter *sc)
3110{
3111	int rc, i;
3112
3113	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3114	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3115	    ("%s: FULL_INIT_DONE already", __func__));
3116
3117	/*
3118	 * queues that belong to the adapter (not any particular port).
3119	 */
3120	rc = t4_setup_adapter_queues(sc);
3121	if (rc != 0)
3122		goto done;
3123
3124	for (i = 0; i < nitems(sc->tq); i++) {
3125		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3126		    taskqueue_thread_enqueue, &sc->tq[i]);
3127		if (sc->tq[i] == NULL) {
3128			device_printf(sc->dev,
3129			    "failed to allocate task queue %d\n", i);
3130			rc = ENOMEM;
3131			goto done;
3132		}
3133		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3134		    device_get_nameunit(sc->dev), i);
3135	}
3136
3137	t4_intr_enable(sc);
3138	sc->flags |= FULL_INIT_DONE;
3139done:
3140	if (rc != 0)
3141		adapter_full_uninit(sc);
3142
3143	return (rc);
3144}
3145
3146static int
3147adapter_full_uninit(struct adapter *sc)
3148{
3149	int i;
3150
3151	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3152
3153	t4_teardown_adapter_queues(sc);
3154
3155	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3156		taskqueue_free(sc->tq[i]);
3157		sc->tq[i] = NULL;
3158	}
3159
3160	sc->flags &= ~FULL_INIT_DONE;
3161
3162	return (0);
3163}
3164
3165static int
3166port_full_init(struct port_info *pi)
3167{
3168	struct adapter *sc = pi->adapter;
3169	struct ifnet *ifp = pi->ifp;
3170	uint16_t *rss;
3171	struct sge_rxq *rxq;
3172	int rc, i, j;
3173
3174	ASSERT_SYNCHRONIZED_OP(sc);
3175	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3176	    ("%s: PORT_INIT_DONE already", __func__));
3177
3178	sysctl_ctx_init(&pi->ctx);
3179	pi->flags |= PORT_SYSCTL_CTX;
3180
3181	/*
3182	 * Allocate tx/rx/fl queues for this port.
3183	 */
3184	rc = t4_setup_port_queues(pi);
3185	if (rc != 0)
3186		goto done;	/* error message displayed already */
3187
3188	/*
3189	 * Setup RSS for this port.  Save a copy of the RSS table for later use.
3190	 */
3191	rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
3192	for (i = 0; i < pi->rss_size;) {
3193		for_each_rxq(pi, j, rxq) {
3194			rss[i++] = rxq->iq.abs_id;
3195			if (i == pi->rss_size)
3196				break;
3197		}
3198	}
3199
3200	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
3201	    pi->rss_size);
3202	if (rc != 0) {
3203		if_printf(ifp, "rss_config failed: %d\n", rc);
3204		goto done;
3205	}
3206
3207	pi->rss = rss;
3208	pi->flags |= PORT_INIT_DONE;
3209done:
3210	if (rc != 0)
3211		port_full_uninit(pi);
3212
3213	return (rc);
3214}
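
/*
 * The nested loops above fill the RSS indirection table round-robin, so
 * with rss_size = 8 and three rx queues whose absolute ids are 10, 11,
 * and 12 the table comes out as
 *
 *	{ 10, 11, 12, 10, 11, 12, 10, 11 }
 *
 * spreading incoming flows evenly across the port's queues.
 */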
3215
3216/*
3217 * Idempotent.
3218 */
3219static int
3220port_full_uninit(struct port_info *pi)
3221{
3222	struct adapter *sc = pi->adapter;
3223	int i;
3224	struct sge_rxq *rxq;
3225	struct sge_txq *txq;
3226#ifdef TCP_OFFLOAD
3227	struct sge_ofld_rxq *ofld_rxq;
3228	struct sge_wrq *ofld_txq;
3229#endif
3230
3231	if (pi->flags & PORT_INIT_DONE) {
3232
3233		/* Need to quiesce queues.  XXX: ctrl queues? */
3234
3235		for_each_txq(pi, i, txq) {
3236			quiesce_eq(sc, &txq->eq);
3237		}
3238
3239#ifdef TCP_OFFLOAD
3240		for_each_ofld_txq(pi, i, ofld_txq) {
3241			quiesce_eq(sc, &ofld_txq->eq);
3242		}
3243#endif
3244
3245		for_each_rxq(pi, i, rxq) {
3246			quiesce_iq(sc, &rxq->iq);
3247			quiesce_fl(sc, &rxq->fl);
3248		}
3249
3250#ifdef TCP_OFFLOAD
3251		for_each_ofld_rxq(pi, i, ofld_rxq) {
3252			quiesce_iq(sc, &ofld_rxq->iq);
3253			quiesce_fl(sc, &ofld_rxq->fl);
3254		}
3255#endif
3256		free(pi->rss, M_CXGBE);
3257	}
3258
3259	t4_teardown_port_queues(pi);
3260	pi->flags &= ~PORT_INIT_DONE;
3261
3262	return (0);
3263}
3264
3265static void
3266quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3267{
3268	EQ_LOCK(eq);
3269	eq->flags |= EQ_DOOMED;
3270
3271	/*
3272	 * Wait for the response to a credit flush if one's
3273	 * pending.
3274	 */
3275	while (eq->flags & EQ_CRFLUSHED)
3276		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3277	EQ_UNLOCK(eq);
3278
3279	callout_drain(&eq->tx_callout);	/* XXX: iffy */
3280	pause("callout", 10);		/* Still iffy */
3281
3282	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3283}
3284
3285static void
3286quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3287{
3288	(void) sc;	/* unused */
3289
3290	/* Synchronize with the interrupt handler */
3291	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3292		pause("iqfree", 1);
3293}
3294
3295static void
3296quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3297{
3298	mtx_lock(&sc->sfl_lock);
3299	FL_LOCK(fl);
3300	fl->flags |= FL_DOOMED;
3301	FL_UNLOCK(fl);
3302	mtx_unlock(&sc->sfl_lock);
3303
3304	callout_drain(&sc->sfl_callout);
3305	KASSERT((fl->flags & FL_STARVING) == 0,
3306	    ("%s: still starving", __func__));
3307}
3308
3309static int
3310t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3311    driver_intr_t *handler, void *arg, char *name)
3312{
3313	int rc;
3314
3315	irq->rid = rid;
3316	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3317	    RF_SHAREABLE | RF_ACTIVE);
3318	if (irq->res == NULL) {
3319		device_printf(sc->dev,
3320		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3321		return (ENOMEM);
3322	}
3323
3324	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3325	    NULL, handler, arg, &irq->tag);
3326	if (rc != 0) {
3327		device_printf(sc->dev,
3328		    "failed to setup interrupt for rid %d, name %s: %d\n",
3329		    rid, name, rc);
3330	} else if (name)
3331		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3332
3333	return (rc);
3334}
3335
3336static int
3337t4_free_irq(struct adapter *sc, struct irq *irq)
3338{
3339	if (irq->tag)
3340		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3341	if (irq->res)
3342		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3343
3344	bzero(irq, sizeof(*irq));
3345
3346	return (0);
3347}
3348
3349static void
3350reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3351    unsigned int end)
3352{
3353	uint32_t *p = (uint32_t *)(buf + start);
3354
3355	for ( ; start <= end; start += sizeof(uint32_t))
3356		*p++ = t4_read_reg(sc, start);
3357}
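
/*
 * reg_block_dump() is driven by tables of inclusive (start, end) register
 * pairs such as the ones below; a sketch of the consuming loop:
 *
 *	for (i = 0; i < n; i += 2)
 *		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
 */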
3358
3359static void
3360t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3361{
3362	int i, n;
3363	const unsigned int *reg_ranges;
3364	static const unsigned int t4_reg_ranges[] = {
3365		0x1008, 0x1108,
3366		0x1180, 0x11b4,
3367		0x11fc, 0x123c,
3368		0x1300, 0x173c,
3369		0x1800, 0x18fc,
3370		0x3000, 0x30d8,
3371		0x30e0, 0x5924,
3372		0x5960, 0x59d4,
3373		0x5a00, 0x5af8,
3374		0x6000, 0x6098,
3375		0x6100, 0x6150,
3376		0x6200, 0x6208,
3377		0x6240, 0x6248,
3378		0x6280, 0x6338,
3379		0x6370, 0x638c,
3380		0x6400, 0x643c,
3381		0x6500, 0x6524,
3382		0x6a00, 0x6a38,
3383		0x6a60, 0x6a78,
3384		0x6b00, 0x6b84,
3385		0x6bf0, 0x6c84,
3386		0x6cf0, 0x6d84,
3387		0x6df0, 0x6e84,
3388		0x6ef0, 0x6f84,
3389		0x6ff0, 0x7084,
3390		0x70f0, 0x7184,
3391		0x71f0, 0x7284,
3392		0x72f0, 0x7384,
3393		0x73f0, 0x7450,
3394		0x7500, 0x7530,
3395		0x7600, 0x761c,
3396		0x7680, 0x76cc,
3397		0x7700, 0x7798,
3398		0x77c0, 0x77fc,
3399		0x7900, 0x79fc,
3400		0x7b00, 0x7c38,
3401		0x7d00, 0x7efc,
3402		0x8dc0, 0x8e1c,
3403		0x8e30, 0x8e78,
3404		0x8ea0, 0x8f6c,
3405		0x8fc0, 0x9074,
3406		0x90fc, 0x90fc,
3407		0x9400, 0x9458,
3408		0x9600, 0x96bc,
3409		0x9800, 0x9808,
3410		0x9820, 0x983c,
3411		0x9850, 0x9864,
3412		0x9c00, 0x9c6c,
3413		0x9c80, 0x9cec,
3414		0x9d00, 0x9d6c,
3415		0x9d80, 0x9dec,
3416		0x9e00, 0x9e6c,
3417		0x9e80, 0x9eec,
3418		0x9f00, 0x9f6c,
3419		0x9f80, 0x9fec,
3420		0xd004, 0xd03c,
3421		0xdfc0, 0xdfe0,
3422		0xe000, 0xea7c,
3423		0xf000, 0x11110,
3424		0x11118, 0x11190,
3425		0x19040, 0x1906c,
3426		0x19078, 0x19080,
3427		0x1908c, 0x19124,
3428		0x19150, 0x191b0,
3429		0x191d0, 0x191e8,
3430		0x19238, 0x1924c,
3431		0x193f8, 0x19474,
3432		0x19490, 0x194f8,
3433		0x19800, 0x19f30,
3434		0x1a000, 0x1a06c,
3435		0x1a0b0, 0x1a120,
3436		0x1a128, 0x1a138,
3437		0x1a190, 0x1a1c4,
3438		0x1a1fc, 0x1a1fc,
3439		0x1e040, 0x1e04c,
3440		0x1e284, 0x1e28c,
3441		0x1e2c0, 0x1e2c0,
3442		0x1e2e0, 0x1e2e0,
3443		0x1e300, 0x1e384,
3444		0x1e3c0, 0x1e3c8,
3445		0x1e440, 0x1e44c,
3446		0x1e684, 0x1e68c,
3447		0x1e6c0, 0x1e6c0,
3448		0x1e6e0, 0x1e6e0,
3449		0x1e700, 0x1e784,
3450		0x1e7c0, 0x1e7c8,
3451		0x1e840, 0x1e84c,
3452		0x1ea84, 0x1ea8c,
3453		0x1eac0, 0x1eac0,
3454		0x1eae0, 0x1eae0,
3455		0x1eb00, 0x1eb84,
3456		0x1ebc0, 0x1ebc8,
3457		0x1ec40, 0x1ec4c,
3458		0x1ee84, 0x1ee8c,
3459		0x1eec0, 0x1eec0,
3460		0x1eee0, 0x1eee0,
3461		0x1ef00, 0x1ef84,
3462		0x1efc0, 0x1efc8,
3463		0x1f040, 0x1f04c,
3464		0x1f284, 0x1f28c,
3465		0x1f2c0, 0x1f2c0,
3466		0x1f2e0, 0x1f2e0,
3467		0x1f300, 0x1f384,
3468		0x1f3c0, 0x1f3c8,
3469		0x1f440, 0x1f44c,
3470		0x1f684, 0x1f68c,
3471		0x1f6c0, 0x1f6c0,
3472		0x1f6e0, 0x1f6e0,
3473		0x1f700, 0x1f784,
3474		0x1f7c0, 0x1f7c8,
3475		0x1f840, 0x1f84c,
3476		0x1fa84, 0x1fa8c,
3477		0x1fac0, 0x1fac0,
3478		0x1fae0, 0x1fae0,
3479		0x1fb00, 0x1fb84,
3480		0x1fbc0, 0x1fbc8,
3481		0x1fc40, 0x1fc4c,
3482		0x1fe84, 0x1fe8c,
3483		0x1fec0, 0x1fec0,
3484		0x1fee0, 0x1fee0,
3485		0x1ff00, 0x1ff84,
3486		0x1ffc0, 0x1ffc8,
3487		0x20000, 0x2002c,
3488		0x20100, 0x2013c,
3489		0x20190, 0x201c8,
3490		0x20200, 0x20318,
3491		0x20400, 0x20528,
3492		0x20540, 0x20614,
3493		0x21000, 0x21040,
3494		0x2104c, 0x21060,
3495		0x210c0, 0x210ec,
3496		0x21200, 0x21268,
3497		0x21270, 0x21284,
3498		0x212fc, 0x21388,
3499		0x21400, 0x21404,
3500		0x21500, 0x21518,
3501		0x2152c, 0x2153c,
3502		0x21550, 0x21554,
3503		0x21600, 0x21600,
3504		0x21608, 0x21628,
3505		0x21630, 0x2163c,
3506		0x21700, 0x2171c,
3507		0x21780, 0x2178c,
3508		0x21800, 0x21c38,
3509		0x21c80, 0x21d7c,
3510		0x21e00, 0x21e04,
3511		0x22000, 0x2202c,
3512		0x22100, 0x2213c,
3513		0x22190, 0x221c8,
3514		0x22200, 0x22318,
3515		0x22400, 0x22528,
3516		0x22540, 0x22614,
3517		0x23000, 0x23040,
3518		0x2304c, 0x23060,
3519		0x230c0, 0x230ec,
3520		0x23200, 0x23268,
3521		0x23270, 0x23284,
3522		0x232fc, 0x23388,
3523		0x23400, 0x23404,
3524		0x23500, 0x23518,
3525		0x2352c, 0x2353c,
3526		0x23550, 0x23554,
3527		0x23600, 0x23600,
3528		0x23608, 0x23628,
3529		0x23630, 0x2363c,
3530		0x23700, 0x2371c,
3531		0x23780, 0x2378c,
3532		0x23800, 0x23c38,
3533		0x23c80, 0x23d7c,
3534		0x23e00, 0x23e04,
3535		0x24000, 0x2402c,
3536		0x24100, 0x2413c,
3537		0x24190, 0x241c8,
3538		0x24200, 0x24318,
3539		0x24400, 0x24528,
3540		0x24540, 0x24614,
3541		0x25000, 0x25040,
3542		0x2504c, 0x25060,
3543		0x250c0, 0x250ec,
3544		0x25200, 0x25268,
3545		0x25270, 0x25284,
3546		0x252fc, 0x25388,
3547		0x25400, 0x25404,
3548		0x25500, 0x25518,
3549		0x2552c, 0x2553c,
3550		0x25550, 0x25554,
3551		0x25600, 0x25600,
3552		0x25608, 0x25628,
3553		0x25630, 0x2563c,
3554		0x25700, 0x2571c,
3555		0x25780, 0x2578c,
3556		0x25800, 0x25c38,
3557		0x25c80, 0x25d7c,
3558		0x25e00, 0x25e04,
3559		0x26000, 0x2602c,
3560		0x26100, 0x2613c,
3561		0x26190, 0x261c8,
3562		0x26200, 0x26318,
3563		0x26400, 0x26528,
3564		0x26540, 0x26614,
3565		0x27000, 0x27040,
3566		0x2704c, 0x27060,
3567		0x270c0, 0x270ec,
3568		0x27200, 0x27268,
3569		0x27270, 0x27284,
3570		0x272fc, 0x27388,
3571		0x27400, 0x27404,
3572		0x27500, 0x27518,
3573		0x2752c, 0x2753c,
3574		0x27550, 0x27554,
3575		0x27600, 0x27600,
3576		0x27608, 0x27628,
3577		0x27630, 0x2763c,
3578		0x27700, 0x2771c,
3579		0x27780, 0x2778c,
3580		0x27800, 0x27c38,
3581		0x27c80, 0x27d7c,
3582		0x27e00, 0x27e04
3583	};
3584	static const unsigned int t5_reg_ranges[] = {
3585		0x1008, 0x1148,
3586		0x1180, 0x11b4,
3587		0x11fc, 0x123c,
3588		0x1280, 0x173c,
3589		0x1800, 0x18fc,
3590		0x3000, 0x3028,
3591		0x3060, 0x30d8,
3592		0x30e0, 0x30fc,
3593		0x3140, 0x357c,
3594		0x35a8, 0x35cc,
3595		0x35ec, 0x35ec,
3596		0x3600, 0x5624,
3597		0x56cc, 0x575c,
3598		0x580c, 0x5814,
3599		0x5890, 0x58bc,
3600		0x5940, 0x59dc,
3601		0x59fc, 0x5a18,
3602		0x5a60, 0x5a9c,
3603		0x5b94, 0x5bfc,
3604		0x6000, 0x6040,
3605		0x6058, 0x614c,
3606		0x7700, 0x7798,
3607		0x77c0, 0x78fc,
3608		0x7b00, 0x7c54,
3609		0x7d00, 0x7efc,
3610		0x8dc0, 0x8de0,
3611		0x8df8, 0x8e84,
3612		0x8ea0, 0x8f84,
3613		0x8fc0, 0x90f8,
3614		0x9400, 0x9470,
3615		0x9600, 0x96f4,
3616		0x9800, 0x9808,
3617		0x9820, 0x983c,
3618		0x9850, 0x9864,
3619		0x9c00, 0x9c6c,
3620		0x9c80, 0x9cec,
3621		0x9d00, 0x9d6c,
3622		0x9d80, 0x9dec,
3623		0x9e00, 0x9e6c,
3624		0x9e80, 0x9eec,
3625		0x9f00, 0x9f6c,
3626		0x9f80, 0xa020,
3627		0xd004, 0xd03c,
3628		0xdfc0, 0xdfe0,
3629		0xe000, 0x11088,
3630		0x1109c, 0x11110,
3631		0x11118, 0x1117c,
3632		0x11190, 0x11204,
3633		0x19040, 0x1906c,
3634		0x19078, 0x19080,
3635		0x1908c, 0x19124,
3636		0x19150, 0x191b0,
3637		0x191d0, 0x191e8,
3638		0x19238, 0x19290,
3639		0x193f8, 0x19474,
3640		0x19490, 0x194cc,
3641		0x194f0, 0x194f8,
3642		0x19c00, 0x19c60,
3643		0x19c94, 0x19e10,
3644		0x19e50, 0x19f34,
3645		0x19f40, 0x19f50,
3646		0x19f90, 0x19fe4,
3647		0x1a000, 0x1a06c,
3648		0x1a0b0, 0x1a120,
3649		0x1a128, 0x1a138,
3650		0x1a190, 0x1a1c4,
3651		0x1a1fc, 0x1a1fc,
3652		0x1e008, 0x1e00c,
3653		0x1e040, 0x1e04c,
3654		0x1e284, 0x1e290,
3655		0x1e2c0, 0x1e2c0,
3656		0x1e2e0, 0x1e2e0,
3657		0x1e300, 0x1e384,
3658		0x1e3c0, 0x1e3c8,
3659		0x1e408, 0x1e40c,
3660		0x1e440, 0x1e44c,
3661		0x1e684, 0x1e690,
3662		0x1e6c0, 0x1e6c0,
3663		0x1e6e0, 0x1e6e0,
3664		0x1e700, 0x1e784,
3665		0x1e7c0, 0x1e7c8,
3666		0x1e808, 0x1e80c,
3667		0x1e840, 0x1e84c,
3668		0x1ea84, 0x1ea90,
3669		0x1eac0, 0x1eac0,
3670		0x1eae0, 0x1eae0,
3671		0x1eb00, 0x1eb84,
3672		0x1ebc0, 0x1ebc8,
3673		0x1ec08, 0x1ec0c,
3674		0x1ec40, 0x1ec4c,
3675		0x1ee84, 0x1ee90,
3676		0x1eec0, 0x1eec0,
3677		0x1eee0, 0x1eee0,
3678		0x1ef00, 0x1ef84,
3679		0x1efc0, 0x1efc8,
3680		0x1f008, 0x1f00c,
3681		0x1f040, 0x1f04c,
3682		0x1f284, 0x1f290,
3683		0x1f2c0, 0x1f2c0,
3684		0x1f2e0, 0x1f2e0,
3685		0x1f300, 0x1f384,
3686		0x1f3c0, 0x1f3c8,
3687		0x1f408, 0x1f40c,
3688		0x1f440, 0x1f44c,
3689		0x1f684, 0x1f690,
3690		0x1f6c0, 0x1f6c0,
3691		0x1f6e0, 0x1f6e0,
3692		0x1f700, 0x1f784,
3693		0x1f7c0, 0x1f7c8,
3694		0x1f808, 0x1f80c,
3695		0x1f840, 0x1f84c,
3696		0x1fa84, 0x1fa90,
3697		0x1fac0, 0x1fac0,
3698		0x1fae0, 0x1fae0,
3699		0x1fb00, 0x1fb84,
3700		0x1fbc0, 0x1fbc8,
3701		0x1fc08, 0x1fc0c,
3702		0x1fc40, 0x1fc4c,
3703		0x1fe84, 0x1fe90,
3704		0x1fec0, 0x1fec0,
3705		0x1fee0, 0x1fee0,
3706		0x1ff00, 0x1ff84,
3707		0x1ffc0, 0x1ffc8,
3708		0x30000, 0x30030,
3709		0x30100, 0x30144,
3710		0x30190, 0x301d0,
3711		0x30200, 0x30318,
3712		0x30400, 0x3052c,
3713		0x30540, 0x3061c,
3714		0x30800, 0x30834,
3715		0x308c0, 0x30908,
3716		0x30910, 0x309ac,
3717		0x30a00, 0x30a2c,
3718		0x30a44, 0x30a50,
3719		0x30a74, 0x30c24,
3720		0x30d00, 0x30d00,
3721		0x30d08, 0x30d14,
3722		0x30d1c, 0x30d20,
3723		0x30d3c, 0x30d50,
3724		0x31200, 0x3120c,
3725		0x31220, 0x31220,
3726		0x31240, 0x31240,
3727		0x31600, 0x3160c,
3728		0x31a00, 0x31a1c,
3729		0x31e00, 0x31e20,
3730		0x31e38, 0x31e3c,
3731		0x31e80, 0x31e80,
3732		0x31e88, 0x31ea8,
3733		0x31eb0, 0x31eb4,
3734		0x31ec8, 0x31ed4,
3735		0x31fb8, 0x32004,
3736		0x32200, 0x32200,
3737		0x32208, 0x32240,
3738		0x32248, 0x32280,
3739		0x32288, 0x322c0,
3740		0x322c8, 0x322fc,
3741		0x32600, 0x32630,
3742		0x32a00, 0x32abc,
3743		0x32b00, 0x32b70,
3744		0x33000, 0x33048,
3745		0x33060, 0x3309c,
3746		0x330f0, 0x33148,
3747		0x33160, 0x3319c,
3748		0x331f0, 0x332e4,
3749		0x332f8, 0x333e4,
3750		0x333f8, 0x33448,
3751		0x33460, 0x3349c,
3752		0x334f0, 0x33548,
3753		0x33560, 0x3359c,
3754		0x335f0, 0x336e4,
3755		0x336f8, 0x337e4,
3756		0x337f8, 0x337fc,
3757		0x33814, 0x33814,
3758		0x3382c, 0x3382c,
3759		0x33880, 0x3388c,
3760		0x338e8, 0x338ec,
3761		0x33900, 0x33948,
3762		0x33960, 0x3399c,
3763		0x339f0, 0x33ae4,
3764		0x33af8, 0x33b10,
3765		0x33b28, 0x33b28,
3766		0x33b3c, 0x33b50,
3767		0x33bf0, 0x33c10,
3768		0x33c28, 0x33c28,
3769		0x33c3c, 0x33c50,
3770		0x33cf0, 0x33cfc,
3771		0x34000, 0x34030,
3772		0x34100, 0x34144,
3773		0x34190, 0x341d0,
3774		0x34200, 0x34318,
3775		0x34400, 0x3452c,
3776		0x34540, 0x3461c,
3777		0x34800, 0x34834,
3778		0x348c0, 0x34908,
3779		0x34910, 0x349ac,
3780		0x34a00, 0x34a2c,
3781		0x34a44, 0x34a50,
3782		0x34a74, 0x34c24,
3783		0x34d00, 0x34d00,
3784		0x34d08, 0x34d14,
3785		0x34d1c, 0x34d20,
3786		0x34d3c, 0x34d50,
3787		0x35200, 0x3520c,
3788		0x35220, 0x35220,
3789		0x35240, 0x35240,
3790		0x35600, 0x3560c,
3791		0x35a00, 0x35a1c,
3792		0x35e00, 0x35e20,
3793		0x35e38, 0x35e3c,
3794		0x35e80, 0x35e80,
3795		0x35e88, 0x35ea8,
3796		0x35eb0, 0x35eb4,
3797		0x35ec8, 0x35ed4,
3798		0x35fb8, 0x36004,
3799		0x36200, 0x36200,
3800		0x36208, 0x36240,
3801		0x36248, 0x36280,
3802		0x36288, 0x362c0,
3803		0x362c8, 0x362fc,
3804		0x36600, 0x36630,
3805		0x36a00, 0x36abc,
3806		0x36b00, 0x36b70,
3807		0x37000, 0x37048,
3808		0x37060, 0x3709c,
3809		0x370f0, 0x37148,
3810		0x37160, 0x3719c,
3811		0x371f0, 0x372e4,
3812		0x372f8, 0x373e4,
3813		0x373f8, 0x37448,
3814		0x37460, 0x3749c,
3815		0x374f0, 0x37548,
3816		0x37560, 0x3759c,
3817		0x375f0, 0x376e4,
3818		0x376f8, 0x377e4,
3819		0x377f8, 0x377fc,
3820		0x37814, 0x37814,
3821		0x3782c, 0x3782c,
3822		0x37880, 0x3788c,
3823		0x378e8, 0x378ec,
3824		0x37900, 0x37948,
3825		0x37960, 0x3799c,
3826		0x379f0, 0x37ae4,
3827		0x37af8, 0x37b10,
3828		0x37b28, 0x37b28,
3829		0x37b3c, 0x37b50,
3830		0x37bf0, 0x37c10,
3831		0x37c28, 0x37c28,
3832		0x37c3c, 0x37c50,
3833		0x37cf0, 0x37cfc,
3834		0x38000, 0x38030,
3835		0x38100, 0x38144,
3836		0x38190, 0x381d0,
3837		0x38200, 0x38318,
3838		0x38400, 0x3852c,
3839		0x38540, 0x3861c,
3840		0x38800, 0x38834,
3841		0x388c0, 0x38908,
3842		0x38910, 0x389ac,
3843		0x38a00, 0x38a2c,
3844		0x38a44, 0x38a50,
3845		0x38a74, 0x38c24,
3846		0x38d00, 0x38d00,
3847		0x38d08, 0x38d14,
3848		0x38d1c, 0x38d20,
3849		0x38d3c, 0x38d50,
3850		0x39200, 0x3920c,
3851		0x39220, 0x39220,
3852		0x39240, 0x39240,
3853		0x39600, 0x3960c,
3854		0x39a00, 0x39a1c,
3855		0x39e00, 0x39e20,
3856		0x39e38, 0x39e3c,
3857		0x39e80, 0x39e80,
3858		0x39e88, 0x39ea8,
3859		0x39eb0, 0x39eb4,
3860		0x39ec8, 0x39ed4,
3861		0x39fb8, 0x3a004,
3862		0x3a200, 0x3a200,
3863		0x3a208, 0x3a240,
3864		0x3a248, 0x3a280,
3865		0x3a288, 0x3a2c0,
3866		0x3a2c8, 0x3a2fc,
3867		0x3a600, 0x3a630,
3868		0x3aa00, 0x3aabc,
3869		0x3ab00, 0x3ab70,
3870		0x3b000, 0x3b048,
3871		0x3b060, 0x3b09c,
3872		0x3b0f0, 0x3b148,
3873		0x3b160, 0x3b19c,
3874		0x3b1f0, 0x3b2e4,
3875		0x3b2f8, 0x3b3e4,
3876		0x3b3f8, 0x3b448,
3877		0x3b460, 0x3b49c,
3878		0x3b4f0, 0x3b548,
3879		0x3b560, 0x3b59c,
3880		0x3b5f0, 0x3b6e4,
3881		0x3b6f8, 0x3b7e4,
3882		0x3b7f8, 0x3b7fc,
3883		0x3b814, 0x3b814,
3884		0x3b82c, 0x3b82c,
3885		0x3b880, 0x3b88c,
3886		0x3b8e8, 0x3b8ec,
3887		0x3b900, 0x3b948,
3888		0x3b960, 0x3b99c,
3889		0x3b9f0, 0x3bae4,
3890		0x3baf8, 0x3bb10,
3891		0x3bb28, 0x3bb28,
3892		0x3bb3c, 0x3bb50,
3893		0x3bbf0, 0x3bc10,
3894		0x3bc28, 0x3bc28,
3895		0x3bc3c, 0x3bc50,
3896		0x3bcf0, 0x3bcfc,
3897		0x3c000, 0x3c030,
3898		0x3c100, 0x3c144,
3899		0x3c190, 0x3c1d0,
3900		0x3c200, 0x3c318,
3901		0x3c400, 0x3c52c,
3902		0x3c540, 0x3c61c,
3903		0x3c800, 0x3c834,
3904		0x3c8c0, 0x3c908,
3905		0x3c910, 0x3c9ac,
3906		0x3ca00, 0x3ca2c,
3907		0x3ca44, 0x3ca50,
3908		0x3ca74, 0x3cc24,
3909		0x3cd00, 0x3cd00,
3910		0x3cd08, 0x3cd14,
3911		0x3cd1c, 0x3cd20,
3912		0x3cd3c, 0x3cd50,
3913		0x3d200, 0x3d20c,
3914		0x3d220, 0x3d220,
3915		0x3d240, 0x3d240,
3916		0x3d600, 0x3d60c,
3917		0x3da00, 0x3da1c,
3918		0x3de00, 0x3de20,
3919		0x3de38, 0x3de3c,
3920		0x3de80, 0x3de80,
3921		0x3de88, 0x3dea8,
3922		0x3deb0, 0x3deb4,
3923		0x3dec8, 0x3ded4,
3924		0x3dfb8, 0x3e004,
3925		0x3e200, 0x3e200,
3926		0x3e208, 0x3e240,
3927		0x3e248, 0x3e280,
3928		0x3e288, 0x3e2c0,
3929		0x3e2c8, 0x3e2fc,
3930		0x3e600, 0x3e630,
3931		0x3ea00, 0x3eabc,
3932		0x3eb00, 0x3eb70,
3933		0x3f000, 0x3f048,
3934		0x3f060, 0x3f09c,
3935		0x3f0f0, 0x3f148,
3936		0x3f160, 0x3f19c,
3937		0x3f1f0, 0x3f2e4,
3938		0x3f2f8, 0x3f3e4,
3939		0x3f3f8, 0x3f448,
3940		0x3f460, 0x3f49c,
3941		0x3f4f0, 0x3f548,
3942		0x3f560, 0x3f59c,
3943		0x3f5f0, 0x3f6e4,
3944		0x3f6f8, 0x3f7e4,
3945		0x3f7f8, 0x3f7fc,
3946		0x3f814, 0x3f814,
3947		0x3f82c, 0x3f82c,
3948		0x3f880, 0x3f88c,
3949		0x3f8e8, 0x3f8ec,
3950		0x3f900, 0x3f948,
3951		0x3f960, 0x3f99c,
3952		0x3f9f0, 0x3fae4,
3953		0x3faf8, 0x3fb10,
3954		0x3fb28, 0x3fb28,
3955		0x3fb3c, 0x3fb50,
3956		0x3fbf0, 0x3fc10,
3957		0x3fc28, 0x3fc28,
3958		0x3fc3c, 0x3fc50,
3959		0x3fcf0, 0x3fcfc,
3960		0x40000, 0x4000c,
3961		0x40040, 0x40068,
3962		0x4007c, 0x40144,
3963		0x40180, 0x4018c,
3964		0x40200, 0x40298,
3965		0x402ac, 0x4033c,
3966		0x403f8, 0x403fc,
3967		0x41304, 0x413c4,
3968		0x41400, 0x4141c,
3969		0x41480, 0x414d0,
3970		0x44000, 0x44078,
3971		0x440c0, 0x44278,
3972		0x442c0, 0x44478,
3973		0x444c0, 0x44678,
3974		0x446c0, 0x44878,
3975		0x448c0, 0x449fc,
3976		0x45000, 0x45068,
3977		0x45080, 0x45084,
3978		0x450a0, 0x450b0,
3979		0x45200, 0x45268,
3980		0x45280, 0x45284,
3981		0x452a0, 0x452b0,
3982		0x460c0, 0x460e4,
3983		0x47000, 0x4708c,
3984		0x47200, 0x47250,
3985		0x47400, 0x47420,
3986		0x47600, 0x47618,
3987		0x47800, 0x47814,
3988		0x48000, 0x4800c,
3989		0x48040, 0x48068,
3990		0x4807c, 0x48144,
3991		0x48180, 0x4818c,
3992		0x48200, 0x48298,
3993		0x482ac, 0x4833c,
3994		0x483f8, 0x483fc,
3995		0x49304, 0x493c4,
3996		0x49400, 0x4941c,
3997		0x49480, 0x494d0,
3998		0x4c000, 0x4c078,
3999		0x4c0c0, 0x4c278,
4000		0x4c2c0, 0x4c478,
4001		0x4c4c0, 0x4c678,
4002		0x4c6c0, 0x4c878,
4003		0x4c8c0, 0x4c9fc,
4004		0x4d000, 0x4d068,
4005		0x4d080, 0x4d084,
4006		0x4d0a0, 0x4d0b0,
4007		0x4d200, 0x4d268,
4008		0x4d280, 0x4d284,
4009		0x4d2a0, 0x4d2b0,
4010		0x4e0c0, 0x4e0e4,
4011		0x4f000, 0x4f08c,
4012		0x4f200, 0x4f250,
4013		0x4f400, 0x4f420,
4014		0x4f600, 0x4f618,
4015		0x4f800, 0x4f814,
4016		0x50000, 0x500cc,
4017		0x50400, 0x50400,
4018		0x50800, 0x508cc,
4019		0x50c00, 0x50c00,
4020		0x51000, 0x5101c,
4021		0x51300, 0x51308,
4022	};
4023
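	/*
	 * Each pair in reg_ranges[] is an inclusive (first, last) register
	 * range; the loop below dumps the ranges into the caller's buffer
	 * two array entries at a time.
	 */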
4024	if (is_t4(sc)) {
4025		reg_ranges = &t4_reg_ranges[0];
4026		n = nitems(t4_reg_ranges);
4027	} else {
4028		reg_ranges = &t5_reg_ranges[0];
4029		n = nitems(t5_reg_ranges);
4030	}
4031
4032	regs->version = chip_id(sc) | chip_rev(sc) << 10;
4033	for (i = 0; i < n; i += 2)
4034		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
4035}
4036
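/*
 * Periodic callout (rescheduled every hz ticks) that refreshes the ifnet
 * counters from the MAC port statistics.  Pause frames are included in the
 * hardware totals, so they are subtracted back out below; the "* 64" assumes
 * each pause frame occupies 64 octets on the wire (the 802.3x minimum frame
 * size).
 */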
4037static void
4038cxgbe_tick(void *arg)
4039{
4040	struct port_info *pi = arg;
4041	struct ifnet *ifp = pi->ifp;
4042	struct sge_txq *txq;
4043	int i, drops;
4044	struct port_stats *s = &pi->stats;
4045
4046	PORT_LOCK(pi);
4047	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4048		PORT_UNLOCK(pi);
4049		return;	/* without scheduling another callout */
4050	}
4051
4052	t4_get_port_stats(pi->adapter, pi->tx_chan, s);
4053
4054	ifp->if_opackets = s->tx_frames - s->tx_pause;
4055	ifp->if_ipackets = s->rx_frames - s->rx_pause;
4056	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
4057	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
4058	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
4059	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
4060	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
4061	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
4062	    s->rx_trunc3;
4063
4064	drops = s->tx_drop;
4065	for_each_txq(pi, i, txq)
4066		drops += txq->br->br_drops;
4067	ifp->if_snd.ifq_drops = drops;
4068
4069	ifp->if_oerrors = s->tx_error_frames;
4070	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4071	    s->rx_fcs_err + s->rx_len_err;
4072
4073	callout_schedule(&pi->tick, hz);
4074	PORT_UNLOCK(pi);
4075}
4076
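/*
 * vlan_config event handler: records the parent cxgbe ifnet in the new vlan
 * interface's cookie, presumably so that code handed the vlan ifnet can find
 * its way back to the port underneath.
 */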
4077static void
4078cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4079{
4080	struct ifnet *vlan;
4081
4082	if (arg != ifp || ifp->if_type != IFT_ETHER)
4083		return;
4084
4085	vlan = VLAN_DEVAT(ifp, vid);
4086	VLAN_SETCOOKIE(vlan, ifp);
4087}
4088
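/*
 * Default entry in the CPL dispatch table.  Receiving a CPL with no
 * registered handler is a driver bug, so this panics under INVARIANTS and
 * merely logs and drops the payload otherwise; EDOOFUS is the conventional
 * "programmer error" errno.
 */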
4089static int
4090cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4091{
4092
4093#ifdef INVARIANTS
4094	panic("%s: opcode 0x%02x on iq %p with payload %p",
4095	    __func__, rss->opcode, iq, m);
4096#else
4097	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4098	    __func__, rss->opcode, iq, m);
4099	m_freem(m);
4100#endif
4101	return (EDOOFUS);
4102}
4103
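/*
 * Hypothetical usage sketch (no consumer in this file): an offload module
 * would install its receive routine for a CPL opcode from t4_msg.h, and can
 * pass a NULL handler later to restore the default:
 *
 *	rc = t4_register_cpl_handler(sc, CPL_RX_DATA, my_rx_data);
 *	...
 *	t4_register_cpl_handler(sc, CPL_RX_DATA, NULL);
 *
 * The release store below ensures a dispatcher that loads the pointer sees
 * a fully constructed handler.
 */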
4104int
4105t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4106{
4107	uintptr_t *loc, new;
4108
4109	if (opcode >= nitems(sc->cpl_handler))
4110		return (EINVAL);
4111
4112	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4113	loc = (uintptr_t *) &sc->cpl_handler[opcode];
4114	atomic_store_rel_ptr(loc, new);
4115
4116	return (0);
4117}
4118
4119static int
4120an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4121{
4122
4123#ifdef INVARIANTS
4124	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4125#else
4126	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4127	    __func__, iq, ctrl);
4128#endif
4129	return (EDOOFUS);
4130}
4131
4132int
4133t4_register_an_handler(struct adapter *sc, an_handler_t h)
4134{
4135	uintptr_t *loc, new;
4136
4137	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4138	loc = (uintptr_t *) &sc->an_handler;
4139	atomic_store_rel_ptr(loc, new);
4140
4141	return (0);
4142}
4143
4144static int
4145fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4146{
4147	const struct cpl_fw6_msg *cpl =
4148	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
4149
4150#ifdef INVARIANTS
4151	panic("%s: fw_msg type %d", __func__, cpl->type);
4152#else
4153	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4154#endif
4155	return (EDOOFUS);
4156}
4157
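/*
 * Like the CPL table above, but for firmware message subtypes: CPL_FW6_MSG
 * payloads are demultiplexed on cpl->type.  A NULL handler reinstates
 * fw_msg_not_handled.
 */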
4158int
4159t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4160{
4161	uintptr_t *loc, new;
4162
4163	if (type >= nitems(sc->fw_msg_handler))
4164		return (EINVAL);
4165
4166	/*
4167	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4168	 * handler dispatch table.  Reject any attempt to install a handler for
4169	 * this subtype.
4170	 */
4171	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4172		return (EINVAL);
4173
4174	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4175	loc = (uintptr_t *) &sc->fw_msg_handler[type];
4176	atomic_store_rel_ptr(loc, new);
4177
4178	return (0);
4179}
4180
4181static int
4182t4_sysctls(struct adapter *sc)
4183{
4184	struct sysctl_ctx_list *ctx;
4185	struct sysctl_oid *oid;
4186	struct sysctl_oid_list *children, *c0;
4187	static char *caps[] = {
4188		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4189		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"	/* caps[1] niccaps */
4190		    "\6HASHFILTER\7ETHOFLD",
4191		"\20\1TOE",				/* caps[2] toecaps */
4192		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4193		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4194		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4195		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4196		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4197		    "\4PO_INITIATOR\5PO_TARGET"
4198	};
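	/*
	 * caps[] above and doorbells below use the kernel's %b bit-name
	 * encoding: a radix byte (\20 == base 16) followed by <bit number,
	 * name> pairs, decoded for userland by sysctl_bitfield().
	 */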
4199	static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
4200
4201	ctx = device_get_sysctl_ctx(sc->dev);
4202
4203	/*
4204	 * dev.t4nex.X.
4205	 */
4206	oid = device_get_sysctl_tree(sc->dev);
4207	c0 = children = SYSCTL_CHILDREN(oid);
4208
4209	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4210	    sc->params.nports, "# of ports");
4211
4212	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4213	    NULL, chip_rev(sc), "chip hardware revision");
4214
4215	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4216	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4217
4218	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4219	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4220
4221	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4222	    sc->cfcsum, "config file checksum");
4223
4224	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4225	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4226	    sysctl_bitfield, "A", "available doorbells");
4227
4228	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4229	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4230	    sysctl_bitfield, "A", "available link capabilities");
4231
4232	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4233	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4234	    sysctl_bitfield, "A", "available NIC capabilities");
4235
4236	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4237	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4238	    sysctl_bitfield, "A", "available TCP offload capabilities");
4239
4240	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4241	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4242	    sysctl_bitfield, "A", "available RDMA capabilities");
4243
4244	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4245	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4246	    sysctl_bitfield, "A", "available iSCSI capabilities");
4247
4248	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4249	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4250	    sysctl_bitfield, "A", "available FCoE capabilities");
4251
4252	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4253	    sc->params.vpd.cclk, "core clock frequency (in kHz)");
4254
4255	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4256	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4257	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4258	    "interrupt holdoff timer values (us)");
4259
4260	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4261	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4262	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4263	    "interrupt holdoff packet counter values");
4264
4265	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4266	    NULL, sc->tids.nftids, "number of filters");
4267
4268	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4269	    CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4270	    "chip temperature (in Celsius)");
4271
4272	t4_sge_sysctls(sc, ctx, children);
4273
4274	sc->lro_timeout = 100;
4275	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4276	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4277
4278#ifdef SBUF_DRAIN
4279	/*
4280	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4281	 */
4282	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4283	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4284	    "logs and miscellaneous information");
4285	children = SYSCTL_CHILDREN(oid);
4286
4287	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4288	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4289	    sysctl_cctrl, "A", "congestion control");
4290
4291	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4292	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4293	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4294
4295	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4296	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4297	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4298
4299	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4300	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4301	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4302
4303	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4304	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4305	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4306
4307	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4308	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4309	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4310
4311	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4312	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4313	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4314
4315	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4316	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4317	    sysctl_cim_la, "A", "CIM logic analyzer");
4318
4319	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4320	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4321	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4322
4323	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4324	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4325	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4326
4327	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4328	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4329	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4330
4331	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4332	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4333	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4334
4335	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4336	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4337	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4338
4339	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4340	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4341	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4342
4343	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4344	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4345	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4346
4347	if (is_t5(sc)) {
4348		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4349		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4350		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4351
4352		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4353		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4354		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4355	}
4356
4357	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4358	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4359	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4360
4361	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4362	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4363	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4364
4365	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4366	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4367	    sysctl_cpl_stats, "A", "CPL statistics");
4368
4369	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4370	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4371	    sysctl_ddp_stats, "A", "DDP statistics");
4372
4373	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4374	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4375	    sysctl_devlog, "A", "firmware's device log");
4376
4377	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4378	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4379	    sysctl_fcoe_stats, "A", "FCoE statistics");
4380
4381	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4382	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4383	    sysctl_hw_sched, "A", "hardware scheduler");
4384
4385	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4386	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4387	    sysctl_l2t, "A", "hardware L2 table");
4388
4389	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4390	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4391	    sysctl_lb_stats, "A", "loopback statistics");
4392
4393	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4394	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4395	    sysctl_meminfo, "A", "memory regions");
4396
4397	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4398	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4399	    sysctl_mps_tcam, "A", "MPS TCAM entries");
4400
4401	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4402	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4403	    sysctl_path_mtus, "A", "path MTUs");
4404
4405	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4406	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4407	    sysctl_pm_stats, "A", "PM statistics");
4408
4409	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4410	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4411	    sysctl_rdma_stats, "A", "RDMA statistics");
4412
4413	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4414	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4415	    sysctl_tcp_stats, "A", "TCP statistics");
4416
4417	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4418	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4419	    sysctl_tids, "A", "TID information");
4420
4421	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4422	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4423	    sysctl_tp_err_stats, "A", "TP error statistics");
4424
4425	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4426	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4427	    sysctl_tp_la, "A", "TP logic analyzer");
4428
4429	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4430	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4431	    sysctl_tx_rate, "A", "Tx rate");
4432
4433	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4434	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4435	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4436
4437	if (is_t5(sc)) {
4438		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4439		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4440		    sysctl_wcwr_stats, "A", "write combined work requests");
4441	}
4442#endif
4443
4444#ifdef TCP_OFFLOAD
4445	if (is_offload(sc)) {
4446		/*
4447		 * dev.t4nex.X.toe.
4448		 */
4449		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4450		    NULL, "TOE parameters");
4451		children = SYSCTL_CHILDREN(oid);
4452
4453		sc->tt.sndbuf = 256 * 1024;
4454		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4455		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4456
4457		sc->tt.ddp = 0;
4458		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4459		    &sc->tt.ddp, 0, "DDP allowed");
4460
4461		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4462		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4463		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4464
4465		sc->tt.ddp_thres =
4466		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4467		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4468		    &sc->tt.ddp_thres, 0, "DDP threshold");
4469
4470		sc->tt.rx_coalesce = 1;
4471		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4472		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4473	}
4474#endif
4475
4477	return (0);
4478}
4479
4480static int
4481cxgbe_sysctls(struct port_info *pi)
4482{
4483	struct sysctl_ctx_list *ctx;
4484	struct sysctl_oid *oid;
4485	struct sysctl_oid_list *children;
4486
4487	ctx = device_get_sysctl_ctx(pi->dev);
4488
4489	/*
4490	 * dev.cxgbe.X.
4491	 */
4492	oid = device_get_sysctl_tree(pi->dev);
4493	children = SYSCTL_CHILDREN(oid);
4494
4495	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4496	    CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4497	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4498		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4499		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4500		    "PHY temperature (in Celsius)");
4501		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4502		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4503		    "PHY firmware version");
4504	}
4505	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4506	    &pi->nrxq, 0, "# of rx queues");
4507	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4508	    &pi->ntxq, 0, "# of tx queues");
4509	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4510	    &pi->first_rxq, 0, "index of first rx queue");
4511	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4512	    &pi->first_txq, 0, "index of first tx queue");
4513	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
4514	    CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
4515	    "Reserve queue 0 for non-flowid packets");
4516
4517#ifdef TCP_OFFLOAD
4518	if (is_offload(pi->adapter)) {
4519		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4520		    &pi->nofldrxq, 0,
4521		    "# of rx queues for offloaded TCP connections");
4522		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4523		    &pi->nofldtxq, 0,
4524		    "# of tx queues for offloaded TCP connections");
4525		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4526		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4527		    "index of first TOE rx queue");
4528		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4529		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
4530		    "index of first TOE tx queue");
4531	}
4532#endif
4533
4534	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4535	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4536	    "holdoff timer index");
4537	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4538	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4539	    "holdoff packet counter index");
4540
4541	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4542	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4543	    "rx queue size");
4544	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4545	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4546	    "tx queue size");
4547
4548	/*
4549	 * dev.cxgbe.X.stats.
4550	 */
4551	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4552	    NULL, "port statistics");
4553	children = SYSCTL_CHILDREN(oid);
4554
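/*
 * Each port stat becomes a read-only 64-bit sysctl whose arg2 is the
 * register offset; sysctl_handle_t4_reg64() (further down) performs the
 * register read at access time, so the values are always live.
 */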
4555#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4556	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4557	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
4558	    sysctl_handle_t4_reg64, "QU", desc)
4559
4560	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4561	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4562	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4563	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4564	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4565	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4566	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4567	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4568	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4569	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4570	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4571	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4572	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4573	    "# of tx frames in this range",
4574	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4575	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4576	    "# of tx frames in this range",
4577	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4578	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4579	    "# of tx frames in this range",
4580	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4581	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4582	    "# of tx frames in this range",
4583	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4584	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4585	    "# of tx frames in this range",
4586	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4587	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4588	    "# of tx frames in this range",
4589	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4590	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4591	    "# of tx frames in this range",
4592	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4593	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4594	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4595	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4596	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4597	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4598	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4599	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4600	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4601	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4602	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4603	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4604	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4605	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4606	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4607	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4608	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4609	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4610	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4611	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4612	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4613
4614	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4615	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4616	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4617	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4618	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4619	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4620	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4621	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4622	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4623	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4624	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4625	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4626	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4627	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4628	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4629	    "# of frames received with bad FCS",
4630	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4631	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4632	    "# of frames received with length error",
4633	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4634	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4635	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4636	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4637	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4638	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4639	    "# of rx frames in this range",
4640	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4641	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4642	    "# of rx frames in this range",
4643	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4644	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4645	    "# of rx frames in this range",
4646	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4647	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4648	    "# of rx frames in this range",
4649	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4650	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4651	    "# of rx frames in this range",
4652	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4653	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4654	    "# of rx frames in this range",
4655	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4656	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4657	    "# of rx frames in this range",
4658	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4659	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4660	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4661	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4662	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4663	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4664	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4665	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4666	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4667	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4668	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4669	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4670	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4671	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4672	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4673	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4674	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4675	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4676	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4677
4678#undef SYSCTL_ADD_T4_REG64
4679
4680#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4681	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4682	    &pi->stats.name, desc)
4683
4684	/* These come from port_stats and may be stale by up to 1s. */
4685	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4686	    "# drops due to buffer-group 0 overflows");
4687	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4688	    "# drops due to buffer-group 1 overflows");
4689	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4690	    "# drops due to buffer-group 2 overflows");
4691	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4692	    "# drops due to buffer-group 3 overflows");
4693	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4694	    "# of buffer-group 0 truncated packets");
4695	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4696	    "# of buffer-group 1 truncated packets");
4697	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4698	    "# of buffer-group 2 truncated packets");
4699	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4700	    "# of buffer-group 3 truncated packets");
4701
4702#undef SYSCTL_ADD_T4_PORTSTAT
4703
4704	return (0);
4705}
4706
4707static int
4708sysctl_int_array(SYSCTL_HANDLER_ARGS)
4709{
4710	int rc, *i;
4711	struct sbuf sb;
4712
4713	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4714	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4715		sbuf_printf(&sb, "%d ", *i);
4716	sbuf_trim(&sb);
4717	sbuf_finish(&sb);
4718	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4719	sbuf_delete(&sb);
4720	return (rc);
4721}
4722
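/*
 * Renders an integer via the %b bit-name format described alongside caps[]
 * in t4_sysctls().  For example, a doorbells value of 0x5 against
 * "\20\1UDB\2WCWR\3UDBWC\4KDB" comes out as "5<UDB,UDBWC>".
 */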
4723static int
4724sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4725{
4726	int rc;
4727	struct sbuf *sb;
4728
4729	rc = sysctl_wire_old_buffer(req, 0);
4730	if (rc != 0)
4731		return (rc);
4732
4733	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4734	if (sb == NULL)
4735		return (ENOMEM);
4736
4737	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4738	rc = sbuf_finish(sb);
4739	sbuf_delete(sb);
4740
4741	return (rc);
4742}
4743
4744static int
4745sysctl_btphy(SYSCTL_HANDLER_ARGS)
4746{
4747	struct port_info *pi = arg1;
4748	int op = arg2;
4749	struct adapter *sc = pi->adapter;
4750	u_int v;
4751	int rc;
4752
4753	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
4754	if (rc)
4755		return (rc);
4756	/* XXX: magic numbers */
4757	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
4758	    &v);
4759	end_synchronized_op(sc, 0);
4760	if (rc)
4761		return (rc);
4762	if (op == 0)
4763		v /= 256;
4764
4765	rc = sysctl_handle_int(oidp, &v, 0, req);
4766	return (rc);
4767}
4768
4769static int
4770sysctl_noflowq(SYSCTL_HANDLER_ARGS)
4771{
4772	struct port_info *pi = arg1;
4773	int rc, val;
4774
4775	val = pi->rsrv_noflowq;
4776	rc = sysctl_handle_int(oidp, &val, 0, req);
4777	if (rc != 0 || req->newptr == NULL)
4778		return (rc);
4779
4780	if ((val >= 1) && (pi->ntxq > 1))
4781		pi->rsrv_noflowq = 1;
4782	else
4783		pi->rsrv_noflowq = 0;
4784
4785	return (rc);
4786}
4787
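/*
 * The holdoff timer index may be changed while the queues are live: the new
 * value is pushed into every iq's intr_params.  An 8-bit release store is
 * used where the platform provides one; a plain store is assumed to be
 * acceptable elsewhere.
 */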
4788static int
4789sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
4790{
4791	struct port_info *pi = arg1;
4792	struct adapter *sc = pi->adapter;
4793	int idx, rc, i;
4794	struct sge_rxq *rxq;
4795#ifdef TCP_OFFLOAD
4796	struct sge_ofld_rxq *ofld_rxq;
4797#endif
4798	uint8_t v;
4799
4800	idx = pi->tmr_idx;
4801
4802	rc = sysctl_handle_int(oidp, &idx, 0, req);
4803	if (rc != 0 || req->newptr == NULL)
4804		return (rc);
4805
4806	if (idx < 0 || idx >= SGE_NTIMERS)
4807		return (EINVAL);
4808
4809	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4810	    "t4tmr");
4811	if (rc)
4812		return (rc);
4813
4814	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
4815	for_each_rxq(pi, i, rxq) {
4816#ifdef atomic_store_rel_8
4817		atomic_store_rel_8(&rxq->iq.intr_params, v);
4818#else
4819		rxq->iq.intr_params = v;
4820#endif
4821	}
4822#ifdef TCP_OFFLOAD
4823	for_each_ofld_rxq(pi, i, ofld_rxq) {
4824#ifdef atomic_store_rel_8
4825		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
4826#else
4827		ofld_rxq->iq.intr_params = v;
4828#endif
4829	}
4830#endif
4831	pi->tmr_idx = idx;
4832
4833	end_synchronized_op(sc, LOCK_HELD);
4834	return (0);
4835}
4836
4837static int
4838sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4839{
4840	struct port_info *pi = arg1;
4841	struct adapter *sc = pi->adapter;
4842	int idx, rc;
4843
4844	idx = pi->pktc_idx;
4845
4846	rc = sysctl_handle_int(oidp, &idx, 0, req);
4847	if (rc != 0 || req->newptr == NULL)
4848		return (rc);
4849
4850	if (idx < -1 || idx >= SGE_NCOUNTERS)
4851		return (EINVAL);
4852
4853	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4854	    "t4pktc");
4855	if (rc)
4856		return (rc);
4857
4858	if (pi->flags & PORT_INIT_DONE)
4859		rc = EBUSY; /* cannot be changed once the queues are created */
4860	else
4861		pi->pktc_idx = idx;
4862
4863	end_synchronized_op(sc, LOCK_HELD);
4864	return (rc);
4865}
4866
4867static int
4868sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4869{
4870	struct port_info *pi = arg1;
4871	struct adapter *sc = pi->adapter;
4872	int qsize, rc;
4873
4874	qsize = pi->qsize_rxq;
4875
4876	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4877	if (rc != 0 || req->newptr == NULL)
4878		return (rc);
4879
4880	if (qsize < 128 || (qsize & 7))
4881		return (EINVAL);
4882
4883	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4884	    "t4rxqs");
4885	if (rc)
4886		return (rc);
4887
4888	if (pi->flags & PORT_INIT_DONE)
4889		rc = EBUSY; /* cannot be changed once the queues are created */
4890	else
4891		pi->qsize_rxq = qsize;
4892
4893	end_synchronized_op(sc, LOCK_HELD);
4894	return (rc);
4895}
4896
4897static int
4898sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4899{
4900	struct port_info *pi = arg1;
4901	struct adapter *sc = pi->adapter;
4902	int qsize, rc;
4903
4904	qsize = pi->qsize_txq;
4905
4906	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4907	if (rc != 0 || req->newptr == NULL)
4908		return (rc);
4909
4910	/* The bufring size must be a power of 2. */
4911	if (qsize < 128 || !powerof2(qsize))
4912		return (EINVAL);
4913
4914	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4915	    "t4txqs");
4916	if (rc)
4917		return (rc);
4918
4919	if (pi->flags & PORT_INIT_DONE)
4920		rc = EBUSY; /* cannot be changed once the queues are created */
4921	else
4922		pi->qsize_txq = qsize;
4923
4924	end_synchronized_op(sc, LOCK_HELD);
4925	return (rc);
4926}
4927
4928static int
4929sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4930{
4931	struct adapter *sc = arg1;
4932	int reg = arg2;
4933	uint64_t val;
4934
4935	val = t4_read_reg64(sc, reg);
4936
4937	return (sysctl_handle_64(oidp, &val, 0, req));
4938}
4939
4940static int
4941sysctl_temperature(SYSCTL_HANDLER_ARGS)
4942{
4943	struct adapter *sc = arg1;
4944	int rc, t;
4945	uint32_t param, val;
4946
4947	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
4948	if (rc)
4949		return (rc);
4950	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4951	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4952	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
4953	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4954	end_synchronized_op(sc, 0);
4955	if (rc)
4956		return (rc);
4957
4958	/* unknown is returned as 0 but we display -1 in that case */
4959	t = val == 0 ? -1 : val;
4960
4961	rc = sysctl_handle_int(oidp, &t, 0, req);
4962	return (rc);
4963}
4964
4965#ifdef SBUF_DRAIN
4966static int
4967sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4968{
4969	struct adapter *sc = arg1;
4970	struct sbuf *sb;
4971	int rc, i;
4972	uint16_t incr[NMTUS][NCCTRL_WIN];
4973	static const char *dec_fac[] = {
4974		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4975		"0.9375"
4976	};
4977
4978	rc = sysctl_wire_old_buffer(req, 0);
4979	if (rc != 0)
4980		return (rc);
4981
4982	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4983	if (sb == NULL)
4984		return (ENOMEM);
4985
4986	t4_read_cong_tbl(sc, incr);
4987
4988	for (i = 0; i < NCCTRL_WIN; ++i) {
4989		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4990		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4991		    incr[5][i], incr[6][i], incr[7][i]);
4992		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4993		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4994		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
4995		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
4996	}
4997
4998	rc = sbuf_finish(sb);
4999	sbuf_delete(sb);
5000
5001	return (rc);
5002}
5003
5004static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
5005	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
5006	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
5007	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
5008};
5009
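/*
 * arg2 selects the queue: 0 .. CIM_NUM_IBQ-1 name an inbound queue, and
 * CIM_NUM_IBQ onwards name an outbound queue, matching the "N + CIM_NUM_IBQ"
 * values used when these sysctls are created in t4_sysctls().
 */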
5010static int
5011sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
5012{
5013	struct adapter *sc = arg1;
5014	struct sbuf *sb;
5015	int rc, i, n, qid = arg2;
5016	uint32_t *buf, *p;
5017	char *qtype;
5018	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
5019
5020	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
5021	    ("%s: bad qid %d\n", __func__, qid));
5022
5023	if (qid < CIM_NUM_IBQ) {
5024		/* inbound queue */
5025		qtype = "IBQ";
5026		n = 4 * CIM_IBQ_SIZE;
5027		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5028		rc = t4_read_cim_ibq(sc, qid, buf, n);
5029	} else {
5030		/* outbound queue */
5031		qtype = "OBQ";
5032		qid -= CIM_NUM_IBQ;
5033		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
5034		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5035		rc = t4_read_cim_obq(sc, qid, buf, n);
5036	}
5037
5038	if (rc < 0) {
5039		rc = -rc;
5040		goto done;
5041	}
5042	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
5043
5044	rc = sysctl_wire_old_buffer(req, 0);
5045	if (rc != 0)
5046		goto done;
5047
5048	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5049	if (sb == NULL) {
5050		rc = ENOMEM;
5051		goto done;
5052	}
5053
5054	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
5055	for (i = 0, p = buf; i < n; i += 16, p += 4)
5056		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5057		    p[2], p[3]);
5058
5059	rc = sbuf_finish(sb);
5060	sbuf_delete(sb);
5061done:
5062	free(buf, M_CXGBE);
5063	return (rc);
5064}
5065
5066static int
5067sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5068{
5069	struct adapter *sc = arg1;
5070	u_int cfg;
5071	struct sbuf *sb;
5072	uint32_t *buf, *p;
5073	int rc;
5074
5075	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5076	if (rc != 0)
5077		return (rc);
5078
5079	rc = sysctl_wire_old_buffer(req, 0);
5080	if (rc != 0)
5081		return (rc);
5082
5083	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5084	if (sb == NULL)
5085		return (ENOMEM);
5086
5087	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5088	    M_ZERO | M_WAITOK);
5089
5090	rc = -t4_cim_read_la(sc, buf, NULL);
5091	if (rc != 0)
5092		goto done;
5093
5094	sbuf_printf(sb, "Status   Data      PC%s",
5095	    cfg & F_UPDBGLACAPTPCONLY ? "" :
5096	    "     LS0Stat  LS0Addr             LS0Data");
5097
5098	KASSERT((sc->params.cim_la_size & 7) == 0,
5099	    ("%s: p will walk off the end of buf", __func__));
5100
5101	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5102		if (cfg & F_UPDBGLACAPTPCONLY) {
5103			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5104			    p[6], p[7]);
5105			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5106			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5107			    p[4] & 0xff, p[5] >> 8);
5108			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5109			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5110			    p[1] & 0xf, p[2] >> 4);
5111		} else {
5112			sbuf_printf(sb,
5113			    "\n  %02x   %x%07x %x%07x %08x %08x "
5114			    "%08x%08x%08x%08x",
5115			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5116			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5117			    p[6], p[7]);
5118		}
5119	}
5120
5121	rc = sbuf_finish(sb);
5122done:
5123	sbuf_delete(sb);	/* reached on the error path too; sb is valid there */
5124	free(buf, M_CXGBE);
5125	return (rc);
5126}
5127
5128static int
5129sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5130{
5131	struct adapter *sc = arg1;
5132	u_int i;
5133	struct sbuf *sb;
5134	uint32_t *buf, *p;
5135	int rc;
5136
5137	rc = sysctl_wire_old_buffer(req, 0);
5138	if (rc != 0)
5139		return (rc);
5140
5141	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5142	if (sb == NULL)
5143		return (ENOMEM);
5144
5145	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5146	    M_ZERO | M_WAITOK);
5147
5148	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
5149	p = buf;
5150
5151	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5152		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5153		    p[1], p[0]);
5154	}
5155
5156	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
5157	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5158		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
5159		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5160		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5161		    (p[1] >> 2) | ((p[2] & 3) << 30),
5162		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5163		    p[0] & 1);
5164	}
5165
5166	rc = sbuf_finish(sb);
5167	sbuf_delete(sb);
5168	free(buf, M_CXGBE);
5169	return (rc);
5170}
5171
5172static int
5173sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5174{
5175	struct adapter *sc = arg1;
5176	u_int i;
5177	struct sbuf *sb;
5178	uint32_t *buf, *p;
5179	int rc;
5180
5181	rc = sysctl_wire_old_buffer(req, 0);
5182	if (rc != 0)
5183		return (rc);
5184
5185	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5186	if (sb == NULL)
5187		return (ENOMEM);
5188
5189	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5190	    M_ZERO | M_WAITOK);
5191
5192	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5193	p = buf;
5194
5195	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5196	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5197		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5198		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5199		    p[4], p[3], p[2], p[1], p[0]);
5200	}
5201
5202	sbuf_printf(sb, "\n\nCntl ID               Data");
5203	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5204		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5205		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5206	}
5207
5208	rc = sbuf_finish(sb);
5209	sbuf_delete(sb);
5210	free(buf, M_CXGBE);
5211	return (rc);
5212}
5213
5214static int
5215sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5216{
5217	struct adapter *sc = arg1;
5218	struct sbuf *sb;
5219	int rc, i;
5220	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5221	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5222	uint16_t thres[CIM_NUM_IBQ];
5223	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5224	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5225	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5226
5227	if (is_t4(sc)) {
5228		cim_num_obq = CIM_NUM_OBQ;
5229		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5230		obq_rdaddr = A_UP_OBQ_0_REALADDR;
5231	} else {
5232		cim_num_obq = CIM_NUM_OBQ_T5;
5233		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5234		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5235	}
5236	nq = CIM_NUM_IBQ + cim_num_obq;
5237
5238	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5239	if (rc == 0)
5240		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5241	if (rc != 0)
5242		return (rc);
5243
5244	t4_read_cimq_cfg(sc, base, size, thres);
5245
5246	rc = sysctl_wire_old_buffer(req, 0);
5247	if (rc != 0)
5248		return (rc);
5249
5250	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5251	if (sb == NULL)
5252		return (ENOMEM);
5253
5254	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
5255
5256	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5257		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
5258		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5259		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5260		    G_QUEREMFLITS(p[2]) * 16);
5261	for ( ; i < nq; i++, p += 4, wr += 2)
5262		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
5263		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5264		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5265		    G_QUEREMFLITS(p[2]) * 16);
5266
5267	rc = sbuf_finish(sb);
5268	sbuf_delete(sb);
5269
5270	return (rc);
5271}
5272
5273static int
5274sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5275{
5276	struct adapter *sc = arg1;
5277	struct sbuf *sb;
5278	int rc;
5279	struct tp_cpl_stats stats;
5280
5281	rc = sysctl_wire_old_buffer(req, 0);
5282	if (rc != 0)
5283		return (rc);
5284
5285	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5286	if (sb == NULL)
5287		return (ENOMEM);
5288
5289	t4_tp_get_cpl_stats(sc, &stats);
5290
5291	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5292	    "channel 3\n");
5293	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5294		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5295	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5296		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5297
5298	rc = sbuf_finish(sb);
5299	sbuf_delete(sb);
5300
5301	return (rc);
5302}
5303
5304static int
5305sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5306{
5307	struct adapter *sc = arg1;
5308	struct sbuf *sb;
5309	int rc;
5310	struct tp_usm_stats stats;
5311
5312	rc = sysctl_wire_old_buffer(req, 0);
5313	if (rc != 0)
5314		return (rc);
5315
5316	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5317	if (sb == NULL)
5318		return (ENOMEM);
5319
5320	t4_get_usm_stats(sc, &stats);
5321
5322	sbuf_printf(sb, "Frames: %u\n", stats.frames);
5323	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5324	sbuf_printf(sb, "Drops:  %u", stats.drops);
5325
5326	rc = sbuf_finish(sb);
5327	sbuf_delete(sb);
5328
5329	return (rc);
5330}
5331
5332const char *devlog_level_strings[] = {
5333	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
5334	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
5335	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
5336	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
5337	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
5338	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
5339};
5340
5341const char *devlog_facility_strings[] = {
5342	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
5343	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
5344	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
5345	[FW_DEVLOG_FACILITY_RES]	= "RES",
5346	[FW_DEVLOG_FACILITY_HW]		= "HW",
5347	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
5348	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
5349	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
5350	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
5351	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
5352	[FW_DEVLOG_FACILITY_VI]		= "VI",
5353	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
5354	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
5355	[FW_DEVLOG_FACILITY_TM]		= "TM",
5356	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
5357	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
5358	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
5359	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
5360	[FW_DEVLOG_FACILITY_RI]		= "RI",
5361	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
5362	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
5363	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
5364	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
5365};
5366
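/*
 * Dumps the firmware's device log, a ring of fixed-size entries in adapter
 * memory.  The oldest entry (smallest timestamp) is located first so the log
 * can be printed in order while wrapping around the end of the ring.
 */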
5367static int
5368sysctl_devlog(SYSCTL_HANDLER_ARGS)
5369{
5370	struct adapter *sc = arg1;
5371	struct devlog_params *dparams = &sc->params.devlog;
5372	struct fw_devlog_e *buf, *e;
5373	int i, j, rc, nentries, first = 0, m;
5374	struct sbuf *sb;
5375	uint64_t ftstamp = UINT64_MAX;
5376
5377	if (dparams->start == 0) {
5378		dparams->memtype = FW_MEMTYPE_EDC0;
5379		dparams->start = 0x84000;
5380		dparams->size = 32768;
5381	}
5382
5383	nentries = dparams->size / sizeof(struct fw_devlog_e);
5384
5385	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5386	if (buf == NULL)
5387		return (ENOMEM);
5388
5389	m = fwmtype_to_hwmtype(dparams->memtype);
5390	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
5391	if (rc != 0)
5392		goto done;
5393
5394	for (i = 0; i < nentries; i++) {
5395		e = &buf[i];
5396
5397		if (e->timestamp == 0)
5398			break;	/* end */
5399
5400		e->timestamp = be64toh(e->timestamp);
5401		e->seqno = be32toh(e->seqno);
5402		for (j = 0; j < 8; j++)
5403			e->params[j] = be32toh(e->params[j]);
5404
5405		if (e->timestamp < ftstamp) {
5406			ftstamp = e->timestamp;
5407			first = i;
5408		}
5409	}
5410
5411	if (buf[first].timestamp == 0)
5412		goto done;	/* nothing in the log */
5413
5414	rc = sysctl_wire_old_buffer(req, 0);
5415	if (rc != 0)
5416		goto done;
5417
5418	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5419	if (sb == NULL) {
5420		rc = ENOMEM;
5421		goto done;
5422	}
5423	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
5424	    "Seq#", "Tstamp", "Level", "Facility", "Message");
5425
5426	i = first;
5427	do {
5428		e = &buf[i];
5429		if (e->timestamp == 0)
5430			break;	/* end */
5431
5432		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
5433		    e->seqno, e->timestamp,
5434		    (e->level < nitems(devlog_level_strings) ?
5435			devlog_level_strings[e->level] : "UNKNOWN"),
5436		    (e->facility < nitems(devlog_facility_strings) ?
5437			devlog_facility_strings[e->facility] : "UNKNOWN"));
5438		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5439		    e->params[2], e->params[3], e->params[4],
5440		    e->params[5], e->params[6], e->params[7]);
5441
5442		if (++i == nentries)
5443			i = 0;
5444	} while (i != first);
5445
5446	rc = sbuf_finish(sb);
5447	sbuf_delete(sb);
5448done:
5449	free(buf, M_CXGBE);
5450	return (rc);
5451}
5452
5453static int
5454sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5455{
5456	struct adapter *sc = arg1;
5457	struct sbuf *sb;
5458	int rc;
5459	struct tp_fcoe_stats stats[4];
5460
5461	rc = sysctl_wire_old_buffer(req, 0);
5462	if (rc != 0)
5463		return (rc);
5464
5465	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5466	if (sb == NULL)
5467		return (ENOMEM);
5468
5469	t4_get_fcoe_stats(sc, 0, &stats[0]);
5470	t4_get_fcoe_stats(sc, 1, &stats[1]);
5471	t4_get_fcoe_stats(sc, 2, &stats[2]);
5472	t4_get_fcoe_stats(sc, 3, &stats[3]);
5473
5474	sbuf_printf(sb, "                   channel 0        channel 1        "
5475	    "channel 2        channel 3\n");
5476	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5477	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5478	    stats[3].octetsDDP);
5479	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5480	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5481	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5482	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5483	    stats[3].framesDrop);
5484
5485	rc = sbuf_finish(sb);
5486	sbuf_delete(sb);
5487
5488	return (rc);
5489}
5490
5491static int
5492sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5493{
5494	struct adapter *sc = arg1;
5495	struct sbuf *sb;
5496	int rc, i;
5497	unsigned int map, kbps, ipg, mode;
5498	unsigned int pace_tab[NTX_SCHED];
5499
5500	rc = sysctl_wire_old_buffer(req, 0);
5501	if (rc != 0)
5502		return (rc);
5503
5504	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5505	if (sb == NULL)
5506		return (ENOMEM);
5507
5508	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5509	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5510	t4_read_pace_tbl(sc, pace_tab);
5511
5512	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
5513	    "Class IPG (0.1 ns)   Flow IPG (us)");
5514
5515	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5516		t4_get_tx_sched(sc, i, &kbps, &ipg);
5517		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
5518		    (mode & (1 << i)) ? "flow" : "class", map & 3);
5519		if (kbps)
5520			sbuf_printf(sb, "%9u     ", kbps);
5521		else
5522			sbuf_printf(sb, " disabled     ");
5523
5524		if (ipg)
5525			sbuf_printf(sb, "%13u        ", ipg);
5526		else
5527			sbuf_printf(sb, "     disabled        ");
5528
5529		if (pace_tab[i])
5530			sbuf_printf(sb, "%10u", pace_tab[i]);
5531		else
5532			sbuf_printf(sb, "  disabled");
5533	}
5534
5535	rc = sbuf_finish(sb);
5536	sbuf_delete(sb);
5537
5538	return (rc);
5539}
5540
5541static int
5542sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5543{
5544	struct adapter *sc = arg1;
5545	struct sbuf *sb;
5546	int rc, i, j;
5547	uint64_t *p0, *p1;
5548	struct lb_port_stats s[2];
5549	static const char *stat_name[] = {
5550		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5551		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5552		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
5553		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5554		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5555		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5556		"BG2FramesTrunc:", "BG3FramesTrunc:"
5557	};
5558
5559	rc = sysctl_wire_old_buffer(req, 0);
5560	if (rc != 0)
5561		return (rc);
5562
5563	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5564	if (sb == NULL)
5565		return (ENOMEM);
5566
5567	memset(s, 0, sizeof(s));
5568
5569	for (i = 0; i < 4; i += 2) {
5570		t4_get_lb_stats(sc, i, &s[0]);
5571		t4_get_lb_stats(sc, i + 1, &s[1]);
5572
5573		p0 = &s[0].octets;
5574		p1 = &s[1].octets;
5575		sbuf_printf(sb, "%s                       Loopback %u"
5576		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5577
5578		for (j = 0; j < nitems(stat_name); j++)
5579			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5580				   *p0++, *p1++);
5581	}
5582
5583	rc = sbuf_finish(sb);
5584	sbuf_delete(sb);
5585
5586	return (rc);
5587}
5588
5589static int
5590sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5591{
5592	int rc = 0;
5593	struct port_info *pi = arg1;
5594	struct sbuf *sb;
5595	static const char *linkdnreasons[] = {
5596		"non-specific", "remote fault", "autoneg failed", "reserved3",
5597		"PHY overheated", "unknown", "rx los", "reserved7"
5598	};
5599
5600	rc = sysctl_wire_old_buffer(req, 0);
5601	if (rc != 0)
5602		return (rc);
5603	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5604	if (sb == NULL)
5605		return (ENOMEM);
5606
5607	if (pi->linkdnrc < 0)
5608		sbuf_printf(sb, "n/a");
5609	else if (pi->linkdnrc < nitems(linkdnreasons))
5610		sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5611	else
5612		sbuf_printf(sb, "%d", pi->linkdnrc);
5613
5614	rc = sbuf_finish(sb);
5615	sbuf_delete(sb);
5616
5617	return (rc);
5618}
5619
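/*
 * Helpers for sysctl_meminfo.  A mem_desc describes one memory region;
 * idx indexes the corresponding name table and mem_desc_cmp() orders
 * regions by base address for qsort().
 */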
5620struct mem_desc {
5621	unsigned int base;
5622	unsigned int limit;
5623	unsigned int idx;
5624};
5625
5626static int
5627mem_desc_cmp(const void *a, const void *b)
5628{
5629	return ((const struct mem_desc *)a)->base -
5630	       ((const struct mem_desc *)b)->base;
5631}
5632
5633static void
5634mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
5635    unsigned int to)
5636{
5637	unsigned int size;
5638
5639	size = to - from + 1;
5640	if (size == 0)
5641		return;
5642
5643	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
5644	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
5645}
5646
5647static int
5648sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5649{
5650	struct adapter *sc = arg1;
5651	struct sbuf *sb;
5652	int rc, i, n;
5653	uint32_t lo, hi, used, alloc;
5654	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5655	static const char *region[] = {
5656		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5657		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5658		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5659		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5660		"RQUDP region:", "PBL region:", "TXPBL region:",
5661		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5662		"On-chip queues:"
5663	};
5664	struct mem_desc avail[4];
5665	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
5666	struct mem_desc *md = mem;
5667
5668	rc = sysctl_wire_old_buffer(req, 0);
5669	if (rc != 0)
5670		return (rc);
5671
5672	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5673	if (sb == NULL)
5674		return (ENOMEM);
5675
5676	for (i = 0; i < nitems(mem); i++) {
5677		mem[i].limit = 0;
5678		mem[i].idx = i;
5679	}
5680
5681	/* Find and sort the populated memory ranges */
5682	i = 0;
5683	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5684	if (lo & F_EDRAM0_ENABLE) {
5685		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5686		avail[i].base = G_EDRAM0_BASE(hi) << 20;
5687		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5688		avail[i].idx = 0;
5689		i++;
5690	}
5691	if (lo & F_EDRAM1_ENABLE) {
5692		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5693		avail[i].base = G_EDRAM1_BASE(hi) << 20;
5694		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5695		avail[i].idx = 1;
5696		i++;
5697	}
5698	if (lo & F_EXT_MEM_ENABLE) {
5699		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5700		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5701		avail[i].limit = avail[i].base +
5702		    (G_EXT_MEM_SIZE(hi) << 20);
5703		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
5704		i++;
5705	}
5706	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5707		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5708		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5709		avail[i].limit = avail[i].base +
5710		    (G_EXT_MEM1_SIZE(hi) << 20);
5711		avail[i].idx = 4;
5712		i++;
5713	}
5714	if (!i) {                                /* no memory available */
5715		sbuf_delete(sb);
		return (0);
	}
5716	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5717
5718	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5719	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5720	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5721	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5722	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5723	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5724	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5725	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5726	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5727
5728	/* the next few have explicit upper bounds */
5729	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5730	md->limit = md->base - 1 +
5731		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5732		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5733	md++;
5734
5735	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5736	md->limit = md->base - 1 +
5737		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5738		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5739	md++;
5740
5741	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5742		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5743		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5744		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5745	} else {
5746		md->base = 0;
5747		md->idx = nitems(region);  /* hide it */
5748	}
5749	md++;
5750
5751#define ulp_region(reg) \
5752	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5753	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5754
5755	ulp_region(RX_ISCSI);
5756	ulp_region(RX_TDDP);
5757	ulp_region(TX_TPT);
5758	ulp_region(RX_STAG);
5759	ulp_region(RX_RQ);
5760	ulp_region(RX_RQUDP);
5761	ulp_region(RX_PBL);
5762	ulp_region(TX_PBL);
5763#undef ulp_region
5764
5765	md->base = 0;
5766	md->idx = nitems(region);
5767	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5768		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5769		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5770		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5771	}
5772	md++;
5773
5774	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5775	md->limit = md->base + sc->tids.ntids - 1;
5776	md++;
5777	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5778	md->limit = md->base + sc->tids.ntids - 1;
5779	md++;
5780
5781	md->base = sc->vres.ocq.start;
5782	if (sc->vres.ocq.size)
5783		md->limit = md->base + sc->vres.ocq.size - 1;
5784	else
5785		md->idx = nitems(region);  /* hide it */
5786	md++;
5787
5788	/* add any address-space holes, there can be up to 3 */
5789	for (n = 0; n < i - 1; n++)
5790		if (avail[n].limit < avail[n + 1].base)
5791			(md++)->base = avail[n].limit;
5792	if (avail[n].limit)
5793		(md++)->base = avail[n].limit;
5794
5795	n = md - mem;
5796	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5797
5798	for (lo = 0; lo < i; lo++)
5799		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5800				avail[lo].limit - 1);
5801
5802	sbuf_printf(sb, "\n");
5803	for (i = 0; i < n; i++) {
5804		if (mem[i].idx >= nitems(region))
5805			continue;                        /* skip holes */
5806		if (!mem[i].limit)
5807			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5808		mem_region_show(sb, region[mem[i].idx], mem[i].base,
5809				mem[i].limit);
5810	}
5811
5812	sbuf_printf(sb, "\n");
5813	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5814	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5815	mem_region_show(sb, "uP RAM:", lo, hi);
5816
5817	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5818	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5819	mem_region_show(sb, "uP Extmem2:", lo, hi);
5820
5821	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5822	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5823		   G_PMRXMAXPAGE(lo),
5824		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5825		   (lo & F_PMRXNUMCHN) ? 2 : 1);
5826
5827	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5828	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5829	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5830		   G_PMTXMAXPAGE(lo),
5831		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5832		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5833	sbuf_printf(sb, "%u p-structs\n",
5834		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5835
5836	for (i = 0; i < 4; i++) {
5837		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5838		if (is_t4(sc)) {
5839			used = G_USED(lo);
5840			alloc = G_ALLOC(lo);
5841		} else {
5842			used = G_T5_USED(lo);
5843			alloc = G_T5_ALLOC(lo);
5844		}
5845		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5846			   i, used, alloc);
5847	}
5848	for (i = 0; i < 4; i++) {
5849		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5850		if (is_t4(sc)) {
5851			used = G_USED(lo);
5852			alloc = G_ALLOC(lo);
5853		} else {
5854			used = G_T5_USED(lo);
5855			alloc = G_T5_ALLOC(lo);
5856		}
5857		sbuf_printf(sb,
5858			   "\nLoopback %d using %u pages out of %u allocated",
5859			   i, used, alloc);
5860	}
5861
5862	rc = sbuf_finish(sb);
5863	sbuf_delete(sb);
5864
5865	return (rc);
5866}
5867
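/*
 * Decode an MPS TCAM (x, y) pair: bits set in x or y form the match mask,
 * and the low 48 bits of the big-endian y value are the Ethernet address.
 * Entries with x & y != 0 are invalid and are skipped by the caller.
 */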
5868static inline void
5869tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5870{
5871	*mask = x | y;
5872	y = htobe64(y);
5873	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5874}
5875
5876static int
5877sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
5878{
5879	struct adapter *sc = arg1;
5880	struct sbuf *sb;
5881	int rc, i, n;
5882
5883	rc = sysctl_wire_old_buffer(req, 0);
5884	if (rc != 0)
5885		return (rc);
5886
5887	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5888	if (sb == NULL)
5889		return (ENOMEM);
5890
5891	sbuf_printf(sb,
5892	    "Idx  Ethernet address     Mask     Vld Ports PF"
5893	    "  VF              Replication             P0 P1 P2 P3  ML");
5894	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
5895	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5896	for (i = 0; i < n; i++) {
5897		uint64_t tcamx, tcamy, mask;
5898		uint32_t cls_lo, cls_hi;
5899		uint8_t addr[ETHER_ADDR_LEN];
5900
5901		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
5902		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
5903		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
5904		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
5905
5906		if (tcamx & tcamy)
5907			continue;
5908
5909		tcamxy2valmask(tcamx, tcamy, addr, &mask);
5910		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
5911			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
5912			   addr[3], addr[4], addr[5], (uintmax_t)mask,
5913			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
5914			   G_PORTMAP(cls_hi), G_PF(cls_lo),
5915			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
5916
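		/*
		 * Ask the firmware for this entry's replication bitmap
		 * (an FW_LDST read of the MPS address space).
		 */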
5917		if (cls_lo & F_REPLICATE) {
5918			struct fw_ldst_cmd ldst_cmd;
5919
5920			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
5921			ldst_cmd.op_to_addrspace =
5922			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
5923				F_FW_CMD_REQUEST | F_FW_CMD_READ |
5924				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
5925			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
5926			ldst_cmd.u.mps.fid_ctl =
5927			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
5928				V_FW_LDST_CMD_CTL(i));
5929
5930			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
5931			    "t4mps");
5932			if (rc)
5933				break;
5934			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
5935			    sizeof(ldst_cmd), &ldst_cmd);
5936			end_synchronized_op(sc, 0);
5937
5938			if (rc != 0) {
5939				sbuf_printf(sb,
5940				    " ------------ error %3u ------------", rc);
5941				rc = 0;
5942			} else {
5943				sbuf_printf(sb, " %08x %08x %08x %08x",
5944				    be32toh(ldst_cmd.u.mps.rplc127_96),
5945				    be32toh(ldst_cmd.u.mps.rplc95_64),
5946				    be32toh(ldst_cmd.u.mps.rplc63_32),
5947				    be32toh(ldst_cmd.u.mps.rplc31_0));
5948			}
5949		} else
5950			sbuf_printf(sb, "%36s", "");
5951
5952		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
5953		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
5954		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
5955	}
5956
5957	if (rc)
5958		(void) sbuf_finish(sb);
5959	else
5960		rc = sbuf_finish(sb);
5961	sbuf_delete(sb);
5962
5963	return (rc);
5964}
5965
5966static int
5967sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5968{
5969	struct adapter *sc = arg1;
5970	struct sbuf *sb;
5971	int rc;
5972	uint16_t mtus[NMTUS];
5973
5974	rc = sysctl_wire_old_buffer(req, 0);
5975	if (rc != 0)
5976		return (rc);
5977
5978	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5979	if (sb == NULL)
5980		return (ENOMEM);
5981
5982	t4_read_mtu_tbl(sc, mtus, NULL);
5983
5984	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5985	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5986	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5987	    mtus[14], mtus[15]);
5988
5989	rc = sbuf_finish(sb);
5990	sbuf_delete(sb);
5991
5992	return (rc);
5993}
5994
5995static int
5996sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5997{
5998	struct adapter *sc = arg1;
5999	struct sbuf *sb;
6000	int rc, i;
6001	uint32_t cnt[PM_NSTATS];
6002	uint64_t cyc[PM_NSTATS];
6003	static const char *rx_stats[] = {
6004		"Read:", "Write bypass:", "Write mem:", "Flush:"
6005	};
6006	static const char *tx_stats[] = {
6007		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
6008	};
6009
6010	rc = sysctl_wire_old_buffer(req, 0);
6011	if (rc != 0)
6012		return (rc);
6013
6014	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6015	if (sb == NULL)
6016		return (ENOMEM);
6017
6018	t4_pmtx_get_stats(sc, cnt, cyc);
6019	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
6020	for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
6021		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
6022		    cyc[i]);
6023
6024	t4_pmrx_get_stats(sc, cnt, cyc);
6025	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
6026	for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
6027		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
6028		    cyc[i]);
6029
6030	rc = sbuf_finish(sb);
6031	sbuf_delete(sb);
6032
6033	return (rc);
6034}
6035
6036static int
6037sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6038{
6039	struct adapter *sc = arg1;
6040	struct sbuf *sb;
6041	int rc;
6042	struct tp_rdma_stats stats;
6043
6044	rc = sysctl_wire_old_buffer(req, 0);
6045	if (rc != 0)
6046		return (rc);
6047
6048	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6049	if (sb == NULL)
6050		return (ENOMEM);
6051
6052	t4_tp_get_rdma_stats(sc, &stats);
6053	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
6054	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
6055
6056	rc = sbuf_finish(sb);
6057	sbuf_delete(sb);
6058
6059	return (rc);
6060}
6061
6062static int
6063sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6064{
6065	struct adapter *sc = arg1;
6066	struct sbuf *sb;
6067	int rc;
6068	struct tp_tcp_stats v4, v6;
6069
6070	rc = sysctl_wire_old_buffer(req, 0);
6071	if (rc != 0)
6072		return (rc);
6073
6074	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6075	if (sb == NULL)
6076		return (ENOMEM);
6077
6078	t4_tp_get_tcp_stats(sc, &v4, &v6);
6079	sbuf_printf(sb,
6080	    "                                IP                 IPv6\n");
6081	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6082	    v4.tcpOutRsts, v6.tcpOutRsts);
6083	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6084	    v4.tcpInSegs, v6.tcpInSegs);
6085	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6086	    v4.tcpOutSegs, v6.tcpOutSegs);
6087	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6088	    v4.tcpRetransSegs, v6.tcpRetransSegs);
6089
6090	rc = sbuf_finish(sb);
6091	sbuf_delete(sb);
6092
6093	return (rc);
6094}
6095
6096static int
6097sysctl_tids(SYSCTL_HANDLER_ARGS)
6098{
6099	struct adapter *sc = arg1;
6100	struct sbuf *sb;
6101	int rc;
6102	struct tid_info *t = &sc->tids;
6103
6104	rc = sysctl_wire_old_buffer(req, 0);
6105	if (rc != 0)
6106		return (rc);
6107
6108	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6109	if (sb == NULL)
6110		return (ENOMEM);
6111
6112	if (t->natids) {
6113		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
6114		    t->atids_in_use);
6115	}
6116
6117	if (t->ntids) {
6118		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6119			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
6120
6121			if (b) {
6122				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
6123				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6124				    t->ntids - 1);
6125			} else {
6126				sbuf_printf(sb, "TID range: %u-%u",
6127				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6128				    t->ntids - 1);
6129			}
6130		} else
6131			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
6132		sbuf_printf(sb, ", in use: %u\n",
6133		    atomic_load_acq_int(&t->tids_in_use));
6134	}
6135
6136	if (t->nstids) {
6137		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
6138		    t->stid_base + t->nstids - 1, t->stids_in_use);
6139	}
6140
6141	if (t->nftids) {
6142		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
6143		    t->ftid_base + t->nftids - 1);
6144	}
6145
6146	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
6147	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
6148	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
6149
6150	rc = sbuf_finish(sb);
6151	sbuf_delete(sb);
6152
6153	return (rc);
6154}
6155
6156static int
6157sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6158{
6159	struct adapter *sc = arg1;
6160	struct sbuf *sb;
6161	int rc;
6162	struct tp_err_stats stats;
6163
6164	rc = sysctl_wire_old_buffer(req, 0);
6165	if (rc != 0)
6166		return (rc);
6167
6168	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6169	if (sb == NULL)
6170		return (ENOMEM);
6171
6172	t4_tp_get_err_stats(sc, &stats);
6173
6174	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6175		      "channel 3\n");
6176	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
6177	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6178	    stats.macInErrs[3]);
6179	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
6180	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6181	    stats.hdrInErrs[3]);
6182	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
6183	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6184	    stats.tcpInErrs[3]);
6185	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
6186	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6187	    stats.tcp6InErrs[3]);
6188	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
6189	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6190	    stats.tnlCongDrops[3]);
6191	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
6192	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6193	    stats.tnlTxDrops[3]);
6194	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
6195	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6196	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6197	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
6198	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6199	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6200	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
6201	    stats.ofldNoNeigh, stats.ofldCongDefer);
6202
6203	rc = sbuf_finish(sb);
6204	sbuf_delete(sb);
6205
6206	return (rc);
6207}
6208
6209struct field_desc {
6210	const char *name;
6211	u_int start;
6212	u_int width;
6213};
6214
6215static void
6216field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6217{
6218	char buf[32];
6219	int line_size = 0;
6220
6221	while (f->name) {
6222		uint64_t mask = (1ULL << f->width) - 1;
6223		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6224		    ((uintmax_t)v >> f->start) & mask);
6225
6226		if (line_size + len >= 79) {
6227			line_size = 8;
6228			sbuf_printf(sb, "\n        ");
6229		}
6230		sbuf_printf(sb, "%s ", buf);
6231		line_size += len + 1;
6232		f++;
6233	}
6234	sbuf_printf(sb, "\n");
6235}
6236
6237static struct field_desc tp_la0[] = {
6238	{ "RcfOpCodeOut", 60, 4 },
6239	{ "State", 56, 4 },
6240	{ "WcfState", 52, 4 },
6241	{ "RcfOpcSrcOut", 50, 2 },
6242	{ "CRxError", 49, 1 },
6243	{ "ERxError", 48, 1 },
6244	{ "SanityFailed", 47, 1 },
6245	{ "SpuriousMsg", 46, 1 },
6246	{ "FlushInputMsg", 45, 1 },
6247	{ "FlushInputCpl", 44, 1 },
6248	{ "RssUpBit", 43, 1 },
6249	{ "RssFilterHit", 42, 1 },
6250	{ "Tid", 32, 10 },
6251	{ "InitTcb", 31, 1 },
6252	{ "LineNumber", 24, 7 },
6253	{ "Emsg", 23, 1 },
6254	{ "EdataOut", 22, 1 },
6255	{ "Cmsg", 21, 1 },
6256	{ "CdataOut", 20, 1 },
6257	{ "EreadPdu", 19, 1 },
6258	{ "CreadPdu", 18, 1 },
6259	{ "TunnelPkt", 17, 1 },
6260	{ "RcfPeerFin", 16, 1 },
6261	{ "RcfReasonOut", 12, 4 },
6262	{ "TxCchannel", 10, 2 },
6263	{ "RcfTxChannel", 8, 2 },
6264	{ "RxEchannel", 6, 2 },
6265	{ "RcfRxChannel", 5, 1 },
6266	{ "RcfDataOutSrdy", 4, 1 },
6267	{ "RxDvld", 3, 1 },
6268	{ "RxOoDvld", 2, 1 },
6269	{ "RxCongestion", 1, 1 },
6270	{ "TxCongestion", 0, 1 },
6271	{ NULL }
6272};
6273
6274static struct field_desc tp_la1[] = {
6275	{ "CplCmdIn", 56, 8 },
6276	{ "CplCmdOut", 48, 8 },
6277	{ "ESynOut", 47, 1 },
6278	{ "EAckOut", 46, 1 },
6279	{ "EFinOut", 45, 1 },
6280	{ "ERstOut", 44, 1 },
6281	{ "SynIn", 43, 1 },
6282	{ "AckIn", 42, 1 },
6283	{ "FinIn", 41, 1 },
6284	{ "RstIn", 40, 1 },
6285	{ "DataIn", 39, 1 },
6286	{ "DataInVld", 38, 1 },
6287	{ "PadIn", 37, 1 },
6288	{ "RxBufEmpty", 36, 1 },
6289	{ "RxDdp", 35, 1 },
6290	{ "RxFbCongestion", 34, 1 },
6291	{ "TxFbCongestion", 33, 1 },
6292	{ "TxPktSumSrdy", 32, 1 },
6293	{ "RcfUlpType", 28, 4 },
6294	{ "Eread", 27, 1 },
6295	{ "Ebypass", 26, 1 },
6296	{ "Esave", 25, 1 },
6297	{ "Static0", 24, 1 },
6298	{ "Cread", 23, 1 },
6299	{ "Cbypass", 22, 1 },
6300	{ "Csave", 21, 1 },
6301	{ "CPktOut", 20, 1 },
6302	{ "RxPagePoolFull", 18, 2 },
6303	{ "RxLpbkPkt", 17, 1 },
6304	{ "TxLpbkPkt", 16, 1 },
6305	{ "RxVfValid", 15, 1 },
6306	{ "SynLearned", 14, 1 },
6307	{ "SetDelEntry", 13, 1 },
6308	{ "SetInvEntry", 12, 1 },
6309	{ "CpcmdDvld", 11, 1 },
6310	{ "CpcmdSave", 10, 1 },
6311	{ "RxPstructsFull", 8, 2 },
6312	{ "EpcmdDvld", 7, 1 },
6313	{ "EpcmdFlush", 6, 1 },
6314	{ "EpcmdTrimPrefix", 5, 1 },
6315	{ "EpcmdTrimPostfix", 4, 1 },
6316	{ "ERssIp4Pkt", 3, 1 },
6317	{ "ERssIp6Pkt", 2, 1 },
6318	{ "ERssTcpUdpPkt", 1, 1 },
6319	{ "ERssFceFipPkt", 0, 1 },
6320	{ NULL }
6321};
6322
6323static struct field_desc tp_la2[] = {
6324	{ "CplCmdIn", 56, 8 },
6325	{ "MpsVfVld", 55, 1 },
6326	{ "MpsPf", 52, 3 },
6327	{ "MpsVf", 44, 8 },
6328	{ "SynIn", 43, 1 },
6329	{ "AckIn", 42, 1 },
6330	{ "FinIn", 41, 1 },
6331	{ "RstIn", 40, 1 },
6332	{ "DataIn", 39, 1 },
6333	{ "DataInVld", 38, 1 },
6334	{ "PadIn", 37, 1 },
6335	{ "RxBufEmpty", 36, 1 },
6336	{ "RxDdp", 35, 1 },
6337	{ "RxFbCongestion", 34, 1 },
6338	{ "TxFbCongestion", 33, 1 },
6339	{ "TxPktSumSrdy", 32, 1 },
6340	{ "RcfUlpType", 28, 4 },
6341	{ "Eread", 27, 1 },
6342	{ "Ebypass", 26, 1 },
6343	{ "Esave", 25, 1 },
6344	{ "Static0", 24, 1 },
6345	{ "Cread", 23, 1 },
6346	{ "Cbypass", 22, 1 },
6347	{ "Csave", 21, 1 },
6348	{ "CPktOut", 20, 1 },
6349	{ "RxPagePoolFull", 18, 2 },
6350	{ "RxLpbkPkt", 17, 1 },
6351	{ "TxLpbkPkt", 16, 1 },
6352	{ "RxVfValid", 15, 1 },
6353	{ "SynLearned", 14, 1 },
6354	{ "SetDelEntry", 13, 1 },
6355	{ "SetInvEntry", 12, 1 },
6356	{ "CpcmdDvld", 11, 1 },
6357	{ "CpcmdSave", 10, 1 },
6358	{ "RxPstructsFull", 8, 2 },
6359	{ "EpcmdDvld", 7, 1 },
6360	{ "EpcmdFlush", 6, 1 },
6361	{ "EpcmdTrimPrefix", 5, 1 },
6362	{ "EpcmdTrimPostfix", 4, 1 },
6363	{ "ERssIp4Pkt", 3, 1 },
6364	{ "ERssIp6Pkt", 2, 1 },
6365	{ "ERssTcpUdpPkt", 1, 1 },
6366	{ "ERssFceFipPkt", 0, 1 },
6367	{ NULL }
6368};
6369
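/*
 * Display callbacks for the TP debug logic analyzer.  Capture modes 2 and
 * 3 consume two 64-bit words per entry; in mode 3, bit 17 of the first
 * word selects how the second is decoded (tp_la2 vs tp_la1).
 */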
6370static void
6371tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6372{
6373
6374	field_desc_show(sb, *p, tp_la0);
6375}
6376
6377static void
6378tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6379{
6380
6381	if (idx)
6382		sbuf_printf(sb, "\n");
6383	field_desc_show(sb, p[0], tp_la0);
6384	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6385		field_desc_show(sb, p[1], tp_la0);
6386}
6387
6388static void
6389tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6390{
6391
6392	if (idx)
6393		sbuf_printf(sb, "\n");
6394	field_desc_show(sb, p[0], tp_la0);
6395	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6396		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6397}
6398
6399static int
6400sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6401{
6402	struct adapter *sc = arg1;
6403	struct sbuf *sb;
6404	uint64_t *buf, *p;
6405	int rc;
6406	u_int i, inc;
6407	void (*show_func)(struct sbuf *, uint64_t *, int);
6408
6409	rc = sysctl_wire_old_buffer(req, 0);
6410	if (rc != 0)
6411		return (rc);
6412
6413	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6414	if (sb == NULL)
6415		return (ENOMEM);
6416
6417	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6418
6419	t4_tp_read_la(sc, buf, NULL);
6420	p = buf;
6421
6422	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6423	case 2:
6424		inc = 2;
6425		show_func = tp_la_show2;
6426		break;
6427	case 3:
6428		inc = 2;
6429		show_func = tp_la_show3;
6430		break;
6431	default:
6432		inc = 1;
6433		show_func = tp_la_show;
6434	}
6435
6436	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6437		(*show_func)(sb, p, i);
6438
6439	rc = sbuf_finish(sb);
6440	sbuf_delete(sb);
6441	free(buf, M_CXGBE);
6442	return (rc);
6443}
6444
6445static int
6446sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6447{
6448	struct adapter *sc = arg1;
6449	struct sbuf *sb;
6450	int rc;
6451	u64 nrate[NCHAN], orate[NCHAN];
6452
6453	rc = sysctl_wire_old_buffer(req, 0);
6454	if (rc != 0)
6455		return (rc);
6456
6457	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6458	if (sb == NULL)
6459		return (ENOMEM);
6460
6461	t4_get_chan_txrate(sc, nrate, orate);
6462	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6463		 "channel 3\n");
6464	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6465	    nrate[0], nrate[1], nrate[2], nrate[3]);
6466	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6467	    orate[0], orate[1], orate[2], orate[3]);
6468
6469	rc = sbuf_finish(sb);
6470	sbuf_delete(sb);
6471
6472	return (rc);
6473}
6474
6475static int
6476sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6477{
6478	struct adapter *sc = arg1;
6479	struct sbuf *sb;
6480	uint32_t *buf, *p;
6481	int rc, i;
6482
6483	rc = sysctl_wire_old_buffer(req, 0);
6484	if (rc != 0)
6485		return (rc);
6486
6487	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6488	if (sb == NULL)
6489		return (ENOMEM);
6490
6491	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6492	    M_ZERO | M_WAITOK);
6493
6494	t4_ulprx_read_la(sc, buf);
6495	p = buf;
6496
6497	sbuf_printf(sb, "      Pcmd        Type   Message"
6498	    "                Data");
6499	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6500		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6501		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6502	}
6503
6504	rc = sbuf_finish(sb);
6505	sbuf_delete(sb);
6506	free(buf, M_CXGBE);
6507	return (rc);
6508}
6509
6510static int
6511sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6512{
6513	struct adapter *sc = arg1;
6514	struct sbuf *sb;
6515	int rc, v;
6516
6517	rc = sysctl_wire_old_buffer(req, 0);
6518	if (rc != 0)
6519		return (rc);
6520
6521	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6522	if (sb == NULL)
6523		return (ENOMEM);
6524
6525	v = t4_read_reg(sc, A_SGE_STAT_CFG);
6526	if (G_STATSOURCE_T5(v) == 7) {
6527		if (G_STATMODE(v) == 0) {
6528			sbuf_printf(sb, "total %d, incomplete %d",
6529			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6530			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6531		} else if (G_STATMODE(v) == 1) {
6532			sbuf_printf(sb, "total %d, data overflow %d",
6533			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6534			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6535		}
6536	}
6537	rc = sbuf_finish(sb);
6538	sbuf_delete(sb);
6539
6540	return (rc);
6541}
6542#endif
6543
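/*
 * Kick a tx queue: transmit the held-over mbuf (txq->m), if any, else the
 * next one off the buf_ring.
 */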
6544static inline void
6545txq_start(struct ifnet *ifp, struct sge_txq *txq)
6546{
6547	struct buf_ring *br;
6548	struct mbuf *m;
6549
6550	TXQ_LOCK_ASSERT_OWNED(txq);
6551
6552	br = txq->br;
6553	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6554	if (m)
6555		t4_eth_tx(ifp, txq, m);
6556}
6557
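/*
 * Callout handler for a stalled eq.  It reschedules itself until the eq
 * can resume transmission and then hands the actual work to the channel's
 * taskqueue (t4_tx_task below).
 */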
6558void
6559t4_tx_callout(void *arg)
6560{
6561	struct sge_eq *eq = arg;
6562	struct adapter *sc;
6563
6564	if (EQ_TRYLOCK(eq) == 0)
6565		goto reschedule;
6566
6567	if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6568		EQ_UNLOCK(eq);
6569reschedule:
6570		if (__predict_true(!(eq->flags & EQ_DOOMED)))
6571			callout_schedule(&eq->tx_callout, 1);
6572		return;
6573	}
6574
6575	EQ_LOCK_ASSERT_OWNED(eq);
6576
6577	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6578
6579		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6580			struct sge_txq *txq = arg;
6581			struct port_info *pi = txq->ifp->if_softc;
6582
6583			sc = pi->adapter;
6584		} else {
6585			struct sge_wrq *wrq = arg;
6586
6587			sc = wrq->adapter;
6588		}
6589
6590		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6591	}
6592
6593	EQ_UNLOCK(eq);
6594}
6595
6596void
6597t4_tx_task(void *arg, int count)
6598{
6599	struct sge_eq *eq = arg;
6600
6601	EQ_LOCK(eq);
6602	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6603		struct sge_txq *txq = arg;
6604		txq_start(txq->ifp, txq);
6605	} else {
6606		struct sge_wrq *wrq = arg;
6607		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6608	}
6609	EQ_UNLOCK(eq);
6610}
6611
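/*
 * Translate between the T4_FILTER_* mode bits of the ioctl interface and
 * the ingress filter configuration (fconf) bits in TP_VLAN_PRI_MAP.  The
 * IP addresses and ports are always part of a filter's match tuple, so
 * fconf_to_mode() reports them unconditionally.
 */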
6612static uint32_t
6613fconf_to_mode(uint32_t fconf)
6614{
6615	uint32_t mode;
6616
6617	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6618	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6619
6620	if (fconf & F_FRAGMENTATION)
6621		mode |= T4_FILTER_IP_FRAGMENT;
6622
6623	if (fconf & F_MPSHITTYPE)
6624		mode |= T4_FILTER_MPS_HIT_TYPE;
6625
6626	if (fconf & F_MACMATCH)
6627		mode |= T4_FILTER_MAC_IDX;
6628
6629	if (fconf & F_ETHERTYPE)
6630		mode |= T4_FILTER_ETH_TYPE;
6631
6632	if (fconf & F_PROTOCOL)
6633		mode |= T4_FILTER_IP_PROTO;
6634
6635	if (fconf & F_TOS)
6636		mode |= T4_FILTER_IP_TOS;
6637
6638	if (fconf & F_VLAN)
6639		mode |= T4_FILTER_VLAN;
6640
6641	if (fconf & F_VNIC_ID)
6642		mode |= T4_FILTER_VNIC;
6643
6644	if (fconf & F_PORT)
6645		mode |= T4_FILTER_PORT;
6646
6647	if (fconf & F_FCOE)
6648		mode |= T4_FILTER_FCoE;
6649
6650	return (mode);
6651}
6652
6653static uint32_t
6654mode_to_fconf(uint32_t mode)
6655{
6656	uint32_t fconf = 0;
6657
6658	if (mode & T4_FILTER_IP_FRAGMENT)
6659		fconf |= F_FRAGMENTATION;
6660
6661	if (mode & T4_FILTER_MPS_HIT_TYPE)
6662		fconf |= F_MPSHITTYPE;
6663
6664	if (mode & T4_FILTER_MAC_IDX)
6665		fconf |= F_MACMATCH;
6666
6667	if (mode & T4_FILTER_ETH_TYPE)
6668		fconf |= F_ETHERTYPE;
6669
6670	if (mode & T4_FILTER_IP_PROTO)
6671		fconf |= F_PROTOCOL;
6672
6673	if (mode & T4_FILTER_IP_TOS)
6674		fconf |= F_TOS;
6675
6676	if (mode & T4_FILTER_VLAN)
6677		fconf |= F_VLAN;
6678
6679	if (mode & T4_FILTER_VNIC)
6680		fconf |= F_VNIC_ID;
6681
6682	if (mode & T4_FILTER_PORT)
6683		fconf |= F_PORT;
6684
6685	if (mode & T4_FILTER_FCoE)
6686		fconf |= F_FCOE;
6687
6688	return (fconf);
6689}
6690
6691static uint32_t
6692fspec_to_fconf(struct t4_filter_specification *fs)
6693{
6694	uint32_t fconf = 0;
6695
6696	if (fs->val.frag || fs->mask.frag)
6697		fconf |= F_FRAGMENTATION;
6698
6699	if (fs->val.matchtype || fs->mask.matchtype)
6700		fconf |= F_MPSHITTYPE;
6701
6702	if (fs->val.macidx || fs->mask.macidx)
6703		fconf |= F_MACMATCH;
6704
6705	if (fs->val.ethtype || fs->mask.ethtype)
6706		fconf |= F_ETHERTYPE;
6707
6708	if (fs->val.proto || fs->mask.proto)
6709		fconf |= F_PROTOCOL;
6710
6711	if (fs->val.tos || fs->mask.tos)
6712		fconf |= F_TOS;
6713
6714	if (fs->val.vlan_vld || fs->mask.vlan_vld)
6715		fconf |= F_VLAN;
6716
6717	if (fs->val.vnic_vld || fs->mask.vnic_vld)
6718		fconf |= F_VNIC_ID;
6719
6720	if (fs->val.iport || fs->mask.iport)
6721		fconf |= F_PORT;
6722
6723	if (fs->val.fcoe || fs->mask.fcoe)
6724		fconf |= F_FCOE;
6725
6726	return (fconf);
6727}
6728
6729static int
6730get_filter_mode(struct adapter *sc, uint32_t *mode)
6731{
6732	int rc;
6733	uint32_t fconf;
6734
6735	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6736	    "t4getfm");
6737	if (rc)
6738		return (rc);
6739
6740	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
6741	    A_TP_VLAN_PRI_MAP);
6742
6743	if (sc->params.tp.vlan_pri_map != fconf) {
6744		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6745		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
6746		    fconf);
6747		sc->params.tp.vlan_pri_map = fconf;
6748	}
6749
6750	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
6751
6752	end_synchronized_op(sc, LOCK_HELD);
6753	return (0);
6754}
6755
6756static int
6757set_filter_mode(struct adapter *sc, uint32_t mode)
6758{
6759	uint32_t fconf;
6760	int rc;
6761
6762	fconf = mode_to_fconf(mode);
6763
6764	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6765	    "t4setfm");
6766	if (rc)
6767		return (rc);
6768
6769	if (sc->tids.ftids_in_use > 0) {
6770		rc = EBUSY;
6771		goto done;
6772	}
6773
6774#ifdef TCP_OFFLOAD
6775	if (sc->offload_map) {
6776		rc = EBUSY;
6777		goto done;
6778	}
6779#endif
6780
6781#ifdef notyet
6782	rc = -t4_set_filter_mode(sc, fconf);
6783	if (rc == 0)
6784		sc->filter_mode = fconf;
6785#else
6786	rc = ENOTSUP;
6787#endif
6788
6789done:
6790	end_synchronized_op(sc, LOCK_HELD);
6791	return (rc);
6792}
6793
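/*
 * Read a filter's hit count out of its TCB through memory window 0: a
 * 64-bit count at TCB offset 16 on T4, a 32-bit count at offset 24 on T5.
 */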
6794static inline uint64_t
6795get_filter_hits(struct adapter *sc, uint32_t fid)
6796{
6797	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6798	uint64_t hits;
6799
6800	memwin_info(sc, 0, &mw_base, NULL);
6801	off = position_memwin(sc, 0,
6802	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6803	if (is_t4(sc)) {
6804		hits = t4_read_reg64(sc, mw_base + off + 16);
6805		hits = be64toh(hits);
6806	} else {
6807		hits = t4_read_reg(sc, mw_base + off + 24);
6808		hits = be32toh(hits);
6809	}
6810
6811	return (hits);
6812}
6813
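/*
 * Return the first valid filter at or after t->idx, or set t->idx to
 * 0xffffffff if there is none.
 */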
6814static int
6815get_filter(struct adapter *sc, struct t4_filter *t)
6816{
6817	int i, rc, nfilters = sc->tids.nftids;
6818	struct filter_entry *f;
6819
6820	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6821	    "t4getf");
6822	if (rc)
6823		return (rc);
6824
6825	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6826	    t->idx >= nfilters) {
6827		t->idx = 0xffffffff;
6828		goto done;
6829	}
6830
6831	f = &sc->tids.ftid_tab[t->idx];
6832	for (i = t->idx; i < nfilters; i++, f++) {
6833		if (f->valid) {
6834			t->idx = i;
6835			t->l2tidx = f->l2t ? f->l2t->idx : 0;
6836			t->smtidx = f->smtidx;
6837			if (f->fs.hitcnts)
6838				t->hits = get_filter_hits(sc, t->idx);
6839			else
6840				t->hits = UINT64_MAX;
6841			t->fs = f->fs;
6842
6843			goto done;
6844		}
6845	}
6846
6847	t->idx = 0xffffffff;
6848done:
6849	end_synchronized_op(sc, LOCK_HELD);
6850	return (0);
6851}
6852
6853static int
6854set_filter(struct adapter *sc, struct t4_filter *t)
6855{
6856	unsigned int nfilters, nports;
6857	struct filter_entry *f;
6858	int i, rc;
6859
6860	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
6861	if (rc)
6862		return (rc);
6863
6864	nfilters = sc->tids.nftids;
6865	nports = sc->params.nports;
6866
6867	if (nfilters == 0) {
6868		rc = ENOTSUP;
6869		goto done;
6870	}
6871
6872	if (!(sc->flags & FULL_INIT_DONE)) {
6873		rc = EAGAIN;
6874		goto done;
6875	}
6876
6877	if (t->idx >= nfilters) {
6878		rc = EINVAL;
6879		goto done;
6880	}
6881
6882	/* Validate against the global filter mode */
6883	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
6884	    sc->params.tp.vlan_pri_map) {
6885		rc = E2BIG;
6886		goto done;
6887	}
6888
6889	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
6890		rc = EINVAL;
6891		goto done;
6892	}
6893
6894	if (t->fs.val.iport >= nports) {
6895		rc = EINVAL;
6896		goto done;
6897	}
6898
6899	/* Can't specify an iq if not steering to it */
6900	if (!t->fs.dirsteer && t->fs.iq) {
6901		rc = EINVAL;
6902		goto done;
6903	}
6904
6905	/* An IPv6 filter occupies four slots, so its index must be 4-aligned */
6906	if (t->fs.type == 1 &&
6907	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
6908		rc = EINVAL;
6909		goto done;
6910	}
6911
6912	if (sc->tids.ftid_tab == NULL) {
6913		KASSERT(sc->tids.ftids_in_use == 0,
6914		    ("%s: no memory allocated but filters_in_use > 0",
6915		    __func__));
6916
6917		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
6918		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
6919		if (sc->tids.ftid_tab == NULL) {
6920			rc = ENOMEM;
6921			goto done;
6922		}
6923		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
6924	}
6925
6926	for (i = 0; i < 4; i++) {
6927		f = &sc->tids.ftid_tab[t->idx + i];
6928
6929		if (f->pending || f->valid) {
6930			rc = EBUSY;
6931			goto done;
6932		}
6933		if (f->locked) {
6934			rc = EPERM;
6935			goto done;
6936		}
6937
6938		if (t->fs.type == 0)
6939			break;
6940	}
6941
6942	f = &sc->tids.ftid_tab[t->idx];
6943	f->fs = t->fs;
6944
6945	rc = set_filter_wr(sc, t->idx);
6946done:
6947	end_synchronized_op(sc, 0);
6948
6949	if (rc == 0) {
6950		mtx_lock(&sc->tids.ftid_lock);
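		/*
		 * Wait for the firmware's reply: t4_filter_rpl() clears
		 * f->pending and wakes up the ftid_tab sleepers.
		 */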
6951		for (;;) {
6952			if (f->pending == 0) {
6953				rc = f->valid ? 0 : EIO;
6954				break;
6955			}
6956
6957			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6958			    PCATCH, "t4setfw", 0)) {
6959				rc = EINPROGRESS;
6960				break;
6961			}
6962		}
6963		mtx_unlock(&sc->tids.ftid_lock);
6964	}
6965	return (rc);
6966}
6967
6968static int
6969del_filter(struct adapter *sc, struct t4_filter *t)
6970{
6971	unsigned int nfilters;
6972	struct filter_entry *f;
6973	int rc;
6974
6975	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
6976	if (rc)
6977		return (rc);
6978
6979	nfilters = sc->tids.nftids;
6980
6981	if (nfilters == 0) {
6982		rc = ENOTSUP;
6983		goto done;
6984	}
6985
6986	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
6987	    t->idx >= nfilters) {
6988		rc = EINVAL;
6989		goto done;
6990	}
6991
6992	if (!(sc->flags & FULL_INIT_DONE)) {
6993		rc = EAGAIN;
6994		goto done;
6995	}
6996
6997	f = &sc->tids.ftid_tab[t->idx];
6998
6999	if (f->pending) {
7000		rc = EBUSY;
7001		goto done;
7002	}
7003	if (f->locked) {
7004		rc = EPERM;
7005		goto done;
7006	}
7007
7008	if (f->valid) {
7009		t->fs = f->fs;	/* extra info for the caller */
7010		rc = del_filter_wr(sc, t->idx);
7011	}
7012
7013done:
7014	end_synchronized_op(sc, 0);
7015
7016	if (rc == 0) {
7017		mtx_lock(&sc->tids.ftid_lock);
7018		for (;;) {
7019			if (f->pending == 0) {
7020				rc = f->valid ? EIO : 0;
7021				break;
7022			}
7023
7024			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
7025			    PCATCH, "t4delfw", 0)) {
7026				rc = EINPROGRESS;
7027				break;
7028			}
7029		}
7030		mtx_unlock(&sc->tids.ftid_lock);
7031	}
7032
7033	return (rc);
7034}
7035
7036static void
7037clear_filter(struct filter_entry *f)
7038{
7039	if (f->l2t)
7040		t4_l2t_release(f->l2t);
7041
7042	bzero(f, sizeof (*f));
7043}
7044
7045static int
7046set_filter_wr(struct adapter *sc, int fidx)
7047{
7048	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7049	struct wrqe *wr;
7050	struct fw_filter_wr *fwr;
7051	unsigned int ftid;
7052
7053	ASSERT_SYNCHRONIZED_OP(sc);
7054
7055	if (f->fs.newdmac || f->fs.newvlan) {
7056		/* This filter needs an L2T entry; allocate one. */
7057		f->l2t = t4_l2t_alloc_switching(sc->l2t);
7058		if (f->l2t == NULL)
7059			return (EAGAIN);
7060		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
7061		    f->fs.dmac)) {
7062			t4_l2t_release(f->l2t);
7063			f->l2t = NULL;
7064			return (ENOMEM);
7065		}
7066	}
7067
7068	ftid = sc->tids.ftid_base + fidx;
7069
7070	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7071	if (wr == NULL)
7072		return (ENOMEM);
7073
7074	fwr = wrtod(wr);
7075	bzero(fwr, sizeof (*fwr));
7076
7077	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
7078	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
7079	fwr->tid_to_iq =
7080	    htobe32(V_FW_FILTER_WR_TID(ftid) |
7081		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
7082		V_FW_FILTER_WR_NOREPLY(0) |
7083		V_FW_FILTER_WR_IQ(f->fs.iq));
7084	fwr->del_filter_to_l2tix =
7085	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
7086		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
7087		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
7088		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
7089		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
7090		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
7091		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
7092		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
7093		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
7094		    f->fs.newvlan == VLAN_REWRITE) |
7095		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
7096		    f->fs.newvlan == VLAN_REWRITE) |
7097		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
7098		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
7099		V_FW_FILTER_WR_PRIO(f->fs.prio) |
7100		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
7101	fwr->ethtype = htobe16(f->fs.val.ethtype);
7102	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
7103	fwr->frag_to_ovlan_vldm =
7104	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
7105		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
7106		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
7107		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
7108		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
7109		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
7110	fwr->smac_sel = 0;
7111	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
7112	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
7113	fwr->maci_to_matchtypem =
7114	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
7115		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
7116		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
7117		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
7118		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
7119		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
7120		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
7121		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
7122	fwr->ptcl = f->fs.val.proto;
7123	fwr->ptclm = f->fs.mask.proto;
7124	fwr->ttyp = f->fs.val.tos;
7125	fwr->ttypm = f->fs.mask.tos;
7126	fwr->ivlan = htobe16(f->fs.val.vlan);
7127	fwr->ivlanm = htobe16(f->fs.mask.vlan);
7128	fwr->ovlan = htobe16(f->fs.val.vnic);
7129	fwr->ovlanm = htobe16(f->fs.mask.vnic);
7130	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
7131	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
7132	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
7133	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
7134	fwr->lp = htobe16(f->fs.val.dport);
7135	fwr->lpm = htobe16(f->fs.mask.dport);
7136	fwr->fp = htobe16(f->fs.val.sport);
7137	fwr->fpm = htobe16(f->fs.mask.sport);
7138	if (f->fs.newsmac)
7139		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
7140
7141	f->pending = 1;
7142	sc->tids.ftids_in_use++;
7143
7144	t4_wrq_tx(sc, wr);
7145	return (0);
7146}
7147
7148static int
7149del_filter_wr(struct adapter *sc, int fidx)
7150{
7151	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7152	struct wrqe *wr;
7153	struct fw_filter_wr *fwr;
7154	unsigned int ftid;
7155
7156	ftid = sc->tids.ftid_base + fidx;
7157
7158	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7159	if (wr == NULL)
7160		return (ENOMEM);
7161	fwr = wrtod(wr);
7162	bzero(fwr, sizeof (*fwr));
7163
7164	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7165
7166	f->pending = 1;
7167	t4_wrq_tx(sc, wr);
7168	return (0);
7169}
7170
7171int
7172t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7173{
7174	struct adapter *sc = iq->adapter;
7175	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7176	unsigned int idx = GET_TID(rpl);
7177
7178	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
7179	    rss->opcode));
7180
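	/* Is the TID in this reply one of our filter TIDs? */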
7181	if (idx >= sc->tids.ftid_base &&
7182	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
7183		unsigned int rc = G_COOKIE(rpl->cookie);
7184		struct filter_entry *f = &sc->tids.ftid_tab[idx];
7185
7186		mtx_lock(&sc->tids.ftid_lock);
7187		if (rc == FW_FILTER_WR_FLT_ADDED) {
7188			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7189			    __func__, idx));
7190			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7191			f->pending = 0;  /* asynchronous setup completed */
7192			f->valid = 1;
7193		} else {
7194			if (rc != FW_FILTER_WR_FLT_DELETED) {
7195				/* Add or delete failed, display an error */
7196				log(LOG_ERR,
7197				    "filter %u setup failed with error %u\n",
7198				    idx, rc);
7199			}
7200
7201			clear_filter(f);
7202			sc->tids.ftids_in_use--;
7203		}
7204		wakeup(&sc->tids.ftid_tab);
7205		mtx_unlock(&sc->tids.ftid_lock);
7206	}
7207
7208	return (0);
7209}
7210
7211static int
7212get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7213{
7214	int rc;
7215
7216	if (cntxt->cid > M_CTXTQID)
7217		return (EINVAL);
7218
7219	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7220	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7221		return (EINVAL);
7222
7223	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7224	if (rc)
7225		return (rc);
7226
7227	if (sc->flags & FW_OK) {
7228		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7229		    &cntxt->data[0]);
7230		if (rc == 0)
7231			goto done;
7232	}
7233
7234	/*
7235	 * Read via firmware failed or wasn't even attempted.  Read directly via
7236	 * the backdoor.
7237	 */
7238	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7239done:
7240	end_synchronized_op(sc, 0);
7241	return (rc);
7242}
7243
7244static int
7245load_fw(struct adapter *sc, struct t4_data *fw)
7246{
7247	int rc;
7248	uint8_t *fw_data;
7249
7250	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7251	if (rc)
7252		return (rc);
7253
7254	if (sc->flags & FULL_INIT_DONE) {
7255		rc = EBUSY;
7256		goto done;
7257	}
7258
7259	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7260	if (fw_data == NULL) {
7261		rc = ENOMEM;
7262		goto done;
7263	}
7264
7265	rc = copyin(fw->data, fw_data, fw->len);
7266	if (rc == 0)
7267		rc = -t4_load_fw(sc, fw_data, fw->len);
7268
7269	free(fw_data, M_CXGBE);
7270done:
7271	end_synchronized_op(sc, 0);
7272	return (rc);
7273}
7274
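/*
 * Read a range of card memory into a user buffer, one memory-window
 * aperture at a time.
 */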
7275static int
7276read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7277{
7278	uint32_t addr, off, remaining, i, n;
7279	uint32_t *buf, *b;
7280	uint32_t mw_base, mw_aperture;
7281	int rc;
7282	uint8_t *dst;
7283
7284	rc = validate_mem_range(sc, mr->addr, mr->len);
7285	if (rc != 0)
7286		return (rc);
7287
7288	memwin_info(sc, win, &mw_base, &mw_aperture);
7289	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7290	addr = mr->addr;
7291	remaining = mr->len;
7292	dst = (void *)mr->data;
7293
7294	while (remaining) {
7295		off = position_memwin(sc, win, addr);
7296
7297		/* number of bytes that we'll copy in the inner loop */
7298		n = min(remaining, mw_aperture - off);
7299		for (i = 0; i < n; i += 4)
7300			*b++ = t4_read_reg(sc, mw_base + off + i);
7301
7302		rc = copyout(buf, dst, n);
7303		if (rc != 0)
7304			break;
7305
7306		b = buf;
7307		dst += n;
7308		remaining -= n;
7309		addr += n;
7310	}
7311
7312	free(buf, M_CXGBE);
7313	return (rc);
7314}
7315
7316static int
7317read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7318{
7319	int rc;
7320
7321	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7322		return (EINVAL);
7323
7324	if (i2cd->len > 1) {
7325		/* XXX: need fw support for longer reads in one go */
7326		return (ENOTSUP);
7327	}
7328
7329	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7330	if (rc)
7331		return (rc);
7332	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7333	    i2cd->offset, &i2cd->data[0]);
7334	end_synchronized_op(sc, 0);
7335
7336	return (rc);
7337}
7338
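/* A negative value means "unset" and passes the range check. */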
7339static int
7340in_range(int val, int lo, int hi)
7341{
7342
7343	return (val < 0 || (val <= hi && val >= lo));
7344}
7345
7346static int
7347set_sched_class(struct adapter *sc, struct t4_sched_params *p)
7348{
7349	int fw_subcmd, fw_type, rc;
7350
7351	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
7352	if (rc)
7353		return (rc);
7354
7355	if (!(sc->flags & FULL_INIT_DONE)) {
7356		rc = EAGAIN;
7357		goto done;
7358	}
7359
7360	/*
7361	 * Translate the cxgbetool parameters into T4 firmware parameters.  (The
7362	 * sub-command and type are in common locations.)
7363	 */
7364	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
7365		fw_subcmd = FW_SCHED_SC_CONFIG;
7366	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
7367		fw_subcmd = FW_SCHED_SC_PARAMS;
7368	else {
7369		rc = EINVAL;
7370		goto done;
7371	}
7372	if (p->type == SCHED_CLASS_TYPE_PACKET)
7373		fw_type = FW_SCHED_TYPE_PKTSCHED;
7374	else {
7375		rc = EINVAL;
7376		goto done;
7377	}
7378
7379	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
7380		/* Vet our parameters ... */
7381		if (p->u.config.minmax < 0) {
7382			rc = EINVAL;
7383			goto done;
7384		}
7385
7386		/* And pass the request to the firmware ... */
7387		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax);
7388		goto done;
7389	}
7390
7391	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
7392		int fw_level;
7393		int fw_mode;
7394		int fw_rateunit;
7395		int fw_ratemode;
7396
7397		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
7398			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
7399		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
7400			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
7401		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
7402			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
7403		else {
7404			rc = EINVAL;
7405			goto done;
7406		}
7407
7408		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
7409			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
7410		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
7411			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
7412		else {
7413			rc = EINVAL;
7414			goto done;
7415		}
7416
7417		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
7418			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
7419		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
7420			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
7421		else {
7422			rc = EINVAL;
7423			goto done;
7424		}
7425
7426		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
7427			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
7428		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
7429			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
7430		else {
7431			rc = EINVAL;
7432			goto done;
7433		}
7434
7435		/* Vet our parameters ... */
7436		if (!in_range(p->u.params.channel, 0, 3) ||
7437		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
7438		    !in_range(p->u.params.minrate, 0, 10000000) ||
7439		    !in_range(p->u.params.maxrate, 0, 10000000) ||
7440		    !in_range(p->u.params.weight, 0, 100)) {
7441			rc = ERANGE;
7442			goto done;
7443		}
7444
7445		/*
7446		 * Translate any unset parameters into the firmware's
7447		 * defaults, or fail the call if a required parameter was
7448		 * left unset.
7449		 */
7450		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
7451		    p->u.params.channel < 0 || p->u.params.cl < 0) {
7452			rc = EINVAL;
7453			goto done;
7454		}
7455		if (p->u.params.minrate < 0)
7456			p->u.params.minrate = 0;
7457		if (p->u.params.maxrate < 0) {
7458			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7459			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7460				rc = EINVAL;
7461				goto done;
7462			} else
7463				p->u.params.maxrate = 0;
7464		}
7465		if (p->u.params.weight < 0) {
7466			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
7467				rc = EINVAL;
7468				goto done;
7469			} else
7470				p->u.params.weight = 0;
7471		}
7472		if (p->u.params.pktsize < 0) {
7473			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7474			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7475				rc = EINVAL;
7476				goto done;
7477			} else
7478				p->u.params.pktsize = 0;
7479		}
7480
7481		/* See what the firmware thinks of the request ... */
7482		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
7483		    fw_rateunit, fw_ratemode, p->u.params.channel,
7484		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
7485		    p->u.params.weight, p->u.params.pktsize);
7486		goto done;
7487	}
7488
7489	rc = EINVAL;
7490done:
7491	end_synchronized_op(sc, 0);
7492	return (rc);
7493}
7494
7495static int
7496set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
7497{
7498	struct port_info *pi = NULL;
7499	struct sge_txq *txq;
7500	uint32_t fw_mnem, fw_queue, fw_class;
7501	int i, rc;
7502
7503	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
7504	if (rc)
7505		return (rc);
7506
7507	if (!(sc->flags & FULL_INIT_DONE)) {
7508		rc = EAGAIN;
7509		goto done;
7510	}
7511
7512	if (p->port >= sc->params.nports) {
7513		rc = EINVAL;
7514		goto done;
7515	}
7516
7517	pi = sc->port[p->port];
7518	if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
7519		rc = EINVAL;
7520		goto done;
7521	}
7522
7523	/*
7524	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
7525	 * Scheduling Class in this case).
7526	 */
7527	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
7528	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
7529	fw_class = p->cl < 0 ? 0xffffffff : p->cl;
7530
7531	/*
7532	 * If p->queue is non-negative, then we're only changing the scheduling
7533	 * on a single specified TX queue.
7534	 */
7535	if (p->queue >= 0) {
7536		txq = &sc->sge.txq[pi->first_txq + p->queue];
7537		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
7538		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
7539		    &fw_class);
7540		goto done;
7541	}
7542
7543	/*
7544	 * Change the scheduling on all the TX queues for the
7545	 * interface.
7546	 */
7547	for_each_txq(pi, i, txq) {
7548		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
7549		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
7550		    &fw_class);
7551		if (rc)
7552			goto done;
7553	}
7554
7555	rc = 0;
7556done:
7557	end_synchronized_op(sc, 0);
7558	return (rc);
7559}
7560
7561int
7562t4_os_find_pci_capability(struct adapter *sc, int cap)
7563{
7564	int i;
7565
7566	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7567}
7568
7569int
7570t4_os_pci_save_state(struct adapter *sc)
7571{
7572	device_t dev;
7573	struct pci_devinfo *dinfo;
7574
7575	dev = sc->dev;
7576	dinfo = device_get_ivars(dev);
7577
7578	pci_cfg_save(dev, dinfo, 0);
7579	return (0);
7580}
7581
7582int
7583t4_os_pci_restore_state(struct adapter *sc)
7584{
7585	device_t dev;
7586	struct pci_devinfo *dinfo;
7587
7588	dev = sc->dev;
7589	dinfo = device_get_ivars(dev);
7590
7591	pci_cfg_restore(dev, dinfo);
7592	return (0);
7593}
7594
7595void
7596t4_os_portmod_changed(const struct adapter *sc, int idx)
7597{
7598	struct port_info *pi = sc->port[idx];
7599	static const char *mod_str[] = {
7600		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7601	};
7602
7603	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7604		if_printf(pi->ifp, "transceiver unplugged.\n");
7605	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7606		if_printf(pi->ifp, "unknown transceiver inserted.\n");
7607	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7608		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7609	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7610		if_printf(pi->ifp, "%s transceiver inserted.\n",
7611		    mod_str[pi->mod_type]);
7612	} else {
7613		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7614		    pi->mod_type);
7615	}
7616}
7617
7618void
7619t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7620{
7621	struct port_info *pi = sc->port[idx];
7622	struct ifnet *ifp = pi->ifp;
7623
7624	if (link_stat) {
7625		pi->linkdnrc = -1;
7626		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7627		if_link_state_change(ifp, LINK_STATE_UP);
7628	} else {
7629		if (reason >= 0)
7630			pi->linkdnrc = reason;
7631		if_link_state_change(ifp, LINK_STATE_DOWN);
7632	}
7633}
7634
7635void
7636t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7637{
7638	struct adapter *sc;
7639
7640	sx_slock(&t4_list_lock);
7641	SLIST_FOREACH(sc, &t4_list, link) {
7642		/*
7643		 * func should not make any assumptions about what state sc is
7644		 * in - the only guarantee is that sc->sc_lock is a valid lock.
7645		 */
7646		func(sc, arg);
7647	}
7648	sx_sunlock(&t4_list_lock);
7649}
7650
7651static int
7652t4_open(struct cdev *dev, int flags, int type, struct thread *td)
7653{
7654	return (0);
7655}
7656
7657static int
7658t4_close(struct cdev *dev, int flags, int type, struct thread *td)
7659{
7660	return (0);
7661}
7662
7663static int
7664t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
7665    struct thread *td)
7666{
7667	int rc;
7668	struct adapter *sc = dev->si_drv1;
7669
7670	rc = priv_check(td, PRIV_DRIVER);
7671	if (rc != 0)
7672		return (rc);
7673
7674	switch (cmd) {
7675	case CHELSIO_T4_GETREG: {
7676		struct t4_reg *edata = (struct t4_reg *)data;
7677
7678		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7679			return (EFAULT);
7680
7681		if (edata->size == 4)
7682			edata->val = t4_read_reg(sc, edata->addr);
7683		else if (edata->size == 8)
7684			edata->val = t4_read_reg64(sc, edata->addr);
7685		else
7686			return (EINVAL);
7687
7688		break;
7689	}
7690	case CHELSIO_T4_SETREG: {
7691		struct t4_reg *edata = (struct t4_reg *)data;
7692
		/*
		 * Reject unaligned writes and writes that would run past the
		 * end of the register BAR.
		 */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len ||
		    edata->size > sc->mmio_len - edata->addr)
			return (EFAULT);
7695
7696		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000ULL)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t)edata->val);
7700		} else if (edata->size == 8)
7701			t4_write_reg64(sc, edata->addr, edata->val);
7702		else
7703			return (EINVAL);
7704		break;
7705	}
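	/*
	 * The register dump follows the usual two-pass convention: a caller
	 * whose buffer is too small gets ENOBUFS back with the required
	 * length written to regs->len as a hint.
	 */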
7706	case CHELSIO_T4_REGDUMP: {
7707		struct t4_regdump *regs = (struct t4_regdump *)data;
7708		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
7709		uint8_t *buf;
7710
7711		if (regs->len < reglen) {
7712			regs->len = reglen; /* hint to the caller */
7713			return (ENOBUFS);
7714		}
7715
7716		regs->len = reglen;
7717		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
7718		t4_get_regs(sc, regs, buf);
7719		rc = copyout(buf, regs->data, reglen);
7720		free(buf, M_CXGBE);
7721		break;
7722	}
7723	case CHELSIO_T4_GET_FILTER_MODE:
7724		rc = get_filter_mode(sc, (uint32_t *)data);
7725		break;
7726	case CHELSIO_T4_SET_FILTER_MODE:
7727		rc = set_filter_mode(sc, *(uint32_t *)data);
7728		break;
7729	case CHELSIO_T4_GET_FILTER:
7730		rc = get_filter(sc, (struct t4_filter *)data);
7731		break;
7732	case CHELSIO_T4_SET_FILTER:
7733		rc = set_filter(sc, (struct t4_filter *)data);
7734		break;
7735	case CHELSIO_T4_DEL_FILTER:
7736		rc = del_filter(sc, (struct t4_filter *)data);
7737		break;
7738	case CHELSIO_T4_GET_SGE_CONTEXT:
7739		rc = get_sge_context(sc, (struct t4_sge_context *)data);
7740		break;
7741	case CHELSIO_T4_LOAD_FW:
7742		rc = load_fw(sc, (struct t4_data *)data);
7743		break;
7744	case CHELSIO_T4_GET_MEM:
7745		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
7746		break;
7747	case CHELSIO_T4_GET_I2C:
7748		rc = read_i2c(sc, (struct t4_i2c_data *)data);
7749		break;
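	/*
	 * Clears both the MAC counters kept by the hardware and the software
	 * counters on every rx/tx/control queue of the port.  The queue
	 * loops are safe only after PORT_INIT_DONE, when the queues are
	 * known to exist.
	 */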
7750	case CHELSIO_T4_CLEAR_STATS: {
7751		int i;
7752		u_int port_id = *(uint32_t *)data;
7753		struct port_info *pi;
7754
7755		if (port_id >= sc->params.nports)
7756			return (EINVAL);
7757
7758		/* MAC stats */
7759		t4_clr_port_stats(sc, port_id);
7760
7761		pi = sc->port[port_id];
7762		if (pi->flags & PORT_INIT_DONE) {
7763			struct sge_rxq *rxq;
7764			struct sge_txq *txq;
7765			struct sge_wrq *wrq;
7766
7767			for_each_rxq(pi, i, rxq) {
7768#if defined(INET) || defined(INET6)
7769				rxq->lro.lro_queued = 0;
7770				rxq->lro.lro_flushed = 0;
7771#endif
7772				rxq->rxcsum = 0;
7773				rxq->vlan_extraction = 0;
7774			}
7775
7776			for_each_txq(pi, i, txq) {
7777				txq->txcsum = 0;
7778				txq->tso_wrs = 0;
7779				txq->vlan_insertion = 0;
7780				txq->imm_wrs = 0;
7781				txq->sgl_wrs = 0;
7782				txq->txpkt_wrs = 0;
7783				txq->txpkts_wrs = 0;
7784				txq->txpkts_pkts = 0;
7785				txq->br->br_drops = 0;
7786				txq->no_dmamap = 0;
7787				txq->no_desc = 0;
7788			}
7789
7790#ifdef TCP_OFFLOAD
7791			/* nothing to clear for each ofld_rxq */
7792
7793			for_each_ofld_txq(pi, i, wrq) {
7794				wrq->tx_wrs = 0;
7795				wrq->no_desc = 0;
7796			}
7797#endif
7798			wrq = &sc->sge.ctrlq[pi->port_id];
7799			wrq->tx_wrs = 0;
7800			wrq->no_desc = 0;
7801		}
7802		break;
7803	}
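	/* Tx traffic scheduler: class parameters and queue-to-class binding. */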
7804	case CHELSIO_T4_SCHED_CLASS:
7805		rc = set_sched_class(sc, (struct t4_sched_params *)data);
7806		break;
7807	case CHELSIO_T4_SCHED_QUEUE:
7808		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
7809		break;
7810	case CHELSIO_T4_GET_TRACER:
7811		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
7812		break;
7813	case CHELSIO_T4_SET_TRACER:
7814		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
7815		break;
7816	default:
7817		rc = EINVAL;
7818	}
7819
7820	return (rc);
7821}
7822
7823#ifdef TCP_OFFLOAD
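/*
 * Enable or disable TOE on a port.  Runs with the synchronized op held.
 * The first enable on an adapter may also perform full adapter init and
 * activate the TOM ULD (which is why t4_tom.ko must already be loaded);
 * offload_map tracks which ports currently have TOE enabled.
 */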
7824static int
7825toe_capability(struct port_info *pi, int enable)
7826{
7827	int rc;
7828	struct adapter *sc = pi->adapter;
7829
7830	ASSERT_SYNCHRONIZED_OP(sc);
7831
7832	if (!is_offload(sc))
7833		return (ENODEV);
7834
7835	if (enable) {
7836		if (!(sc->flags & FULL_INIT_DONE)) {
7837			rc = cxgbe_init_synchronized(pi);
7838			if (rc)
7839				return (rc);
7840		}
7841
7842		if (isset(&sc->offload_map, pi->port_id))
7843			return (0);
7844
7845		if (!(sc->flags & TOM_INIT_DONE)) {
7846			rc = t4_activate_uld(sc, ULD_TOM);
7847			if (rc == EAGAIN) {
7848				log(LOG_WARNING,
7849				    "You must kldload t4_tom.ko before trying "
7850				    "to enable TOE on a cxgbe interface.\n");
7851			}
7852			if (rc != 0)
7853				return (rc);
7854			KASSERT(sc->tom_softc != NULL,
7855			    ("%s: TOM activated but softc NULL", __func__));
7856			KASSERT(sc->flags & TOM_INIT_DONE,
7857			    ("%s: TOM activated but flag not set", __func__));
7858		}
7859
7860		setbit(&sc->offload_map, pi->port_id);
7861	} else {
7862		if (!isset(&sc->offload_map, pi->port_id))
7863			return (0);
7864
7865		KASSERT(sc->flags & TOM_INIT_DONE,
7866		    ("%s: TOM never initialized?", __func__));
7867		clrbit(&sc->offload_map, pi->port_id);
7868	}
7869
7870	return (0);
7871}
7872
7873/*
7874 * Add an upper layer driver to the global list.
7875 */
7876int
7877t4_register_uld(struct uld_info *ui)
7878{
7879	int rc = 0;
7880	struct uld_info *u;
7881
7882	sx_xlock(&t4_uld_list_lock);
7883	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
7888	}
7889
7890	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7891	ui->refcount = 0;
7892done:
7893	sx_xunlock(&t4_uld_list_lock);
7894	return (rc);
7895}
7896
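/*
 * Remove an upper layer driver from the global list.  Fails with EBUSY
 * while any adapter still has the ULD active (refcount > 0) and EINVAL
 * if the ULD was never registered.
 */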
7897int
7898t4_unregister_uld(struct uld_info *ui)
7899{
7900	int rc = EINVAL;
7901	struct uld_info *u;
7902
7903	sx_xlock(&t4_uld_list_lock);
7904
7905	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
7916	}
7917done:
7918	sx_xunlock(&t4_uld_list_lock);
7919	return (rc);
7920}
7921
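/*
 * Activate the named ULD on this adapter.  EAGAIN means no ULD with that
 * id is registered; callers such as toe_capability() treat that as
 * "module not loaded yet".
 */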
7922int
7923t4_activate_uld(struct adapter *sc, int id)
7924{
7925	int rc = EAGAIN;
7926	struct uld_info *ui;
7927
7928	ASSERT_SYNCHRONIZED_OP(sc);
7929
7930	sx_slock(&t4_uld_list_lock);
7931
7932	SLIST_FOREACH(ui, &t4_uld_list, link) {
7933		if (ui->uld_id == id) {
7934			rc = ui->activate(sc);
7935			if (rc == 0)
7936				ui->refcount++;
7937			goto done;
7938		}
7939	}
7940done:
7941	sx_sunlock(&t4_uld_list_lock);
7942
7943	return (rc);
7944}
7945
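/*
 * Deactivate the named ULD on this adapter and drop its refcount on
 * success.
 */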
7946int
7947t4_deactivate_uld(struct adapter *sc, int id)
7948{
7949	int rc = EINVAL;
7950	struct uld_info *ui;
7951
7952	ASSERT_SYNCHRONIZED_OP(sc);
7953
7954	sx_slock(&t4_uld_list_lock);
7955
7956	SLIST_FOREACH(ui, &t4_uld_list, link) {
7957		if (ui->uld_id == id) {
7958			rc = ui->deactivate(sc);
7959			if (rc == 0)
7960				ui->refcount--;
7961			goto done;
7962		}
7963	}
7964done:
7965	sx_sunlock(&t4_uld_list_lock);
7966
7967	return (rc);
7968}
7969#endif
7970
7971/*
7972 * Come up with reasonable defaults for some of the tunables, provided they're
7973 * not set by the user (in which case we'll use the values as is).
7974 */
7975static void
7976tweak_tunables(void)
7977{
7978	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
7979
7980	if (t4_ntxq10g < 1)
7981		t4_ntxq10g = min(nc, NTXQ_10G);
7982
7983	if (t4_ntxq1g < 1)
7984		t4_ntxq1g = min(nc, NTXQ_1G);
7985
7986	if (t4_nrxq10g < 1)
7987		t4_nrxq10g = min(nc, NRXQ_10G);
7988
7989	if (t4_nrxq1g < 1)
7990		t4_nrxq1g = min(nc, NRXQ_1G);
7991
7992#ifdef TCP_OFFLOAD
7993	if (t4_nofldtxq10g < 1)
7994		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7995
7996	if (t4_nofldtxq1g < 1)
7997		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7998
7999	if (t4_nofldrxq10g < 1)
8000		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
8001
8002	if (t4_nofldrxq1g < 1)
8003		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
8004
8005	if (t4_toecaps_allowed == -1)
8006		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
8007#else
8008	if (t4_toecaps_allowed == -1)
8009		t4_toecaps_allowed = 0;
8010#endif
8011
8012	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
8013		t4_tmr_idx_10g = TMR_IDX_10G;
8014
8015	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
8016		t4_pktc_idx_10g = PKTC_IDX_10G;
8017
8018	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
8019		t4_tmr_idx_1g = TMR_IDX_1G;
8020
8021	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
8022		t4_pktc_idx_1g = PKTC_IDX_1G;
8023
8024	if (t4_qsize_txq < 128)
8025		t4_qsize_txq = 128;
8026
8027	if (t4_qsize_rxq < 128)
8028		t4_qsize_rxq = 128;
8029	while (t4_qsize_rxq & 7)
8030		t4_qsize_rxq++;
8031
8032	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
8033}
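/*
 * A sketch of how these tunables are typically overridden from
 * /boot/loader.conf before the module loads (hw.cxgbe.* names as
 * registered via TUNABLE_INT earlier in this file; values are examples,
 * not recommendations):
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.qsize_rxq="1024"
 *	hw.cxgbe.interrupt_types="2"	# bitmask of allowed INTR_* types
 */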
8034
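/*
 * Module event handler shared by the t4nex and t5nex drivers.  The
 * 'loaded' counter makes the one-time setup and teardown run only on the
 * first load and the last unload; unload is refused (EBUSY) while any
 * adapter or ULD is still in the global lists.
 */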
8035static int
8036mod_event(module_t mod, int cmd, void *arg)
8037{
8038	int rc = 0;
8039	static int loaded = 0;
8040
8041	switch (cmd) {
8042	case MOD_LOAD:
8043		if (atomic_fetchadd_int(&loaded, 1))
8044			break;
8045		t4_sge_modload();
8046		sx_init(&t4_list_lock, "T4/T5 adapters");
8047		SLIST_INIT(&t4_list);
8048#ifdef TCP_OFFLOAD
8049		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
8050		SLIST_INIT(&t4_uld_list);
8051#endif
8052		t4_tracer_modload();
8053		tweak_tunables();
8054		break;
8055
8056	case MOD_UNLOAD:
8057		if (atomic_fetchadd_int(&loaded, -1) > 1)
8058			break;
8059		t4_tracer_modunload();
8060#ifdef TCP_OFFLOAD
8061		sx_slock(&t4_uld_list_lock);
8062		if (!SLIST_EMPTY(&t4_uld_list)) {
8063			rc = EBUSY;
8064			sx_sunlock(&t4_uld_list_lock);
8065			break;
8066		}
8067		sx_sunlock(&t4_uld_list_lock);
8068		sx_destroy(&t4_uld_list_lock);
8069#endif
8070		sx_slock(&t4_list_lock);
8071		if (!SLIST_EMPTY(&t4_list)) {
8072			rc = EBUSY;
8073			sx_sunlock(&t4_list_lock);
8074			break;
8075		}
8076		sx_sunlock(&t4_list_lock);
8077		sx_destroy(&t4_list_lock);
8078		break;
8079	}
8080
8081	return (rc);
8082}
8083
8084static devclass_t t4_devclass, t5_devclass;
8085static devclass_t cxgbe_devclass, cxl_devclass;
8086
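/*
 * Module glue: t4nex and t5nex attach to the PCI bus (sharing mod_event
 * for load/unload bookkeeping) and the cxgbe/cxl port drivers attach to
 * them.  t5_driver and cxl_driver are defined earlier in this file.
 */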
8087DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
8088MODULE_VERSION(t4nex, 1);
8089MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
8090
8091DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
8092MODULE_VERSION(t5nex, 1);
8093MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
8094
8095DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
8096MODULE_VERSION(cxgbe, 1);
8097
8098DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
8099MODULE_VERSION(cxl, 1);
8100