/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_main.c 265421 2014-05-06 04:22:06Z np $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

static struct cdevsw t5_cdevsw = {
       .d_version = D_VERSION,
       .d_flags = 0,
       .d_open = t4_open,
       .d_close = t4_close,
       .d_ioctl = t4_ioctl,
       .d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
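
/*
 * These are boot-time tunables and are set from /boot/loader.conf, e.g.
 * (values illustrative):
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 * A tunable left at -1 gets its final value from tweak_tunables() when the
 * driver loads.
 */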

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
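
/*
 * For example (illustrative), setting hw.cxgbe.interrupt_types="2" in
 * loader.conf restricts the driver to MSI, while the default (all three bits
 * set) lets cfg_itype_and_nqueues() use the best type the system can provide.
 */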

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
	int rsrv_noflowq;	/* Flag whether to reserve queue 0 */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};

struct filter_entry {
        uint32_t valid:1;	/* filter allocated and valid */
        uint32_t locked:1;	/* filter is administratively locked */
        uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

        struct t4_filter_specification fs;
};
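
/*
 * Informal lifecycle sketch: set_filter_wr() marks an entry pending and
 * sends the work request; the CPL_SET_TCB_RPL reply handler (t4_filter_rpl,
 * registered in t4_attach) runs when the firmware responds and updates
 * valid/pending to match the outcome.
 */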

enum {
	XGMAC_MTU	= (1 << 0),
	XGMAC_PROMISC	= (1 << 1),
	XGMAC_ALLMULTI	= (1 << 2),
	XGMAC_VLANEX	= (1 << 3),
	XGMAC_UCADDR	= (1 << 4),
	XGMAC_MCADDRS	= (1 << 5),

	XGMAC_ALL	= 0xffff
};
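
/*
 * These are bit flags, so callers can OR them together; cxgbe_ioctl(), for
 * example, passes XGMAC_PROMISC | XGMAC_ALLMULTI to update_mac_settings()
 * when either of those interface flags changes.
 */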

static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
#ifdef notyet
	{0x5404,  "Chelsio T520-BCH"},
	{0x5405,  "Chelsio T540-BCH"},
	{0x5406,  "Chelsio T540-CH"},
	{0x5408,  "Chelsio T520-CX"},
	{0x540b,  "Chelsio B520-SR"},
	{0x540c,  "Chelsio B504-BT"},
	{0x540f,  "Chelsio Amsterdam"},
	{0x5413,  "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif

	sc = device_get_softc(dev);
	sc->dev = dev;

	pci_enable_busmaster(dev);
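	/*
	 * For PCIe devices: raise the maximum read request size and enable
	 * relaxed ordering in the PCIe device control register; both are DMA
	 * throughput knobs.
	 */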
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
	}

	sc->traceq = -1;
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
	sc->an_handler = an_not_handled;
	for (i = 0; i < nitems(sc->cpl_handler); i++)
		sc->cpl_handler[i] = cpl_not_handled;
	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
	t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
	t4_init_sge_cpl_handlers(sc);

	/* Prepare the adapter for operation */
	rc = -t4_prep_adapter(sc);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;

		/* Allocate the vi and initialize parameters like mac addr */
		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
			pi->tmr_idx = t4_tmr_idx_10g;
			pi->pktc_idx = t4_pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = t4_tmr_idx_1g;
			pi->pktc_idx = t4_pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		pi->linkdnrc = -1;

		pi->qsize_rxq = t4_qsize_rxq;
		pi->qsize_txq = t4_qsize_txq;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;
	sc->flags |= iaq.intr_flags;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {

		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		pi->first_rxq = rqidx;
		pi->first_txq = tqidx;
		if (is_10G_port(pi) || is_40G_port(pi)) {
			pi->nrxq = iaq.nrxq10g;
			pi->ntxq = iaq.ntxq10g;
		} else {
			pi->nrxq = iaq.nrxq1g;
			pi->ntxq = iaq.ntxq1g;
		}

		if (pi->ntxq > 1)
			pi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
		else
			pi->rsrv_noflowq = 0;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD
		if (is_offload(sc)) {
			pi->first_ofld_rxq = ofld_rqidx;
			pi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				pi->nofldrxq = iaq.nofldrxq10g;
				pi->nofldtxq = iaq.nofldtxq10g;
			} else {
				pi->nofldrxq = iaq.nofldrxq1g;
				pi->nofldtxq = iaq.nofldtxq1g;
			}
			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.width, sc->params.nports, sc->intr_count,
	    sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}


static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(pi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (pi->flags & PORT_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(pi,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_init_synchronized(pi);
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(pi, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

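	/*
	 * Pick a tx queue.  If queue 0 is reserved for non-flowid traffic
	 * (rsrv_noflowq), flows hash into the remaining queues; e.g.
	 * (illustrative) with ntxq = 4 and rsrv_noflowq = 1, flowid 5 maps to
	 * txq 1 + (5 % 3) = 3 relative to first_txq.
	 */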
	if (m->m_flags & M_FLOWID)
		txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq))
		    + pi->rsrv_noflowq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;
	struct sge_txq *txq;
	int i;
	struct mbuf *m;

	/* queues do not exist if !PORT_INIT_DONE. */
	if (pi->flags & PORT_INIT_DONE) {
		for_each_txq(pi, i, txq) {
			TXQ_LOCK(txq);
			m_freem(txq->m);
			txq->m = NULL;
			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
				m_freem(m);
			TXQ_UNLOCK(txq);
		}
	}
	if_qflush(ifp);
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct port_info *pi = ifp->if_softc;

	device_printf(pi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *pi = ifp->if_softc;
	struct ifmedia_entry *cur = pi->media.ifm_cur;
	int speed = pi->link_cfg.speed;
	int data = (pi->port_type << 8) | pi->mod_type;

	if (cur->ifm_data != data) {
		build_medialist(pi);
		cur = pi->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
			    speed));
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}

static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}

static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}

/*
 * Verify that the memory range specified by the addr/len pair is valid and lies
 * entirely within a single region (EDCx or MCx).
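 *
 * Example (values illustrative): with EDC0 enabled at base 0x0 and size
 * 0x400000, addr 0x3ff000/len 0x1000 is accepted, while the same addr with
 * len 0x2000 returns EFAULT because the range runs past the end of EDC0 and
 * a range may not span regions.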
 */
static int
validate_mem_range(struct adapter *sc, uint32_t addr, int len)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (addr & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Enabled memories */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (em & F_EDRAM0_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EDRAM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EXT_MEM_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}

	return (EFAULT);
}

static int
fwmtype_to_hwmtype(int mtype)
{

	switch (mtype) {
	case FW_MEMTYPE_EDC0:
		return (MEM_EDC0);
	case FW_MEMTYPE_EDC1:
		return (MEM_EDC1);
	case FW_MEMTYPE_EXTMEM:
		return (MEM_MC0);
	case FW_MEMTYPE_EXTMEM1:
		return (MEM_MC1);
	default:
		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
	}
}

/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (fwmtype_to_hwmtype(mtype)) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}

static void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
	const struct memwin *mw;

	if (is_t4(sc)) {
		KASSERT(win >= 0 && win < nitems(t4_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t4_memwin[win];
	} else {
		KASSERT(win >= 0 && win < nitems(t5_memwin),
		    ("%s: incorrect memwin# (%d)", __func__, win));
		mw = &t5_memwin[win];
	}

	if (base != NULL)
		*base = mw->base;
	if (aperture != NULL)
		*aperture = mw->aperture;
}

/*
 * Positions the memory window such that it can be used to access the specified
 * address in the chip's address space.  The return value is the offset of addr
 * from the start of the window.
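 *
 * Illustrative use (not a verbatim excerpt from this file):
 *	memwin_info(sc, 2, &base, &aperture);
 *	off = position_memwin(sc, 2, addr);
 *	val = t4_read_reg(sc, base + off);
 * repositioning the window whenever off would reach the aperture.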
 */
static uint32_t
position_memwin(struct adapter *sc, int n, uint32_t addr)
{
	uint32_t start, pf;
	uint32_t reg;

	KASSERT(n >= 0 && n <= 3,
	    ("%s: invalid window %d.", __func__, n));
	KASSERT((addr & 3) == 0,
	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));

	if (is_t4(sc)) {
		pf = 0;
		start = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		start = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);

	t4_write_reg(sc, reg, start | pf);
	t4_read_reg(sc, reg);

	return (addr - start);
}

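/*
 * Work out the interrupt type to use and the number of vectors and queues
 * that go with it.  Rough example (numbers hypothetical): two 10G ports with
 * nrxq10g = 8 first try for T4_EXTRA_INTR + 2 * 8 MSI-X vectors (the extras
 * cover the error and firmware event queue interrupts, the rest is one per
 * rxq); if that many aren't available, the progressively smaller plans in
 * the comments below are tried in turn.
 */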
static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nrxq10g, nrxq1g, n;
	int nofldrxq10g = 0, nofldrxq1g = 0;

	bzero(iaq, sizeof(*iaq));

	iaq->ntxq10g = t4_ntxq10g;
	iaq->ntxq1g = t4_ntxq1g;
	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
	iaq->rsrv_noflowq = t4_rsrv_noflowq;
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		iaq->nofldtxq10g = t4_nofldtxq10g;
		iaq->nofldtxq1g = t4_nofldtxq1g;
		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
	}
#endif

	for (itype = INTR_MSIX; itype; itype >>= 1) {

		if ((itype & t4_intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;
restart:
		if (navail == 0)
			continue;

		iaq->intr_type = itype;
		iaq->intr_flags = 0;

		/*
		 * Best option: an interrupt vector for errors, one for the
		 * firmware event queue, and one each for each rxq (NIC as well
		 * as offload).
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
			iaq->intr_flags |= INTR_DIRECT;
			goto allocate;
		}

		/*
		 * Second best option: an interrupt vector for errors, one for
		 * the firmware event queue, and one each for either NIC or
		 * offload rxq's.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq)))
			goto allocate;

		/*
		 * Next best option: an interrupt vector for errors, one for the
		 * firmware event queue, and at least one per port.  At this
		 * point we know we'll have to downsize nrxq or nofldrxq to fit
		 * what's available to us.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g + n1g;
		if (iaq->nirq <= navail) {
			int leftover = navail - iaq->nirq;

			if (n10g > 0) {
				int target = max(nrxq10g, nofldrxq10g);

				n = 1;
				while (n < target && leftover >= n10g) {
					leftover -= n10g;
					iaq->nirq += n10g;
					n++;
				}
				iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
			}

			if (n1g > 0) {
				int target = max(nrxq1g, nofldrxq1g);

				n = 1;
				while (n < target && leftover >= n1g) {
					leftover -= n1g;
					iaq->nirq += n1g;
					n++;
				}
				iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
			}

			if (itype != INTR_MSI || powerof2(iaq->nirq))
				goto allocate;
		}

		/*
		 * Least desirable option: one interrupt vector for everything.
		 */
		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
#ifdef TCP_OFFLOAD
		if (is_offload(sc))
			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif

allocate:
		navail = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &navail);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &navail);

		if (rc == 0) {
			if (navail == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate (it's in navail).
			 */
			device_printf(sc->dev, "fewer vectors than requested, "
			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
			    itype, iaq->nirq, navail);
			pci_release_msi(sc->dev);
			goto restart;
		}

1866		device_printf(sc->dev,
1867		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1868		    rc, itype, iaq->nirq, navail);
1869	}
1870
1871	device_printf(sc->dev,
1872	    "failed to find a usable interrupt type.  "
1873	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
1874	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1875
1876	return (ENXIO);
1877}
1878
1879#define FW_VERSION(chip) ( \
1880    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
1881    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
1882    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
1883    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
1884#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
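/*
 * FW_VERSION() packs the four version components into a single 32-bit word,
 * one byte per component; e.g. a 1.8.4.0 firmware would end up as 0x01080400
 * (assuming the usual major.minor.micro.build byte layout behind the
 * V_FW_HDR_FW_VER_* macros).
 */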
1885
1886struct fw_info {
1887	uint8_t chip;
1888	char *kld_name;
1889	char *fw_mod_name;
1890	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
1891} fw_info[] = {
1892	{
1893		.chip = CHELSIO_T4,
1894		.kld_name = "t4fw_cfg",
1895		.fw_mod_name = "t4fw",
1896		.fw_hdr = {
1897			.chip = FW_HDR_CHIP_T4,
1898			.fw_ver = htobe32_const(FW_VERSION(T4)),
1899			.intfver_nic = FW_INTFVER(T4, NIC),
1900			.intfver_vnic = FW_INTFVER(T4, VNIC),
1901			.intfver_ofld = FW_INTFVER(T4, OFLD),
1902			.intfver_ri = FW_INTFVER(T4, RI),
1903			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
1904			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
1905			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
1906			.intfver_fcoe = FW_INTFVER(T4, FCOE),
1907		},
1908	}, {
1909		.chip = CHELSIO_T5,
1910		.kld_name = "t5fw_cfg",
1911		.fw_mod_name = "t5fw",
1912		.fw_hdr = {
1913			.chip = FW_HDR_CHIP_T5,
1914			.fw_ver = htobe32_const(FW_VERSION(T5)),
1915			.intfver_nic = FW_INTFVER(T5, NIC),
1916			.intfver_vnic = FW_INTFVER(T5, VNIC),
1917			.intfver_ofld = FW_INTFVER(T5, OFLD),
1918			.intfver_ri = FW_INTFVER(T5, RI),
1919			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
1920			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
1921			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
1922			.intfver_fcoe = FW_INTFVER(T5, FCOE),
1923		},
1924	}
1925};
1926
1927static struct fw_info *
1928find_fw_info(int chip)
1929{
1930	int i;
1931
1932	for (i = 0; i < nitems(fw_info); i++) {
1933		if (fw_info[i].chip == chip)
1934			return (&fw_info[i]);
1935	}
1936	return (NULL);
1937}
1938
1939/*
1940 * Is the given firmware API compatible with the one the driver was compiled
1941 * with?
1942 */
1943static int
1944fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1945{
1946
1947	/* short circuit if it's the exact same firmware version */
1948	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1949		return (1);
1950
1951	/*
1952	 * XXX: Is this too conservative?  Perhaps I should limit this to the
1953	 * features that are supported in the driver.
1954	 */
1955#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1956	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1957	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1958	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1959		return (1);
1960#undef SAME_INTF
1961
1962	return (0);
1963}
1964
1965/*
1966 * The firmware in the KLD is usable, but should it be installed?  This routine
1967 * explains itself in detail if it indicates the KLD firmware should be
1968 * installed.
1969 */
1970static int
1971should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1972{
1973	const char *reason;
1974
1975	if (!card_fw_usable) {
1976		reason = "incompatible or unusable";
1977		goto install;
1978	}
1979
1980	if (k > c) {
1981		reason = "older than the version bundled with this driver";
1982		goto install;
1983	}
1984
1985	if (t4_fw_install == 2 && k != c) {
1986		reason = "different than the version bundled with this driver";
1987		goto install;
1988	}
1989
1990	return (0);
1991
1992install:
1993	if (t4_fw_install == 0) {
1994		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1995		    "but the driver is prohibited from installing a different "
1996		    "firmware on the card.\n",
1997		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1998		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1999
2000		return (0);
2001	}
2002
2003	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
2004	    "installing firmware %u.%u.%u.%u on card.\n",
2005	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2006	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
2007	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2008	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2009
2010	return (1);
2011}

2012/*
2013 * Establish contact with the firmware and determine if we are the master driver
2014 * or not, and whether we are responsible for chip initialization.
2015 */
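/*
 * In outline: read the header of the firmware on the card's flash, compare it
 * and the firmware bundled in the KLD against the interface versions this
 * driver was compiled with, install the KLD firmware only if
 * should_install_kld_fw() says to, and then run with whatever is on the card.
 */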
2016static int
2017prep_firmware(struct adapter *sc)
2018{
2019	const struct firmware *fw = NULL, *default_cfg;
2020	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
2021	enum dev_state state;
2022	struct fw_info *fw_info;
2023	struct fw_hdr *card_fw;		/* fw on the card */
2024	const struct fw_hdr *kld_fw;	/* fw in the KLD */
2025	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
2026					   against */
2027
2028	/* Contact firmware. */
2029	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
2030	if (rc < 0 || state == DEV_STATE_ERR) {
2031		rc = -rc;
2032		device_printf(sc->dev,
2033		    "failed to connect to the firmware: %d, %d.\n", rc, state);
2034		return (rc);
2035	}
2036	pf = rc;
2037	if (pf == sc->mbox)
2038		sc->flags |= MASTER_PF;
2039	else if (state == DEV_STATE_UNINIT) {
2040		/*
2041		 * We didn't get to be the master so we definitely won't be
2042		 * configuring the chip.  It's a bug if someone else hasn't
2043		 * configured it already.
2044		 */
2045		device_printf(sc->dev, "couldn't be master(%d), "
2046		    "device not already initialized either(%d).\n", rc, state);
2047		return (EDOOFUS);
2048	}
2049
2050	/* This is the firmware whose headers the driver was compiled against */
2051	fw_info = find_fw_info(chip_id(sc));
2052	if (fw_info == NULL) {
2053		device_printf(sc->dev,
2054		    "unable to look up firmware information for chip %d.\n",
2055		    chip_id(sc));
2056		return (EINVAL);
2057	}
2058	drv_fw = &fw_info->fw_hdr;
2059
2060	/*
2061	 * The firmware KLD contains many modules.  The KLD name is also the
2062	 * name of the module that contains the default config file.
2063	 */
2064	default_cfg = firmware_get(fw_info->kld_name);
2065
2066	/* Read the header of the firmware on the card */
2067	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2068	rc = -t4_read_flash(sc, FLASH_FW_START,
2069	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2070	if (rc == 0)
2071		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
2072	else {
2073		device_printf(sc->dev,
2074		    "Unable to read card's firmware header: %d\n", rc);
2075		card_fw_usable = 0;
2076	}
2077
2078	/* This is the firmware in the KLD */
2079	fw = firmware_get(fw_info->fw_mod_name);
2080	if (fw != NULL) {
2081		kld_fw = (const void *)fw->data;
2082		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2083	} else {
2084		kld_fw = NULL;
2085		kld_fw_usable = 0;
2086	}
2087
2088	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2089	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2090		/*
2091		 * Common case: the firmware on the card is an exact match and
2092		 * the KLD is an exact match too, or the KLD is
2093		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
2094		 * here -- use cxgbetool loadfw if you want to reinstall the
2095		 * same firmware as the one on the card.
2096		 */
2097	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2098	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2099	    be32toh(card_fw->fw_ver))) {
2100
2101		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2102		if (rc != 0) {
2103			device_printf(sc->dev,
2104			    "failed to install firmware: %d\n", rc);
2105			goto done;
2106		}
2107
2108		/* Installed successfully, update the cached header too. */
2109		memcpy(card_fw, kld_fw, sizeof(*card_fw));
2110		card_fw_usable = 1;
2111		need_fw_reset = 0;	/* already reset as part of load_fw */
2112	}
2113
2114	if (!card_fw_usable) {
2115		uint32_t d, c, k;
2116
2117		d = ntohl(drv_fw->fw_ver);
2118		c = ntohl(card_fw->fw_ver);
2119		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2120
2121		device_printf(sc->dev, "Cannot find a usable firmware: "
2122		    "fw_install %d, chip state %d, "
2123		    "driver compiled with %d.%d.%d.%d, "
2124		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2125		    t4_fw_install, state,
2126		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2127		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2128		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2129		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2130		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2131		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2132		rc = EINVAL;
2133		goto done;
2134	}
2135
2136	/* We're using whatever's on the card and it's known to be good. */
2137	sc->params.fw_vers = ntohl(card_fw->fw_ver);
2138	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2139	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2140	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2141	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2142	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2143	t4_get_tp_version(sc, &sc->params.tp_vers);
2144
2145	/* Reset device */
2146	if (need_fw_reset &&
2147	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2148		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2149		if (rc != ETIMEDOUT && rc != EIO)
2150			t4_fw_bye(sc, sc->mbox);
2151		goto done;
2152	}
2153	sc->flags |= FW_OK;
2154
2155	rc = get_params__pre_init(sc);
2156	if (rc != 0)
2157		goto done; /* error message displayed already */
2158
2159	/* Partition adapter resources as specified in the config file. */
2160	if (state == DEV_STATE_UNINIT) {
2161
2162		KASSERT(sc->flags & MASTER_PF,
2163		    ("%s: trying to change chip settings when not master.",
2164		    __func__));
2165
2166		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2167		if (rc != 0)
2168			goto done;	/* error message displayed already */
2169
2170		t4_tweak_chip_settings(sc);
2171
2172		/* get basic stuff going */
2173		rc = -t4_fw_initialize(sc, sc->mbox);
2174		if (rc != 0) {
2175			device_printf(sc->dev, "fw init failed: %d.\n", rc);
2176			goto done;
2177		}
2178	} else {
2179		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2180		sc->cfcsum = 0;
2181	}
2182
2183done:
2184	free(card_fw, M_CXGBE);
2185	if (fw != NULL)
2186		firmware_put(fw, FIRMWARE_UNLOAD);
2187	if (default_cfg != NULL)
2188		firmware_put(default_cfg, FIRMWARE_UNLOAD);
2189
2190	return (rc);
2191}
2192
2193#define FW_PARAM_DEV(param) \
2194	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2195	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2196#define FW_PARAM_PFVF(param) \
2197	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2198	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
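/*
 * Shorthand for constructing the parameter ids that t4_query_params and
 * t4_set_params expect; e.g. FW_PARAM_DEV(PORTVEC) names the device-wide
 * port-vector parameter queried in get_params__pre_init below.
 */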
2199
2200/*
2201 * Partition chip resources for use between various PFs, VFs, etc.
2202 */
2203static int
2204partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2205    const char *name_prefix)
2206{
2207	const struct firmware *cfg = NULL;
2208	int rc = 0;
2209	struct fw_caps_config_cmd caps;
2210	uint32_t mtype, moff, finicsum, cfcsum;
2211
2212	/*
2213	 * Figure out what configuration file to use.  Pick the default config
2214	 * file for the card if the user hasn't specified one explicitly.
2215	 */
2216	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2217	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2218		/* Card specific overrides go here. */
2219		if (pci_get_device(sc->dev) == 0x440a)
2220			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2221		if (is_fpga(sc))
2222			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2223	}
2224
2225	/*
2226	 * We need to load another module if the profile is anything except
2227	 * "default" or "flash".
2228	 */
2229	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2230	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2231		char s[32];
2232
2233		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2234		cfg = firmware_get(s);
2235		if (cfg == NULL) {
2236			if (default_cfg != NULL) {
2237				device_printf(sc->dev,
2238				    "unable to load module \"%s\" for "
2239				    "configuration profile \"%s\", will use "
2240				    "the default config file instead.\n",
2241				    s, sc->cfg_file);
2242				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2243				    "%s", DEFAULT_CF);
2244			} else {
2245				device_printf(sc->dev,
2246				    "unable to load module \"%s\" for "
2247				    "configuration profile \"%s\", will use "
2248				    "the config file on the card's flash "
2249				    "instead.\n", s, sc->cfg_file);
2250				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2251				    "%s", FLASH_CF);
2252			}
2253		}
2254	}
2255
2256	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2257	    default_cfg == NULL) {
2258		device_printf(sc->dev,
2259		    "default config file not available, will use the config "
2260		    "file on the card's flash instead.\n");
2261		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2262	}
2263
2264	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2265		u_int cflen, i, n;
2266		const uint32_t *cfdata;
2267		uint32_t param, val, addr, off, mw_base, mw_aperture;
2268
2269		KASSERT(cfg != NULL || default_cfg != NULL,
2270		    ("%s: no config to upload", __func__));
2271
2272		/*
2273		 * Ask the firmware where it wants us to upload the config file.
2274		 */
2275		param = FW_PARAM_DEV(CF);
2276		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2277		if (rc != 0) {
2278			/* No support for config file?  Shouldn't happen. */
2279			device_printf(sc->dev,
2280			    "failed to query config file location: %d.\n", rc);
2281			goto done;
2282		}
2283		mtype = G_FW_PARAMS_PARAM_Y(val);
2284		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2285
2286		/*
2287		 * XXX: sheer laziness.  We deliberately added 4 bytes of
2288		 * useless stuffing/comments at the end of the config file so
2289		 * it's ok to simply throw away the last remaining bytes when
2290		 * the config file is not an exact multiple of 4.  This also
2291		 * helps with the validate_mt_off_len check.
2292		 */
2293		if (cfg != NULL) {
2294			cflen = cfg->datasize & ~3;
2295			cfdata = cfg->data;
2296		} else {
2297			cflen = default_cfg->datasize & ~3;
2298			cfdata = default_cfg->data;
2299		}
2300
2301		if (cflen > FLASH_CFG_MAX_SIZE) {
2302			device_printf(sc->dev,
2303			    "config file too long (%d, max allowed is %d).  "
2304			    "Will try to use the config on the card, if any.\n",
2305			    cflen, FLASH_CFG_MAX_SIZE);
2306			goto use_config_on_flash;
2307		}
2308
2309		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2310		if (rc != 0) {
2311			device_printf(sc->dev,
2312			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
2313			    "Will try to use the config on the card, if any.\n",
2314			    __func__, mtype, moff, cflen, rc);
2315			goto use_config_on_flash;
2316		}
2317
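		/*
		 * Copy the config file into card memory 32 bits at a time
		 * through memory window 2, repositioning the window whenever
		 * the remaining data runs past the window's aperture.
		 */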
2318		memwin_info(sc, 2, &mw_base, &mw_aperture);
2319		while (cflen) {
2320			off = position_memwin(sc, 2, addr);
2321			n = min(cflen, mw_aperture - off);
2322			for (i = 0; i < n; i += 4)
2323				t4_write_reg(sc, mw_base + off + i, *cfdata++);
2324			cflen -= n;
2325			addr += n;
2326		}
2327	} else {
2328use_config_on_flash:
2329		mtype = FW_MEMTYPE_FLASH;
2330		moff = t4_flash_cfg_addr(sc);
2331	}
2332
2333	bzero(&caps, sizeof(caps));
2334	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2335	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2336	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2337	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2338	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2339	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2340	if (rc != 0) {
2341		device_printf(sc->dev,
2342		    "failed to pre-process config file: %d "
2343		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2344		goto done;
2345	}
2346
2347	finicsum = be32toh(caps.finicsum);
2348	cfcsum = be32toh(caps.cfcsum);
2349	if (finicsum != cfcsum) {
2350		device_printf(sc->dev,
2351		    "WARNING: config file checksum mismatch: %08x %08x\n",
2352		    finicsum, cfcsum);
2353	}
2354	sc->cfcsum = cfcsum;
2355
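/*
 * The caps fields are big-endian in the mailbox command, so the allowed-caps
 * mask is applied in that byte order and the surviving bits are converted to
 * host order for the cached copies in the softc.
 */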
2356#define LIMIT_CAPS(x) do { \
2357	caps.x &= htobe16(t4_##x##_allowed); \
2358	sc->x = be16toh(caps.x); \
2359} while (0)
2360
2361	/*
2362	 * Let the firmware know what features will (not) be used so it can tune
2363	 * things accordingly.
2364	 */
2365	LIMIT_CAPS(linkcaps);
2366	LIMIT_CAPS(niccaps);
2367	LIMIT_CAPS(toecaps);
2368	LIMIT_CAPS(rdmacaps);
2369	LIMIT_CAPS(iscsicaps);
2370	LIMIT_CAPS(fcoecaps);
2371#undef LIMIT_CAPS
2372
2373	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2374	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2375	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2376	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2377	if (rc != 0) {
2378		device_printf(sc->dev,
2379		    "failed to process config file: %d.\n", rc);
2380	}
2381done:
2382	if (cfg != NULL)
2383		firmware_put(cfg, FIRMWARE_UNLOAD);
2384	return (rc);
2385}
2386
2387/*
2388 * Retrieve parameters that are needed (or nice to have) very early.
2389 */
2390static int
2391get_params__pre_init(struct adapter *sc)
2392{
2393	int rc;
2394	uint32_t param[2], val[2];
2395	struct fw_devlog_cmd cmd;
2396	struct devlog_params *dlog = &sc->params.devlog;
2397
2398	param[0] = FW_PARAM_DEV(PORTVEC);
2399	param[1] = FW_PARAM_DEV(CCLK);
2400	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2401	if (rc != 0) {
2402		device_printf(sc->dev,
2403		    "failed to query parameters (pre_init): %d.\n", rc);
2404		return (rc);
2405	}
2406
2407	sc->params.portvec = val[0];
2408	sc->params.nports = bitcount32(val[0]);
2409	sc->params.vpd.cclk = val[1];
2410
2411	/* Read device log parameters. */
2412	bzero(&cmd, sizeof(cmd));
2413	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2414	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2415	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2416	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2417	if (rc != 0) {
2418		device_printf(sc->dev,
2419		    "failed to get devlog parameters: %d.\n", rc);
2420		bzero(dlog, sizeof (*dlog));
2421		rc = 0;	/* devlog isn't critical for device operation */
2422	} else {
2423		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2424		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2425		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2426		dlog->size = be32toh(cmd.memsize_devlog);
2427	}
2428
2429	return (rc);
2430}
2431
2432/*
2433 * Retrieve various parameters that are of interest to the driver.  The device
2434 * has been initialized by the firmware at this point.
2435 */
2436static int
2437get_params__post_init(struct adapter *sc)
2438{
2439	int rc;
2440	uint32_t param[7], val[7];
2441	struct fw_caps_config_cmd caps;
2442
2443	param[0] = FW_PARAM_PFVF(IQFLINT_START);
2444	param[1] = FW_PARAM_PFVF(EQ_START);
2445	param[2] = FW_PARAM_PFVF(FILTER_START);
2446	param[3] = FW_PARAM_PFVF(FILTER_END);
2447	param[4] = FW_PARAM_PFVF(L2T_START);
2448	param[5] = FW_PARAM_PFVF(L2T_END);
2449	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2450	if (rc != 0) {
2451		device_printf(sc->dev,
2452		    "failed to query parameters (post_init): %d.\n", rc);
2453		return (rc);
2454	}
2455
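	/*
	 * The firmware reports inclusive [start, end] ranges, which is why
	 * the sizes computed below are end - start + 1.
	 */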
2456	sc->sge.iq_start = val[0];
2457	sc->sge.eq_start = val[1];
2458	sc->tids.ftid_base = val[2];
2459	sc->tids.nftids = val[3] - val[2] + 1;
2460	sc->vres.l2t.start = val[4];
2461	sc->vres.l2t.size = val[5] - val[4] + 1;
2462	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2463	    ("%s: L2 table size (%u) larger than expected (%u)",
2464	    __func__, sc->vres.l2t.size, L2T_SIZE));
2465
2466	/* get capabilities */
2467	bzero(&caps, sizeof(caps));
2468	caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2469	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
2470	caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2471	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2472	if (rc != 0) {
2473		device_printf(sc->dev,
2474		    "failed to get card capabilities: %d.\n", rc);
2475		return (rc);
2476	}
2477
2478	if (caps.toecaps) {
2479		/* query offload-related parameters */
2480		param[0] = FW_PARAM_DEV(NTID);
2481		param[1] = FW_PARAM_PFVF(SERVER_START);
2482		param[2] = FW_PARAM_PFVF(SERVER_END);
2483		param[3] = FW_PARAM_PFVF(TDDP_START);
2484		param[4] = FW_PARAM_PFVF(TDDP_END);
2485		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2486		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2487		if (rc != 0) {
2488			device_printf(sc->dev,
2489			    "failed to query TOE parameters: %d.\n", rc);
2490			return (rc);
2491		}
2492		sc->tids.ntids = val[0];
2493		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2494		sc->tids.stid_base = val[1];
2495		sc->tids.nstids = val[2] - val[1] + 1;
2496		sc->vres.ddp.start = val[3];
2497		sc->vres.ddp.size = val[4] - val[3] + 1;
2498		sc->params.ofldq_wr_cred = val[5];
2499		sc->params.offload = 1;
2500	}
2501	if (caps.rdmacaps) {
2502		param[0] = FW_PARAM_PFVF(STAG_START);
2503		param[1] = FW_PARAM_PFVF(STAG_END);
2504		param[2] = FW_PARAM_PFVF(RQ_START);
2505		param[3] = FW_PARAM_PFVF(RQ_END);
2506		param[4] = FW_PARAM_PFVF(PBL_START);
2507		param[5] = FW_PARAM_PFVF(PBL_END);
2508		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2509		if (rc != 0) {
2510			device_printf(sc->dev,
2511			    "failed to query RDMA parameters(1): %d.\n", rc);
2512			return (rc);
2513		}
2514		sc->vres.stag.start = val[0];
2515		sc->vres.stag.size = val[1] - val[0] + 1;
2516		sc->vres.rq.start = val[2];
2517		sc->vres.rq.size = val[3] - val[2] + 1;
2518		sc->vres.pbl.start = val[4];
2519		sc->vres.pbl.size = val[5] - val[4] + 1;
2520
2521		param[0] = FW_PARAM_PFVF(SQRQ_START);
2522		param[1] = FW_PARAM_PFVF(SQRQ_END);
2523		param[2] = FW_PARAM_PFVF(CQ_START);
2524		param[3] = FW_PARAM_PFVF(CQ_END);
2525		param[4] = FW_PARAM_PFVF(OCQ_START);
2526		param[5] = FW_PARAM_PFVF(OCQ_END);
2527		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2528		if (rc != 0) {
2529			device_printf(sc->dev,
2530			    "failed to query RDMA parameters(2): %d.\n", rc);
2531			return (rc);
2532		}
2533		sc->vres.qp.start = val[0];
2534		sc->vres.qp.size = val[1] - val[0] + 1;
2535		sc->vres.cq.start = val[2];
2536		sc->vres.cq.size = val[3] - val[2] + 1;
2537		sc->vres.ocq.start = val[4];
2538		sc->vres.ocq.size = val[5] - val[4] + 1;
2539	}
2540	if (caps.iscsicaps) {
2541		param[0] = FW_PARAM_PFVF(ISCSI_START);
2542		param[1] = FW_PARAM_PFVF(ISCSI_END);
2543		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2544		if (rc != 0) {
2545			device_printf(sc->dev,
2546			    "failed to query iSCSI parameters: %d.\n", rc);
2547			return (rc);
2548		}
2549		sc->vres.iscsi.start = val[0];
2550		sc->vres.iscsi.size = val[1] - val[0] + 1;
2551	}
2552
2553	/*
2554	 * We've got the params we wanted to query via the firmware.  Now grab
2555	 * some others directly from the chip.
2556	 */
2557	rc = t4_read_chip_settings(sc);
2558
2559	return (rc);
2560}
2561
2562static int
2563set_params__post_init(struct adapter *sc)
2564{
2565	uint32_t param, val;
2566
2567	/* ask for encapsulated CPLs */
2568	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2569	val = 1;
2570	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2571
2572	return (0);
2573}
2574
2575#undef FW_PARAM_PFVF
2576#undef FW_PARAM_DEV
2577
2578static void
2579t4_set_desc(struct adapter *sc)
2580{
2581	char buf[128];
2582	struct adapter_params *p = &sc->params;
2583
2584	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2585	    "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2586	    chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2587
2588	device_set_desc_copy(sc->dev, buf);
2589}
2590
2591static void
2592build_medialist(struct port_info *pi)
2593{
2594	struct ifmedia *media = &pi->media;
2595	int data, m;
2596
2597	PORT_LOCK(pi);
2598
2599	ifmedia_removeall(media);
2600
2601	m = IFM_ETHER | IFM_FDX;
2602	data = (pi->port_type << 8) | pi->mod_type;
2603
2604	switch (pi->port_type) {
2605	case FW_PORT_TYPE_BT_XFI:
2606		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2607		break;
2608
2609	case FW_PORT_TYPE_BT_XAUI:
2610		ifmedia_add(media, m | IFM_10G_T, data, NULL);
2611		/* fall through */
2612
2613	case FW_PORT_TYPE_BT_SGMII:
2614		ifmedia_add(media, m | IFM_1000_T, data, NULL);
2615		ifmedia_add(media, m | IFM_100_TX, data, NULL);
2616		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2617		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2618		break;
2619
2620	case FW_PORT_TYPE_CX4:
2621		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2622		ifmedia_set(media, m | IFM_10G_CX4);
2623		break;
2624
2625	case FW_PORT_TYPE_QSFP_10G:
2626	case FW_PORT_TYPE_SFP:
2627	case FW_PORT_TYPE_FIBER_XFI:
2628	case FW_PORT_TYPE_FIBER_XAUI:
2629		switch (pi->mod_type) {
2630
2631		case FW_PORT_MOD_TYPE_LR:
2632			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2633			ifmedia_set(media, m | IFM_10G_LR);
2634			break;
2635
2636		case FW_PORT_MOD_TYPE_SR:
2637			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2638			ifmedia_set(media, m | IFM_10G_SR);
2639			break;
2640
2641		case FW_PORT_MOD_TYPE_LRM:
2642			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2643			ifmedia_set(media, m | IFM_10G_LRM);
2644			break;
2645
2646		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2647		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2648			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2649			ifmedia_set(media, m | IFM_10G_TWINAX);
2650			break;
2651
2652		case FW_PORT_MOD_TYPE_NONE:
2653			m &= ~IFM_FDX;
2654			ifmedia_add(media, m | IFM_NONE, data, NULL);
2655			ifmedia_set(media, m | IFM_NONE);
2656			break;
2657
2658		case FW_PORT_MOD_TYPE_NA:
2659		case FW_PORT_MOD_TYPE_ER:
2660		default:
2661			device_printf(pi->dev,
2662			    "unknown port_type (%d), mod_type (%d)\n",
2663			    pi->port_type, pi->mod_type);
2664			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2665			ifmedia_set(media, m | IFM_UNKNOWN);
2666			break;
2667		}
2668		break;
2669
2670	case FW_PORT_TYPE_QSFP:
2671		switch (pi->mod_type) {
2672
2673		case FW_PORT_MOD_TYPE_LR:
2674			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2675			ifmedia_set(media, m | IFM_40G_LR4);
2676			break;
2677
2678		case FW_PORT_MOD_TYPE_SR:
2679			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2680			ifmedia_set(media, m | IFM_40G_SR4);
2681			break;
2682
2683		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2684		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2685			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2686			ifmedia_set(media, m | IFM_40G_CR4);
2687			break;
2688
2689		case FW_PORT_MOD_TYPE_NONE:
2690			m &= ~IFM_FDX;
2691			ifmedia_add(media, m | IFM_NONE, data, NULL);
2692			ifmedia_set(media, m | IFM_NONE);
2693			break;
2694
2695		default:
2696			device_printf(pi->dev,
2697			    "unknown port_type (%d), mod_type (%d)\n",
2698			    pi->port_type, pi->mod_type);
2699			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2700			ifmedia_set(media, m | IFM_UNKNOWN);
2701			break;
2702		}
2703		break;
2704
2705	default:
2706		device_printf(pi->dev,
2707		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2708		    pi->mod_type);
2709		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2710		ifmedia_set(media, m | IFM_UNKNOWN);
2711		break;
2712	}
2713
2714	PORT_UNLOCK(pi);
2715}
2716
2717#define FW_MAC_EXACT_CHUNK	7
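/*
 * Multicast addresses are programmed into the exact-match table this many at
 * a time (a single mailbox command only has room for so many entries);
 * addresses that don't get an exact-match slot are folded into the 64-bit
 * address hash instead.  See the XGMAC_MCADDRS handling below.
 */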
2718
2719/*
2720 * Program the port's XGMAC based on parameters in ifnet.  The caller also
2721 * indicates which parameters should be programmed (the rest are left alone).
2722 */
2723static int
2724update_mac_settings(struct port_info *pi, int flags)
2725{
2726	int rc;
2727	struct ifnet *ifp = pi->ifp;
2728	struct adapter *sc = pi->adapter;
2729	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2730
2731	ASSERT_SYNCHRONIZED_OP(sc);
2732	KASSERT(flags, ("%s: not told what to update.", __func__));
2733
2734	if (flags & XGMAC_MTU)
2735		mtu = ifp->if_mtu;
2736
2737	if (flags & XGMAC_PROMISC)
2738		promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2739
2740	if (flags & XGMAC_ALLMULTI)
2741		allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2742
2743	if (flags & XGMAC_VLANEX)
2744		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2745
2746	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2747	    vlanex, false);
2748	if (rc) {
2749		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2750		return (rc);
2751	}
2752
2753	if (flags & XGMAC_UCADDR) {
2754		uint8_t ucaddr[ETHER_ADDR_LEN];
2755
2756		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2757		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2758		    ucaddr, true, true);
2759		if (rc < 0) {
2760			rc = -rc;
2761			if_printf(ifp, "change_mac failed: %d\n", rc);
2762			return (rc);
2763		} else {
2764			pi->xact_addr_filt = rc;
2765			rc = 0;
2766		}
2767	}
2768
2769	if (flags & XGMAC_MCADDRS) {
2770		const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2771		int del = 1;
2772		uint64_t hash = 0;
2773		struct ifmultiaddr *ifma;
2774		int i = 0, j;
2775
2776		if_maddr_rlock(ifp);
2777		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2778			if (ifma->ifma_addr->sa_family != AF_LINK)
2779				continue;
2780			mcaddr[i++] =
2781			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2782
2783			if (i == FW_MAC_EXACT_CHUNK) {
2784				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2785				    del, i, mcaddr, NULL, &hash, 0);
2786				if (rc < 0) {
2787					rc = -rc;
2788					for (j = 0; j < i; j++) {
2789						if_printf(ifp,
2790						    "failed to add mc address"
2791						    " %02x:%02x:%02x:"
2792						    "%02x:%02x:%02x rc=%d\n",
2793						    mcaddr[j][0], mcaddr[j][1],
2794						    mcaddr[j][2], mcaddr[j][3],
2795						    mcaddr[j][4], mcaddr[j][5],
2796						    rc);
2797					}
2798					goto mcfail;
2799				}
2800				del = 0;
2801				i = 0;
2802			}
2803		}
2804		if (i > 0) {
2805			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2806			    del, i, mcaddr, NULL, &hash, 0);
2807			if (rc < 0) {
2808				rc = -rc;
2809				for (j = 0; j < i; j++) {
2810					if_printf(ifp,
2811					    "failed to add mc address"
2812					    " %02x:%02x:%02x:"
2813					    "%02x:%02x:%02x rc=%d\n",
2814					    mcaddr[j][0], mcaddr[j][1],
2815					    mcaddr[j][2], mcaddr[j][3],
2816					    mcaddr[j][4], mcaddr[j][5],
2817					    rc);
2818				}
2819				goto mcfail;
2820			}
2821		}
2822
2823		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2824		if (rc != 0)
2825			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
2826mcfail:
2827		if_maddr_runlock(ifp);
2828	}
2829
2830	return (rc);
2831}
2832
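/*
 * Acquire exclusive, sleepable access to the adapter for an operation that
 * may take a while.  Illustrative usage ("t4xyz" is just a sample wmesg):
 *
 *	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4xyz");
 *	if (rc)
 *		return (rc);
 *	... talk to the hardware/firmware ...
 *	end_synchronized_op(sc, 0);
 */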
2833int
2834begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2835    char *wmesg)
2836{
2837	int rc, pri;
2838
2839#ifdef WITNESS
2840	/* the caller thinks it's ok to sleep, but is it really? */
2841	if (flags & SLEEP_OK)
2842		pause("t4slptst", 1);
2843#endif
2844
2845	if (flags & INTR_OK)
2846		pri = PCATCH;
2847	else
2848		pri = 0;
2849
2850	ADAPTER_LOCK(sc);
2851	for (;;) {
2852
2853		if (pi && IS_DOOMED(pi)) {
2854			rc = ENXIO;
2855			goto done;
2856		}
2857
2858		if (!IS_BUSY(sc)) {
2859			rc = 0;
2860			break;
2861		}
2862
2863		if (!(flags & SLEEP_OK)) {
2864			rc = EBUSY;
2865			goto done;
2866		}
2867
2868		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2869			rc = EINTR;
2870			goto done;
2871		}
2872	}
2873
2874	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2875	SET_BUSY(sc);
2876#ifdef INVARIANTS
2877	sc->last_op = wmesg;
2878	sc->last_op_thr = curthread;
2879#endif
2880
2881done:
2882	if (!(flags & HOLD_LOCK) || rc)
2883		ADAPTER_UNLOCK(sc);
2884
2885	return (rc);
2886}
2887
2888void
2889end_synchronized_op(struct adapter *sc, int flags)
2890{
2891
2892	if (flags & LOCK_HELD)
2893		ADAPTER_LOCK_ASSERT_OWNED(sc);
2894	else
2895		ADAPTER_LOCK(sc);
2896
2897	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2898	CLR_BUSY(sc);
2899	wakeup(&sc->flags);
2900	ADAPTER_UNLOCK(sc);
2901}
2902
2903static int
2904cxgbe_init_synchronized(struct port_info *pi)
2905{
2906	struct adapter *sc = pi->adapter;
2907	struct ifnet *ifp = pi->ifp;
2908	int rc = 0;
2909
2910	ASSERT_SYNCHRONIZED_OP(sc);
2911
2912	if (isset(&sc->open_device_map, pi->port_id)) {
2913		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2914		    ("mismatch between open_device_map and if_drv_flags"));
2915		return (0);	/* already running */
2916	}
2917
2918	if (!(sc->flags & FULL_INIT_DONE) &&
2919	    ((rc = adapter_full_init(sc)) != 0))
2920		return (rc);	/* error message displayed already */
2921
2922	if (!(pi->flags & PORT_INIT_DONE) &&
2923	    ((rc = port_full_init(pi)) != 0))
2924		return (rc); /* error message displayed already */
2925
2926	rc = update_mac_settings(pi, XGMAC_ALL);
2927	if (rc)
2928		goto done;	/* error message displayed already */
2929
2930	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2931	if (rc != 0) {
2932		if_printf(ifp, "start_link failed: %d\n", rc);
2933		goto done;
2934	}
2935
2936	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2937	if (rc != 0) {
2938		if_printf(ifp, "enable_vi failed: %d\n", rc);
2939		goto done;
2940	}
2941
2942	/*
2943	 * The first iq of the first port to come up is used for tracing.
2944	 */
2945	if (sc->traceq < 0) {
2946		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
2947		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
2948		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
2949		    V_QUEUENUMBER(sc->traceq));
2950		pi->flags |= HAS_TRACEQ;
2951	}
2952
2953	/* all ok */
2954	setbit(&sc->open_device_map, pi->port_id);
2955	PORT_LOCK(pi);
2956	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2957	PORT_UNLOCK(pi);
2958
2959	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2960done:
2961	if (rc != 0)
2962		cxgbe_uninit_synchronized(pi);
2963
2964	return (rc);
2965}
2966
2967/*
2968 * Idempotent.
2969 */
2970static int
2971cxgbe_uninit_synchronized(struct port_info *pi)
2972{
2973	struct adapter *sc = pi->adapter;
2974	struct ifnet *ifp = pi->ifp;
2975	int rc;
2976
2977	ASSERT_SYNCHRONIZED_OP(sc);
2978
2979	/*
2980	 * Disable the VI so that all its data in either direction is discarded
2981	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
2982	 * tick) intact as the TP can deliver negative advice or data that it's
2983	 * holding in its RAM (for an offloaded connection) even after the VI is
2984	 * disabled.
2985	 */
2986	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2987	if (rc) {
2988		if_printf(ifp, "disable_vi failed: %d\n", rc);
2989		return (rc);
2990	}
2991
2992	clrbit(&sc->open_device_map, pi->port_id);
2993	PORT_LOCK(pi);
2994	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2995	PORT_UNLOCK(pi);
2996
2997	pi->link_cfg.link_ok = 0;
2998	pi->link_cfg.speed = 0;
2999	pi->linkdnrc = -1;
3000	t4_os_link_changed(sc, pi->port_id, 0, -1);
3001
3002	return (0);
3003}
3004
3005/*
3006 * It is ok for this function to fail midway and return right away.  t4_detach
3007 * will walk the entire sc->irq list and clean up whatever is valid.
3008 */
3009static int
3010setup_intr_handlers(struct adapter *sc)
3011{
3012	int rc, rid, p, q;
3013	char s[8];
3014	struct irq *irq;
3015	struct port_info *pi;
3016	struct sge_rxq *rxq;
3017#ifdef TCP_OFFLOAD
3018	struct sge_ofld_rxq *ofld_rxq;
3019#endif
3020
3021	/*
3022	 * Setup interrupts.
3023	 */
3024	irq = &sc->irq[0];
3025	rid = sc->intr_type == INTR_INTX ? 0 : 1;
3026	if (sc->intr_count == 1) {
3027		KASSERT(!(sc->flags & INTR_DIRECT),
3028		    ("%s: single interrupt && INTR_DIRECT?", __func__));
3029
3030		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
3031		if (rc != 0)
3032			return (rc);
3033	} else {
3034		/* Multiple interrupts. */
3035		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3036		    ("%s: too few intr.", __func__));
3037
3038		/* The first one is always error intr */
3039		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3040		if (rc != 0)
3041			return (rc);
3042		irq++;
3043		rid++;
3044
3045		/* The second one is always the firmware event queue */
3046		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
3047		    "evt");
3048		if (rc != 0)
3049			return (rc);
3050		irq++;
3051		rid++;
3052
3053		/*
3054		 * Note that if INTR_DIRECT is not set then either the NIC rx
3055		 * queues or (exclusive or) the TOE rx queues will be taking
3056		 * direct interrupts.
3057		 *
3058		 * There is no need to check for is_offload(sc) as nofldrxq
3059		 * will be 0 if offload is disabled.
3060		 */
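		/*
		 * Illustrative layout with 2 ports, nrxq = 4 each, and
		 * INTR_DIRECT set: vector 0 = error, 1 = firmware event
		 * queue, 2-5 = port 0 NIC rxqs, followed by port 0's offload
		 * rxqs (if any), and then the same again for port 1.
		 */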
3061		for_each_port(sc, p) {
3062			pi = sc->port[p];
3063
3064#ifdef TCP_OFFLOAD
3065			/*
3066			 * Skip over the NIC queues if they aren't taking direct
3067			 * interrupts.
3068			 */
3069			if (!(sc->flags & INTR_DIRECT) &&
3070			    pi->nofldrxq > pi->nrxq)
3071				goto ofld_queues;
3072#endif
3073			rxq = &sc->sge.rxq[pi->first_rxq];
3074			for (q = 0; q < pi->nrxq; q++, rxq++) {
3075				snprintf(s, sizeof(s), "%d.%d", p, q);
3076				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3077				    s);
3078				if (rc != 0)
3079					return (rc);
3080				irq++;
3081				rid++;
3082			}
3083
3084#ifdef TCP_OFFLOAD
3085			/*
3086			 * Skip over the offload queues if they aren't taking
3087			 * direct interrupts.
3088			 */
3089			if (!(sc->flags & INTR_DIRECT))
3090				continue;
3091ofld_queues:
3092			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3093			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3094				snprintf(s, sizeof(s), "%d,%d", p, q);
3095				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3096				    ofld_rxq, s);
3097				if (rc != 0)
3098					return (rc);
3099				irq++;
3100				rid++;
3101			}
3102#endif
3103		}
3104	}
3105
3106	return (0);
3107}
3108
3109static int
3110adapter_full_init(struct adapter *sc)
3111{
3112	int rc, i;
3113
3114	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3115	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3116	    ("%s: FULL_INIT_DONE already", __func__));
3117
3118	/*
3119	 * Set up queues that belong to the adapter (not any particular port).
3120	 */
3121	rc = t4_setup_adapter_queues(sc);
3122	if (rc != 0)
3123		goto done;
3124
3125	for (i = 0; i < nitems(sc->tq); i++) {
3126		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3127		    taskqueue_thread_enqueue, &sc->tq[i]);
3128		if (sc->tq[i] == NULL) {
3129			device_printf(sc->dev,
3130			    "failed to allocate task queue %d\n", i);
3131			rc = ENOMEM;
3132			goto done;
3133		}
3134		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3135		    device_get_nameunit(sc->dev), i);
3136	}
3137
3138	t4_intr_enable(sc);
3139	sc->flags |= FULL_INIT_DONE;
3140done:
3141	if (rc != 0)
3142		adapter_full_uninit(sc);
3143
3144	return (rc);
3145}
3146
3147static int
3148adapter_full_uninit(struct adapter *sc)
3149{
3150	int i;
3151
3152	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3153
3154	t4_teardown_adapter_queues(sc);
3155
3156	for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3157		taskqueue_free(sc->tq[i]);
3158		sc->tq[i] = NULL;
3159	}
3160
3161	sc->flags &= ~FULL_INIT_DONE;
3162
3163	return (0);
3164}
3165
3166static int
3167port_full_init(struct port_info *pi)
3168{
3169	struct adapter *sc = pi->adapter;
3170	struct ifnet *ifp = pi->ifp;
3171	uint16_t *rss;
3172	struct sge_rxq *rxq;
3173	int rc, i, j;
3174
3175	ASSERT_SYNCHRONIZED_OP(sc);
3176	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3177	    ("%s: PORT_INIT_DONE already", __func__));
3178
3179	sysctl_ctx_init(&pi->ctx);
3180	pi->flags |= PORT_SYSCTL_CTX;
3181
3182	/*
3183	 * Allocate tx/rx/fl queues for this port.
3184	 */
3185	rc = t4_setup_port_queues(pi);
3186	if (rc != 0)
3187		goto done;	/* error message displayed already */
3188
3189	/*
3190	 * Setup RSS for this port.  Save a copy of the RSS table for later use.
3191	 */
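	/*
	 * The indirection table is filled round-robin with the absolute ids
	 * of this port's rx queues; e.g. with rss_size = 128 and 4 rxqs, each
	 * queue's id repeats every 4 slots.
	 */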
3192	rss = malloc(pi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
3193	for (i = 0; i < pi->rss_size;) {
3194		for_each_rxq(pi, j, rxq) {
3195			rss[i++] = rxq->iq.abs_id;
3196			if (i == pi->rss_size)
3197				break;
3198		}
3199	}
3200
3201	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, pi->rss_size, rss,
3202	    pi->rss_size);
3203	if (rc != 0) {
3204		if_printf(ifp, "rss_config failed: %d\n", rc);
3205		goto done;
3206	}
3207
3208	pi->rss = rss;
3209	pi->flags |= PORT_INIT_DONE;
3210done:
3211	if (rc != 0)
3212		port_full_uninit(pi);
3213
3214	return (rc);
3215}
3216
3217/*
3218 * Idempotent.
3219 */
3220static int
3221port_full_uninit(struct port_info *pi)
3222{
3223	struct adapter *sc = pi->adapter;
3224	int i;
3225	struct sge_rxq *rxq;
3226	struct sge_txq *txq;
3227#ifdef TCP_OFFLOAD
3228	struct sge_ofld_rxq *ofld_rxq;
3229	struct sge_wrq *ofld_txq;
3230#endif
3231
3232	if (pi->flags & PORT_INIT_DONE) {
3233
3234		/* Need to quiesce queues.  XXX: ctrl queues? */
3235
3236		for_each_txq(pi, i, txq) {
3237			quiesce_eq(sc, &txq->eq);
3238		}
3239
3240#ifdef TCP_OFFLOAD
3241		for_each_ofld_txq(pi, i, ofld_txq) {
3242			quiesce_eq(sc, &ofld_txq->eq);
3243		}
3244#endif
3245
3246		for_each_rxq(pi, i, rxq) {
3247			quiesce_iq(sc, &rxq->iq);
3248			quiesce_fl(sc, &rxq->fl);
3249		}
3250
3251#ifdef TCP_OFFLOAD
3252		for_each_ofld_rxq(pi, i, ofld_rxq) {
3253			quiesce_iq(sc, &ofld_rxq->iq);
3254			quiesce_fl(sc, &ofld_rxq->fl);
3255		}
3256#endif
3257		free(pi->rss, M_CXGBE);
3258	}
3259
3260	t4_teardown_port_queues(pi);
3261	pi->flags &= ~PORT_INIT_DONE;
3262
3263	return (0);
3264}
3265
3266static void
3267quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3268{
3269	EQ_LOCK(eq);
3270	eq->flags |= EQ_DOOMED;
3271
3272	/*
3273	 * Wait for the response to a credit flush if one's
3274	 * pending.
3275	 */
3276	while (eq->flags & EQ_CRFLUSHED)
3277		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3278	EQ_UNLOCK(eq);
3279
3280	callout_drain(&eq->tx_callout);	/* XXX: iffy */
3281	pause("callout", 10);		/* Still iffy */
3282
3283	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3284}
3285
3286static void
3287quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3288{
3289	(void) sc;	/* unused */
3290
3291	/* Synchronize with the interrupt handler */
3292	while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3293		pause("iqfree", 1);
3294}
3295
3296static void
3297quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3298{
3299	mtx_lock(&sc->sfl_lock);
3300	FL_LOCK(fl);
3301	fl->flags |= FL_DOOMED;
3302	FL_UNLOCK(fl);
3303	mtx_unlock(&sc->sfl_lock);
3304
3305	callout_drain(&sc->sfl_callout);
3306	KASSERT((fl->flags & FL_STARVING) == 0,
3307	    ("%s: still starving", __func__));
3308}
3309
3310static int
3311t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3312    driver_intr_t *handler, void *arg, char *name)
3313{
3314	int rc;
3315
3316	irq->rid = rid;
3317	irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3318	    RF_SHAREABLE | RF_ACTIVE);
3319	if (irq->res == NULL) {
3320		device_printf(sc->dev,
3321		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3322		return (ENOMEM);
3323	}
3324
3325	rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3326	    NULL, handler, arg, &irq->tag);
3327	if (rc != 0) {
3328		device_printf(sc->dev,
3329		    "failed to setup interrupt for rid %d, name %s: %d\n",
3330		    rid, name, rc);
3331	} else if (name)
3332		bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3333
3334	return (rc);
3335}
3336
3337static int
3338t4_free_irq(struct adapter *sc, struct irq *irq)
3339{
3340	if (irq->tag)
3341		bus_teardown_intr(sc->dev, irq->res, irq->tag);
3342	if (irq->res)
3343		bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3344
3345	bzero(irq, sizeof(*irq));
3346
3347	return (0);
3348}
3349
3350static void
3351reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3352    unsigned int end)
3353{
3354	uint32_t *p = (uint32_t *)(buf + start);
3355
3356	for ( ; start <= end; start += sizeof(uint32_t))
3357		*p++ = t4_read_reg(sc, start);
3358}
3359
3360static void
3361t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3362{
3363	int i, n;
3364	const unsigned int *reg_ranges;
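	/*
	 * Each pair below is an inclusive [start, end] range of register
	 * addresses captured by reg_block_dump().
	 */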
3365	static const unsigned int t4_reg_ranges[] = {
3366		0x1008, 0x1108,
3367		0x1180, 0x11b4,
3368		0x11fc, 0x123c,
3369		0x1300, 0x173c,
3370		0x1800, 0x18fc,
3371		0x3000, 0x30d8,
3372		0x30e0, 0x5924,
3373		0x5960, 0x59d4,
3374		0x5a00, 0x5af8,
3375		0x6000, 0x6098,
3376		0x6100, 0x6150,
3377		0x6200, 0x6208,
3378		0x6240, 0x6248,
3379		0x6280, 0x6338,
3380		0x6370, 0x638c,
3381		0x6400, 0x643c,
3382		0x6500, 0x6524,
3383		0x6a00, 0x6a38,
3384		0x6a60, 0x6a78,
3385		0x6b00, 0x6b84,
3386		0x6bf0, 0x6c84,
3387		0x6cf0, 0x6d84,
3388		0x6df0, 0x6e84,
3389		0x6ef0, 0x6f84,
3390		0x6ff0, 0x7084,
3391		0x70f0, 0x7184,
3392		0x71f0, 0x7284,
3393		0x72f0, 0x7384,
3394		0x73f0, 0x7450,
3395		0x7500, 0x7530,
3396		0x7600, 0x761c,
3397		0x7680, 0x76cc,
3398		0x7700, 0x7798,
3399		0x77c0, 0x77fc,
3400		0x7900, 0x79fc,
3401		0x7b00, 0x7c38,
3402		0x7d00, 0x7efc,
3403		0x8dc0, 0x8e1c,
3404		0x8e30, 0x8e78,
3405		0x8ea0, 0x8f6c,
3406		0x8fc0, 0x9074,
3407		0x90fc, 0x90fc,
3408		0x9400, 0x9458,
3409		0x9600, 0x96bc,
3410		0x9800, 0x9808,
3411		0x9820, 0x983c,
3412		0x9850, 0x9864,
3413		0x9c00, 0x9c6c,
3414		0x9c80, 0x9cec,
3415		0x9d00, 0x9d6c,
3416		0x9d80, 0x9dec,
3417		0x9e00, 0x9e6c,
3418		0x9e80, 0x9eec,
3419		0x9f00, 0x9f6c,
3420		0x9f80, 0x9fec,
3421		0xd004, 0xd03c,
3422		0xdfc0, 0xdfe0,
3423		0xe000, 0xea7c,
3424		0xf000, 0x11110,
3425		0x11118, 0x11190,
3426		0x19040, 0x1906c,
3427		0x19078, 0x19080,
3428		0x1908c, 0x19124,
3429		0x19150, 0x191b0,
3430		0x191d0, 0x191e8,
3431		0x19238, 0x1924c,
3432		0x193f8, 0x19474,
3433		0x19490, 0x194f8,
3434		0x19800, 0x19f30,
3435		0x1a000, 0x1a06c,
3436		0x1a0b0, 0x1a120,
3437		0x1a128, 0x1a138,
3438		0x1a190, 0x1a1c4,
3439		0x1a1fc, 0x1a1fc,
3440		0x1e040, 0x1e04c,
3441		0x1e284, 0x1e28c,
3442		0x1e2c0, 0x1e2c0,
3443		0x1e2e0, 0x1e2e0,
3444		0x1e300, 0x1e384,
3445		0x1e3c0, 0x1e3c8,
3446		0x1e440, 0x1e44c,
3447		0x1e684, 0x1e68c,
3448		0x1e6c0, 0x1e6c0,
3449		0x1e6e0, 0x1e6e0,
3450		0x1e700, 0x1e784,
3451		0x1e7c0, 0x1e7c8,
3452		0x1e840, 0x1e84c,
3453		0x1ea84, 0x1ea8c,
3454		0x1eac0, 0x1eac0,
3455		0x1eae0, 0x1eae0,
3456		0x1eb00, 0x1eb84,
3457		0x1ebc0, 0x1ebc8,
3458		0x1ec40, 0x1ec4c,
3459		0x1ee84, 0x1ee8c,
3460		0x1eec0, 0x1eec0,
3461		0x1eee0, 0x1eee0,
3462		0x1ef00, 0x1ef84,
3463		0x1efc0, 0x1efc8,
3464		0x1f040, 0x1f04c,
3465		0x1f284, 0x1f28c,
3466		0x1f2c0, 0x1f2c0,
3467		0x1f2e0, 0x1f2e0,
3468		0x1f300, 0x1f384,
3469		0x1f3c0, 0x1f3c8,
3470		0x1f440, 0x1f44c,
3471		0x1f684, 0x1f68c,
3472		0x1f6c0, 0x1f6c0,
3473		0x1f6e0, 0x1f6e0,
3474		0x1f700, 0x1f784,
3475		0x1f7c0, 0x1f7c8,
3476		0x1f840, 0x1f84c,
3477		0x1fa84, 0x1fa8c,
3478		0x1fac0, 0x1fac0,
3479		0x1fae0, 0x1fae0,
3480		0x1fb00, 0x1fb84,
3481		0x1fbc0, 0x1fbc8,
3482		0x1fc40, 0x1fc4c,
3483		0x1fe84, 0x1fe8c,
3484		0x1fec0, 0x1fec0,
3485		0x1fee0, 0x1fee0,
3486		0x1ff00, 0x1ff84,
3487		0x1ffc0, 0x1ffc8,
3488		0x20000, 0x2002c,
3489		0x20100, 0x2013c,
3490		0x20190, 0x201c8,
3491		0x20200, 0x20318,
3492		0x20400, 0x20528,
3493		0x20540, 0x20614,
3494		0x21000, 0x21040,
3495		0x2104c, 0x21060,
3496		0x210c0, 0x210ec,
3497		0x21200, 0x21268,
3498		0x21270, 0x21284,
3499		0x212fc, 0x21388,
3500		0x21400, 0x21404,
3501		0x21500, 0x21518,
3502		0x2152c, 0x2153c,
3503		0x21550, 0x21554,
3504		0x21600, 0x21600,
3505		0x21608, 0x21628,
3506		0x21630, 0x2163c,
3507		0x21700, 0x2171c,
3508		0x21780, 0x2178c,
3509		0x21800, 0x21c38,
3510		0x21c80, 0x21d7c,
3511		0x21e00, 0x21e04,
3512		0x22000, 0x2202c,
3513		0x22100, 0x2213c,
3514		0x22190, 0x221c8,
3515		0x22200, 0x22318,
3516		0x22400, 0x22528,
3517		0x22540, 0x22614,
3518		0x23000, 0x23040,
3519		0x2304c, 0x23060,
3520		0x230c0, 0x230ec,
3521		0x23200, 0x23268,
3522		0x23270, 0x23284,
3523		0x232fc, 0x23388,
3524		0x23400, 0x23404,
3525		0x23500, 0x23518,
3526		0x2352c, 0x2353c,
3527		0x23550, 0x23554,
3528		0x23600, 0x23600,
3529		0x23608, 0x23628,
3530		0x23630, 0x2363c,
3531		0x23700, 0x2371c,
3532		0x23780, 0x2378c,
3533		0x23800, 0x23c38,
3534		0x23c80, 0x23d7c,
3535		0x23e00, 0x23e04,
3536		0x24000, 0x2402c,
3537		0x24100, 0x2413c,
3538		0x24190, 0x241c8,
3539		0x24200, 0x24318,
3540		0x24400, 0x24528,
3541		0x24540, 0x24614,
3542		0x25000, 0x25040,
3543		0x2504c, 0x25060,
3544		0x250c0, 0x250ec,
3545		0x25200, 0x25268,
3546		0x25270, 0x25284,
3547		0x252fc, 0x25388,
3548		0x25400, 0x25404,
3549		0x25500, 0x25518,
3550		0x2552c, 0x2553c,
3551		0x25550, 0x25554,
3552		0x25600, 0x25600,
3553		0x25608, 0x25628,
3554		0x25630, 0x2563c,
3555		0x25700, 0x2571c,
3556		0x25780, 0x2578c,
3557		0x25800, 0x25c38,
3558		0x25c80, 0x25d7c,
3559		0x25e00, 0x25e04,
3560		0x26000, 0x2602c,
3561		0x26100, 0x2613c,
3562		0x26190, 0x261c8,
3563		0x26200, 0x26318,
3564		0x26400, 0x26528,
3565		0x26540, 0x26614,
3566		0x27000, 0x27040,
3567		0x2704c, 0x27060,
3568		0x270c0, 0x270ec,
3569		0x27200, 0x27268,
3570		0x27270, 0x27284,
3571		0x272fc, 0x27388,
3572		0x27400, 0x27404,
3573		0x27500, 0x27518,
3574		0x2752c, 0x2753c,
3575		0x27550, 0x27554,
3576		0x27600, 0x27600,
3577		0x27608, 0x27628,
3578		0x27630, 0x2763c,
3579		0x27700, 0x2771c,
3580		0x27780, 0x2778c,
3581		0x27800, 0x27c38,
3582		0x27c80, 0x27d7c,
3583		0x27e00, 0x27e04
3584	};
3585	static const unsigned int t5_reg_ranges[] = {
3586		0x1008, 0x1148,
3587		0x1180, 0x11b4,
3588		0x11fc, 0x123c,
3589		0x1280, 0x173c,
3590		0x1800, 0x18fc,
3591		0x3000, 0x3028,
3592		0x3060, 0x30d8,
3593		0x30e0, 0x30fc,
3594		0x3140, 0x357c,
3595		0x35a8, 0x35cc,
3596		0x35ec, 0x35ec,
3597		0x3600, 0x5624,
3598		0x56cc, 0x575c,
3599		0x580c, 0x5814,
3600		0x5890, 0x58bc,
3601		0x5940, 0x59dc,
3602		0x59fc, 0x5a18,
3603		0x5a60, 0x5a9c,
3604		0x5b94, 0x5bfc,
3605		0x6000, 0x6040,
3606		0x6058, 0x614c,
3607		0x7700, 0x7798,
3608		0x77c0, 0x78fc,
3609		0x7b00, 0x7c54,
3610		0x7d00, 0x7efc,
3611		0x8dc0, 0x8de0,
3612		0x8df8, 0x8e84,
3613		0x8ea0, 0x8f84,
3614		0x8fc0, 0x90f8,
3615		0x9400, 0x9470,
3616		0x9600, 0x96f4,
3617		0x9800, 0x9808,
3618		0x9820, 0x983c,
3619		0x9850, 0x9864,
3620		0x9c00, 0x9c6c,
3621		0x9c80, 0x9cec,
3622		0x9d00, 0x9d6c,
3623		0x9d80, 0x9dec,
3624		0x9e00, 0x9e6c,
3625		0x9e80, 0x9eec,
3626		0x9f00, 0x9f6c,
3627		0x9f80, 0xa020,
3628		0xd004, 0xd03c,
3629		0xdfc0, 0xdfe0,
3630		0xe000, 0x11088,
3631		0x1109c, 0x11110,
3632		0x11118, 0x1117c,
3633		0x11190, 0x11204,
3634		0x19040, 0x1906c,
3635		0x19078, 0x19080,
3636		0x1908c, 0x19124,
3637		0x19150, 0x191b0,
3638		0x191d0, 0x191e8,
3639		0x19238, 0x19290,
3640		0x193f8, 0x19474,
3641		0x19490, 0x194cc,
3642		0x194f0, 0x194f8,
3643		0x19c00, 0x19c60,
3644		0x19c94, 0x19e10,
3645		0x19e50, 0x19f34,
3646		0x19f40, 0x19f50,
3647		0x19f90, 0x19fe4,
3648		0x1a000, 0x1a06c,
3649		0x1a0b0, 0x1a120,
3650		0x1a128, 0x1a138,
3651		0x1a190, 0x1a1c4,
3652		0x1a1fc, 0x1a1fc,
3653		0x1e008, 0x1e00c,
3654		0x1e040, 0x1e04c,
3655		0x1e284, 0x1e290,
3656		0x1e2c0, 0x1e2c0,
3657		0x1e2e0, 0x1e2e0,
3658		0x1e300, 0x1e384,
3659		0x1e3c0, 0x1e3c8,
3660		0x1e408, 0x1e40c,
3661		0x1e440, 0x1e44c,
3662		0x1e684, 0x1e690,
3663		0x1e6c0, 0x1e6c0,
3664		0x1e6e0, 0x1e6e0,
3665		0x1e700, 0x1e784,
3666		0x1e7c0, 0x1e7c8,
3667		0x1e808, 0x1e80c,
3668		0x1e840, 0x1e84c,
3669		0x1ea84, 0x1ea90,
3670		0x1eac0, 0x1eac0,
3671		0x1eae0, 0x1eae0,
3672		0x1eb00, 0x1eb84,
3673		0x1ebc0, 0x1ebc8,
3674		0x1ec08, 0x1ec0c,
3675		0x1ec40, 0x1ec4c,
3676		0x1ee84, 0x1ee90,
3677		0x1eec0, 0x1eec0,
3678		0x1eee0, 0x1eee0,
3679		0x1ef00, 0x1ef84,
3680		0x1efc0, 0x1efc8,
3681		0x1f008, 0x1f00c,
3682		0x1f040, 0x1f04c,
3683		0x1f284, 0x1f290,
3684		0x1f2c0, 0x1f2c0,
3685		0x1f2e0, 0x1f2e0,
3686		0x1f300, 0x1f384,
3687		0x1f3c0, 0x1f3c8,
3688		0x1f408, 0x1f40c,
3689		0x1f440, 0x1f44c,
3690		0x1f684, 0x1f690,
3691		0x1f6c0, 0x1f6c0,
3692		0x1f6e0, 0x1f6e0,
3693		0x1f700, 0x1f784,
3694		0x1f7c0, 0x1f7c8,
3695		0x1f808, 0x1f80c,
3696		0x1f840, 0x1f84c,
3697		0x1fa84, 0x1fa90,
3698		0x1fac0, 0x1fac0,
3699		0x1fae0, 0x1fae0,
3700		0x1fb00, 0x1fb84,
3701		0x1fbc0, 0x1fbc8,
3702		0x1fc08, 0x1fc0c,
3703		0x1fc40, 0x1fc4c,
3704		0x1fe84, 0x1fe90,
3705		0x1fec0, 0x1fec0,
3706		0x1fee0, 0x1fee0,
3707		0x1ff00, 0x1ff84,
3708		0x1ffc0, 0x1ffc8,
3709		0x30000, 0x30030,
3710		0x30100, 0x30144,
3711		0x30190, 0x301d0,
3712		0x30200, 0x30318,
3713		0x30400, 0x3052c,
3714		0x30540, 0x3061c,
3715		0x30800, 0x30834,
3716		0x308c0, 0x30908,
3717		0x30910, 0x309ac,
3718		0x30a00, 0x30a2c,
3719		0x30a44, 0x30a50,
3720		0x30a74, 0x30c24,
3721		0x30d00, 0x30d00,
3722		0x30d08, 0x30d14,
3723		0x30d1c, 0x30d20,
3724		0x30d3c, 0x30d50,
3725		0x31200, 0x3120c,
3726		0x31220, 0x31220,
3727		0x31240, 0x31240,
3728		0x31600, 0x3160c,
3729		0x31a00, 0x31a1c,
3730		0x31e00, 0x31e20,
3731		0x31e38, 0x31e3c,
3732		0x31e80, 0x31e80,
3733		0x31e88, 0x31ea8,
3734		0x31eb0, 0x31eb4,
3735		0x31ec8, 0x31ed4,
3736		0x31fb8, 0x32004,
3737		0x32200, 0x32200,
3738		0x32208, 0x32240,
3739		0x32248, 0x32280,
3740		0x32288, 0x322c0,
3741		0x322c8, 0x322fc,
3742		0x32600, 0x32630,
3743		0x32a00, 0x32abc,
3744		0x32b00, 0x32b70,
3745		0x33000, 0x33048,
3746		0x33060, 0x3309c,
3747		0x330f0, 0x33148,
3748		0x33160, 0x3319c,
3749		0x331f0, 0x332e4,
3750		0x332f8, 0x333e4,
3751		0x333f8, 0x33448,
3752		0x33460, 0x3349c,
3753		0x334f0, 0x33548,
3754		0x33560, 0x3359c,
3755		0x335f0, 0x336e4,
3756		0x336f8, 0x337e4,
3757		0x337f8, 0x337fc,
3758		0x33814, 0x33814,
3759		0x3382c, 0x3382c,
3760		0x33880, 0x3388c,
3761		0x338e8, 0x338ec,
3762		0x33900, 0x33948,
3763		0x33960, 0x3399c,
3764		0x339f0, 0x33ae4,
3765		0x33af8, 0x33b10,
3766		0x33b28, 0x33b28,
3767		0x33b3c, 0x33b50,
3768		0x33bf0, 0x33c10,
3769		0x33c28, 0x33c28,
3770		0x33c3c, 0x33c50,
3771		0x33cf0, 0x33cfc,
3772		0x34000, 0x34030,
3773		0x34100, 0x34144,
3774		0x34190, 0x341d0,
3775		0x34200, 0x34318,
3776		0x34400, 0x3452c,
3777		0x34540, 0x3461c,
3778		0x34800, 0x34834,
3779		0x348c0, 0x34908,
3780		0x34910, 0x349ac,
3781		0x34a00, 0x34a2c,
3782		0x34a44, 0x34a50,
3783		0x34a74, 0x34c24,
3784		0x34d00, 0x34d00,
3785		0x34d08, 0x34d14,
3786		0x34d1c, 0x34d20,
3787		0x34d3c, 0x34d50,
3788		0x35200, 0x3520c,
3789		0x35220, 0x35220,
3790		0x35240, 0x35240,
3791		0x35600, 0x3560c,
3792		0x35a00, 0x35a1c,
3793		0x35e00, 0x35e20,
3794		0x35e38, 0x35e3c,
3795		0x35e80, 0x35e80,
3796		0x35e88, 0x35ea8,
3797		0x35eb0, 0x35eb4,
3798		0x35ec8, 0x35ed4,
3799		0x35fb8, 0x36004,
3800		0x36200, 0x36200,
3801		0x36208, 0x36240,
3802		0x36248, 0x36280,
3803		0x36288, 0x362c0,
3804		0x362c8, 0x362fc,
3805		0x36600, 0x36630,
3806		0x36a00, 0x36abc,
3807		0x36b00, 0x36b70,
3808		0x37000, 0x37048,
3809		0x37060, 0x3709c,
3810		0x370f0, 0x37148,
3811		0x37160, 0x3719c,
3812		0x371f0, 0x372e4,
3813		0x372f8, 0x373e4,
3814		0x373f8, 0x37448,
3815		0x37460, 0x3749c,
3816		0x374f0, 0x37548,
3817		0x37560, 0x3759c,
3818		0x375f0, 0x376e4,
3819		0x376f8, 0x377e4,
3820		0x377f8, 0x377fc,
3821		0x37814, 0x37814,
3822		0x3782c, 0x3782c,
3823		0x37880, 0x3788c,
3824		0x378e8, 0x378ec,
3825		0x37900, 0x37948,
3826		0x37960, 0x3799c,
3827		0x379f0, 0x37ae4,
3828		0x37af8, 0x37b10,
3829		0x37b28, 0x37b28,
3830		0x37b3c, 0x37b50,
3831		0x37bf0, 0x37c10,
3832		0x37c28, 0x37c28,
3833		0x37c3c, 0x37c50,
3834		0x37cf0, 0x37cfc,
3835		0x38000, 0x38030,
3836		0x38100, 0x38144,
3837		0x38190, 0x381d0,
3838		0x38200, 0x38318,
3839		0x38400, 0x3852c,
3840		0x38540, 0x3861c,
3841		0x38800, 0x38834,
3842		0x388c0, 0x38908,
3843		0x38910, 0x389ac,
3844		0x38a00, 0x38a2c,
3845		0x38a44, 0x38a50,
3846		0x38a74, 0x38c24,
3847		0x38d00, 0x38d00,
3848		0x38d08, 0x38d14,
3849		0x38d1c, 0x38d20,
3850		0x38d3c, 0x38d50,
3851		0x39200, 0x3920c,
3852		0x39220, 0x39220,
3853		0x39240, 0x39240,
3854		0x39600, 0x3960c,
3855		0x39a00, 0x39a1c,
3856		0x39e00, 0x39e20,
3857		0x39e38, 0x39e3c,
3858		0x39e80, 0x39e80,
3859		0x39e88, 0x39ea8,
3860		0x39eb0, 0x39eb4,
3861		0x39ec8, 0x39ed4,
3862		0x39fb8, 0x3a004,
3863		0x3a200, 0x3a200,
3864		0x3a208, 0x3a240,
3865		0x3a248, 0x3a280,
3866		0x3a288, 0x3a2c0,
3867		0x3a2c8, 0x3a2fc,
3868		0x3a600, 0x3a630,
3869		0x3aa00, 0x3aabc,
3870		0x3ab00, 0x3ab70,
3871		0x3b000, 0x3b048,
3872		0x3b060, 0x3b09c,
3873		0x3b0f0, 0x3b148,
3874		0x3b160, 0x3b19c,
3875		0x3b1f0, 0x3b2e4,
3876		0x3b2f8, 0x3b3e4,
3877		0x3b3f8, 0x3b448,
3878		0x3b460, 0x3b49c,
3879		0x3b4f0, 0x3b548,
3880		0x3b560, 0x3b59c,
3881		0x3b5f0, 0x3b6e4,
3882		0x3b6f8, 0x3b7e4,
3883		0x3b7f8, 0x3b7fc,
3884		0x3b814, 0x3b814,
3885		0x3b82c, 0x3b82c,
3886		0x3b880, 0x3b88c,
3887		0x3b8e8, 0x3b8ec,
3888		0x3b900, 0x3b948,
3889		0x3b960, 0x3b99c,
3890		0x3b9f0, 0x3bae4,
3891		0x3baf8, 0x3bb10,
3892		0x3bb28, 0x3bb28,
3893		0x3bb3c, 0x3bb50,
3894		0x3bbf0, 0x3bc10,
3895		0x3bc28, 0x3bc28,
3896		0x3bc3c, 0x3bc50,
3897		0x3bcf0, 0x3bcfc,
3898		0x3c000, 0x3c030,
3899		0x3c100, 0x3c144,
3900		0x3c190, 0x3c1d0,
3901		0x3c200, 0x3c318,
3902		0x3c400, 0x3c52c,
3903		0x3c540, 0x3c61c,
3904		0x3c800, 0x3c834,
3905		0x3c8c0, 0x3c908,
3906		0x3c910, 0x3c9ac,
3907		0x3ca00, 0x3ca2c,
3908		0x3ca44, 0x3ca50,
3909		0x3ca74, 0x3cc24,
3910		0x3cd00, 0x3cd00,
3911		0x3cd08, 0x3cd14,
3912		0x3cd1c, 0x3cd20,
3913		0x3cd3c, 0x3cd50,
3914		0x3d200, 0x3d20c,
3915		0x3d220, 0x3d220,
3916		0x3d240, 0x3d240,
3917		0x3d600, 0x3d60c,
3918		0x3da00, 0x3da1c,
3919		0x3de00, 0x3de20,
3920		0x3de38, 0x3de3c,
3921		0x3de80, 0x3de80,
3922		0x3de88, 0x3dea8,
3923		0x3deb0, 0x3deb4,
3924		0x3dec8, 0x3ded4,
3925		0x3dfb8, 0x3e004,
3926		0x3e200, 0x3e200,
3927		0x3e208, 0x3e240,
3928		0x3e248, 0x3e280,
3929		0x3e288, 0x3e2c0,
3930		0x3e2c8, 0x3e2fc,
3931		0x3e600, 0x3e630,
3932		0x3ea00, 0x3eabc,
3933		0x3eb00, 0x3eb70,
3934		0x3f000, 0x3f048,
3935		0x3f060, 0x3f09c,
3936		0x3f0f0, 0x3f148,
3937		0x3f160, 0x3f19c,
3938		0x3f1f0, 0x3f2e4,
3939		0x3f2f8, 0x3f3e4,
3940		0x3f3f8, 0x3f448,
3941		0x3f460, 0x3f49c,
3942		0x3f4f0, 0x3f548,
3943		0x3f560, 0x3f59c,
3944		0x3f5f0, 0x3f6e4,
3945		0x3f6f8, 0x3f7e4,
3946		0x3f7f8, 0x3f7fc,
3947		0x3f814, 0x3f814,
3948		0x3f82c, 0x3f82c,
3949		0x3f880, 0x3f88c,
3950		0x3f8e8, 0x3f8ec,
3951		0x3f900, 0x3f948,
3952		0x3f960, 0x3f99c,
3953		0x3f9f0, 0x3fae4,
3954		0x3faf8, 0x3fb10,
3955		0x3fb28, 0x3fb28,
3956		0x3fb3c, 0x3fb50,
3957		0x3fbf0, 0x3fc10,
3958		0x3fc28, 0x3fc28,
3959		0x3fc3c, 0x3fc50,
3960		0x3fcf0, 0x3fcfc,
3961		0x40000, 0x4000c,
3962		0x40040, 0x40068,
3963		0x4007c, 0x40144,
3964		0x40180, 0x4018c,
3965		0x40200, 0x40298,
3966		0x402ac, 0x4033c,
3967		0x403f8, 0x403fc,
3968		0x41304, 0x413c4,
3969		0x41400, 0x4141c,
3970		0x41480, 0x414d0,
3971		0x44000, 0x44078,
3972		0x440c0, 0x44278,
3973		0x442c0, 0x44478,
3974		0x444c0, 0x44678,
3975		0x446c0, 0x44878,
3976		0x448c0, 0x449fc,
3977		0x45000, 0x45068,
3978		0x45080, 0x45084,
3979		0x450a0, 0x450b0,
3980		0x45200, 0x45268,
3981		0x45280, 0x45284,
3982		0x452a0, 0x452b0,
3983		0x460c0, 0x460e4,
3984		0x47000, 0x4708c,
3985		0x47200, 0x47250,
3986		0x47400, 0x47420,
3987		0x47600, 0x47618,
3988		0x47800, 0x47814,
3989		0x48000, 0x4800c,
3990		0x48040, 0x48068,
3991		0x4807c, 0x48144,
3992		0x48180, 0x4818c,
3993		0x48200, 0x48298,
3994		0x482ac, 0x4833c,
3995		0x483f8, 0x483fc,
3996		0x49304, 0x493c4,
3997		0x49400, 0x4941c,
3998		0x49480, 0x494d0,
3999		0x4c000, 0x4c078,
4000		0x4c0c0, 0x4c278,
4001		0x4c2c0, 0x4c478,
4002		0x4c4c0, 0x4c678,
4003		0x4c6c0, 0x4c878,
4004		0x4c8c0, 0x4c9fc,
4005		0x4d000, 0x4d068,
4006		0x4d080, 0x4d084,
4007		0x4d0a0, 0x4d0b0,
4008		0x4d200, 0x4d268,
4009		0x4d280, 0x4d284,
4010		0x4d2a0, 0x4d2b0,
4011		0x4e0c0, 0x4e0e4,
4012		0x4f000, 0x4f08c,
4013		0x4f200, 0x4f250,
4014		0x4f400, 0x4f420,
4015		0x4f600, 0x4f618,
4016		0x4f800, 0x4f814,
4017		0x50000, 0x500cc,
4018		0x50400, 0x50400,
4019		0x50800, 0x508cc,
4020		0x50c00, 0x50c00,
4021		0x51000, 0x5101c,
4022		0x51300, 0x51308,
4023	};
4024
4025	if (is_t4(sc)) {
4026		reg_ranges = &t4_reg_ranges[0];
4027		n = nitems(t4_reg_ranges);
4028	} else {
4029		reg_ranges = &t5_reg_ranges[0];
4030		n = nitems(t5_reg_ranges);
4031	}
4032
4033	regs->version = chip_id(sc) | chip_rev(sc) << 10;
4034	for (i = 0; i < n; i += 2)
4035		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
4036}
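/*
 * Illustrative sketch (not driver code): each reg_ranges[] pair above is an
 * inclusive [start, last] span of 32-bit registers, so a reg_block_dump()
 * consistent with the loop above could be as simple as the following.  The
 * actual helper is defined earlier in this file; the buffer layout (register
 * offset doubles as buffer offset) is an assumption here.
 */
#if 0
static void
reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
    unsigned int end)
{
	uint32_t *p = (uint32_t *)(buf + start);

	/* One 32-bit read per register in the inclusive range. */
	for ( ; start <= end; start += sizeof(uint32_t))
		*p++ = t4_read_reg(sc, start);
}
#endif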
4037
4038static void
4039cxgbe_tick(void *arg)
4040{
4041	struct port_info *pi = arg;
4042	struct ifnet *ifp = pi->ifp;
4043	struct sge_txq *txq;
4044	int i, drops;
4045	struct port_stats *s = &pi->stats;
4046
4047	PORT_LOCK(pi);
4048	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4049		PORT_UNLOCK(pi);
4050		return;	/* without scheduling another callout */
4051	}
4052
4053	t4_get_port_stats(pi->adapter, pi->tx_chan, s);
4054
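	/*
	 * Note: pause frames are counted by the MAC but carry no payload, so
	 * they (and their 64 bytes each on the wire) are excluded from the
	 * ifnet counters below.
	 */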
4055	ifp->if_opackets = s->tx_frames - s->tx_pause;
4056	ifp->if_ipackets = s->rx_frames - s->rx_pause;
4057	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
4058	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
4059	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
4060	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
4061	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
4062	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
4063	    s->rx_trunc3;
4064
4065	drops = s->tx_drop;
4066	for_each_txq(pi, i, txq)
4067		drops += txq->br->br_drops;
4068	ifp->if_snd.ifq_drops = drops;
4069
4070	ifp->if_oerrors = s->tx_error_frames;
4071	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4072	    s->rx_fcs_err + s->rx_len_err;
4073
4074	callout_schedule(&pi->tick, hz);
4075	PORT_UNLOCK(pi);
4076}
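/*
 * Sketch of how this tick is driven (callout(9); the actual init/start/stop
 * calls are made at attach, interface-up, and detach time elsewhere in this
 * file): cxgbe_tick() re-arms itself via callout_schedule() above, so it only
 * needs to be primed once.
 */
#if 0
	callout_init(&pi->tick, CALLOUT_MPSAFE);	/* at attach */
	callout_reset(&pi->tick, hz, cxgbe_tick, pi);	/* first tick, 1s out */
	callout_drain(&pi->tick);			/* at detach */
#endif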
4077
4078static void
4079cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4080{
4081	struct ifnet *vlan;
4082
4083	if (arg != ifp || ifp->if_type != IFT_ETHER)
4084		return;
4085
4086	vlan = VLAN_DEVAT(ifp, vid);
4087	VLAN_SETCOOKIE(vlan, ifp);
4088}
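/*
 * cxgbe_vlan_config() is an EVENTHANDLER(9) callback.  A registration of the
 * following shape (performed at port attach time; the tag variable name is an
 * assumption) hooks it up to the vlan_config event:
 */
#if 0
	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config,
	    ifp, EVENTHANDLER_PRI_ANY);
#endif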
4089
4090static int
4091cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4092{
4093
4094#ifdef INVARIANTS
4095	panic("%s: opcode 0x%02x on iq %p with payload %p",
4096	    __func__, rss->opcode, iq, m);
4097#else
4098	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4099	    __func__, rss->opcode, iq, m);
4100	m_freem(m);
4101#endif
4102	return (EDOOFUS);
4103}
4104
4105int
4106t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4107{
4108	uintptr_t *loc, new;
4109
4110	if (opcode >= nitems(sc->cpl_handler))
4111		return (EINVAL);
4112
4113	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4114	loc = (uintptr_t *) &sc->cpl_handler[opcode];
4115	atomic_store_rel_ptr(loc, new);
4116
4117	return (0);
4118}
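/*
 * Usage sketch: an upper layer (e.g. the TOE module) installs a CPL handler
 * and later restores the default dispatch by passing NULL.  The same pattern
 * applies to t4_register_an_handler and t4_register_fw_msg_handler below.
 * CPL_ACT_ESTABLISH is a real opcode (t4_msg.h) but the handler here is a
 * placeholder:
 */
#if 0
static int
do_act_establish_example(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	/* ... consume the CPL, then release the payload ... */
	m_freem(m);
	return (0);
}

	t4_register_cpl_handler(sc, CPL_ACT_ESTABLISH, do_act_establish_example);
	t4_register_cpl_handler(sc, CPL_ACT_ESTABLISH, NULL);	/* uninstall */
#endif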
4119
4120static int
4121an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4122{
4123
4124#ifdef INVARIANTS
4125	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4126#else
4127	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4128	    __func__, iq, ctrl);
4129#endif
4130	return (EDOOFUS);
4131}
4132
4133int
4134t4_register_an_handler(struct adapter *sc, an_handler_t h)
4135{
4136	uintptr_t *loc, new;
4137
4138	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4139	loc = (uintptr_t *) &sc->an_handler;
4140	atomic_store_rel_ptr(loc, new);
4141
4142	return (0);
4143}
4144
4145static int
4146fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4147{
4148	const struct cpl_fw6_msg *cpl =
4149	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
4150
4151#ifdef INVARIANTS
4152	panic("%s: fw_msg type %d", __func__, cpl->type);
4153#else
4154	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4155#endif
4156	return (EDOOFUS);
4157}
4158
4159int
4160t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4161{
4162	uintptr_t *loc, new;
4163
4164	if (type >= nitems(sc->fw_msg_handler))
4165		return (EINVAL);
4166
4167	/*
4168	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4169	 * handler dispatch table.  Reject any attempt to install a handler for
4170	 * this subtype.
4171	 */
4172	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4173		return (EINVAL);
4174
4175	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4176	loc = (uintptr_t *) &sc->fw_msg_handler[type];
4177	atomic_store_rel_ptr(loc, new);
4178
4179	return (0);
4180}
4181
4182static int
4183t4_sysctls(struct adapter *sc)
4184{
4185	struct sysctl_ctx_list *ctx;
4186	struct sysctl_oid *oid;
4187	struct sysctl_oid_list *children, *c0;
4188	static char *caps[] = {
4189		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
4190		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL"	/* caps[1] niccaps */
4191		    "\6HASHFILTER\7ETHOFLD",
4192		"\20\1TOE",				/* caps[2] toecaps */
4193		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
4194		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
4195		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4196		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4197		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
4198		    "\4PO_INITIATOR\5PO_TARGET"
4199	};
4200	static char *doorbells = "\20\1UDB\2WCWR\3UDBWC\4KDB";
4201
4202	ctx = device_get_sysctl_ctx(sc->dev);
4203
4204	/*
4205	 * dev.t4nex.X.
4206	 */
4207	oid = device_get_sysctl_tree(sc->dev);
4208	c0 = children = SYSCTL_CHILDREN(oid);
4209
4210	sc->sc_do_rxcopy = 1;
4211	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
4212	    &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
4213
4214	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4215	    sc->params.nports, "# of ports");
4216
4217	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4218	    NULL, chip_rev(sc), "chip hardware revision");
4219
4220	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4221	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4222
4223	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4224	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4225
4226	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4227	    sc->cfcsum, "config file checksum");
4228
4229	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4230	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4231	    sysctl_bitfield, "A", "available doorbells");
4232
4233	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4234	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4235	    sysctl_bitfield, "A", "available link capabilities");
4236
4237	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4238	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4239	    sysctl_bitfield, "A", "available NIC capabilities");
4240
4241	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4242	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4243	    sysctl_bitfield, "A", "available TCP offload capabilities");
4244
4245	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4246	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4247	    sysctl_bitfield, "A", "available RDMA capabilities");
4248
4249	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4250	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4251	    sysctl_bitfield, "A", "available iSCSI capabilities");
4252
4253	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4254	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4255	    sysctl_bitfield, "A", "available FCoE capabilities");
4256
4257	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4258	    sc->params.vpd.cclk, "core clock frequency (in KHz)");
4259
4260	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4261	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4262	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4263	    "interrupt holdoff timer values (us)");
4264
4265	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4266	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4267	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4268	    "interrupt holdoff packet counter values");
4269
4270	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4271	    NULL, sc->tids.nftids, "number of filters");
4272
4273	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4274	    CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4275	    "chip temperature (in Celsius)");
4276
4277	t4_sge_sysctls(sc, ctx, children);
4278
4279	sc->lro_timeout = 100;
4280	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
4281	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
4282
4283#ifdef SBUF_DRAIN
4284	/*
4285	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
4286	 */
4287	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4288	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4289	    "logs and miscellaneous information");
4290	children = SYSCTL_CHILDREN(oid);
4291
4292	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4293	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4294	    sysctl_cctrl, "A", "congestion control");
4295
4296	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4297	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4298	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4299
4300	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4301	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4302	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4303
4304	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4305	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4306	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4307
4308	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4309	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4310	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4311
4312	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4313	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4314	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4315
4316	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4317	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4318	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4319
4320	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4321	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4322	    sysctl_cim_la, "A", "CIM logic analyzer");
4323
4324	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4325	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4326	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4327
4328	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4329	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4330	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4331
4332	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4333	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4334	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4335
4336	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4337	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4338	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4339
4340	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4341	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4342	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4343
4344	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4345	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4346	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4347
4348	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4349	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4350	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4351
4352	if (is_t5(sc)) {
4353		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4354		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4355		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4356
4357		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4358		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4359		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4360	}
4361
4362	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4363	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4364	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4365
4366	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4367	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4368	    sysctl_cim_qcfg, "A", "CIM queue configuration");
4369
4370	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4371	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4372	    sysctl_cpl_stats, "A", "CPL statistics");
4373
4374	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4375	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4376	    sysctl_ddp_stats, "A", "DDP statistics");
4377
4378	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4379	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4380	    sysctl_devlog, "A", "firmware's device log");
4381
4382	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4383	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4384	    sysctl_fcoe_stats, "A", "FCoE statistics");
4385
4386	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4387	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4388	    sysctl_hw_sched, "A", "hardware scheduler");
4389
4390	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4391	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4392	    sysctl_l2t, "A", "hardware L2 table");
4393
4394	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4395	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4396	    sysctl_lb_stats, "A", "loopback statistics");
4397
4398	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4399	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4400	    sysctl_meminfo, "A", "memory regions");
4401
4402	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4403	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4404	    sysctl_mps_tcam, "A", "MPS TCAM entries");
4405
4406	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4407	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4408	    sysctl_path_mtus, "A", "path MTUs");
4409
4410	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4411	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4412	    sysctl_pm_stats, "A", "PM statistics");
4413
4414	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4415	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4416	    sysctl_rdma_stats, "A", "RDMA statistics");
4417
4418	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4419	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4420	    sysctl_tcp_stats, "A", "TCP statistics");
4421
4422	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4423	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4424	    sysctl_tids, "A", "TID information");
4425
4426	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4427	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4428	    sysctl_tp_err_stats, "A", "TP error statistics");
4429
4430	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4431	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4432	    sysctl_tp_la, "A", "TP logic analyzer");
4433
4434	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4435	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4436	    sysctl_tx_rate, "A", "Tx rate");
4437
4438	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4439	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4440	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4441
4442	if (is_t5(sc)) {
4443		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4444		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4445		    sysctl_wcwr_stats, "A", "write combined work requests");
4446	}
4447#endif
4448
4449#ifdef TCP_OFFLOAD
4450	if (is_offload(sc)) {
4451		/*
4452		 * dev.t4nex.X.toe.
4453		 */
4454		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4455		    NULL, "TOE parameters");
4456		children = SYSCTL_CHILDREN(oid);
4457
4458		sc->tt.sndbuf = 256 * 1024;
4459		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4460		    &sc->tt.sndbuf, 0, "max hardware send buffer size");
4461
4462		sc->tt.ddp = 0;
4463		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4464		    &sc->tt.ddp, 0, "DDP allowed");
4465
4466		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4467		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4468		    &sc->tt.indsz, 0, "DDP max indicate size allowed");
4469
4470		sc->tt.ddp_thres =
4471		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4472		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4473		    &sc->tt.ddp_thres, 0, "DDP threshold");
4474
4475		sc->tt.rx_coalesce = 1;
4476		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4477		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4478	}
4479#endif
4480
4481
4482	return (0);
4483}
4484
4485static int
4486cxgbe_sysctls(struct port_info *pi)
4487{
4488	struct sysctl_ctx_list *ctx;
4489	struct sysctl_oid *oid;
4490	struct sysctl_oid_list *children;
4491
4492	ctx = device_get_sysctl_ctx(pi->dev);
4493
4494	/*
4495	 * dev.cxgbe.X.
4496	 */
4497	oid = device_get_sysctl_tree(pi->dev);
4498	children = SYSCTL_CHILDREN(oid);
4499
4500	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4501	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4502	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4503		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4504		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4505		    "PHY temperature (in Celsius)");
4506		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4507		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4508		    "PHY firmware version");
4509	}
4510	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4511	    &pi->nrxq, 0, "# of rx queues");
4512	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4513	    &pi->ntxq, 0, "# of tx queues");
4514	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4515	    &pi->first_rxq, 0, "index of first rx queue");
4516	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4517	    &pi->first_txq, 0, "index of first tx queue");
4518	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", CTLTYPE_INT |
4519	    CTLFLAG_RW, pi, 0, sysctl_noflowq, "IU",
4520	    "Reserve queue 0 for non-flowid packets");
4521
4522#ifdef TCP_OFFLOAD
4523	if (is_offload(pi->adapter)) {
4524		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4525		    &pi->nofldrxq, 0,
4526		    "# of rx queues for offloaded TCP connections");
4527		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4528		    &pi->nofldtxq, 0,
4529		    "# of tx queues for offloaded TCP connections");
4530		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4531		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4532		    "index of first TOE rx queue");
4533		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4534		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
4535		    "index of first TOE tx queue");
4536	}
4537#endif
4538
4539	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4540	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4541	    "holdoff timer index");
4542	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4543	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4544	    "holdoff packet counter index");
4545
4546	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4547	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4548	    "rx queue size");
4549	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4550	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4551	    "tx queue size");
4552
4553	/*
4554	 * dev.cxgbe.X.stats.
4555	 */
4556	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4557	    NULL, "port statistics");
4558	children = SYSCTL_CHILDREN(oid);
4559
4560#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4561	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4562	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
4563	    sysctl_handle_t4_reg64, "QU", desc)
4564
4565	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4566	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4567	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4568	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4569	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4570	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4571	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4572	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4573	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4574	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4575	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4576	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4577	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4578	    "# of tx frames in this range",
4579	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4580	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4581	    "# of tx frames in this range",
4582	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4583	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4584	    "# of tx frames in this range",
4585	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4586	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4587	    "# of tx frames in this range",
4588	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4589	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4590	    "# of tx frames in this range",
4591	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4592	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4593	    "# of tx frames in this range",
4594	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4595	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4596	    "# of tx frames in this range",
4597	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4598	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4599	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4600	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4601	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4602	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4603	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4604	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4605	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4606	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4607	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4608	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4609	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4610	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4611	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4612	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4613	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4614	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4615	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4616	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4617	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4618
4619	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4620	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4621	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4622	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4623	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4624	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4625	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4626	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4627	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4628	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4629	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4630	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4631	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4632	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4633	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4634	    "# of frames received with bad FCS",
4635	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4636	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4637	    "# of frames received with length error",
4638	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4639	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4640	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4641	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4642	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4643	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4644	    "# of rx frames in this range",
4645	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4646	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4647	    "# of rx frames in this range",
4648	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4649	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4650	    "# of rx frames in this range",
4651	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4652	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4653	    "# of rx frames in this range",
4654	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4655	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4656	    "# of rx frames in this range",
4657	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4658	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4659	    "# of rx frames in this range",
4660	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4661	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4662	    "# of rx frames in this range",
4663	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4664	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4665	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4666	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4667	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4668	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4669	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4670	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4671	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4672	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4673	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4674	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4675	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4676	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4677	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4678	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4679	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4680	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4681	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4682
4683#undef SYSCTL_ADD_T4_REG64
4684
4685#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4686	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4687	    &pi->stats.name, desc)
4688
4689	/* We get these from port_stats and they may be stale by up to 1s */
4690	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4691	    "# drops due to buffer-group 0 overflows");
4692	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4693	    "# drops due to buffer-group 1 overflows");
4694	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4695	    "# drops due to buffer-group 2 overflows");
4696	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4697	    "# drops due to buffer-group 3 overflows");
4698	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4699	    "# of buffer-group 0 truncated packets");
4700	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4701	    "# of buffer-group 1 truncated packets");
4702	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4703	    "# of buffer-group 2 truncated packets");
4704	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4705	    "# of buffer-group 3 truncated packets");
4706
4707#undef SYSCTL_ADD_T4_PORTSTAT
4708
4709	return (0);
4710}
4711
4712static int
4713sysctl_int_array(SYSCTL_HANDLER_ARGS)
4714{
4715	int rc, *i;
4716	struct sbuf sb;
4717
4718	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4719	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4720		sbuf_printf(&sb, "%d ", *i);
4721	sbuf_trim(&sb);
4722	sbuf_finish(&sb);
4723	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4724	sbuf_delete(&sb);
4725	return (rc);
4726}
4727
4728static int
4729sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4730{
4731	int rc;
4732	struct sbuf *sb;
4733
4734	rc = sysctl_wire_old_buffer(req, 0);
4735	if (rc != 0)
4736		return (rc);
4737
4738	sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4739	if (sb == NULL)
4740		return (ENOMEM);
4741
4742	sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4743	rc = sbuf_finish(sb);
4744	sbuf_delete(sb);
4745
4746	return (rc);
4747}
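/*
 * The "\20..." strings handed to this function (caps[] and doorbells above)
 * use the printf(9) "%b" encoding: the first character is the numeric output
 * base (\20 = octal 020 = 16, i.e. hex) and each following <bit#><name> pair
 * names one bit, counting from 1.  For example, with an assumed value of 0x5:
 *
 *	sbuf_printf(sb, "%b", 0x5, "\20\1UDB\2WCWR\3UDBWC\4KDB");
 *
 * renders as "5<UDB,UDBWC>".
 */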
4748
4749static int
4750sysctl_btphy(SYSCTL_HANDLER_ARGS)
4751{
4752	struct port_info *pi = arg1;
4753	int op = arg2;
4754	struct adapter *sc = pi->adapter;
4755	u_int v;
4756	int rc;
4757
4758	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
4759	if (rc)
4760		return (rc);
4761	/* XXX: magic numbers */
4762	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
4763	    &v);
4764	end_synchronized_op(sc, 0);
4765	if (rc)
4766		return (rc);
4767	if (op == 0)
4768		v /= 256;
4769
4770	rc = sysctl_handle_int(oidp, &v, 0, req);
4771	return (rc);
4772}
4773
4774static int
4775sysctl_noflowq(SYSCTL_HANDLER_ARGS)
4776{
4777	struct port_info *pi = arg1;
4778	int rc, val;
4779
4780	val = pi->rsrv_noflowq;
4781	rc = sysctl_handle_int(oidp, &val, 0, req);
4782	if (rc != 0 || req->newptr == NULL)
4783		return (rc);
4784
4785	if ((val >= 1) && (pi->ntxq > 1))
4786		pi->rsrv_noflowq = 1;
4787	else
4788		pi->rsrv_noflowq = 0;
4789
4790	return (rc);
4791}
4792
4793static int
4794sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
4795{
4796	struct port_info *pi = arg1;
4797	struct adapter *sc = pi->adapter;
4798	int idx, rc, i;
4799	struct sge_rxq *rxq;
4800#ifdef TCP_OFFLOAD
4801	struct sge_ofld_rxq *ofld_rxq;
4802#endif
4803	uint8_t v;
4804
4805	idx = pi->tmr_idx;
4806
4807	rc = sysctl_handle_int(oidp, &idx, 0, req);
4808	if (rc != 0 || req->newptr == NULL)
4809		return (rc);
4810
4811	if (idx < 0 || idx >= SGE_NTIMERS)
4812		return (EINVAL);
4813
4814	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4815	    "t4tmr");
4816	if (rc)
4817		return (rc);
4818
4819	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
4820	for_each_rxq(pi, i, rxq) {
4821#ifdef atomic_store_rel_8
4822		atomic_store_rel_8(&rxq->iq.intr_params, v);
4823#else
4824		rxq->iq.intr_params = v;
4825#endif
4826	}
4827#ifdef TCP_OFFLOAD
4828	for_each_ofld_rxq(pi, i, ofld_rxq) {
4829#ifdef atomic_store_rel_8
4830		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
4831#else
4832		ofld_rxq->iq.intr_params = v;
4833#endif
4834	}
4835#endif
4836	pi->tmr_idx = idx;
4837
4838	end_synchronized_op(sc, LOCK_HELD);
4839	return (0);
4840}
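/*
 * Note on the stores above: iq.intr_params packs the holdoff timer index and
 * the counter-enable bit into a single byte, i.e.
 *
 *	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
 *
 * The byte-wide atomic store is used where the MD code provides one
 * (atomic_store_rel_8) so the rx path never observes a torn update;
 * otherwise a plain byte store is assumed to be atomic anyway.
 */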
4841
4842static int
4843sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4844{
4845	struct port_info *pi = arg1;
4846	struct adapter *sc = pi->adapter;
4847	int idx, rc;
4848
4849	idx = pi->pktc_idx;
4850
4851	rc = sysctl_handle_int(oidp, &idx, 0, req);
4852	if (rc != 0 || req->newptr == NULL)
4853		return (rc);
4854
4855	if (idx < -1 || idx >= SGE_NCOUNTERS)
4856		return (EINVAL);
4857
4858	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4859	    "t4pktc");
4860	if (rc)
4861		return (rc);
4862
4863	if (pi->flags & PORT_INIT_DONE)
4864		rc = EBUSY; /* cannot be changed once the queues are created */
4865	else
4866		pi->pktc_idx = idx;
4867
4868	end_synchronized_op(sc, LOCK_HELD);
4869	return (rc);
4870}
4871
4872static int
4873sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4874{
4875	struct port_info *pi = arg1;
4876	struct adapter *sc = pi->adapter;
4877	int qsize, rc;
4878
4879	qsize = pi->qsize_rxq;
4880
4881	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4882	if (rc != 0 || req->newptr == NULL)
4883		return (rc);
4884
4885	if (qsize < 128 || (qsize & 7))
4886		return (EINVAL);
4887
4888	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4889	    "t4rxqs");
4890	if (rc)
4891		return (rc);
4892
4893	if (pi->flags & PORT_INIT_DONE)
4894		rc = EBUSY; /* cannot be changed once the queues are created */
4895	else
4896		pi->qsize_rxq = qsize;
4897
4898	end_synchronized_op(sc, LOCK_HELD);
4899	return (rc);
4900}
4901
4902static int
4903sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4904{
4905	struct port_info *pi = arg1;
4906	struct adapter *sc = pi->adapter;
4907	int qsize, rc;
4908
4909	qsize = pi->qsize_txq;
4910
4911	rc = sysctl_handle_int(oidp, &qsize, 0, req);
4912	if (rc != 0 || req->newptr == NULL)
4913		return (rc);
4914
4915	/* bufring size must be a power of 2 */
4916	if (qsize < 128 || !powerof2(qsize))
4917		return (EINVAL);
4918
4919	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4920	    "t4txqs");
4921	if (rc)
4922		return (rc);
4923
4924	if (pi->flags & PORT_INIT_DONE)
4925		rc = EBUSY; /* cannot be changed once the queues are created */
4926	else
4927		pi->qsize_txq = qsize;
4928
4929	end_synchronized_op(sc, LOCK_HELD);
4930	return (rc);
4931}
4932
4933static int
4934sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4935{
4936	struct adapter *sc = arg1;
4937	int reg = arg2;
4938	uint64_t val;
4939
4940	val = t4_read_reg64(sc, reg);
4941
4942	return (sysctl_handle_64(oidp, &val, 0, req));
4943}
4944
4945static int
4946sysctl_temperature(SYSCTL_HANDLER_ARGS)
4947{
4948	struct adapter *sc = arg1;
4949	int rc, t;
4950	uint32_t param, val;
4951
4952	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
4953	if (rc)
4954		return (rc);
4955	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4956	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4957	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
4958	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4959	end_synchronized_op(sc, 0);
4960	if (rc)
4961		return (rc);
4962
4963	/* unknown is returned as 0 but we display -1 in that case */
4964	t = val == 0 ? -1 : val;
4965
4966	rc = sysctl_handle_int(oidp, &t, 0, req);
4967	return (rc);
4968}
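/*
 * Sketch of the firmware parameter addressing used above: a parameter is
 * named by a (mnemonic, param_x, param_y) triple packed into one 32-bit word,
 * and t4_query_params() fetches its value over the mailbox.  A second
 * diagnostic would be read the same way (FW_PARAM_DEV_DIAG_VDD is an assumed
 * mnemonic, present only in newer firmware interfaces):
 */
#if 0
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
#endif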
4969
4970#ifdef SBUF_DRAIN
4971static int
4972sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4973{
4974	struct adapter *sc = arg1;
4975	struct sbuf *sb;
4976	int rc, i;
4977	uint16_t incr[NMTUS][NCCTRL_WIN];
4978	static const char *dec_fac[] = {
4979		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4980		"0.9375"
4981	};
4982
4983	rc = sysctl_wire_old_buffer(req, 0);
4984	if (rc != 0)
4985		return (rc);
4986
4987	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4988	if (sb == NULL)
4989		return (ENOMEM);
4990
4991	t4_read_cong_tbl(sc, incr);
4992
4993	for (i = 0; i < NCCTRL_WIN; ++i) {
4994		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4995		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4996		    incr[5][i], incr[6][i], incr[7][i]);
4997		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4998		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4999		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
5000		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
5001	}
5002
5003	rc = sbuf_finish(sb);
5004	sbuf_delete(sb);
5005
5006	return (rc);
5007}
5008
5009static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
5010	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
5011	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
5012	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
5013};
5014
5015static int
5016sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
5017{
5018	struct adapter *sc = arg1;
5019	struct sbuf *sb;
5020	int rc, i, n, qid = arg2;
5021	uint32_t *buf, *p;
5022	char *qtype;
5023	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
5024
5025	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
5026	    ("%s: bad qid %d\n", __func__, qid));
5027
5028	if (qid < CIM_NUM_IBQ) {
5029		/* inbound queue */
5030		qtype = "IBQ";
5031		n = 4 * CIM_IBQ_SIZE;
5032		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5033		rc = t4_read_cim_ibq(sc, qid, buf, n);
5034	} else {
5035		/* outbound queue */
5036		qtype = "OBQ";
5037		qid -= CIM_NUM_IBQ;
5038		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
5039		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
5040		rc = t4_read_cim_obq(sc, qid, buf, n);
5041	}
5042
5043	if (rc < 0) {
5044		rc = -rc;
5045		goto done;
5046	}
5047	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
5048
5049	rc = sysctl_wire_old_buffer(req, 0);
5050	if (rc != 0)
5051		goto done;
5052
5053	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5054	if (sb == NULL) {
5055		rc = ENOMEM;
5056		goto done;
5057	}
5058
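	/* arg2 still holds the combined IBQ+OBQ index, as qname[] expects. */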
5059	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
5060	for (i = 0, p = buf; i < n; i += 16, p += 4)
5061		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
5062		    p[2], p[3]);
5063
5064	rc = sbuf_finish(sb);
5065	sbuf_delete(sb);
5066done:
5067	free(buf, M_CXGBE);
5068	return (rc);
5069}
5070
5071static int
5072sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5073{
5074	struct adapter *sc = arg1;
5075	u_int cfg;
5076	struct sbuf *sb;
5077	uint32_t *buf, *p;
5078	int rc;
5079
5080	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5081	if (rc != 0)
5082		return (rc);
5083
5084	rc = sysctl_wire_old_buffer(req, 0);
5085	if (rc != 0)
5086		return (rc);
5087
5088	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5089	if (sb == NULL)
5090		return (ENOMEM);
5091
5092	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5093	    M_ZERO | M_WAITOK);
5094
5095	rc = -t4_cim_read_la(sc, buf, NULL);
5096	if (rc != 0)
5097		goto done;
5098
5099	sbuf_printf(sb, "Status   Data      PC%s",
5100	    cfg & F_UPDBGLACAPTPCONLY ? "" :
5101	    "     LS0Stat  LS0Addr             LS0Data");
5102
5103	KASSERT((sc->params.cim_la_size & 7) == 0,
5104	    ("%s: p will walk off the end of buf", __func__));
5105
5106	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5107		if (cfg & F_UPDBGLACAPTPCONLY) {
5108			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
5109			    p[6], p[7]);
5110			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x",
5111			    (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5112			    p[4] & 0xff, p[5] >> 8);
5113			sbuf_printf(sb, "\n  %02x   %x%07x %x%07x",
5114			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5115			    p[1] & 0xf, p[2] >> 4);
5116		} else {
5117			sbuf_printf(sb,
5118			    "\n  %02x   %x%07x %x%07x %08x %08x "
5119			    "%08x%08x%08x%08x",
5120			    (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5121			    p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5122			    p[6], p[7]);
5123		}
5124	}
5125
5126	rc = sbuf_finish(sb);
5127	sbuf_delete(sb);
5128done:
5129	free(buf, M_CXGBE);
5130	return (rc);
5131}
5132
5133static int
5134sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5135{
5136	struct adapter *sc = arg1;
5137	u_int i;
5138	struct sbuf *sb;
5139	uint32_t *buf, *p;
5140	int rc;
5141
5142	rc = sysctl_wire_old_buffer(req, 0);
5143	if (rc != 0)
5144		return (rc);
5145
5146	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5147	if (sb == NULL)
5148		return (ENOMEM);
5149
5150	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5151	    M_ZERO | M_WAITOK);
5152
5153	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
5154	p = buf;
5155
5156	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5157		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5158		    p[1], p[0]);
5159	}
5160
5161	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
5162	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5163		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
5164		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5165		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5166		    (p[1] >> 2) | ((p[2] & 3) << 30),
5167		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5168		    p[0] & 1);
5169	}
5170
5171	rc = sbuf_finish(sb);
5172	sbuf_delete(sb);
5173	free(buf, M_CXGBE);
5174	return (rc);
5175}
5176
5177static int
5178sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5179{
5180	struct adapter *sc = arg1;
5181	u_int i;
5182	struct sbuf *sb;
5183	uint32_t *buf, *p;
5184	int rc;
5185
5186	rc = sysctl_wire_old_buffer(req, 0);
5187	if (rc != 0)
5188		return (rc);
5189
5190	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5191	if (sb == NULL)
5192		return (ENOMEM);
5193
5194	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5195	    M_ZERO | M_WAITOK);
5196
5197	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5198	p = buf;
5199
5200	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
5201	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5202		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
5203		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5204		    p[4], p[3], p[2], p[1], p[0]);
5205	}
5206
5207	sbuf_printf(sb, "\n\nCntl ID               Data");
5208	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5209		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
5210		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5211	}
5212
5213	rc = sbuf_finish(sb);
5214	sbuf_delete(sb);
5215	free(buf, M_CXGBE);
5216	return (rc);
5217}
5218
5219static int
5220sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5221{
5222	struct adapter *sc = arg1;
5223	struct sbuf *sb;
5224	int rc, i;
5225	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5226	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5227	uint16_t thres[CIM_NUM_IBQ];
5228	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5229	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5230	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5231
5232	if (is_t4(sc)) {
5233		cim_num_obq = CIM_NUM_OBQ;
5234		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5235		obq_rdaddr = A_UP_OBQ_0_REALADDR;
5236	} else {
5237		cim_num_obq = CIM_NUM_OBQ_T5;
5238		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5239		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5240	}
5241	nq = CIM_NUM_IBQ + cim_num_obq;
5242
5243	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5244	if (rc == 0)
5245		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5246	if (rc != 0)
5247		return (rc);
5248
5249	t4_read_cimq_cfg(sc, base, size, thres);
5250
5251	rc = sysctl_wire_old_buffer(req, 0);
5252	if (rc != 0)
5253		return (rc);
5254
5255	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5256	if (sb == NULL)
5257		return (ENOMEM);
5258
5259	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
5260
5261	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5262		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
5263		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5264		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5265		    G_QUEREMFLITS(p[2]) * 16);
5266	for ( ; i < nq; i++, p += 4, wr += 2)
5267		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
5268		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5269		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5270		    G_QUEREMFLITS(p[2]) * 16);
5271
5272	rc = sbuf_finish(sb);
5273	sbuf_delete(sb);
5274
5275	return (rc);
5276}
5277
5278static int
5279sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5280{
5281	struct adapter *sc = arg1;
5282	struct sbuf *sb;
5283	int rc;
5284	struct tp_cpl_stats stats;
5285
5286	rc = sysctl_wire_old_buffer(req, 0);
5287	if (rc != 0)
5288		return (rc);
5289
5290	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5291	if (sb == NULL)
5292		return (ENOMEM);
5293
5294	t4_tp_get_cpl_stats(sc, &stats);
5295
5296	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
5297	    "channel 3\n");
5298	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
5299		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5300	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
5301		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5302
5303	rc = sbuf_finish(sb);
5304	sbuf_delete(sb);
5305
5306	return (rc);
5307}
5308
5309static int
5310sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5311{
5312	struct adapter *sc = arg1;
5313	struct sbuf *sb;
5314	int rc;
5315	struct tp_usm_stats stats;
5316
5317	rc = sysctl_wire_old_buffer(req, 0);
5318	if (rc != 0)
5319		return (rc);
5320
5321	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5322	if (sb == NULL)
5323		return (ENOMEM);
5324
5325	t4_get_usm_stats(sc, &stats);
5326
5327	sbuf_printf(sb, "Frames: %u\n", stats.frames);
5328	sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5329	sbuf_printf(sb, "Drops:  %u", stats.drops);
5330
5331	rc = sbuf_finish(sb);
5332	sbuf_delete(sb);
5333
5334	return (rc);
5335}
5336
5337const char *devlog_level_strings[] = {
5338	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
5339	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
5340	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
5341	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
5342	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
5343	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
5344};
5345
5346const char *devlog_facility_strings[] = {
5347	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
5348	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
5349	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
5350	[FW_DEVLOG_FACILITY_RES]	= "RES",
5351	[FW_DEVLOG_FACILITY_HW]		= "HW",
5352	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
5353	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
5354	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
5355	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
5356	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
5357	[FW_DEVLOG_FACILITY_VI]		= "VI",
5358	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
5359	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
5360	[FW_DEVLOG_FACILITY_TM]		= "TM",
5361	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
5362	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
5363	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
5364	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
5365	[FW_DEVLOG_FACILITY_RI]		= "RI",
5366	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
5367	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
5368	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
5369	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
5370};
5371
5372static int
5373sysctl_devlog(SYSCTL_HANDLER_ARGS)
5374{
5375	struct adapter *sc = arg1;
5376	struct devlog_params *dparams = &sc->params.devlog;
5377	struct fw_devlog_e *buf, *e;
5378	int i, j, rc, nentries, first = 0, m;
5379	struct sbuf *sb;
5380	uint64_t ftstamp = UINT64_MAX;
5381
5382	if (dparams->start == 0) {
5383		dparams->memtype = FW_MEMTYPE_EDC0;
5384		dparams->start = 0x84000;
5385		dparams->size = 32768;
5386	}
5387
5388	nentries = dparams->size / sizeof(struct fw_devlog_e);
5389
5390	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5391	if (buf == NULL)
5392		return (ENOMEM);
5393
5394	m = fwmtype_to_hwmtype(dparams->memtype);
5395	rc = -t4_mem_read(sc, m, dparams->start, dparams->size, (void *)buf);
5396	if (rc != 0)
5397		goto done;
5398
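	/*
	 * The devlog is a circular buffer: scan every entry to find the one
	 * with the oldest timestamp, which is where chronological output
	 * must start.
	 */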
5399	for (i = 0; i < nentries; i++) {
5400		e = &buf[i];
5401
5402		if (e->timestamp == 0)
5403			break;	/* end */
5404
5405		e->timestamp = be64toh(e->timestamp);
5406		e->seqno = be32toh(e->seqno);
5407		for (j = 0; j < 8; j++)
5408			e->params[j] = be32toh(e->params[j]);
5409
5410		if (e->timestamp < ftstamp) {
5411			ftstamp = e->timestamp;
5412			first = i;
5413		}
5414	}
5415
5416	if (buf[first].timestamp == 0)
5417		goto done;	/* nothing in the log */
5418
5419	rc = sysctl_wire_old_buffer(req, 0);
5420	if (rc != 0)
5421		goto done;
5422
5423	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5424	if (sb == NULL) {
5425		rc = ENOMEM;
5426		goto done;
5427	}
5428	sbuf_printf(sb, "%10s  %15s  %8s  %8s  %s\n",
5429	    "Seq#", "Tstamp", "Level", "Facility", "Message");
5430
5431	i = first;
5432	do {
5433		e = &buf[i];
5434		if (e->timestamp == 0)
5435			break;	/* end */
5436
5437		sbuf_printf(sb, "%10d  %15ju  %8s  %8s  ",
5438		    e->seqno, e->timestamp,
5439		    (e->level < nitems(devlog_level_strings) ?
5440			devlog_level_strings[e->level] : "UNKNOWN"),
5441		    (e->facility < nitems(devlog_facility_strings) ?
5442			devlog_facility_strings[e->facility] : "UNKNOWN"));
5443		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5444		    e->params[2], e->params[3], e->params[4],
5445		    e->params[5], e->params[6], e->params[7]);
5446
5447		if (++i == nentries)
5448			i = 0;
5449	} while (i != first);
5450
5451	rc = sbuf_finish(sb);
5452	sbuf_delete(sb);
5453done:
5454	free(buf, M_CXGBE);
5455	return (rc);
5456}
5457
5458static int
5459sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5460{
5461	struct adapter *sc = arg1;
5462	struct sbuf *sb;
5463	int rc;
5464	struct tp_fcoe_stats stats[4];
5465
5466	rc = sysctl_wire_old_buffer(req, 0);
5467	if (rc != 0)
5468		return (rc);
5469
5470	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5471	if (sb == NULL)
5472		return (ENOMEM);
5473
5474	t4_get_fcoe_stats(sc, 0, &stats[0]);
5475	t4_get_fcoe_stats(sc, 1, &stats[1]);
5476	t4_get_fcoe_stats(sc, 2, &stats[2]);
5477	t4_get_fcoe_stats(sc, 3, &stats[3]);
5478
5479	sbuf_printf(sb, "                   channel 0        channel 1        "
5480	    "channel 2        channel 3\n");
5481	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
5482	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5483	    stats[3].octetsDDP);
5484	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
5485	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5486	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5487	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5488	    stats[3].framesDrop);
5489
5490	rc = sbuf_finish(sb);
5491	sbuf_delete(sb);
5492
5493	return (rc);
5494}
5495
5496static int
5497sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5498{
5499	struct adapter *sc = arg1;
5500	struct sbuf *sb;
5501	int rc, i;
5502	unsigned int map, kbps, ipg, mode;
5503	unsigned int pace_tab[NTX_SCHED];
5504
5505	rc = sysctl_wire_old_buffer(req, 0);
5506	if (rc != 0)
5507		return (rc);
5508
5509	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5510	if (sb == NULL)
5511		return (ENOMEM);
5512
5513	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5514	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5515	t4_read_pace_tbl(sc, pace_tab);
5516
5517	sbuf_printf(sb, "Scheduler  Mode   Channel  Rate (Kbps)   "
5518	    "Class IPG (0.1 ns)   Flow IPG (us)");
5519
5520	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5521		t4_get_tx_sched(sc, i, &kbps, &ipg);
5522		sbuf_printf(sb, "\n    %u      %-5s     %u     ", i,
5523		    (mode & (1 << i)) ? "flow" : "class", map & 3);
5524		if (kbps)
5525			sbuf_printf(sb, "%9u     ", kbps);
5526		else
5527			sbuf_printf(sb, " disabled     ");
5528
5529		if (ipg)
5530			sbuf_printf(sb, "%13u        ", ipg);
5531		else
5532			sbuf_printf(sb, "     disabled        ");
5533
5534		if (pace_tab[i])
5535			sbuf_printf(sb, "%10u", pace_tab[i]);
5536		else
5537			sbuf_printf(sb, "  disabled");
5538	}
5539
5540	rc = sbuf_finish(sb);
5541	sbuf_delete(sb);
5542
5543	return (rc);
5544}
5545
5546static int
5547sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5548{
5549	struct adapter *sc = arg1;
5550	struct sbuf *sb;
5551	int rc, i, j;
5552	uint64_t *p0, *p1;
5553	struct lb_port_stats s[2];
5554	static const char *stat_name[] = {
5555		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5556		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5557		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
5558		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5559		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5560		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5561		"BG2FramesTrunc:", "BG3FramesTrunc:"
5562	};
5563
5564	rc = sysctl_wire_old_buffer(req, 0);
5565	if (rc != 0)
5566		return (rc);
5567
5568	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5569	if (sb == NULL)
5570		return (ENOMEM);
5571
5572	memset(s, 0, sizeof(s));
5573
5574	for (i = 0; i < 4; i += 2) {
5575		t4_get_lb_stats(sc, i, &s[0]);
5576		t4_get_lb_stats(sc, i + 1, &s[1]);
5577
5578		p0 = &s[0].octets;
5579		p1 = &s[1].octets;
5580		sbuf_printf(sb, "%s                       Loopback %u"
5581		    "           Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5582
5583		for (j = 0; j < nitems(stat_name); j++)
5584			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5585				   *p0++, *p1++);
5586	}
5587
5588	rc = sbuf_finish(sb);
5589	sbuf_delete(sb);
5590
5591	return (rc);
5592}
5593
5594static int
5595sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5596{
5597	int rc = 0;
5598	struct port_info *pi = arg1;
5599	struct sbuf *sb;
5600	static const char *linkdnreasons[] = {
5601		"non-specific", "remote fault", "autoneg failed", "reserved3",
5602		"PHY overheated", "unknown", "rx los", "reserved7"
5603	};
5604
5605	rc = sysctl_wire_old_buffer(req, 0);
5606	if (rc != 0)
5607		return (rc);
5608	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5609	if (sb == NULL)
5610		return (ENOMEM);
5611
5612	if (pi->linkdnrc < 0)
5613		sbuf_printf(sb, "n/a");
5614	else if (pi->linkdnrc < nitems(linkdnreasons))
5615		sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5616	else
5617		sbuf_printf(sb, "%d", pi->linkdnrc);
5618
5619	rc = sbuf_finish(sb);
5620	sbuf_delete(sb);
5621
5622	return (rc);
5623}
5624
5625struct mem_desc {
5626	unsigned int base;
5627	unsigned int limit;
5628	unsigned int idx;
5629};
5630
5631static int
5632mem_desc_cmp(const void *a, const void *b)
5633{
5634	return ((const struct mem_desc *)a)->base -
5635	       ((const struct mem_desc *)b)->base;
5636}
5637
5638static void
5639mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
5640    unsigned int to)
5641{
5642	unsigned int size;
5643
5644	size = to - from + 1;
5645	if (size == 0)
5646		return;
5647
5648	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
5649	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
5650}
5651
5652static int
5653sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5654{
5655	struct adapter *sc = arg1;
5656	struct sbuf *sb;
5657	int rc, i, n;
5658	uint32_t lo, hi, used, alloc;
5659	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5660	static const char *region[] = {
5661		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5662		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5663		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5664		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5665		"RQUDP region:", "PBL region:", "TXPBL region:",
5666		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5667		"On-chip queues:"
5668	};
5669	struct mem_desc avail[4];
5670	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
5671	struct mem_desc *md = mem;
5672
5673	rc = sysctl_wire_old_buffer(req, 0);
5674	if (rc != 0)
5675		return (rc);
5676
5677	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5678	if (sb == NULL)
5679		return (ENOMEM);
5680
5681	for (i = 0; i < nitems(mem); i++) {
5682		mem[i].limit = 0;
5683		mem[i].idx = i;
5684	}
5685
5686	/* Find and sort the populated memory ranges */
5687	i = 0;
5688	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5689	if (lo & F_EDRAM0_ENABLE) {
5690		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5691		avail[i].base = G_EDRAM0_BASE(hi) << 20;
5692		avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5693		avail[i].idx = 0;
5694		i++;
5695	}
5696	if (lo & F_EDRAM1_ENABLE) {
5697		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5698		avail[i].base = G_EDRAM1_BASE(hi) << 20;
5699		avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5700		avail[i].idx = 1;
5701		i++;
5702	}
5703	if (lo & F_EXT_MEM_ENABLE) {
5704		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5705		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5706		avail[i].limit = avail[i].base +
5707		    (G_EXT_MEM_SIZE(hi) << 20);
5708		avail[i].idx = is_t4(sc) ? 2 : 3;	/* Call it MC for T4 */
5709		i++;
5710	}
5711	if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5712		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5713		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5714		avail[i].limit = avail[i].base +
5715		    (G_EXT_MEM1_SIZE(hi) << 20);
5716		avail[i].idx = 4;
5717		i++;
5718	}
5719	if (!i) {                                  /* no memory available */
5720		sbuf_delete(sb);
		return (0);
	}
5721	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5722
5723	(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5724	(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5725	(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5726	(md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5727	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5728	(md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5729	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5730	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5731	(md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5732
5733	/* the next few have explicit upper bounds */
5734	md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5735	md->limit = md->base - 1 +
5736		    t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5737		    G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5738	md++;
5739
5740	md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5741	md->limit = md->base - 1 +
5742		    t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5743		    G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5744	md++;
5745
5746	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5747		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5748		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5749		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5750	} else {
5751		md->base = 0;
5752		md->idx = nitems(region);  /* hide it */
5753	}
5754	md++;
5755
5756#define ulp_region(reg) \
5757	md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5758	(md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5759
5760	ulp_region(RX_ISCSI);
5761	ulp_region(RX_TDDP);
5762	ulp_region(TX_TPT);
5763	ulp_region(RX_STAG);
5764	ulp_region(RX_RQ);
5765	ulp_region(RX_RQUDP);
5766	ulp_region(RX_PBL);
5767	ulp_region(TX_PBL);
5768#undef ulp_region
5769
5770	md->base = 0;
5771	md->idx = nitems(region);
5772	if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5773		md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5774		md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5775		    A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5776	}
5777	md++;
5778
5779	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5780	md->limit = md->base + sc->tids.ntids - 1;
5781	md++;
5782	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5783	md->limit = md->base + sc->tids.ntids - 1;
5784	md++;
5785
5786	md->base = sc->vres.ocq.start;
5787	if (sc->vres.ocq.size)
5788		md->limit = md->base + sc->vres.ocq.size - 1;
5789	else
5790		md->idx = nitems(region);  /* hide it */
5791	md++;
5792
5793	/* add any address-space holes, there can be up to 3 */
5794	for (n = 0; n < i - 1; n++)
5795		if (avail[n].limit < avail[n + 1].base)
5796			(md++)->base = avail[n].limit;
5797	if (avail[n].limit)
5798		(md++)->base = avail[n].limit;
5799
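	/*
	 * Sort every region by base address.  A region without an explicit
	 * limit is assumed to extend to just below the next region's base, or
	 * to the top of the address space if it is the last one.
	 */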
5800	n = md - mem;
5801	qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5802
5803	for (lo = 0; lo < i; lo++)
5804		mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5805				avail[lo].limit - 1);
5806
5807	sbuf_printf(sb, "\n");
5808	for (i = 0; i < n; i++) {
5809		if (mem[i].idx >= nitems(region))
5810			continue;                        /* skip holes */
5811		if (!mem[i].limit)
5812			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5813		mem_region_show(sb, region[mem[i].idx], mem[i].base,
5814				mem[i].limit);
5815	}
5816
5817	sbuf_printf(sb, "\n");
5818	lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5819	hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5820	mem_region_show(sb, "uP RAM:", lo, hi);
5821
5822	lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5823	hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5824	mem_region_show(sb, "uP Extmem2:", lo, hi);
5825
5826	lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5827	sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5828		   G_PMRXMAXPAGE(lo),
5829		   t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5830		   (lo & F_PMRXNUMCHN) ? 2 : 1);
5831
5832	lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5833	hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5834	sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5835		   G_PMTXMAXPAGE(lo),
5836		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5837		   hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5838	sbuf_printf(sb, "%u p-structs\n",
5839		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5840
5841	for (i = 0; i < 4; i++) {
5842		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5843		if (is_t4(sc)) {
5844			used = G_USED(lo);
5845			alloc = G_ALLOC(lo);
5846		} else {
5847			used = G_T5_USED(lo);
5848			alloc = G_T5_ALLOC(lo);
5849		}
5850		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5851			   i, used, alloc);
5852	}
5853	for (i = 0; i < 4; i++) {
5854		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5855		if (is_t4(sc)) {
5856			used = G_USED(lo);
5857			alloc = G_ALLOC(lo);
5858		} else {
5859			used = G_T5_USED(lo);
5860			alloc = G_T5_ALLOC(lo);
5861		}
5862		sbuf_printf(sb,
5863			   "\nLoopback %d using %u pages out of %u allocated",
5864			   i, used, alloc);
5865	}
5866
5867	rc = sbuf_finish(sb);
5868	sbuf_delete(sb);
5869
5870	return (rc);
5871}
5872
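/*
 * Each MPS TCAM bit is stored as an (x, y) pair: (0, 0) is a don't-care,
 * (0, 1) matches a 1, (1, 0) matches a 0, and (1, 1) is an invalid encoding
 * (which is why the reader below skips entries with tcamx & tcamy != 0).
 * The mask is therefore x | y and the value is simply y.
 */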
5873static inline void
5874tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5875{
5876	*mask = x | y;
5877	y = htobe64(y);
5878	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5879}
5880
5881static int
5882sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
5883{
5884	struct adapter *sc = arg1;
5885	struct sbuf *sb;
5886	int rc, i, n;
5887
5888	rc = sysctl_wire_old_buffer(req, 0);
5889	if (rc != 0)
5890		return (rc);
5891
5892	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5893	if (sb == NULL)
5894		return (ENOMEM);
5895
5896	sbuf_printf(sb,
5897	    "Idx  Ethernet address     Mask     Vld Ports PF"
5898	    "  VF              Replication             P0 P1 P2 P3  ML");
5899	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
5900	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5901	for (i = 0; i < n; i++) {
5902		uint64_t tcamx, tcamy, mask;
5903		uint32_t cls_lo, cls_hi;
5904		uint8_t addr[ETHER_ADDR_LEN];
5905
5906		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
5907		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
5908		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
5909		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
5910
5911		if (tcamx & tcamy)
5912			continue;
5913
5914		tcamxy2valmask(tcamx, tcamy, addr, &mask);
5915		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
5916			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
5917			   addr[3], addr[4], addr[5], (uintmax_t)mask,
5918			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
5919			   G_PORTMAP(cls_hi), G_PF(cls_lo),
5920			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
5921
5922		if (cls_lo & F_REPLICATE) {
5923			struct fw_ldst_cmd ldst_cmd;
5924
5925			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
5926			ldst_cmd.op_to_addrspace =
5927			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
5928				F_FW_CMD_REQUEST | F_FW_CMD_READ |
5929				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
5930			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
5931			ldst_cmd.u.mps.fid_ctl =
5932			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
5933				V_FW_LDST_CMD_CTL(i));
5934
5935			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
5936			    "t4mps");
5937			if (rc)
5938				break;
5939			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
5940			    sizeof(ldst_cmd), &ldst_cmd);
5941			end_synchronized_op(sc, 0);
5942
5943			if (rc != 0) {
5944				sbuf_printf(sb,
5945				    " ------------ error %3u ------------", rc);
5946				rc = 0;
5947			} else {
5948				sbuf_printf(sb, " %08x %08x %08x %08x",
5949				    be32toh(ldst_cmd.u.mps.rplc127_96),
5950				    be32toh(ldst_cmd.u.mps.rplc95_64),
5951				    be32toh(ldst_cmd.u.mps.rplc63_32),
5952				    be32toh(ldst_cmd.u.mps.rplc31_0));
5953			}
5954		} else
5955			sbuf_printf(sb, "%36s", "");
5956
5957		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
5958		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
5959		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
5960	}
5961
5962	if (rc)
5963		(void) sbuf_finish(sb);
5964	else
5965		rc = sbuf_finish(sb);
5966	sbuf_delete(sb);
5967
5968	return (rc);
5969}
5970
5971static int
5972sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5973{
5974	struct adapter *sc = arg1;
5975	struct sbuf *sb;
5976	int rc;
5977	uint16_t mtus[NMTUS];
5978
5979	rc = sysctl_wire_old_buffer(req, 0);
5980	if (rc != 0)
5981		return (rc);
5982
5983	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5984	if (sb == NULL)
5985		return (ENOMEM);
5986
5987	t4_read_mtu_tbl(sc, mtus, NULL);
5988
5989	sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5990	    mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5991	    mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5992	    mtus[14], mtus[15]);
5993
5994	rc = sbuf_finish(sb);
5995	sbuf_delete(sb);
5996
5997	return (rc);
5998}
5999
6000static int
6001sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
6002{
6003	struct adapter *sc = arg1;
6004	struct sbuf *sb;
6005	int rc, i;
6006	uint32_t cnt[PM_NSTATS];
6007	uint64_t cyc[PM_NSTATS];
6008	static const char *rx_stats[] = {
6009		"Read:", "Write bypass:", "Write mem:", "Flush:"
6010	};
6011	static const char *tx_stats[] = {
6012		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:"
6013	};
6014
6015	rc = sysctl_wire_old_buffer(req, 0);
6016	if (rc != 0)
6017		return (rc);
6018
6019	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6020	if (sb == NULL)
6021		return (ENOMEM);
6022
6023	t4_pmtx_get_stats(sc, cnt, cyc);
6024	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
6025	for (i = 0; i < ARRAY_SIZE(tx_stats); i++)
6026		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], cnt[i],
6027		    cyc[i]);
6028
6029	t4_pmrx_get_stats(sc, cnt, cyc);
6030	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
6031	for (i = 0; i < ARRAY_SIZE(rx_stats); i++)
6032		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], cnt[i],
6033		    cyc[i]);
6034
6035	rc = sbuf_finish(sb);
6036	sbuf_delete(sb);
6037
6038	return (rc);
6039}
6040
6041static int
6042sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
6043{
6044	struct adapter *sc = arg1;
6045	struct sbuf *sb;
6046	int rc;
6047	struct tp_rdma_stats stats;
6048
6049	rc = sysctl_wire_old_buffer(req, 0);
6050	if (rc != 0)
6051		return (rc);
6052
6053	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6054	if (sb == NULL)
6055		return (ENOMEM);
6056
6057	t4_tp_get_rdma_stats(sc, &stats);
6058	sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
6059	sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
6060
6061	rc = sbuf_finish(sb);
6062	sbuf_delete(sb);
6063
6064	return (rc);
6065}
6066
6067static int
6068sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
6069{
6070	struct adapter *sc = arg1;
6071	struct sbuf *sb;
6072	int rc;
6073	struct tp_tcp_stats v4, v6;
6074
6075	rc = sysctl_wire_old_buffer(req, 0);
6076	if (rc != 0)
6077		return (rc);
6078
6079	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6080	if (sb == NULL)
6081		return (ENOMEM);
6082
6083	t4_tp_get_tcp_stats(sc, &v4, &v6);
6084	sbuf_printf(sb,
6085	    "                                IP                 IPv6\n");
6086	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
6087	    v4.tcpOutRsts, v6.tcpOutRsts);
6088	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
6089	    v4.tcpInSegs, v6.tcpInSegs);
6090	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
6091	    v4.tcpOutSegs, v6.tcpOutSegs);
6092	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
6093	    v4.tcpRetransSegs, v6.tcpRetransSegs);
6094
6095	rc = sbuf_finish(sb);
6096	sbuf_delete(sb);
6097
6098	return (rc);
6099}
6100
6101static int
6102sysctl_tids(SYSCTL_HANDLER_ARGS)
6103{
6104	struct adapter *sc = arg1;
6105	struct sbuf *sb;
6106	int rc;
6107	struct tid_info *t = &sc->tids;
6108
6109	rc = sysctl_wire_old_buffer(req, 0);
6110	if (rc != 0)
6111		return (rc);
6112
6113	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6114	if (sb == NULL)
6115		return (ENOMEM);
6116
6117	if (t->natids) {
6118		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
6119		    t->atids_in_use);
6120	}
6121
6122	if (t->ntids) {
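		/*
		 * With the LE hash region enabled the valid TID range is
		 * split in two: a low range ending at LE_DB_SERVER_INDEX / 4
		 * and a hash range starting at LE_DB_TID_HASHBASE / 4.
		 */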
6123		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6124			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
6125
6126			if (b) {
6127				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
6128				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6129				    t->ntids - 1);
6130			} else {
6131				sbuf_printf(sb, "TID range: %u-%u",
6132				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6133				    t->ntids - 1);
6134			}
6135		} else
6136			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
6137		sbuf_printf(sb, ", in use: %u\n",
6138		    atomic_load_acq_int(&t->tids_in_use));
6139	}
6140
6141	if (t->nstids) {
6142		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
6143		    t->stid_base + t->nstids - 1, t->stids_in_use);
6144	}
6145
6146	if (t->nftids) {
6147		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
6148		    t->ftid_base + t->nftids - 1);
6149	}
6150
6151	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
6152	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
6153	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
6154
6155	rc = sbuf_finish(sb);
6156	sbuf_delete(sb);
6157
6158	return (rc);
6159}
6160
6161static int
6162sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6163{
6164	struct adapter *sc = arg1;
6165	struct sbuf *sb;
6166	int rc;
6167	struct tp_err_stats stats;
6168
6169	rc = sysctl_wire_old_buffer(req, 0);
6170	if (rc != 0)
6171		return (rc);
6172
6173	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6174	if (sb == NULL)
6175		return (ENOMEM);
6176
6177	t4_tp_get_err_stats(sc, &stats);
6178
6179	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6180		      "channel 3\n");
6181	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
6182	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6183	    stats.macInErrs[3]);
6184	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
6185	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6186	    stats.hdrInErrs[3]);
6187	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
6188	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6189	    stats.tcpInErrs[3]);
6190	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
6191	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6192	    stats.tcp6InErrs[3]);
6193	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
6194	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6195	    stats.tnlCongDrops[3]);
6196	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
6197	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6198	    stats.tnlTxDrops[3]);
6199	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
6200	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6201	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6202	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
6203	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6204	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6205	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
6206	    stats.ofldNoNeigh, stats.ofldCongDefer);
6207
6208	rc = sbuf_finish(sb);
6209	sbuf_delete(sb);
6210
6211	return (rc);
6212}
6213
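/*
 * One bit-field of a 64-bit logic-analyzer sample: 'width' bits starting at
 * bit 'start'.  field_desc_show() walks a NULL-terminated array of these and
 * prints every field, wrapping the output at 79 columns.
 */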
6214struct field_desc {
6215	const char *name;
6216	u_int start;
6217	u_int width;
6218};
6219
6220static void
6221field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6222{
6223	char buf[32];
6224	int line_size = 0;
6225
6226	while (f->name) {
6227		uint64_t mask = (1ULL << f->width) - 1;
6228		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6229		    ((uintmax_t)v >> f->start) & mask);
6230
6231		if (line_size + len >= 79) {
6232			line_size = 8;
6233			sbuf_printf(sb, "\n        ");
6234		}
6235		sbuf_printf(sb, "%s ", buf);
6236		line_size += len + 1;
6237		f++;
6238	}
6239	sbuf_printf(sb, "\n");
6240}
6241
6242static struct field_desc tp_la0[] = {
6243	{ "RcfOpCodeOut", 60, 4 },
6244	{ "State", 56, 4 },
6245	{ "WcfState", 52, 4 },
6246	{ "RcfOpcSrcOut", 50, 2 },
6247	{ "CRxError", 49, 1 },
6248	{ "ERxError", 48, 1 },
6249	{ "SanityFailed", 47, 1 },
6250	{ "SpuriousMsg", 46, 1 },
6251	{ "FlushInputMsg", 45, 1 },
6252	{ "FlushInputCpl", 44, 1 },
6253	{ "RssUpBit", 43, 1 },
6254	{ "RssFilterHit", 42, 1 },
6255	{ "Tid", 32, 10 },
6256	{ "InitTcb", 31, 1 },
6257	{ "LineNumber", 24, 7 },
6258	{ "Emsg", 23, 1 },
6259	{ "EdataOut", 22, 1 },
6260	{ "Cmsg", 21, 1 },
6261	{ "CdataOut", 20, 1 },
6262	{ "EreadPdu", 19, 1 },
6263	{ "CreadPdu", 18, 1 },
6264	{ "TunnelPkt", 17, 1 },
6265	{ "RcfPeerFin", 16, 1 },
6266	{ "RcfReasonOut", 12, 4 },
6267	{ "TxCchannel", 10, 2 },
6268	{ "RcfTxChannel", 8, 2 },
6269	{ "RxEchannel", 6, 2 },
6270	{ "RcfRxChannel", 5, 1 },
6271	{ "RcfDataOutSrdy", 4, 1 },
6272	{ "RxDvld", 3, 1 },
6273	{ "RxOoDvld", 2, 1 },
6274	{ "RxCongestion", 1, 1 },
6275	{ "TxCongestion", 0, 1 },
6276	{ NULL }
6277};
6278
6279static struct field_desc tp_la1[] = {
6280	{ "CplCmdIn", 56, 8 },
6281	{ "CplCmdOut", 48, 8 },
6282	{ "ESynOut", 47, 1 },
6283	{ "EAckOut", 46, 1 },
6284	{ "EFinOut", 45, 1 },
6285	{ "ERstOut", 44, 1 },
6286	{ "SynIn", 43, 1 },
6287	{ "AckIn", 42, 1 },
6288	{ "FinIn", 41, 1 },
6289	{ "RstIn", 40, 1 },
6290	{ "DataIn", 39, 1 },
6291	{ "DataInVld", 38, 1 },
6292	{ "PadIn", 37, 1 },
6293	{ "RxBufEmpty", 36, 1 },
6294	{ "RxDdp", 35, 1 },
6295	{ "RxFbCongestion", 34, 1 },
6296	{ "TxFbCongestion", 33, 1 },
6297	{ "TxPktSumSrdy", 32, 1 },
6298	{ "RcfUlpType", 28, 4 },
6299	{ "Eread", 27, 1 },
6300	{ "Ebypass", 26, 1 },
6301	{ "Esave", 25, 1 },
6302	{ "Static0", 24, 1 },
6303	{ "Cread", 23, 1 },
6304	{ "Cbypass", 22, 1 },
6305	{ "Csave", 21, 1 },
6306	{ "CPktOut", 20, 1 },
6307	{ "RxPagePoolFull", 18, 2 },
6308	{ "RxLpbkPkt", 17, 1 },
6309	{ "TxLpbkPkt", 16, 1 },
6310	{ "RxVfValid", 15, 1 },
6311	{ "SynLearned", 14, 1 },
6312	{ "SetDelEntry", 13, 1 },
6313	{ "SetInvEntry", 12, 1 },
6314	{ "CpcmdDvld", 11, 1 },
6315	{ "CpcmdSave", 10, 1 },
6316	{ "RxPstructsFull", 8, 2 },
6317	{ "EpcmdDvld", 7, 1 },
6318	{ "EpcmdFlush", 6, 1 },
6319	{ "EpcmdTrimPrefix", 5, 1 },
6320	{ "EpcmdTrimPostfix", 4, 1 },
6321	{ "ERssIp4Pkt", 3, 1 },
6322	{ "ERssIp6Pkt", 2, 1 },
6323	{ "ERssTcpUdpPkt", 1, 1 },
6324	{ "ERssFceFipPkt", 0, 1 },
6325	{ NULL }
6326};
6327
6328static struct field_desc tp_la2[] = {
6329	{ "CplCmdIn", 56, 8 },
6330	{ "MpsVfVld", 55, 1 },
6331	{ "MpsPf", 52, 3 },
6332	{ "MpsVf", 44, 8 },
6333	{ "SynIn", 43, 1 },
6334	{ "AckIn", 42, 1 },
6335	{ "FinIn", 41, 1 },
6336	{ "RstIn", 40, 1 },
6337	{ "DataIn", 39, 1 },
6338	{ "DataInVld", 38, 1 },
6339	{ "PadIn", 37, 1 },
6340	{ "RxBufEmpty", 36, 1 },
6341	{ "RxDdp", 35, 1 },
6342	{ "RxFbCongestion", 34, 1 },
6343	{ "TxFbCongestion", 33, 1 },
6344	{ "TxPktSumSrdy", 32, 1 },
6345	{ "RcfUlpType", 28, 4 },
6346	{ "Eread", 27, 1 },
6347	{ "Ebypass", 26, 1 },
6348	{ "Esave", 25, 1 },
6349	{ "Static0", 24, 1 },
6350	{ "Cread", 23, 1 },
6351	{ "Cbypass", 22, 1 },
6352	{ "Csave", 21, 1 },
6353	{ "CPktOut", 20, 1 },
6354	{ "RxPagePoolFull", 18, 2 },
6355	{ "RxLpbkPkt", 17, 1 },
6356	{ "TxLpbkPkt", 16, 1 },
6357	{ "RxVfValid", 15, 1 },
6358	{ "SynLearned", 14, 1 },
6359	{ "SetDelEntry", 13, 1 },
6360	{ "SetInvEntry", 12, 1 },
6361	{ "CpcmdDvld", 11, 1 },
6362	{ "CpcmdSave", 10, 1 },
6363	{ "RxPstructsFull", 8, 2 },
6364	{ "EpcmdDvld", 7, 1 },
6365	{ "EpcmdFlush", 6, 1 },
6366	{ "EpcmdTrimPrefix", 5, 1 },
6367	{ "EpcmdTrimPostfix", 4, 1 },
6368	{ "ERssIp4Pkt", 3, 1 },
6369	{ "ERssIp6Pkt", 2, 1 },
6370	{ "ERssTcpUdpPkt", 1, 1 },
6371	{ "ERssFceFipPkt", 0, 1 },
6372	{ NULL }
6373};
6374
6375static void
6376tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6377{
6378
6379	field_desc_show(sb, *p, tp_la0);
6380}
6381
6382static void
6383tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6384{
6385
6386	if (idx)
6387		sbuf_printf(sb, "\n");
6388	field_desc_show(sb, p[0], tp_la0);
6389	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6390		field_desc_show(sb, p[1], tp_la0);
6391}
6392
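/*
 * DBGLAMODE 3: the decode of the second word in each sample pair depends on
 * bit 17 of the first word, which selects tp_la2 (MPS VF fields) over tp_la1
 * (CplCmdOut and the TCP flag bits).
 */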
6393static void
6394tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6395{
6396
6397	if (idx)
6398		sbuf_printf(sb, "\n");
6399	field_desc_show(sb, p[0], tp_la0);
6400	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6401		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6402}
6403
6404static int
6405sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6406{
6407	struct adapter *sc = arg1;
6408	struct sbuf *sb;
6409	uint64_t *buf, *p;
6410	int rc;
6411	u_int i, inc;
6412	void (*show_func)(struct sbuf *, uint64_t *, int);
6413
6414	rc = sysctl_wire_old_buffer(req, 0);
6415	if (rc != 0)
6416		return (rc);
6417
6418	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6419	if (sb == NULL)
6420		return (ENOMEM);
6421
6422	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6423
6424	t4_tp_read_la(sc, buf, NULL);
6425	p = buf;
6426
6427	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6428	case 2:
6429		inc = 2;
6430		show_func = tp_la_show2;
6431		break;
6432	case 3:
6433		inc = 2;
6434		show_func = tp_la_show3;
6435		break;
6436	default:
6437		inc = 1;
6438		show_func = tp_la_show;
6439	}
6440
6441	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6442		(*show_func)(sb, p, i);
6443
6444	rc = sbuf_finish(sb);
6445	sbuf_delete(sb);
6446	free(buf, M_CXGBE);
6447	return (rc);
6448}
6449
6450static int
6451sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6452{
6453	struct adapter *sc = arg1;
6454	struct sbuf *sb;
6455	int rc;
6456	u64 nrate[NCHAN], orate[NCHAN];
6457
6458	rc = sysctl_wire_old_buffer(req, 0);
6459	if (rc != 0)
6460		return (rc);
6461
6462	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6463	if (sb == NULL)
6464		return (ENOMEM);
6465
6466	t4_get_chan_txrate(sc, nrate, orate);
6467	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
6468		 "channel 3\n");
6469	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
6470	    nrate[0], nrate[1], nrate[2], nrate[3]);
6471	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
6472	    orate[0], orate[1], orate[2], orate[3]);
6473
6474	rc = sbuf_finish(sb);
6475	sbuf_delete(sb);
6476
6477	return (rc);
6478}
6479
6480static int
6481sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6482{
6483	struct adapter *sc = arg1;
6484	struct sbuf *sb;
6485	uint32_t *buf, *p;
6486	int rc, i;
6487
6488	rc = sysctl_wire_old_buffer(req, 0);
6489	if (rc != 0)
6490		return (rc);
6491
6492	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6493	if (sb == NULL)
6494		return (ENOMEM);
6495
6496	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6497	    M_ZERO | M_WAITOK);
6498
6499	t4_ulprx_read_la(sc, buf);
6500	p = buf;
6501
6502	sbuf_printf(sb, "      Pcmd        Type   Message"
6503	    "                Data");
6504	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6505		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
6506		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6507	}
6508
6509	rc = sbuf_finish(sb);
6510	sbuf_delete(sb);
6511	free(buf, M_CXGBE);
6512	return (rc);
6513}
6514
6515static int
6516sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6517{
6518	struct adapter *sc = arg1;
6519	struct sbuf *sb;
6520	int rc, v;
6521
6522	rc = sysctl_wire_old_buffer(req, 0);
6523	if (rc != 0)
6524		return (rc);
6525
6526	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6527	if (sb == NULL)
6528		return (ENOMEM);
6529
6530	v = t4_read_reg(sc, A_SGE_STAT_CFG);
6531	if (G_STATSOURCE_T5(v) == 7) {
6532		if (G_STATMODE(v) == 0) {
6533			sbuf_printf(sb, "total %d, incomplete %d",
6534			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6535			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6536		} else if (G_STATMODE(v) == 1) {
6537			sbuf_printf(sb, "total %d, data overflow %d",
6538			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
6539			    t4_read_reg(sc, A_SGE_STAT_MATCH));
6540		}
6541	}
6542	rc = sbuf_finish(sb);
6543	sbuf_delete(sb);
6544
6545	return (rc);
6546}
6547#endif
6548
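/*
 * Kick an Ethernet tx queue: resume with the mbuf previously stashed in
 * txq->m, if any, otherwise dequeue the next frame from the queue's buf_ring.
 */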
6549static inline void
6550txq_start(struct ifnet *ifp, struct sge_txq *txq)
6551{
6552	struct buf_ring *br;
6553	struct mbuf *m;
6554
6555	TXQ_LOCK_ASSERT_OWNED(txq);
6556
6557	br = txq->br;
6558	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6559	if (m)
6560		t4_eth_tx(ifp, txq, m);
6561}
6562
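/*
 * Callout for a stalled tx queue.  If the EQ lock isn't available right away,
 * or the queue is still stalled with no room to resume, try again on the next
 * tick; otherwise hand the actual transmission off to the adapter's taskqueue.
 */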
6563void
6564t4_tx_callout(void *arg)
6565{
6566	struct sge_eq *eq = arg;
6567	struct adapter *sc;
6568
6569	if (EQ_TRYLOCK(eq) == 0)
6570		goto reschedule;
6571
6572	if ((eq->flags & EQ_STALLED) && !can_resume_tx(eq)) {
6573		EQ_UNLOCK(eq);
6574reschedule:
6575		if (__predict_true(!(eq->flags & EQ_DOOMED)))
6576			callout_schedule(&eq->tx_callout, 1);
6577		return;
6578	}
6579
6580	EQ_LOCK_ASSERT_OWNED(eq);
6581
6582	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6583
6584		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6585			struct sge_txq *txq = arg;
6586			struct port_info *pi = txq->ifp->if_softc;
6587
6588			sc = pi->adapter;
6589		} else {
6590			struct sge_wrq *wrq = arg;
6591
6592			sc = wrq->adapter;
6593		}
6594
6595		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6596	}
6597
6598	EQ_UNLOCK(eq);
6599}
6600
6601void
6602t4_tx_task(void *arg, int count)
6603{
6604	struct sge_eq *eq = arg;
6605
6606	EQ_LOCK(eq);
6607	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6608		struct sge_txq *txq = arg;
6609		txq_start(txq->ifp, txq);
6610	} else {
6611		struct sge_wrq *wrq = arg;
6612		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6613	}
6614	EQ_UNLOCK(eq);
6615}
6616
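/*
 * Translate between the T4_FILTER_* bits of the ioctl filter mode and the
 * hardware's TP_VLAN_PRI_MAP field selectors (F_* bits), in both directions.
 * fspec_to_fconf() computes the selectors a given filter specification needs
 * so it can be validated against the global filter mode.
 */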
6617static uint32_t
6618fconf_to_mode(uint32_t fconf)
6619{
6620	uint32_t mode;
6621
6622	mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6623	    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6624
6625	if (fconf & F_FRAGMENTATION)
6626		mode |= T4_FILTER_IP_FRAGMENT;
6627
6628	if (fconf & F_MPSHITTYPE)
6629		mode |= T4_FILTER_MPS_HIT_TYPE;
6630
6631	if (fconf & F_MACMATCH)
6632		mode |= T4_FILTER_MAC_IDX;
6633
6634	if (fconf & F_ETHERTYPE)
6635		mode |= T4_FILTER_ETH_TYPE;
6636
6637	if (fconf & F_PROTOCOL)
6638		mode |= T4_FILTER_IP_PROTO;
6639
6640	if (fconf & F_TOS)
6641		mode |= T4_FILTER_IP_TOS;
6642
6643	if (fconf & F_VLAN)
6644		mode |= T4_FILTER_VLAN;
6645
6646	if (fconf & F_VNIC_ID)
6647		mode |= T4_FILTER_VNIC;
6648
6649	if (fconf & F_PORT)
6650		mode |= T4_FILTER_PORT;
6651
6652	if (fconf & F_FCOE)
6653		mode |= T4_FILTER_FCoE;
6654
6655	return (mode);
6656}
6657
6658static uint32_t
6659mode_to_fconf(uint32_t mode)
6660{
6661	uint32_t fconf = 0;
6662
6663	if (mode & T4_FILTER_IP_FRAGMENT)
6664		fconf |= F_FRAGMENTATION;
6665
6666	if (mode & T4_FILTER_MPS_HIT_TYPE)
6667		fconf |= F_MPSHITTYPE;
6668
6669	if (mode & T4_FILTER_MAC_IDX)
6670		fconf |= F_MACMATCH;
6671
6672	if (mode & T4_FILTER_ETH_TYPE)
6673		fconf |= F_ETHERTYPE;
6674
6675	if (mode & T4_FILTER_IP_PROTO)
6676		fconf |= F_PROTOCOL;
6677
6678	if (mode & T4_FILTER_IP_TOS)
6679		fconf |= F_TOS;
6680
6681	if (mode & T4_FILTER_VLAN)
6682		fconf |= F_VLAN;
6683
6684	if (mode & T4_FILTER_VNIC)
6685		fconf |= F_VNIC_ID;
6686
6687	if (mode & T4_FILTER_PORT)
6688		fconf |= F_PORT;
6689
6690	if (mode & T4_FILTER_FCoE)
6691		fconf |= F_FCOE;
6692
6693	return (fconf);
6694}
6695
6696static uint32_t
6697fspec_to_fconf(struct t4_filter_specification *fs)
6698{
6699	uint32_t fconf = 0;
6700
6701	if (fs->val.frag || fs->mask.frag)
6702		fconf |= F_FRAGMENTATION;
6703
6704	if (fs->val.matchtype || fs->mask.matchtype)
6705		fconf |= F_MPSHITTYPE;
6706
6707	if (fs->val.macidx || fs->mask.macidx)
6708		fconf |= F_MACMATCH;
6709
6710	if (fs->val.ethtype || fs->mask.ethtype)
6711		fconf |= F_ETHERTYPE;
6712
6713	if (fs->val.proto || fs->mask.proto)
6714		fconf |= F_PROTOCOL;
6715
6716	if (fs->val.tos || fs->mask.tos)
6717		fconf |= F_TOS;
6718
6719	if (fs->val.vlan_vld || fs->mask.vlan_vld)
6720		fconf |= F_VLAN;
6721
6722	if (fs->val.vnic_vld || fs->mask.vnic_vld)
6723		fconf |= F_VNIC_ID;
6724
6725	if (fs->val.iport || fs->mask.iport)
6726		fconf |= F_PORT;
6727
6728	if (fs->val.fcoe || fs->mask.fcoe)
6729		fconf |= F_FCOE;
6730
6731	return (fconf);
6732}
6733
6734static int
6735get_filter_mode(struct adapter *sc, uint32_t *mode)
6736{
6737	int rc;
6738	uint32_t fconf;
6739
6740	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6741	    "t4getfm");
6742	if (rc)
6743		return (rc);
6744
6745	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
6746	    A_TP_VLAN_PRI_MAP);
6747
6748	if (sc->params.tp.vlan_pri_map != fconf) {
6749		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6750		    device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
6751		    fconf);
6752		sc->params.tp.vlan_pri_map = fconf;
6753	}
6754
6755	*mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
6756
6757	end_synchronized_op(sc, LOCK_HELD);
6758	return (0);
6759}
6760
6761static int
6762set_filter_mode(struct adapter *sc, uint32_t mode)
6763{
6764	uint32_t fconf;
6765	int rc;
6766
6767	fconf = mode_to_fconf(mode);
6768
6769	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6770	    "t4setfm");
6771	if (rc)
6772		return (rc);
6773
6774	if (sc->tids.ftids_in_use > 0) {
6775		rc = EBUSY;
6776		goto done;
6777	}
6778
6779#ifdef TCP_OFFLOAD
6780	if (sc->offload_map) {
6781		rc = EBUSY;
6782		goto done;
6783	}
6784#endif
6785
6786#ifdef notyet
6787	rc = -t4_set_filter_mode(sc, fconf);
6788	if (rc == 0)
6789		sc->filter_mode = fconf;
6790#else
6791	rc = ENOTSUP;
6792#endif
6793
6794done:
6795	end_synchronized_op(sc, LOCK_HELD);
6796	return (rc);
6797}
6798
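/*
 * A filter's hit count lives in its TCB, reached here through memory window
 * 0: T4 keeps a 64-bit big-endian count at offset 16 within the TCB, T5 a
 * 32-bit count at offset 24.
 */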
6799static inline uint64_t
6800get_filter_hits(struct adapter *sc, uint32_t fid)
6801{
6802	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6803	uint64_t hits;
6804
6805	memwin_info(sc, 0, &mw_base, NULL);
6806	off = position_memwin(sc, 0,
6807	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6808	if (is_t4(sc)) {
6809		hits = t4_read_reg64(sc, mw_base + off + 16);
6810		hits = be64toh(hits);
6811	} else {
6812		hits = t4_read_reg(sc, mw_base + off + 24);
6813		hits = be32toh(hits);
6814	}
6815
6816	return (hits);
6817}
6818
6819static int
6820get_filter(struct adapter *sc, struct t4_filter *t)
6821{
6822	int i, rc, nfilters = sc->tids.nftids;
6823	struct filter_entry *f;
6824
6825	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6826	    "t4getf");
6827	if (rc)
6828		return (rc);
6829
6830	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6831	    t->idx >= nfilters) {
6832		t->idx = 0xffffffff;
6833		goto done;
6834	}
6835
6836	f = &sc->tids.ftid_tab[t->idx];
6837	for (i = t->idx; i < nfilters; i++, f++) {
6838		if (f->valid) {
6839			t->idx = i;
6840			t->l2tidx = f->l2t ? f->l2t->idx : 0;
6841			t->smtidx = f->smtidx;
6842			if (f->fs.hitcnts)
6843				t->hits = get_filter_hits(sc, t->idx);
6844			else
6845				t->hits = UINT64_MAX;
6846			t->fs = f->fs;
6847
6848			goto done;
6849		}
6850	}
6851
6852	t->idx = 0xffffffff;
6853done:
6854	end_synchronized_op(sc, LOCK_HELD);
6855	return (0);
6856}
6857
6858static int
6859set_filter(struct adapter *sc, struct t4_filter *t)
6860{
6861	unsigned int nfilters, nports;
6862	struct filter_entry *f;
6863	int i, rc;
6864
6865	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
6866	if (rc)
6867		return (rc);
6868
6869	nfilters = sc->tids.nftids;
6870	nports = sc->params.nports;
6871
6872	if (nfilters == 0) {
6873		rc = ENOTSUP;
6874		goto done;
6875	}
6876
6877	if (!(sc->flags & FULL_INIT_DONE)) {
6878		rc = EAGAIN;
6879		goto done;
6880	}
6881
6882	if (t->idx >= nfilters) {
6883		rc = EINVAL;
6884		goto done;
6885	}
6886
6887	/* Validate against the global filter mode */
6888	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
6889	    sc->params.tp.vlan_pri_map) {
6890		rc = E2BIG;
6891		goto done;
6892	}
6893
6894	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
6895		rc = EINVAL;
6896		goto done;
6897	}
6898
6899	if (t->fs.val.iport >= nports) {
6900		rc = EINVAL;
6901		goto done;
6902	}
6903
6904	/* Can't specify an iq if not steering to it */
6905	if (!t->fs.dirsteer && t->fs.iq) {
6906		rc = EINVAL;
6907		goto done;
6908	}
6909
6910	/* IPv6 filter idx must be 4 aligned */
6911	if (t->fs.type == 1 &&
6912	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
6913		rc = EINVAL;
6914		goto done;
6915	}
6916
6917	if (sc->tids.ftid_tab == NULL) {
6918		KASSERT(sc->tids.ftids_in_use == 0,
6919		    ("%s: no memory allocated but filters_in_use > 0",
6920		    __func__));
6921
6922		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
6923		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
6924		if (sc->tids.ftid_tab == NULL) {
6925			rc = ENOMEM;
6926			goto done;
6927		}
6928		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
6929	}
6930
6931	for (i = 0; i < 4; i++) {
6932		f = &sc->tids.ftid_tab[t->idx + i];
6933
6934		if (f->pending || f->valid) {
6935			rc = EBUSY;
6936			goto done;
6937		}
6938		if (f->locked) {
6939			rc = EPERM;
6940			goto done;
6941		}
6942
6943		if (t->fs.type == 0)
6944			break;
6945	}
6946
6947	f = &sc->tids.ftid_tab[t->idx];
6948	f->fs = t->fs;
6949
6950	rc = set_filter_wr(sc, t->idx);
6951done:
6952	end_synchronized_op(sc, 0);
6953
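	/*
	 * The work request was sent; wait for the firmware's reply, which
	 * arrives via t4_filter_rpl() and wakes up this sleep.  An
	 * interrupted sleep leaves the operation outstanding (EINPROGRESS).
	 */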
6954	if (rc == 0) {
6955		mtx_lock(&sc->tids.ftid_lock);
6956		for (;;) {
6957			if (f->pending == 0) {
6958				rc = f->valid ? 0 : EIO;
6959				break;
6960			}
6961
6962			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6963			    PCATCH, "t4setfw", 0)) {
6964				rc = EINPROGRESS;
6965				break;
6966			}
6967		}
6968		mtx_unlock(&sc->tids.ftid_lock);
6969	}
6970	return (rc);
6971}
6972
6973static int
6974del_filter(struct adapter *sc, struct t4_filter *t)
6975{
6976	unsigned int nfilters;
6977	struct filter_entry *f;
6978	int rc;
6979
6980	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
6981	if (rc)
6982		return (rc);
6983
6984	nfilters = sc->tids.nftids;
6985
6986	if (nfilters == 0) {
6987		rc = ENOTSUP;
6988		goto done;
6989	}
6990
6991	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
6992	    t->idx >= nfilters) {
6993		rc = EINVAL;
6994		goto done;
6995	}
6996
6997	if (!(sc->flags & FULL_INIT_DONE)) {
6998		rc = EAGAIN;
6999		goto done;
7000	}
7001
7002	f = &sc->tids.ftid_tab[t->idx];
7003
7004	if (f->pending) {
7005		rc = EBUSY;
7006		goto done;
7007	}
7008	if (f->locked) {
7009		rc = EPERM;
7010		goto done;
7011	}
7012
7013	if (f->valid) {
7014		t->fs = f->fs;	/* extra info for the caller */
7015		rc = del_filter_wr(sc, t->idx);
7016	}
7017
7018done:
7019	end_synchronized_op(sc, 0);
7020
7021	if (rc == 0) {
7022		mtx_lock(&sc->tids.ftid_lock);
7023		for (;;) {
7024			if (f->pending == 0) {
7025				rc = f->valid ? EIO : 0;
7026				break;
7027			}
7028
7029			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
7030			    PCATCH, "t4delfw", 0)) {
7031				rc = EINPROGRESS;
7032				break;
7033			}
7034		}
7035		mtx_unlock(&sc->tids.ftid_lock);
7036	}
7037
7038	return (rc);
7039}
7040
7041static void
7042clear_filter(struct filter_entry *f)
7043{
7044	if (f->l2t)
7045		t4_l2t_release(f->l2t);
7046
7047	bzero(f, sizeof (*f));
7048}
7049
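/*
 * Construct and send the FW_FILTER_WR for filter 'fidx'.  Filters that
 * rewrite the destination MAC or the vlan tag need a switching L2T entry
 * first.  The firmware's reply is handled asynchronously by t4_filter_rpl().
 */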
7050static int
7051set_filter_wr(struct adapter *sc, int fidx)
7052{
7053	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7054	struct wrqe *wr;
7055	struct fw_filter_wr *fwr;
7056	unsigned int ftid;
7057
7058	ASSERT_SYNCHRONIZED_OP(sc);
7059
7060	if (f->fs.newdmac || f->fs.newvlan) {
7061		/* This filter needs an L2T entry; allocate one. */
7062		f->l2t = t4_l2t_alloc_switching(sc->l2t);
7063		if (f->l2t == NULL)
7064			return (EAGAIN);
7065		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
7066		    f->fs.dmac)) {
7067			t4_l2t_release(f->l2t);
7068			f->l2t = NULL;
7069			return (ENOMEM);
7070		}
7071	}
7072
7073	ftid = sc->tids.ftid_base + fidx;
7074
7075	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7076	if (wr == NULL)
7077		return (ENOMEM);
7078
7079	fwr = wrtod(wr);
7080	bzero(fwr, sizeof (*fwr));
7081
7082	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
7083	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
7084	fwr->tid_to_iq =
7085	    htobe32(V_FW_FILTER_WR_TID(ftid) |
7086		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
7087		V_FW_FILTER_WR_NOREPLY(0) |
7088		V_FW_FILTER_WR_IQ(f->fs.iq));
7089	fwr->del_filter_to_l2tix =
7090	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
7091		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
7092		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
7093		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
7094		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
7095		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
7096		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
7097		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
7098		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
7099		    f->fs.newvlan == VLAN_REWRITE) |
7100		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
7101		    f->fs.newvlan == VLAN_REWRITE) |
7102		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
7103		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
7104		V_FW_FILTER_WR_PRIO(f->fs.prio) |
7105		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
7106	fwr->ethtype = htobe16(f->fs.val.ethtype);
7107	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
7108	fwr->frag_to_ovlan_vldm =
7109	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
7110		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
7111		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
7112		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
7113		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
7114		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
7115	fwr->smac_sel = 0;
7116	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
7117	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
7118	fwr->maci_to_matchtypem =
7119	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
7120		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
7121		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
7122		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
7123		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
7124		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
7125		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
7126		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
7127	fwr->ptcl = f->fs.val.proto;
7128	fwr->ptclm = f->fs.mask.proto;
7129	fwr->ttyp = f->fs.val.tos;
7130	fwr->ttypm = f->fs.mask.tos;
7131	fwr->ivlan = htobe16(f->fs.val.vlan);
7132	fwr->ivlanm = htobe16(f->fs.mask.vlan);
7133	fwr->ovlan = htobe16(f->fs.val.vnic);
7134	fwr->ovlanm = htobe16(f->fs.mask.vnic);
7135	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
7136	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
7137	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
7138	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
7139	fwr->lp = htobe16(f->fs.val.dport);
7140	fwr->lpm = htobe16(f->fs.mask.dport);
7141	fwr->fp = htobe16(f->fs.val.sport);
7142	fwr->fpm = htobe16(f->fs.mask.sport);
7143	if (f->fs.newsmac)
7144		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
7145
7146	f->pending = 1;
7147	sc->tids.ftids_in_use++;
7148
7149	t4_wrq_tx(sc, wr);
7150	return (0);
7151}
7152
7153static int
7154del_filter_wr(struct adapter *sc, int fidx)
7155{
7156	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7157	struct wrqe *wr;
7158	struct fw_filter_wr *fwr;
7159	unsigned int ftid;
7160
7161	ftid = sc->tids.ftid_base + fidx;
7162
7163	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7164	if (wr == NULL)
7165		return (ENOMEM);
7166	fwr = wrtod(wr);
7167	bzero(fwr, sizeof (*fwr));
7168
7169	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7170
7171	f->pending = 1;
7172	t4_wrq_tx(sc, wr);
7173	return (0);
7174}
7175
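/*
 * Process the CPL_SET_TCB_RPL that the firmware sends in response to a filter
 * work request: mark the filter valid on a successful add, release it on a
 * delete or failure, and wake up any thread waiting in set_filter() or
 * del_filter().
 */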
7176int
7177t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7178{
7179	struct adapter *sc = iq->adapter;
7180	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7181	unsigned int idx = GET_TID(rpl);
7182
7183	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
7184	    rss->opcode));
7185
7186	if (idx >= sc->tids.ftid_base &&
7187	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
7188		unsigned int rc = G_COOKIE(rpl->cookie);
7189		struct filter_entry *f = &sc->tids.ftid_tab[idx];
7190
7191		mtx_lock(&sc->tids.ftid_lock);
7192		if (rc == FW_FILTER_WR_FLT_ADDED) {
7193			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7194			    __func__, idx));
7195			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7196			f->pending = 0;  /* asynchronous setup completed */
7197			f->valid = 1;
7198		} else {
7199			if (rc != FW_FILTER_WR_FLT_DELETED) {
7200				/* Add or delete failed, display an error */
7201				log(LOG_ERR,
7202				    "filter %u setup failed with error %u\n",
7203				    idx, rc);
7204			}
7205
7206			clear_filter(f);
7207			sc->tids.ftids_in_use--;
7208		}
7209		wakeup(&sc->tids.ftid_tab);
7210		mtx_unlock(&sc->tids.ftid_lock);
7211	}
7212
7213	return (0);
7214}
7215
7216static int
7217get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7218{
7219	int rc;
7220
7221	if (cntxt->cid > M_CTXTQID)
7222		return (EINVAL);
7223
7224	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7225	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7226		return (EINVAL);
7227
7228	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7229	if (rc)
7230		return (rc);
7231
7232	if (sc->flags & FW_OK) {
7233		rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7234		    &cntxt->data[0]);
7235		if (rc == 0)
7236			goto done;
7237	}
7238
7239	/*
7240	 * Read via firmware failed or wasn't even attempted.  Read directly via
7241	 * the backdoor.
7242	 */
7243	rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7244done:
7245	end_synchronized_op(sc, 0);
7246	return (rc);
7247}
7248
7249static int
7250load_fw(struct adapter *sc, struct t4_data *fw)
7251{
7252	int rc;
7253	uint8_t *fw_data;
7254
7255	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7256	if (rc)
7257		return (rc);
7258
7259	if (sc->flags & FULL_INIT_DONE) {
7260		rc = EBUSY;
7261		goto done;
7262	}
7263
7264	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7265	if (fw_data == NULL) {
7266		rc = ENOMEM;
7267		goto done;
7268	}
7269
7270	rc = copyin(fw->data, fw_data, fw->len);
7271	if (rc == 0)
7272		rc = -t4_load_fw(sc, fw_data, fw->len);
7273
7274	free(fw_data, M_CXGBE);
7275done:
7276	end_synchronized_op(sc, 0);
7277	return (rc);
7278}
7279
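/*
 * Copy a validated range of adapter memory out to userland by repeatedly
 * repositioning PCIe memory window 'win' over the range and reading it out
 * 32 bits at a time.
 */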
7280static int
7281read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7282{
7283	uint32_t addr, off, remaining, i, n;
7284	uint32_t *buf, *b;
7285	uint32_t mw_base, mw_aperture;
7286	int rc;
7287	uint8_t *dst;
7288
7289	rc = validate_mem_range(sc, mr->addr, mr->len);
7290	if (rc != 0)
7291		return (rc);
7292
7293	memwin_info(sc, win, &mw_base, &mw_aperture);
7294	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7295	addr = mr->addr;
7296	remaining = mr->len;
7297	dst = (void *)mr->data;
7298
7299	while (remaining) {
7300		off = position_memwin(sc, win, addr);
7301
7302		/* number of bytes that we'll copy in the inner loop */
7303		n = min(remaining, mw_aperture - off);
7304		for (i = 0; i < n; i += 4)
7305			*b++ = t4_read_reg(sc, mw_base + off + i);
7306
7307		rc = copyout(buf, dst, n);
7308		if (rc != 0)
7309			break;
7310
7311		b = buf;
7312		dst += n;
7313		remaining -= n;
7314		addr += n;
7315	}
7316
7317	free(buf, M_CXGBE);
7318	return (rc);
7319}
7320
7321static int
7322read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7323{
7324	int rc;
7325
7326	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7327		return (EINVAL);
7328
7329	if (i2cd->len > 1) {
7330		/* XXX: need fw support for longer reads in one go */
7331		return (ENOTSUP);
7332	}
7333
7334	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7335	if (rc)
7336		return (rc);
7337	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7338	    i2cd->offset, &i2cd->data[0]);
7339	end_synchronized_op(sc, 0);
7340
7341	return (rc);
7342}
7343
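/*
 * A negative value means the parameter was not supplied and is allowed
 * through; anything else must lie within [lo, hi].
 */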
7344static int
7345in_range(int val, int lo, int hi)
7346{
7347
7348	return (val < 0 || (val <= hi && val >= lo));
7349}
7350
7351static int
7352set_sched_class(struct adapter *sc, struct t4_sched_params *p)
7353{
7354	int fw_subcmd, fw_type, rc;
7355
7356	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsc");
7357	if (rc)
7358		return (rc);
7359
7360	if (!(sc->flags & FULL_INIT_DONE)) {
7361		rc = EAGAIN;
7362		goto done;
7363	}
7364
7365	/*
7366	 * Translate the cxgbetool parameters into T4 firmware parameters.  (The
7367	 * sub-command and type are in common locations.)
7368	 */
7369	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
7370		fw_subcmd = FW_SCHED_SC_CONFIG;
7371	else if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
7372		fw_subcmd = FW_SCHED_SC_PARAMS;
7373	else {
7374		rc = EINVAL;
7375		goto done;
7376	}
7377	if (p->type == SCHED_CLASS_TYPE_PACKET)
7378		fw_type = FW_SCHED_TYPE_PKTSCHED;
7379	else {
7380		rc = EINVAL;
7381		goto done;
7382	}
7383
7384	if (fw_subcmd == FW_SCHED_SC_CONFIG) {
7385		/* Vet our parameters ... */
7386		if (p->u.config.minmax < 0) {
7387			rc = EINVAL;
7388			goto done;
7389		}
7390
7391		/* And pass the request to the firmware ... */
7392		rc = -t4_sched_config(sc, fw_type, p->u.config.minmax);
7393		goto done;
7394	}
7395
7396	if (fw_subcmd == FW_SCHED_SC_PARAMS) {
7397		int fw_level;
7398		int fw_mode;
7399		int fw_rateunit;
7400		int fw_ratemode;
7401
7402		if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL)
7403			fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
7404		else if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR)
7405			fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
7406		else if (p->u.params.level == SCHED_CLASS_LEVEL_CH_RL)
7407			fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
7408		else {
7409			rc = EINVAL;
7410			goto done;
7411		}
7412
7413		if (p->u.params.mode == SCHED_CLASS_MODE_CLASS)
7414			fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
7415		else if (p->u.params.mode == SCHED_CLASS_MODE_FLOW)
7416			fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
7417		else {
7418			rc = EINVAL;
7419			goto done;
7420		}
7421
7422		if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_BITS)
7423			fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
7424		else if (p->u.params.rateunit == SCHED_CLASS_RATEUNIT_PKTS)
7425			fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
7426		else {
7427			rc = EINVAL;
7428			goto done;
7429		}
7430
7431		if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_REL)
7432			fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
7433		else if (p->u.params.ratemode == SCHED_CLASS_RATEMODE_ABS)
7434			fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
7435		else {
7436			rc = EINVAL;
7437			goto done;
7438		}
7439
7440		/* Vet our parameters ... */
7441		if (!in_range(p->u.params.channel, 0, 3) ||
7442		    !in_range(p->u.params.cl, 0, is_t4(sc) ? 15 : 16) ||
7443		    !in_range(p->u.params.minrate, 0, 10000000) ||
7444		    !in_range(p->u.params.maxrate, 0, 10000000) ||
7445		    !in_range(p->u.params.weight, 0, 100)) {
7446			rc = ERANGE;
7447			goto done;
7448		}
7449
7450		/*
7451		 * Translate any unset parameters into the firmware's
7452		 * nomenclature and/or fail the call if the parameters
7453		 * are required ...
7454		 */
7455		if (p->u.params.rateunit < 0 || p->u.params.ratemode < 0 ||
7456		    p->u.params.channel < 0 || p->u.params.cl < 0) {
7457			rc = EINVAL;
7458			goto done;
7459		}
7460		if (p->u.params.minrate < 0)
7461			p->u.params.minrate = 0;
7462		if (p->u.params.maxrate < 0) {
7463			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7464			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7465				rc = EINVAL;
7466				goto done;
7467			} else
7468				p->u.params.maxrate = 0;
7469		}
7470		if (p->u.params.weight < 0) {
7471			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_WRR) {
7472				rc = EINVAL;
7473				goto done;
7474			} else
7475				p->u.params.weight = 0;
7476		}
7477		if (p->u.params.pktsize < 0) {
7478			if (p->u.params.level == SCHED_CLASS_LEVEL_CL_RL ||
7479			    p->u.params.level == SCHED_CLASS_LEVEL_CH_RL) {
7480				rc = EINVAL;
7481				goto done;
7482			} else
7483				p->u.params.pktsize = 0;
7484		}
7485
7486		/* See what the firmware thinks of the request ... */
7487		rc = -t4_sched_params(sc, fw_type, fw_level, fw_mode,
7488		    fw_rateunit, fw_ratemode, p->u.params.channel,
7489		    p->u.params.cl, p->u.params.minrate, p->u.params.maxrate,
7490		    p->u.params.weight, p->u.params.pktsize);
7491		goto done;
7492	}
7493
7494	rc = EINVAL;
7495done:
7496	end_synchronized_op(sc, 0);
7497	return (rc);
7498}
7499
7500static int
7501set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
7502{
7503	struct port_info *pi = NULL;
7504	struct sge_txq *txq;
7505	uint32_t fw_mnem, fw_queue, fw_class;
7506	int i, rc;
7507
7508	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
7509	if (rc)
7510		return (rc);
7511
7512	if (!(sc->flags & FULL_INIT_DONE)) {
7513		rc = EAGAIN;
7514		goto done;
7515	}
7516
7517	if (p->port >= sc->params.nports) {
7518		rc = EINVAL;
7519		goto done;
7520	}
7521
7522	pi = sc->port[p->port];
7523	if (!in_range(p->queue, 0, pi->ntxq - 1) || !in_range(p->cl, 0, 7)) {
7524		rc = EINVAL;
7525		goto done;
7526	}
7527
7528	/*
7529	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
7530	 * Scheduling Class in this case).
7531	 */
7532	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
7533	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
7534	fw_class = p->cl < 0 ? 0xffffffff : p->cl;
7535
7536	/*
7537	 * If p->queue is non-negative, then we're only changing the scheduling
7538	 * on a single specified TX queue.
7539	 */
7540	if (p->queue >= 0) {
7541		txq = &sc->sge.txq[pi->first_txq + p->queue];
7542		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
7543		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
7544		    &fw_class);
7545		goto done;
7546	}
7547
7548	/*
7549	 * Change the scheduling on all the TX queues for the
7550	 * interface.
7551	 */
7552	for_each_txq(pi, i, txq) {
7553		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
7554		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
7555		    &fw_class);
7556		if (rc)
7557			goto done;
7558	}
7559
7560	rc = 0;
7561done:
7562	end_synchronized_op(sc, 0);
7563	return (rc);
7564}
7565
7566int
7567t4_os_find_pci_capability(struct adapter *sc, int cap)
7568{
7569	int i;
7570
7571	return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7572}
7573
7574int
7575t4_os_pci_save_state(struct adapter *sc)
7576{
7577	device_t dev;
7578	struct pci_devinfo *dinfo;
7579
7580	dev = sc->dev;
7581	dinfo = device_get_ivars(dev);
7582
7583	pci_cfg_save(dev, dinfo, 0);
7584	return (0);
7585}
7586
7587int
7588t4_os_pci_restore_state(struct adapter *sc)
7589{
7590	device_t dev;
7591	struct pci_devinfo *dinfo;
7592
7593	dev = sc->dev;
7594	dinfo = device_get_ivars(dev);
7595
7596	pci_cfg_restore(dev, dinfo);
7597	return (0);
7598}
7599
7600void
7601t4_os_portmod_changed(const struct adapter *sc, int idx)
7602{
7603	struct port_info *pi = sc->port[idx];
7604	static const char *mod_str[] = {
7605		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7606	};
7607
7608	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7609		if_printf(pi->ifp, "transceiver unplugged.\n");
7610	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7611		if_printf(pi->ifp, "unknown transceiver inserted.\n");
7612	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7613		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7614	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7615		if_printf(pi->ifp, "%s transceiver inserted.\n",
7616		    mod_str[pi->mod_type]);
7617	} else {
7618		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7619		    pi->mod_type);
7620	}
7621}
7622
7623void
7624t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7625{
7626	struct port_info *pi = sc->port[idx];
7627	struct ifnet *ifp = pi->ifp;
7628
7629	if (link_stat) {
7630		pi->linkdnrc = -1;
7631		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7632		if_link_state_change(ifp, LINK_STATE_UP);
7633	} else {
7634		if (reason >= 0)
7635			pi->linkdnrc = reason;
7636		if_link_state_change(ifp, LINK_STATE_DOWN);
7637	}
7638}
7639
7640void
7641t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7642{
7643	struct adapter *sc;
7644
7645	sx_slock(&t4_list_lock);
7646	SLIST_FOREACH(sc, &t4_list, link) {
7647		/*
7648		 * func should not make any assumptions about what state sc is
7649		 * in - the only guarantee is that sc->sc_lock is a valid lock.
7650		 */
7651		func(sc, arg);
7652	}
7653	sx_sunlock(&t4_list_lock);
7654}
7655
7656static int
7657t4_open(struct cdev *dev, int flags, int type, struct thread *td)
7658{
7659	return (0);
7660}
7661
7662static int
7663t4_close(struct cdev *dev, int flags, int type, struct thread *td)
7664{
7665	return (0);
7666}

static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);
		pi = sc->port[port_id];

		/* MAC stats */
		t4_clr_port_stats(sc, pi->tx_chan);

		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	case CHELSIO_T4_SCHED_CLASS:
		rc = set_sched_class(sc, (struct t4_sched_params *)data);
		break;
	case CHELSIO_T4_SCHED_QUEUE:
		rc = set_sched_queue(sc, (struct t4_sched_queue *)data);
		break;
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}

#ifdef TCP_OFFLOAD
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		if (!(sc->flags & FULL_INIT_DONE)) {
			rc = cxgbe_init_synchronized(pi);
			if (rc)
				return (rc);
		}

		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t4_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				log(LOG_WARNING,
				    "You must kldload t4_tom.ko before trying "
				    "to enable TOE on a cxgbe interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}

/*
 * Add an upper layer driver to the global list.
 */
int
t4_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);
	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
	ui->refcount = 0;
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	sx_xlock(&t4_uld_list_lock);

	SLIST_FOREACH(u, &t4_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	sx_xunlock(&t4_uld_list_lock);
	return (rc);
}

int
t4_activate_uld(struct adapter *sc, int id)
{
	int rc = EAGAIN;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->activate(sc);
			if (rc == 0)
				ui->refcount++;
			goto done;
		}
	}
done:
	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}

int
t4_deactivate_uld(struct adapter *sc, int id)
{
	int rc = EINVAL;
	struct uld_info *ui;

	ASSERT_SYNCHRONIZED_OP(sc);

	sx_slock(&t4_uld_list_lock);

	SLIST_FOREACH(ui, &t4_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0)
				ui->refcount--;
			goto done;
		}
	}
done:
	sx_sunlock(&t4_uld_list_lock);

	return (rc);
}
#endif
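
/*
 * Illustrative sketch (hypothetical, assumes TCP_OFFLOAD): a ULD module
 * typically registers on MOD_LOAD and unregisters on MOD_UNLOAD;
 * t4_unregister_uld() returns EBUSY while any adapter still has the ULD
 * active (refcount > 0).
 */
#if 0
static int
my_uld_mod_event(module_t mod, int cmd, void *arg)
{
	switch (cmd) {
	case MOD_LOAD:
		return (t4_register_uld(&my_uld_info));
	case MOD_UNLOAD:
		return (t4_unregister_uld(&my_uld_info));
	default:
		return (EOPNOTSUPP);
	}
}
#endif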

/*
 * Come up with reasonable defaults for some of the tunables, provided they're
 * not set by the user (in which case we'll use the values as is).
 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	if (t4_ntxq10g < 1)
		t4_ntxq10g = min(nc, NTXQ_10G);

	if (t4_ntxq1g < 1)
		t4_ntxq1g = min(nc, NTXQ_1G);

	if (t4_nrxq10g < 1)
		t4_nrxq10g = min(nc, NRXQ_10G);

	if (t4_nrxq1g < 1)
		t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}

static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		sx_init(&t4_list_lock, "T4/T5 adapters");
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
		SLIST_INIT(&t4_uld_list);
#endif
		t4_tracer_modload();
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
		t4_tracer_modunload();
#ifdef TCP_OFFLOAD
		sx_slock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_uld_list_lock);
			break;
		}
		sx_sunlock(&t4_uld_list_lock);
		sx_destroy(&t4_uld_list_lock);
#endif
		sx_slock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			sx_sunlock(&t4_list_lock);
			break;
		}
		sx_sunlock(&t4_list_lock);
		sx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}

static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);