t4_main.c (r253890 -> r254577)
1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
 29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_main.c 254577 2013-08-20 18:22:04Z np $");
30
31#include "opt_inet.h"
32#include "opt_inet6.h"
33
34#include <sys/param.h>
35#include <sys/conf.h>
36#include <sys/priv.h>
37#include <sys/kernel.h>
38#include <sys/bus.h>
39#include <sys/module.h>
40#include <sys/malloc.h>
41#include <sys/queue.h>
42#include <sys/taskqueue.h>
43#include <sys/pciio.h>
44#include <dev/pci/pcireg.h>
45#include <dev/pci/pcivar.h>
46#include <dev/pci/pci_private.h>
47#include <sys/firmware.h>
48#include <sys/sbuf.h>
49#include <sys/smp.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/sysctl.h>
53#include <net/ethernet.h>
54#include <net/if.h>
55#include <net/if_types.h>
56#include <net/if_dl.h>
57#include <net/if_vlan_var.h>
58#if defined(__i386__) || defined(__amd64__)
59#include <vm/vm.h>
60#include <vm/pmap.h>
61#endif
62
63#include "common/common.h"
64#include "common/t4_msg.h"
65#include "common/t4_regs.h"
66#include "common/t4_regs_values.h"
67#include "t4_ioctl.h"
68#include "t4_l2t.h"
69
70/* T4 bus driver interface */
71static int t4_probe(device_t);
72static int t4_attach(device_t);
73static int t4_detach(device_t);
74static device_method_t t4_methods[] = {
75 DEVMETHOD(device_probe, t4_probe),
76 DEVMETHOD(device_attach, t4_attach),
77 DEVMETHOD(device_detach, t4_detach),
78
79 DEVMETHOD_END
80};
81static driver_t t4_driver = {
82 "t4nex",
83 t4_methods,
84 sizeof(struct adapter)
85};
86
87
88/* T4 port (cxgbe) interface */
89static int cxgbe_probe(device_t);
90static int cxgbe_attach(device_t);
91static int cxgbe_detach(device_t);
92static device_method_t cxgbe_methods[] = {
93 DEVMETHOD(device_probe, cxgbe_probe),
94 DEVMETHOD(device_attach, cxgbe_attach),
95 DEVMETHOD(device_detach, cxgbe_detach),
 96	DEVMETHOD_END
97};
98static driver_t cxgbe_driver = {
99 "cxgbe",
100 cxgbe_methods,
101 sizeof(struct port_info)
102};
103
104static d_ioctl_t t4_ioctl;
105static d_open_t t4_open;
106static d_close_t t4_close;
107
108static struct cdevsw t4_cdevsw = {
109 .d_version = D_VERSION,
110 .d_flags = 0,
111 .d_open = t4_open,
112 .d_close = t4_close,
113 .d_ioctl = t4_ioctl,
114 .d_name = "t4nex",
115};
116
117/* T5 bus driver interface */
118static int t5_probe(device_t);
119static device_method_t t5_methods[] = {
120 DEVMETHOD(device_probe, t5_probe),
121 DEVMETHOD(device_attach, t4_attach),
122 DEVMETHOD(device_detach, t4_detach),
123
124 DEVMETHOD_END
125};
126static driver_t t5_driver = {
127 "t5nex",
128 t5_methods,
129 sizeof(struct adapter)
130};
131
132
133/* T5 port (cxl) interface */
134static driver_t cxl_driver = {
135 "cxl",
136 cxgbe_methods,
137 sizeof(struct port_info)
138};
139
140static struct cdevsw t5_cdevsw = {
141 .d_version = D_VERSION,
142 .d_flags = 0,
143 .d_open = t4_open,
144 .d_close = t4_close,
145 .d_ioctl = t4_ioctl,
146 .d_name = "t5nex",
147};
148
149/* ifnet + media interface */
150static void cxgbe_init(void *);
151static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153static void cxgbe_qflush(struct ifnet *);
154static int cxgbe_media_change(struct ifnet *);
155static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156
157MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
158
159/*
160 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
161 * then ADAPTER_LOCK, then t4_uld_list_lock.
162 */
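/*
 * A minimal sketch (illustrative, not code from the driver) of acquiring
 * all three in the required order:
 *
 *	mtx_lock(&t4_list_lock);
 *	ADAPTER_LOCK(sc);
 *	mtx_lock(&t4_uld_list_lock);
 *	...
 *	mtx_unlock(&t4_uld_list_lock);
 *	ADAPTER_UNLOCK(sc);
 *	mtx_unlock(&t4_list_lock);
 */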
163static struct mtx t4_list_lock;
164static SLIST_HEAD(, adapter) t4_list;
165#ifdef TCP_OFFLOAD
166static struct mtx t4_uld_list_lock;
167static SLIST_HEAD(, uld_info) t4_uld_list;
168#endif
169
170/*
171 * Tunables. See tweak_tunables() too.
172 *
173 * Each tunable is set to a default value here if it's known at compile-time.
174 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175 * provide a reasonable default when the driver is loaded.
176 *
177 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to
178 * T5 are under hw.cxl.
179 */
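/*
 * Example (illustrative values): these tunables are set in
 * /boot/loader.conf before the module loads, e.g.:
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.holdoff_timer_idx_10G="2"
 *	hw.cxl.write_combine="1"
 */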
180
181/*
182 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
183 */
184#define NTXQ_10G 16
185static int t4_ntxq10g = -1;
186TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
187
188#define NRXQ_10G 8
189static int t4_nrxq10g = -1;
190TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
191
192#define NTXQ_1G 4
193static int t4_ntxq1g = -1;
194TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
195
196#define NRXQ_1G 2
197static int t4_nrxq1g = -1;
198TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
199
200#ifdef TCP_OFFLOAD
201#define NOFLDTXQ_10G 8
202static int t4_nofldtxq10g = -1;
203TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
204
205#define NOFLDRXQ_10G 2
206static int t4_nofldrxq10g = -1;
207TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
208
209#define NOFLDTXQ_1G 2
210static int t4_nofldtxq1g = -1;
211TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
212
213#define NOFLDRXQ_1G 1
214static int t4_nofldrxq1g = -1;
215TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
216#endif
217
218/*
219 * Holdoff parameters for 10G and 1G ports.
220 */
221#define TMR_IDX_10G 1
222static int t4_tmr_idx_10g = TMR_IDX_10G;
223TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
224
225#define PKTC_IDX_10G (-1)
226static int t4_pktc_idx_10g = PKTC_IDX_10G;
227TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
228
229#define TMR_IDX_1G 1
230static int t4_tmr_idx_1g = TMR_IDX_1G;
231TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
232
233#define PKTC_IDX_1G (-1)
234static int t4_pktc_idx_1g = PKTC_IDX_1G;
235TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
236
237/*
238 * Size (# of entries) of each tx and rx queue.
239 */
240static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
241TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
242
243static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
244TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
245
246/*
247 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
248 */
249static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
250TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
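/*
 * Example (illustrative): hw.cxgbe.interrupt_types="6" allows MSI-X (4) and
 * MSI (2) but not INTx (1); the default of 7 allows all three.
 */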
251
252/*
253 * Configuration file.
254 */
255#define DEFAULT_CF "default"
256#define FLASH_CF "flash"
257#define UWIRE_CF "uwire"
258#define FPGA_CF "fpga"
259static char t4_cfg_file[32] = DEFAULT_CF;
260TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
261
262/*
263 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
264 * encouraged respectively).
265 */
266static unsigned int t4_fw_install = 1;
267TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
268
269/*
270 * ASIC features that will be used. Disable the ones you don't want so that the
271 * chip resources aren't wasted on features that will not be used.
272 */
273static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
274TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
275
276static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
277TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
278
279static int t4_toecaps_allowed = -1;
280TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
281
282static int t4_rdmacaps_allowed = 0;
283TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
284
285static int t4_iscsicaps_allowed = 0;
286TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
287
288static int t4_fcoecaps_allowed = 0;
289TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
290
291static int t5_write_combine = 0;
292TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
293
294struct intrs_and_queues {
295 int intr_type; /* INTx, MSI, or MSI-X */
296 int nirq; /* Number of vectors */
297 int intr_flags;
298 int ntxq10g; /* # of NIC txq's for each 10G port */
299 int nrxq10g; /* # of NIC rxq's for each 10G port */
300 int ntxq1g; /* # of NIC txq's for each 1G port */
301 int nrxq1g; /* # of NIC rxq's for each 1G port */
302#ifdef TCP_OFFLOAD
303 int nofldtxq10g; /* # of TOE txq's for each 10G port */
304 int nofldrxq10g; /* # of TOE rxq's for each 10G port */
305 int nofldtxq1g; /* # of TOE txq's for each 1G port */
306 int nofldrxq1g; /* # of TOE rxq's for each 1G port */
307#endif
308};
309
310struct filter_entry {
311 uint32_t valid:1; /* filter allocated and valid */
312 uint32_t locked:1; /* filter is administratively locked */
313 uint32_t pending:1; /* filter action is pending firmware reply */
314 uint32_t smtidx:8; /* Source MAC Table index for smac */
315 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
316
317 struct t4_filter_specification fs;
318};
319
320enum {
321 XGMAC_MTU = (1 << 0),
322 XGMAC_PROMISC = (1 << 1),
323 XGMAC_ALLMULTI = (1 << 2),
324 XGMAC_VLANEX = (1 << 3),
325 XGMAC_UCADDR = (1 << 4),
326 XGMAC_MCADDRS = (1 << 5),
327
328 XGMAC_ALL = 0xffff
329};
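/*
 * These bits are or'ed into the flags argument of update_mac_settings(),
 * e.g. XGMAC_PROMISC | XGMAC_ALLMULTI when SIOCSIFFLAGS changes the
 * promiscuous or allmulti state (see cxgbe_ioctl below).
 */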
330
331static int map_bars_0_and_4(struct adapter *);
332static int map_bar_2(struct adapter *);
333static void setup_memwin(struct adapter *);
334static int validate_mem_range(struct adapter *, uint32_t, int);
335static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
336 uint32_t *);
337static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
338static uint32_t position_memwin(struct adapter *, int, uint32_t);
339static int cfg_itype_and_nqueues(struct adapter *, int, int,
340 struct intrs_and_queues *);
341static int prep_firmware(struct adapter *);
342static int partition_resources(struct adapter *, const struct firmware *,
343 const char *);
344static int get_params__pre_init(struct adapter *);
345static int get_params__post_init(struct adapter *);
346static int set_params__post_init(struct adapter *);
347static void t4_set_desc(struct adapter *);
348static void build_medialist(struct port_info *);
349static int update_mac_settings(struct port_info *, int);
350static int cxgbe_init_synchronized(struct port_info *);
351static int cxgbe_uninit_synchronized(struct port_info *);
352static int setup_intr_handlers(struct adapter *);
353static int adapter_full_init(struct adapter *);
354static int adapter_full_uninit(struct adapter *);
355static int port_full_init(struct port_info *);
356static int port_full_uninit(struct port_info *);
357static void quiesce_eq(struct adapter *, struct sge_eq *);
358static void quiesce_iq(struct adapter *, struct sge_iq *);
359static void quiesce_fl(struct adapter *, struct sge_fl *);
360static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
361 driver_intr_t *, void *, char *);
362static int t4_free_irq(struct adapter *, struct irq *);
363static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
364 unsigned int);
365static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
366static void cxgbe_tick(void *);
367static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
368static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
369 struct mbuf *);
370static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
371static int fw_msg_not_handled(struct adapter *, const __be64 *);
372static int t4_sysctls(struct adapter *);
373static int cxgbe_sysctls(struct port_info *);
374static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
375static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
376static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
377static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
378static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
379static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
380static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
381static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
382static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
383#ifdef SBUF_DRAIN
384static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
385static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
386static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
387static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
388static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
389static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
390static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
391static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
392static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
393static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
394static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
395static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
396static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
397static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
398static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
399static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
400static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
401static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
402static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
403static int sysctl_tids(SYSCTL_HANDLER_ARGS);
404static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
405static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
406static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
407static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
408static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
409#endif
410static inline void txq_start(struct ifnet *, struct sge_txq *);
411static uint32_t fconf_to_mode(uint32_t);
412static uint32_t mode_to_fconf(uint32_t);
413static uint32_t fspec_to_fconf(struct t4_filter_specification *);
414static int get_filter_mode(struct adapter *, uint32_t *);
415static int set_filter_mode(struct adapter *, uint32_t);
416static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
417static int get_filter(struct adapter *, struct t4_filter *);
418static int set_filter(struct adapter *, struct t4_filter *);
419static int del_filter(struct adapter *, struct t4_filter *);
420static void clear_filter(struct filter_entry *);
421static int set_filter_wr(struct adapter *, int);
422static int del_filter_wr(struct adapter *, int);
423static int get_sge_context(struct adapter *, struct t4_sge_context *);
424static int load_fw(struct adapter *, struct t4_data *);
425static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
426static int read_i2c(struct adapter *, struct t4_i2c_data *);
427#ifdef TCP_OFFLOAD
428static int toe_capability(struct port_info *, int);
429#endif
430static int mod_event(module_t, int, void *);
431
432struct {
433 uint16_t device;
434 char *desc;
435} t4_pciids[] = {
436 {0xa000, "Chelsio Terminator 4 FPGA"},
437 {0x4400, "Chelsio T440-dbg"},
438 {0x4401, "Chelsio T420-CR"},
439 {0x4402, "Chelsio T422-CR"},
440 {0x4403, "Chelsio T440-CR"},
441 {0x4404, "Chelsio T420-BCH"},
442 {0x4405, "Chelsio T440-BCH"},
443 {0x4406, "Chelsio T440-CH"},
444 {0x4407, "Chelsio T420-SO"},
445 {0x4408, "Chelsio T420-CX"},
446 {0x4409, "Chelsio T420-BT"},
447 {0x440a, "Chelsio T404-BT"},
448 {0x440e, "Chelsio T440-LP-CR"},
449}, t5_pciids[] = {
450 {0xb000, "Chelsio Terminator 5 FPGA"},
451 {0x5400, "Chelsio T580-dbg"},
452 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
453 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */
454 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
455 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
456 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
457 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
458 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
459 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
460 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
461 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
462 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
463 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
464#ifdef notyet
465 {0x5404, "Chelsio T520-BCH"},
466 {0x5405, "Chelsio T540-BCH"},
467 {0x5406, "Chelsio T540-CH"},
468 {0x5408, "Chelsio T520-CX"},
469 {0x540b, "Chelsio B520-SR"},
470 {0x540c, "Chelsio B504-BT"},
471 {0x540f, "Chelsio Amsterdam"},
472 {0x5413, "Chelsio T580-CHR"},
473#endif
474};
475
476#ifdef TCP_OFFLOAD
477/*
478 * service_iq() has an iq and needs the fl. Offset of fl from the iq should be
479 * exactly the same for both rxq and ofld_rxq.
480 */
481CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
482CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
483#endif
484
485/* No easy way to include t4_msg.h before adapter.h so we check this way */
486CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
487CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
488
489static int
490t4_probe(device_t dev)
491{
492 int i;
493 uint16_t v = pci_get_vendor(dev);
494 uint16_t d = pci_get_device(dev);
495 uint8_t f = pci_get_function(dev);
496
497 if (v != PCI_VENDOR_ID_CHELSIO)
498 return (ENXIO);
499
500 /* Attach only to PF0 of the FPGA */
501 if (d == 0xa000 && f != 0)
502 return (ENXIO);
503
504 for (i = 0; i < nitems(t4_pciids); i++) {
505 if (d == t4_pciids[i].device) {
506 device_set_desc(dev, t4_pciids[i].desc);
507 return (BUS_PROBE_DEFAULT);
508 }
509 }
510
511 return (ENXIO);
512}
513
514static int
515t5_probe(device_t dev)
516{
517 int i;
518 uint16_t v = pci_get_vendor(dev);
519 uint16_t d = pci_get_device(dev);
520 uint8_t f = pci_get_function(dev);
521
522 if (v != PCI_VENDOR_ID_CHELSIO)
523 return (ENXIO);
524
525 /* Attach only to PF0 of the FPGA */
526 if (d == 0xb000 && f != 0)
527 return (ENXIO);
528
529 for (i = 0; i < nitems(t5_pciids); i++) {
530 if (d == t5_pciids[i].device) {
531 device_set_desc(dev, t5_pciids[i].desc);
532 return (BUS_PROBE_DEFAULT);
533 }
534 }
535
536 return (ENXIO);
537}
538
539static int
540t4_attach(device_t dev)
541{
542 struct adapter *sc;
543 int rc = 0, i, n10g, n1g, rqidx, tqidx;
544 struct intrs_and_queues iaq;
545 struct sge *s;
546#ifdef TCP_OFFLOAD
547 int ofld_rqidx, ofld_tqidx;
548#endif
549
550 sc = device_get_softc(dev);
551 sc->dev = dev;
552
553 pci_enable_busmaster(dev);
554 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
555 uint32_t v;
556
557 pci_set_max_read_req(dev, 4096);
558 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
559 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
560 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
561 }
562
563 sc->traceq = -1;
 564	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
 565	    device_get_nameunit(dev));
 566	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
567
568 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
569 device_get_nameunit(dev));
570 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
571 mtx_lock(&t4_list_lock);
572 SLIST_INSERT_HEAD(&t4_list, sc, link);
573 mtx_unlock(&t4_list_lock);
574
575 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
576 TAILQ_INIT(&sc->sfl);
577 callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
578
579 rc = map_bars_0_and_4(sc);
580 if (rc != 0)
581 goto done; /* error message displayed already */
582
583 /*
584 * This is the real PF# to which we're attaching. Works from within PCI
585 * passthrough environments too, where pci_get_function() could return a
586 * different PF# depending on the passthrough configuration. We need to
587 * use the real PF# in all our communication with the firmware.
588 */
589 sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
590 sc->mbox = sc->pf;
591
592 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
593 sc->an_handler = an_not_handled;
594 for (i = 0; i < nitems(sc->cpl_handler); i++)
595 sc->cpl_handler[i] = cpl_not_handled;
596 for (i = 0; i < nitems(sc->fw_msg_handler); i++)
597 sc->fw_msg_handler[i] = fw_msg_not_handled;
598 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
599 t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
600 t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
601 t4_init_sge_cpl_handlers(sc);
602
603
604 /* Prepare the adapter for operation */
605 rc = -t4_prep_adapter(sc);
606 if (rc != 0) {
607 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
608 goto done;
609 }
610
611 /*
612 * Do this really early, with the memory windows set up even before the
613 * character device. The userland tool's register i/o and mem read
614 * will work even in "recovery mode".
615 */
616 setup_memwin(sc);
617 sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
618 device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
619 device_get_nameunit(dev));
620 if (sc->cdev == NULL)
621 device_printf(dev, "failed to create nexus char device.\n");
622 else
623 sc->cdev->si_drv1 = sc;
624
625 /* Go no further if recovery mode has been requested. */
626 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
627 device_printf(dev, "recovery mode.\n");
628 goto done;
629 }
630
631 /* Prepare the firmware for operation */
632 rc = prep_firmware(sc);
633 if (rc != 0)
634 goto done; /* error message displayed already */
635
636 rc = get_params__post_init(sc);
637 if (rc != 0)
638 goto done; /* error message displayed already */
639
640 rc = set_params__post_init(sc);
641 if (rc != 0)
642 goto done; /* error message displayed already */
643
644 rc = map_bar_2(sc);
645 if (rc != 0)
646 goto done; /* error message displayed already */
647
648 rc = t4_create_dma_tag(sc);
649 if (rc != 0)
650 goto done; /* error message displayed already */
651
652 /*
653 * First pass over all the ports - allocate VIs and initialize some
654 * basic parameters like mac address, port type, etc. We also figure
655 * out whether a port is 10G or 1G and use that information when
656 * calculating how many interrupts to attempt to allocate.
657 */
658 n10g = n1g = 0;
659 for_each_port(sc, i) {
660 struct port_info *pi;
661
662 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
663 sc->port[i] = pi;
664
665 /* These must be set before t4_port_init */
666 pi->adapter = sc;
667 pi->port_id = i;
668
669 /* Allocate the vi and initialize parameters like mac addr */
670 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
671 if (rc != 0) {
672 device_printf(dev, "unable to initialize port %d: %d\n",
673 i, rc);
674 free(pi, M_CXGBE);
675 sc->port[i] = NULL;
676 goto done;
677 }
678
679 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
680 device_get_nameunit(dev), i);
681 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
682 sc->chan_map[pi->tx_chan] = i;
683
684 if (is_10G_port(pi) || is_40G_port(pi)) {
685 n10g++;
686 pi->tmr_idx = t4_tmr_idx_10g;
687 pi->pktc_idx = t4_pktc_idx_10g;
688 } else {
689 n1g++;
690 pi->tmr_idx = t4_tmr_idx_1g;
691 pi->pktc_idx = t4_pktc_idx_1g;
692 }
693
694 pi->xact_addr_filt = -1;
695 pi->linkdnrc = -1;
696
697 pi->qsize_rxq = t4_qsize_rxq;
698 pi->qsize_txq = t4_qsize_txq;
699
700 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
701 if (pi->dev == NULL) {
702 device_printf(dev,
703 "failed to add device for port %d.\n", i);
704 rc = ENXIO;
705 goto done;
706 }
707 device_set_softc(pi->dev, pi);
708 }
709
710 /*
711 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
712 */
713 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
714 if (rc != 0)
715 goto done; /* error message displayed already */
716
717 sc->intr_type = iaq.intr_type;
718 sc->intr_count = iaq.nirq;
719 sc->flags |= iaq.intr_flags;
720
721 s = &sc->sge;
722 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
723 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
724 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
725 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
726 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
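	/*
	 * Worked example (illustrative): a 2-port 10G adapter using the
	 * compiled-in defaults (NTXQ_10G = 16, NRXQ_10G = 8) gets
	 * ntxq = 32, nrxq = 16, neq = 48 + (2 + 1) = 51, niq = 16 + 1 = 17.
	 */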
727
728#ifdef TCP_OFFLOAD
729 if (is_offload(sc)) {
730
731 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
732 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
733 s->neq += s->nofldtxq + s->nofldrxq;
734 s->niq += s->nofldrxq;
735
736 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
737 M_CXGBE, M_ZERO | M_WAITOK);
738 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
739 M_CXGBE, M_ZERO | M_WAITOK);
740 }
741#endif
742
743 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
744 M_ZERO | M_WAITOK);
745 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
746 M_ZERO | M_WAITOK);
747 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
748 M_ZERO | M_WAITOK);
749 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
750 M_ZERO | M_WAITOK);
751 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
752 M_ZERO | M_WAITOK);
753
754 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
755 M_ZERO | M_WAITOK);
756
757 t4_init_l2t(sc, M_WAITOK);
758
759 /*
760 * Second pass over the ports. This time we know the number of rx and
761 * tx queues that each port should get.
762 */
763 rqidx = tqidx = 0;
764#ifdef TCP_OFFLOAD
765 ofld_rqidx = ofld_tqidx = 0;
766#endif
767 for_each_port(sc, i) {
768 struct port_info *pi = sc->port[i];
769
770 if (pi == NULL)
771 continue;
772
773 pi->first_rxq = rqidx;
774 pi->first_txq = tqidx;
775 if (is_10G_port(pi) || is_40G_port(pi)) {
776 pi->nrxq = iaq.nrxq10g;
777 pi->ntxq = iaq.ntxq10g;
778 } else {
779 pi->nrxq = iaq.nrxq1g;
780 pi->ntxq = iaq.ntxq1g;
781 }
782
783 rqidx += pi->nrxq;
784 tqidx += pi->ntxq;
785
786#ifdef TCP_OFFLOAD
787 if (is_offload(sc)) {
788 pi->first_ofld_rxq = ofld_rqidx;
789 pi->first_ofld_txq = ofld_tqidx;
790 if (is_10G_port(pi) || is_40G_port(pi)) {
791 pi->nofldrxq = iaq.nofldrxq10g;
792 pi->nofldtxq = iaq.nofldtxq10g;
793 } else {
794 pi->nofldrxq = iaq.nofldrxq1g;
795 pi->nofldtxq = iaq.nofldtxq1g;
796 }
797 ofld_rqidx += pi->nofldrxq;
798 ofld_tqidx += pi->nofldtxq;
799 }
800#endif
801 }
802
803 rc = setup_intr_handlers(sc);
804 if (rc != 0) {
805 device_printf(dev,
806 "failed to setup interrupt handlers: %d\n", rc);
807 goto done;
808 }
809
810 rc = bus_generic_attach(dev);
811 if (rc != 0) {
812 device_printf(dev,
813 "failed to attach all child ports: %d\n", rc);
814 goto done;
815 }
816
817 device_printf(dev,
818 "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
819 sc->params.pci.width, sc->params.nports, sc->intr_count,
820 sc->intr_type == INTR_MSIX ? "MSI-X" :
821 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
822 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
823
824 t4_set_desc(sc);
825
826done:
827 if (rc != 0 && sc->cdev) {
828 /* cdev was created and so cxgbetool works; recover that way. */
829 device_printf(dev,
830 "error during attach, adapter is now in recovery mode.\n");
831 rc = 0;
832 }
833
834 if (rc != 0)
835 t4_detach(dev);
836 else
837 t4_sysctls(sc);
838
839 return (rc);
840}
841
842/*
843 * Idempotent
844 */
845static int
846t4_detach(device_t dev)
847{
848 struct adapter *sc;
849 struct port_info *pi;
850 int i, rc;
851
852 sc = device_get_softc(dev);
853
854 if (sc->flags & FULL_INIT_DONE)
855 t4_intr_disable(sc);
856
857 if (sc->cdev) {
858 destroy_dev(sc->cdev);
859 sc->cdev = NULL;
860 }
861
862 rc = bus_generic_detach(dev);
863 if (rc) {
864 device_printf(dev,
865 "failed to detach child devices: %d\n", rc);
866 return (rc);
867 }
868
869 for (i = 0; i < sc->intr_count; i++)
870 t4_free_irq(sc, &sc->irq[i]);
871
872 for (i = 0; i < MAX_NPORTS; i++) {
873 pi = sc->port[i];
874 if (pi) {
875 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
876 if (pi->dev)
877 device_delete_child(dev, pi->dev);
878
879 mtx_destroy(&pi->pi_lock);
880 free(pi, M_CXGBE);
881 }
882 }
883
884 if (sc->flags & FULL_INIT_DONE)
885 adapter_full_uninit(sc);
886
887 if (sc->flags & FW_OK)
888 t4_fw_bye(sc, sc->mbox);
889
890 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
891 pci_release_msi(dev);
892
893 if (sc->regs_res)
894 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
895 sc->regs_res);
896
897 if (sc->udbs_res)
898 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
899 sc->udbs_res);
900
901 if (sc->msix_res)
902 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
903 sc->msix_res);
904
905 if (sc->l2t)
906 t4_free_l2t(sc->l2t);
907
908#ifdef TCP_OFFLOAD
909 free(sc->sge.ofld_rxq, M_CXGBE);
910 free(sc->sge.ofld_txq, M_CXGBE);
911#endif
912 free(sc->irq, M_CXGBE);
913 free(sc->sge.rxq, M_CXGBE);
914 free(sc->sge.txq, M_CXGBE);
915 free(sc->sge.ctrlq, M_CXGBE);
916 free(sc->sge.iqmap, M_CXGBE);
917 free(sc->sge.eqmap, M_CXGBE);
918 free(sc->tids.ftid_tab, M_CXGBE);
919 t4_destroy_dma_tag(sc);
920 if (mtx_initialized(&sc->sc_lock)) {
921 mtx_lock(&t4_list_lock);
922 SLIST_REMOVE(&t4_list, sc, adapter, link);
923 mtx_unlock(&t4_list_lock);
924 mtx_destroy(&sc->sc_lock);
925 }
926
927 if (mtx_initialized(&sc->tids.ftid_lock))
928 mtx_destroy(&sc->tids.ftid_lock);
929 if (mtx_initialized(&sc->sfl_lock))
930 mtx_destroy(&sc->sfl_lock);
931 if (mtx_initialized(&sc->ifp_lock))
932 mtx_destroy(&sc->ifp_lock);
933
934 bzero(sc, sizeof(*sc));
935
936 return (0);
937}
938
939
940static int
941cxgbe_probe(device_t dev)
942{
943 char buf[128];
944 struct port_info *pi = device_get_softc(dev);
945
946 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
947 device_set_desc_copy(dev, buf);
948
949 return (BUS_PROBE_DEFAULT);
950}
951
952#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
953 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
954 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
955#define T4_CAP_ENABLE (T4_CAP)
956
957static int
958cxgbe_attach(device_t dev)
959{
960 struct port_info *pi = device_get_softc(dev);
961 struct ifnet *ifp;
962
963 /* Allocate an ifnet and set it up */
964 ifp = if_alloc(IFT_ETHER);
965 if (ifp == NULL) {
966 device_printf(dev, "Cannot allocate ifnet\n");
967 return (ENOMEM);
968 }
969 pi->ifp = ifp;
970 ifp->if_softc = pi;
971
972 callout_init(&pi->tick, CALLOUT_MPSAFE);
973
974 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
975 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
976
977 ifp->if_init = cxgbe_init;
978 ifp->if_ioctl = cxgbe_ioctl;
979 ifp->if_transmit = cxgbe_transmit;
980 ifp->if_qflush = cxgbe_qflush;
981
982 ifp->if_capabilities = T4_CAP;
983#ifdef TCP_OFFLOAD
984 if (is_offload(pi->adapter))
985 ifp->if_capabilities |= IFCAP_TOE;
986#endif
987 ifp->if_capenable = T4_CAP_ENABLE;
988 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
989 CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
990
991 /* Initialize ifmedia for this port */
992 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
993 cxgbe_media_status);
994 build_medialist(pi);
995
996 pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
997 EVENTHANDLER_PRI_ANY);
998
999 ether_ifattach(ifp, pi->hw_addr);
1000
1001#ifdef TCP_OFFLOAD
1002 if (is_offload(pi->adapter)) {
1003 device_printf(dev,
1004 "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
1005 pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
1006 } else
1007#endif
1008 device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
1009
1010 cxgbe_sysctls(pi);
1011
1012 return (0);
1013}
1014
1015static int
1016cxgbe_detach(device_t dev)
1017{
1018 struct port_info *pi = device_get_softc(dev);
1019 struct adapter *sc = pi->adapter;
1020 struct ifnet *ifp = pi->ifp;
1021
1022 /* Tell if_ioctl and if_init that the port is going away */
1023 ADAPTER_LOCK(sc);
1024 SET_DOOMED(pi);
1025 wakeup(&sc->flags);
1026 while (IS_BUSY(sc))
1027 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
1028 SET_BUSY(sc);
1029#ifdef INVARIANTS
1030 sc->last_op = "t4detach";
1031 sc->last_op_thr = curthread;
1032#endif
1033 ADAPTER_UNLOCK(sc);
1034
1035 if (pi->flags & HAS_TRACEQ) {
1036 sc->traceq = -1; /* cloner should not create ifnet */
1037 t4_tracer_port_detach(sc);
1038 }
1039
1040 if (pi->vlan_c)
1041 EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);
1042
1043 PORT_LOCK(pi);
1044 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1045 callout_stop(&pi->tick);
1046 PORT_UNLOCK(pi);
1047 callout_drain(&pi->tick);
1048
1049 /* Let detach proceed even if these fail. */
1050 cxgbe_uninit_synchronized(pi);
1051 port_full_uninit(pi);
1052
1053 ifmedia_removeall(&pi->media);
1054 ether_ifdetach(pi->ifp);
1055 if_free(pi->ifp);
1056
1057 ADAPTER_LOCK(sc);
1058 CLR_BUSY(sc);
1059 wakeup(&sc->flags);
1060 ADAPTER_UNLOCK(sc);
1061
1062 return (0);
1063}
1064
1065static void
1066cxgbe_init(void *arg)
1067{
1068 struct port_info *pi = arg;
1069 struct adapter *sc = pi->adapter;
1070
1071 if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1072 return;
1073 cxgbe_init_synchronized(pi);
1074 end_synchronized_op(sc, 0);
1075}
1076
1077static int
1078cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1079{
1080 int rc = 0, mtu, flags;
1081 struct port_info *pi = ifp->if_softc;
1082 struct adapter *sc = pi->adapter;
1083 struct ifreq *ifr = (struct ifreq *)data;
1084 uint32_t mask;
1085
1086 switch (cmd) {
1087 case SIOCSIFMTU:
1088 mtu = ifr->ifr_mtu;
1089 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1090 return (EINVAL);
1091
1092 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1093 if (rc)
1094 return (rc);
1095 ifp->if_mtu = mtu;
1096 if (pi->flags & PORT_INIT_DONE) {
1097 t4_update_fl_bufsize(ifp);
1098 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1099 rc = update_mac_settings(pi, XGMAC_MTU);
1100 }
1101 end_synchronized_op(sc, 0);
1102 break;
1103
1104 case SIOCSIFFLAGS:
1105 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1106 if (rc)
1107 return (rc);
1108
1109 if (ifp->if_flags & IFF_UP) {
1110 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1111 flags = pi->if_flags;
1112 if ((ifp->if_flags ^ flags) &
1113 (IFF_PROMISC | IFF_ALLMULTI)) {
1114 rc = update_mac_settings(pi,
1115 XGMAC_PROMISC | XGMAC_ALLMULTI);
1116 }
1117 } else
1118 rc = cxgbe_init_synchronized(pi);
1119 pi->if_flags = ifp->if_flags;
1120 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1121 rc = cxgbe_uninit_synchronized(pi);
1122 end_synchronized_op(sc, 0);
1123 break;
1124
1125 case SIOCADDMULTI:
1126 case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1127 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1128 if (rc)
1129 return (rc);
1130 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1131 rc = update_mac_settings(pi, XGMAC_MCADDRS);
1132 end_synchronized_op(sc, LOCK_HELD);
1133 break;
1134
1135 case SIOCSIFCAP:
1136 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1137 if (rc)
1138 return (rc);
1139
1140 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1141 if (mask & IFCAP_TXCSUM) {
1142 ifp->if_capenable ^= IFCAP_TXCSUM;
1143 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1144
1145 if (IFCAP_TSO4 & ifp->if_capenable &&
1146 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1147 ifp->if_capenable &= ~IFCAP_TSO4;
1148 if_printf(ifp,
1149 "tso4 disabled due to -txcsum.\n");
1150 }
1151 }
1152 if (mask & IFCAP_TXCSUM_IPV6) {
1153 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1154 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1155
1156 if (IFCAP_TSO6 & ifp->if_capenable &&
1157 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1158 ifp->if_capenable &= ~IFCAP_TSO6;
1159 if_printf(ifp,
1160 "tso6 disabled due to -txcsum6.\n");
1161 }
1162 }
1163 if (mask & IFCAP_RXCSUM)
1164 ifp->if_capenable ^= IFCAP_RXCSUM;
1165 if (mask & IFCAP_RXCSUM_IPV6)
1166 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1167
1168 /*
1169 * Note that we leave CSUM_TSO alone (it is always set). The
1170 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1171 * sending a TSO request our way, so it's sufficient to toggle
1172 * IFCAP_TSOx only.
1173 */
1174 if (mask & IFCAP_TSO4) {
1175 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1176 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1177 if_printf(ifp, "enable txcsum first.\n");
1178 rc = EAGAIN;
1179 goto fail;
1180 }
1181 ifp->if_capenable ^= IFCAP_TSO4;
1182 }
1183 if (mask & IFCAP_TSO6) {
1184 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1185 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1186 if_printf(ifp, "enable txcsum6 first.\n");
1187 rc = EAGAIN;
1188 goto fail;
1189 }
1190 ifp->if_capenable ^= IFCAP_TSO6;
1191 }
1192 if (mask & IFCAP_LRO) {
1193#if defined(INET) || defined(INET6)
1194 int i;
1195 struct sge_rxq *rxq;
1196
1197 ifp->if_capenable ^= IFCAP_LRO;
1198 for_each_rxq(pi, i, rxq) {
1199 if (ifp->if_capenable & IFCAP_LRO)
1200 rxq->iq.flags |= IQ_LRO_ENABLED;
1201 else
1202 rxq->iq.flags &= ~IQ_LRO_ENABLED;
1203 }
1204#endif
1205 }
1206#ifdef TCP_OFFLOAD
1207 if (mask & IFCAP_TOE) {
1208 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1209
1210 rc = toe_capability(pi, enable);
1211 if (rc != 0)
1212 goto fail;
1213
1214 ifp->if_capenable ^= mask;
1215 }
1216#endif
1217 if (mask & IFCAP_VLAN_HWTAGGING) {
1218 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1219 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1220 rc = update_mac_settings(pi, XGMAC_VLANEX);
1221 }
1222 if (mask & IFCAP_VLAN_MTU) {
1223 ifp->if_capenable ^= IFCAP_VLAN_MTU;
1224
1225 /* Need to find out how to disable auto-mtu-inflation */
1226 }
1227 if (mask & IFCAP_VLAN_HWTSO)
1228 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1229 if (mask & IFCAP_VLAN_HWCSUM)
1230 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1231
1232#ifdef VLAN_CAPABILITIES
1233 VLAN_CAPABILITIES(ifp);
1234#endif
1235fail:
1236 end_synchronized_op(sc, 0);
1237 break;
1238
1239 case SIOCSIFMEDIA:
1240 case SIOCGIFMEDIA:
1241 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1242 break;
1243
1244 default:
1245 rc = ether_ioctl(ifp, cmd, data);
1246 }
1247
1248 return (rc);
1249}
1250
1251static int
1252cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1253{
1254 struct port_info *pi = ifp->if_softc;
1255 struct adapter *sc = pi->adapter;
1256 struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1257 struct buf_ring *br;
1258 int rc;
1259
1260 M_ASSERTPKTHDR(m);
1261
1262 if (__predict_false(pi->link_cfg.link_ok == 0)) {
1263 m_freem(m);
1264 return (ENETDOWN);
1265 }
1266
1267 if (m->m_flags & M_FLOWID)
1268 txq += (m->m_pkthdr.flowid % pi->ntxq);
1269 br = txq->br;
1270
1271 if (TXQ_TRYLOCK(txq) == 0) {
1272 struct sge_eq *eq = &txq->eq;
1273
1274 /*
1275 * It is possible that t4_eth_tx finishes up and releases the
1276 * lock between the TRYLOCK above and the drbr_enqueue here. We
1277 * need to make sure that this mbuf doesn't just sit there in
1278 * the drbr.
1279 */
1280
1281 rc = drbr_enqueue(ifp, br, m);
1282 if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
1283 !(eq->flags & EQ_DOOMED))
1284 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1285 return (rc);
1286 }
1287
1288 /*
1289 * txq->m is the mbuf that is held up due to a temporary shortage of
1290 * resources and it should be put on the wire first. Then what's in
1291 * drbr and finally the mbuf that was just passed in to us.
1292 *
1293 * Return code should indicate the fate of the mbuf that was passed in
1294 * this time.
1295 */
1296
1297 TXQ_LOCK_ASSERT_OWNED(txq);
1298 if (drbr_needs_enqueue(ifp, br) || txq->m) {
1299
1300 /* Queued for transmission. */
1301
1302 rc = drbr_enqueue(ifp, br, m);
1303 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
1304 (void) t4_eth_tx(ifp, txq, m);
1305 TXQ_UNLOCK(txq);
1306 return (rc);
1307 }
1308
1309 /* Direct transmission. */
1310 rc = t4_eth_tx(ifp, txq, m);
1311 if (rc != 0 && txq->m)
1312 rc = 0; /* held, will be transmitted soon (hopefully) */
1313
1314 TXQ_UNLOCK(txq);
1315 return (rc);
1316}
1317
1318static void
1319cxgbe_qflush(struct ifnet *ifp)
1320{
1321 struct port_info *pi = ifp->if_softc;
1322 struct sge_txq *txq;
1323 int i;
1324 struct mbuf *m;
1325
1326 /* queues do not exist if !PORT_INIT_DONE. */
1327 if (pi->flags & PORT_INIT_DONE) {
1328 for_each_txq(pi, i, txq) {
1329 TXQ_LOCK(txq);
1330 m_freem(txq->m);
1331 txq->m = NULL;
1332 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1333 m_freem(m);
1334 TXQ_UNLOCK(txq);
1335 }
1336 }
1337 if_qflush(ifp);
1338}
1339
1340static int
1341cxgbe_media_change(struct ifnet *ifp)
1342{
1343 struct port_info *pi = ifp->if_softc;
1344
1345 device_printf(pi->dev, "%s unimplemented.\n", __func__);
1346
1347 return (EOPNOTSUPP);
1348}
1349
1350static void
1351cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1352{
1353 struct port_info *pi = ifp->if_softc;
1354 struct ifmedia_entry *cur = pi->media.ifm_cur;
1355 int speed = pi->link_cfg.speed;
1356 int data = (pi->port_type << 8) | pi->mod_type;
1357
1358 if (cur->ifm_data != data) {
1359 build_medialist(pi);
1360 cur = pi->media.ifm_cur;
1361 }
1362
1363 ifmr->ifm_status = IFM_AVALID;
1364 if (!pi->link_cfg.link_ok)
1365 return;
1366
1367 ifmr->ifm_status |= IFM_ACTIVE;
1368
1369 /* active and current will differ iff current media is autoselect. */
1370 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1371 return;
1372
1373 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1374 if (speed == SPEED_10000)
1375 ifmr->ifm_active |= IFM_10G_T;
1376 else if (speed == SPEED_1000)
1377 ifmr->ifm_active |= IFM_1000_T;
1378 else if (speed == SPEED_100)
1379 ifmr->ifm_active |= IFM_100_TX;
1380 else if (speed == SPEED_10)
1381 ifmr->ifm_active |= IFM_10_T;
1382 else
1383 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1384 speed));
1385}
1386
1387void
1388t4_fatal_err(struct adapter *sc)
1389{
1390 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1391 t4_intr_disable(sc);
1392 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1393 device_get_nameunit(sc->dev));
1394}
1395
1396static int
1397map_bars_0_and_4(struct adapter *sc)
1398{
1399 sc->regs_rid = PCIR_BAR(0);
1400 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1401 &sc->regs_rid, RF_ACTIVE);
1402 if (sc->regs_res == NULL) {
1403 device_printf(sc->dev, "cannot map registers.\n");
1404 return (ENXIO);
1405 }
1406 sc->bt = rman_get_bustag(sc->regs_res);
1407 sc->bh = rman_get_bushandle(sc->regs_res);
1408 sc->mmio_len = rman_get_size(sc->regs_res);
1409 setbit(&sc->doorbells, DOORBELL_KDB);
1410
1411 sc->msix_rid = PCIR_BAR(4);
1412 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1413 &sc->msix_rid, RF_ACTIVE);
1414 if (sc->msix_res == NULL) {
1415 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1416 return (ENXIO);
1417 }
1418
1419 return (0);
1420}
1421
1422static int
1423map_bar_2(struct adapter *sc)
1424{
1425
1426 /*
1427 * T4: only iWARP driver uses the userspace doorbells. There is no need
1428 * to map it if RDMA is disabled.
1429 */
1430 if (is_t4(sc) && sc->rdmacaps == 0)
1431 return (0);
1432
1433 sc->udbs_rid = PCIR_BAR(2);
1434 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1435 &sc->udbs_rid, RF_ACTIVE);
1436 if (sc->udbs_res == NULL) {
1437 device_printf(sc->dev, "cannot map doorbell BAR.\n");
1438 return (ENXIO);
1439 }
1440 sc->udbs_base = rman_get_virtual(sc->udbs_res);
1441
1442 if (is_t5(sc)) {
1443 setbit(&sc->doorbells, DOORBELL_UDB);
1444#if defined(__i386__) || defined(__amd64__)
1445 if (t5_write_combine) {
1446 int rc;
1447
1448 /*
1449 * Enable write combining on BAR2. This is the
1450 * userspace doorbell BAR and is split into 128B
1451 * (UDBS_SEG_SIZE) doorbell regions, each associated
1452 * with an egress queue. The first 64B has the doorbell
1453 * and the second 64B can be used to submit a tx work
1454 * request with an implicit doorbell.
1455 */
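			/*
			 * Illustrative layout of one such 128B region (an
			 * assumption based on the comment above, not a
			 * hardware definition):
			 *
			 *	offset   0..63 : doorbell
			 *	offset 64..127 : WC buffer for an inline
			 *			 tx work request
			 */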
1456
1457 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
1458 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
1459 if (rc == 0) {
1460 clrbit(&sc->doorbells, DOORBELL_UDB);
1461 setbit(&sc->doorbells, DOORBELL_WCWR);
1462 setbit(&sc->doorbells, DOORBELL_UDBWC);
1463 } else {
1464 device_printf(sc->dev,
1465 "couldn't enable write combining: %d\n",
1466 rc);
1467 }
1468
1469 t4_write_reg(sc, A_SGE_STAT_CFG,
1470 V_STATSOURCE_T5(7) | V_STATMODE(0));
1471 }
1472#endif
1473 }
1474
1475 return (0);
1476}
1477
1478static const struct memwin t4_memwin[] = {
1479 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1480 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1481 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
1482};
1483
1484static const struct memwin t5_memwin[] = {
1485 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1486 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1487 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1488};
1489
1490static void
1491setup_memwin(struct adapter *sc)
1492{
1493 const struct memwin *mw;
1494 int i, n;
1495 uint32_t bar0;
1496
1497 if (is_t4(sc)) {
1498 /*
1499 * Read low 32b of bar0 indirectly via the hardware backdoor
1500 * mechanism. Works from within PCI passthrough environments
1501 * too, where rman_get_start() can return a different value. We
1502 * need to program the T4 memory window decoders with the actual
1503 * addresses that will be coming across the PCIe link.
1504 */
1505 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1506 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1507
1508 mw = &t4_memwin[0];
1509 n = nitems(t4_memwin);
1510 } else {
1511 /* T5 uses the relative offset inside the PCIe BAR */
1512 bar0 = 0;
1513
1514 mw = &t5_memwin[0];
1515 n = nitems(t5_memwin);
1516 }
1517
1518 for (i = 0; i < n; i++, mw++) {
1519 t4_write_reg(sc,
1520 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
1521 (mw->base + bar0) | V_BIR(0) |
1522 V_WINDOW(ilog2(mw->aperture) - 10));
1523 }
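	/*
	 * Example (illustrative): a 64KB aperture is encoded as
	 * ilog2(64 * 1024) - 10 = 16 - 10 = 6 in the WINDOW field.
	 */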
1524
1525 /* flush */
1526 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1527}
1528
1529/*
1530 * Verify that the memory range specified by the addr/len pair is valid and lies
1531 * entirely within a single region (EDCx or MCx).
1532 */
1533static int
1534validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1535{
1536 uint32_t em, addr_len, maddr, mlen;
1537
1538 /* Memory can only be accessed in naturally aligned 4 byte units */
1539 if (addr & 3 || len & 3 || len == 0)
1540 return (EINVAL);
1541
1542 /* Enabled memories */
1543 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1544 if (em & F_EDRAM0_ENABLE) {
1545 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1546 maddr = G_EDRAM0_BASE(addr_len) << 20;
1547 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1548 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1549 addr + len <= maddr + mlen)
1550 return (0);
1551 }
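	/*
	 * Example (illustrative): BASE and SIZE are in MB units, hence the
	 * << 20; EDRAM0_BASE = 0, EDRAM0_SIZE = 256 would describe the
	 * range [0, 256MB).
	 */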
1552 if (em & F_EDRAM1_ENABLE) {
1553 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1554 maddr = G_EDRAM1_BASE(addr_len) << 20;
1555 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1556 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1557 addr + len <= maddr + mlen)
1558 return (0);
1559 }
1560 if (em & F_EXT_MEM_ENABLE) {
1561 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1562 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1563 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1564 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1565 addr + len <= maddr + mlen)
1566 return (0);
1567 }
1568 if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1569 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1570 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1571 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1572 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1573 addr + len <= maddr + mlen)
1574 return (0);
1575 }
1576
1577 return (EFAULT);
1578}
1579
1580/*
1581 * Verify that the memory range specified by the memtype/offset/len pair is
1582 * valid and lies entirely within the memtype specified. The global address of
1583 * the start of the range is returned in addr.
1584 */
1585static int
1586validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1587 uint32_t *addr)
1588{
1589 uint32_t em, addr_len, maddr, mlen;
1590
1591 /* Memory can only be accessed in naturally aligned 4 byte units */
1592 if (off & 3 || len & 3 || len == 0)
1593 return (EINVAL);
1594
1595 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1596 switch (mtype) {
1597 case MEM_EDC0:
1598 if (!(em & F_EDRAM0_ENABLE))
1599 return (EINVAL);
1600 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1601 maddr = G_EDRAM0_BASE(addr_len) << 20;
1602 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1603 break;
1604 case MEM_EDC1:
1605 if (!(em & F_EDRAM1_ENABLE))
1606 return (EINVAL);
1607 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1608 maddr = G_EDRAM1_BASE(addr_len) << 20;
1609 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1610 break;
1611 case MEM_MC:
1612 if (!(em & F_EXT_MEM_ENABLE))
1613 return (EINVAL);
1614 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1615 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1616 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1617 break;
1618 case MEM_MC1:
1619 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1620 return (EINVAL);
1621 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1622 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1623 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1624 break;
1625 default:
1626 return (EINVAL);
1627 }
1628
1629 if (mlen > 0 && off < mlen && off + len <= mlen) {
1630 *addr = maddr + off; /* global address */
1631 return (0);
1632 }
1633
1634 return (EFAULT);
1635}
1636
1637static void
1638memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1639{
1640 const struct memwin *mw;
1641
1642 if (is_t4(sc)) {
1643 KASSERT(win >= 0 && win < nitems(t4_memwin),
1644 ("%s: incorrect memwin# (%d)", __func__, win));
1645 mw = &t4_memwin[win];
1646 } else {
1647 KASSERT(win >= 0 && win < nitems(t5_memwin),
1648 ("%s: incorrect memwin# (%d)", __func__, win));
1649 mw = &t5_memwin[win];
1650 }
1651
1652 if (base != NULL)
1653 *base = mw->base;
1654 if (aperture != NULL)
1655 *aperture = mw->aperture;
1656}
1657
1658/*
1659 * Positions the memory window such that it can be used to access the specified
1660 * address in the chip's address space. The return value is the offset of addr
1661 * from the start of the window.
1662 */
1663static uint32_t
1664position_memwin(struct adapter *sc, int n, uint32_t addr)
1665{
1666 uint32_t start, pf;
1667 uint32_t reg;
1668
1669 KASSERT(n >= 0 && n <= 3,
1670 ("%s: invalid window %d.", __func__, n));
1671 KASSERT((addr & 3) == 0,
1672 ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1673
1674 if (is_t4(sc)) {
1675 pf = 0;
1676 start = addr & ~0xf; /* start must be 16B aligned */
1677 } else {
1678 pf = V_PFNUM(sc->pf);
1679 start = addr & ~0x7f; /* start must be 128B aligned */
1680 }
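	/*
	 * Example (illustrative): on T5, addr = 0x12344 gives
	 * start = 0x12344 & ~0x7f = 0x12300, and 0x44 is returned.
	 */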
1681 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1682
1683 t4_write_reg(sc, reg, start | pf);
1684 t4_read_reg(sc, reg);
1685
1686 return (addr - start);
1687}
1688
1689static int
1690cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1691 struct intrs_and_queues *iaq)
1692{
1693 int rc, itype, navail, nrxq10g, nrxq1g, n;
1694 int nofldrxq10g = 0, nofldrxq1g = 0;
1695
1696 bzero(iaq, sizeof(*iaq));
1697
1698 iaq->ntxq10g = t4_ntxq10g;
1699 iaq->ntxq1g = t4_ntxq1g;
1700 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1701 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1702#ifdef TCP_OFFLOAD
1703 if (is_offload(sc)) {
1704 iaq->nofldtxq10g = t4_nofldtxq10g;
1705 iaq->nofldtxq1g = t4_nofldtxq1g;
1706 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1707 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1708 }
1709#endif
1710
1711 for (itype = INTR_MSIX; itype; itype >>= 1) {
1712
1713 if ((itype & t4_intr_types) == 0)
1714 continue; /* not allowed */
1715
1716 if (itype == INTR_MSIX)
1717 navail = pci_msix_count(sc->dev);
1718 else if (itype == INTR_MSI)
1719 navail = pci_msi_count(sc->dev);
1720 else
1721 navail = 1;
1722restart:
1723 if (navail == 0)
1724 continue;
1725
1726 iaq->intr_type = itype;
1727 iaq->intr_flags = 0;
1728
1729 /*
1730 * Best option: an interrupt vector for errors, one for the
1731 * firmware event queue, and one each for each rxq (NIC as well
1732 * as offload).
1733 */
1734 iaq->nirq = T4_EXTRA_INTR;
1735 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1736 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1737 if (iaq->nirq <= navail &&
1738 (itype != INTR_MSI || powerof2(iaq->nirq))) {
1739 iaq->intr_flags |= INTR_DIRECT;
1740 goto allocate;
1741 }
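		/*
		 * Worked example (illustrative): two 10G ports with the
		 * default nrxq10g = 8 and nofldrxq10g = 2 ask for
		 * T4_EXTRA_INTR + 2 * (8 + 2) = T4_EXTRA_INTR + 20 vectors
		 * here.
		 */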
1742
1743 /*
1744 * Second best option: an interrupt vector for errors, one for
1745 * the firmware event queue, and one each for either NIC or
1746 * offload rxq's.
1747 */
1748 iaq->nirq = T4_EXTRA_INTR;
1749 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1750 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1751 if (iaq->nirq <= navail &&
1752 (itype != INTR_MSI || powerof2(iaq->nirq)))
1753 goto allocate;
1754
1755 /*
1756 * Next best option: an interrupt vector for errors, one for the
1757 * firmware event queue, and at least one per port. At this
1758 * point we know we'll have to downsize nrxq or nofldrxq to fit
1759 * what's available to us.
1760 */
1761 iaq->nirq = T4_EXTRA_INTR;
1762 iaq->nirq += n10g + n1g;
1763 if (iaq->nirq <= navail) {
1764 int leftover = navail - iaq->nirq;
1765
1766 if (n10g > 0) {
1767 int target = max(nrxq10g, nofldrxq10g);
1768
1769 n = 1;
1770 while (n < target && leftover >= n10g) {
1771 leftover -= n10g;
1772 iaq->nirq += n10g;
1773 n++;
1774 }
1775 iaq->nrxq10g = min(n, nrxq10g);
1776#ifdef TCP_OFFLOAD
1777 if (is_offload(sc))
1778 iaq->nofldrxq10g = min(n, nofldrxq10g);
1779#endif
1780 }
1781
1782 if (n1g > 0) {
1783 int target = max(nrxq1g, nofldrxq1g);
1784
1785 n = 1;
1786 while (n < target && leftover >= n1g) {
1787 leftover -= n1g;
1788 iaq->nirq += n1g;
1789 n++;
1790 }
1791 iaq->nrxq1g = min(n, nrxq1g);
1792#ifdef TCP_OFFLOAD
1793 if (is_offload(sc))
1794 iaq->nofldrxq1g = min(n, nofldrxq1g);
1795#endif
1796 }
1797
1798 if (itype != INTR_MSI || powerof2(iaq->nirq))
1799 goto allocate;
1800 }
1801
1802 /*
1803 * Least desirable option: one interrupt vector for everything.
1804 */
1805 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1806#ifdef TCP_OFFLOAD
1807 if (is_offload(sc))
1808 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1809#endif
1810
1811allocate:
1812 navail = iaq->nirq;
1813 rc = 0;
1814 if (itype == INTR_MSIX)
1815 rc = pci_alloc_msix(sc->dev, &navail);
1816 else if (itype == INTR_MSI)
1817 rc = pci_alloc_msi(sc->dev, &navail);
1818
1819 if (rc == 0) {
1820 if (navail == iaq->nirq)
1821 return (0);
1822
1823 /*
1824 * Didn't get the number requested. Use whatever number
1825 * the kernel is willing to allocate (it's in navail).
1826 */
1827 device_printf(sc->dev, "fewer vectors than requested, "
1828 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1829 itype, iaq->nirq, navail);
1830 pci_release_msi(sc->dev);
1831 goto restart;
1832 }
1833
1834 device_printf(sc->dev,
1835 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1836 itype, rc, iaq->nirq, navail);
1837 }
1838
1839 device_printf(sc->dev,
1840 "failed to find a usable interrupt type. "
1841 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
1842 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1843
1844 return (ENXIO);
1845}
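
/*
 * Editor's worked example (hypothetical adapter; assumes T4_EXTRA_INTR is 2,
 * i.e. the error + firmware event queue vectors mentioned above): two 10G
 * ports with nrxq10g = 8 and nofldrxq10g = 2 need 2 + 2 * (8 + 2) = 22
 * vectors for the best option, 2 + 2 * max(8, 2) = 18 for the second best,
 * and no less than 2 + 2 = 4 before nrxq/nofldrxq are downsized.  With
 * plain MSI an allocation is attempted only when the count is a power of 2.
 */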
1846
1847#define FW_VERSION(chip) ( \
1848 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
1849 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
1850 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
1851 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
1852#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
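
/*
 * Editor's standalone sketch (not driver code): FW_VERSION() above packs the
 * four version fields into one 32-bit word via the V_FW_HDR_FW_VER_* macros,
 * and G_FW_HDR_FW_VER_* (used in should_install_kld_fw() below) unpacks them.
 * The one-byte-per-field layout here, major in the most significant byte, is
 * an assumption for illustration.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define PACK_VER(maj, min, mic, bld) \
	(((uint32_t)(maj) << 24) | ((uint32_t)(min) << 16) | \
	((uint32_t)(mic) << 8) | (uint32_t)(bld))

int
main(void)
{
	uint32_t v = PACK_VER(1, 8, 4, 0);	/* arbitrary example version */

	/* Unpack the same fields; prints 1.8.4.0. */
	printf("%u.%u.%u.%u\n", v >> 24, (v >> 16) & 0xff,
	    (v >> 8) & 0xff, v & 0xff);
	return (0);
}
#endif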
1853
1854struct fw_info {
1855 uint8_t chip;
1856 char *kld_name;
1857 char *fw_mod_name;
1858 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */
1859} fw_info[] = {
1860 {
1861 .chip = CHELSIO_T4,
1862 .kld_name = "t4fw_cfg",
1863 .fw_mod_name = "t4fw",
1864 .fw_hdr = {
1865 .chip = FW_HDR_CHIP_T4,
1866 .fw_ver = htobe32_const(FW_VERSION(T4)),
1867 .intfver_nic = FW_INTFVER(T4, NIC),
1868 .intfver_vnic = FW_INTFVER(T4, VNIC),
1869 .intfver_ofld = FW_INTFVER(T4, OFLD),
1870 .intfver_ri = FW_INTFVER(T4, RI),
1871 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
1872 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
1873 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
1874 .intfver_fcoe = FW_INTFVER(T4, FCOE),
1875 },
1876 }, {
1877 .chip = CHELSIO_T5,
1878 .kld_name = "t5fw_cfg",
1879 .fw_mod_name = "t5fw",
1880 .fw_hdr = {
1881 .chip = FW_HDR_CHIP_T5,
1882 .fw_ver = htobe32_const(FW_VERSION(T5)),
1883 .intfver_nic = FW_INTFVER(T5, NIC),
1884 .intfver_vnic = FW_INTFVER(T5, VNIC),
1885 .intfver_ofld = FW_INTFVER(T5, OFLD),
1886 .intfver_ri = FW_INTFVER(T5, RI),
1887 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
1888 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
1889 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
1890 .intfver_fcoe = FW_INTFVER(T5, FCOE),
1891 },
1892 }
1893};
1894
1895static struct fw_info *
1896find_fw_info(int chip)
1897{
1898 int i;
1899
1900 for (i = 0; i < nitems(fw_info); i++) {
1901 if (fw_info[i].chip == chip)
1902 return (&fw_info[i]);
1903 }
1904 return (NULL);
1905}
1906
1907/*
1908 * Is the given firmware API compatible with the one the driver was compiled
1909 * with?
1910 */
1911static int
1912fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1913{
1914
1915 /* short circuit if it's the exact same firmware version */
1916 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1917 return (1);
1918
1919 /*
1920 * XXX: Is this too conservative? Perhaps I should limit this to the
1921 * features that are supported in the driver.
1922 */
1923#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1924 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1925 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1926 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1927 return (1);
1928#undef SAME_INTF
1929
1930 return (0);
1931}
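
/*
 * Editor's note, a caller sketch: both headers are compared field by field
 * in their on-flash (big-endian) form, and equality tests are byte-order
 * agnostic, so no conversion is needed first, e.g.
 *
 *	if (fw_compatible(&fw_info->fw_hdr, (const struct fw_hdr *)fw->data))
 *		... this driver can drive that firmware ...
 */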
1932
1933/*
1934 * The firmware in the KLD is usable, but should it be installed? This routine
1935 * explains itself in detail if it indicates the KLD firmware should be
1936 * installed.
1937 */
1938static int
1939should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1940{
1941 const char *reason;
1942
1943 if (!card_fw_usable) {
1944 reason = "incompatible or unusable";
1945 goto install;
1946 }
1947
1948 if (k > c) {
1949 reason = "older than the version bundled with this driver";
1950 goto install;
1951 }
1952
1953 if (t4_fw_install == 2 && k != c) {
1954 reason = "different than the version bundled with this driver";
1955 goto install;
1956 }
1957
1958 return (0);
1959
1960install:
1961 if (t4_fw_install == 0) {
1962 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1963 "but the driver is prohibited from installing a different "
1964 "firmware on the card.\n",
1965 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1966 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1967
1968 return (0);
1969 }
1970
1971 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1972 "installing firmware %u.%u.%u.%u on card.\n",
1973 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1974 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1975 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1976 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1977
1978 return (1);
1979}
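
/*
 * Editor's summary of the decision above (k = KLD firmware version, c = card
 * firmware version, both already converted to host order by the caller):
 *
 *	card fw unusable          -> install (unless t4_fw_install == 0)
 *	k > c                     -> install (card fw is older than bundled)
 *	k != c && fw_install == 2 -> install (force the bundled version)
 *	anything else             -> keep the firmware already on the card
 */
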
1980/*
1981 * Establish contact with the firmware and determine whether we are the
1982 * master driver, and whether we are responsible for chip initialization.
1983 */
1984static int
1985prep_firmware(struct adapter *sc)
1986{
1987 const struct firmware *fw = NULL, *default_cfg;
1988 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
1989 enum dev_state state;
1990 struct fw_info *fw_info;
1991 struct fw_hdr *card_fw; /* fw on the card */
1992 const struct fw_hdr *kld_fw; /* fw in the KLD */
1993 const struct fw_hdr *drv_fw; /* fw header the driver was compiled
1994 against */
1995
1996 /* Contact firmware. */
1997 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1998 if (rc < 0 || state == DEV_STATE_ERR) {
1999 rc = -rc;
2000 device_printf(sc->dev,
2001 "failed to connect to the firmware: %d, %d.\n", rc, state);
2002 return (rc);
2003 }
2004 pf = rc;
2005 if (pf == sc->mbox)
2006 sc->flags |= MASTER_PF;
2007 else if (state == DEV_STATE_UNINIT) {
2008 /*
2009 * We didn't get to be the master so we definitely won't be
2010 * configuring the chip. It's a bug if someone else hasn't
2011 * configured it already.
2012 */
2013 device_printf(sc->dev, "couldn't be master(%d), "
2014 "device not already initialized either(%d).\n", rc, state);
2015 return (EDOOFUS);
2016 }
2017
2018 /* This is the firmware whose headers the driver was compiled against */
2019 fw_info = find_fw_info(chip_id(sc));
2020 if (fw_info == NULL) {
2021 device_printf(sc->dev,
2022 "unable to look up firmware information for chip %d.\n",
2023 chip_id(sc));
2024 return (EINVAL);
2025 }
2026 drv_fw = &fw_info->fw_hdr;
2027
2028 /*
2029 * The firmware KLD contains many modules. The KLD name is also the
2030 * name of the module that contains the default config file.
2031 */
2032 default_cfg = firmware_get(fw_info->kld_name);
2033
2034 /* Read the header of the firmware on the card */
2035 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
2036 rc = -t4_read_flash(sc, FLASH_FW_START,
2037 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2038 if (rc == 0)
2039 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2040 else {
2041 device_printf(sc->dev,
2042 "Unable to read card's firmware header: %d\n", rc);
2043 card_fw_usable = 0;
2044 }
2045
2046 /* This is the firmware in the KLD */
2047 fw = firmware_get(fw_info->fw_mod_name);
2048 if (fw != NULL) {
2049 kld_fw = (const void *)fw->data;
2050 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2051 } else {
2052 kld_fw = NULL;
2053 kld_fw_usable = 0;
2054 }
2055
2056 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2057 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2058 /*
2059 * Common case: the firmware on the card is an exact match and
2060 * the KLD is an exact match too, or the KLD is
2061 * absent/incompatible. Note that t4_fw_install = 2 is ignored
2062 * here -- use cxgbetool loadfw if you want to reinstall the
2063 * same firmware as the one on the card.
2064 */
2065 } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2066 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2067 be32toh(card_fw->fw_ver))) {
2068
2069 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2070 if (rc != 0) {
2071 device_printf(sc->dev,
2072 "failed to install firmware: %d\n", rc);
2073 goto done;
2074 }
2075
2076 /* Installed successfully, update the cached header too. */
2077 memcpy(card_fw, kld_fw, sizeof(*card_fw));
2078 card_fw_usable = 1;
2079 need_fw_reset = 0; /* already reset as part of load_fw */
2080 }
2081
2082 if (!card_fw_usable) {
2083 uint32_t d, c, k;
2084
2085 d = ntohl(drv_fw->fw_ver);
2086 c = ntohl(card_fw->fw_ver);
2087 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2088
2089 device_printf(sc->dev, "Cannot find a usable firmware: "
2090 "fw_install %d, chip state %d, "
2091 "driver compiled with %d.%d.%d.%d, "
2092 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2093 t4_fw_install, state,
2094 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2095 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2096 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2097 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2098 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2099 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2100 rc = EINVAL;
2101 goto done;
2102 }
2103
2104 /* We're using whatever's on the card and it's known to be good. */
2105 sc->params.fw_vers = ntohl(card_fw->fw_ver);
2106 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2107 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2108 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2109 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2110 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2111 t4_get_tp_version(sc, &sc->params.tp_vers);
2112
2113 /* Reset device */
2114 if (need_fw_reset &&
2115 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2116 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2117 if (rc != ETIMEDOUT && rc != EIO)
2118 t4_fw_bye(sc, sc->mbox);
2119 goto done;
2120 }
2121 sc->flags |= FW_OK;
2122
2123 rc = get_params__pre_init(sc);
2124 if (rc != 0)
2125 goto done; /* error message displayed already */
2126
2127 /* Partition adapter resources as specified in the config file. */
2128 if (state == DEV_STATE_UNINIT) {
2129
2130 KASSERT(sc->flags & MASTER_PF,
2131 ("%s: trying to change chip settings when not master.",
2132 __func__));
2133
2134 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2135 if (rc != 0)
2136 goto done; /* error message displayed already */
2137
2138 t4_tweak_chip_settings(sc);
2139
2140 /* get basic stuff going */
2141 rc = -t4_fw_initialize(sc, sc->mbox);
2142 if (rc != 0) {
2143 device_printf(sc->dev, "fw init failed: %d.\n", rc);
2144 goto done;
2145 }
2146 } else {
2147 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2148 sc->cfcsum = 0;
2149 }
2150
2151done:
2152 free(card_fw, M_CXGBE);
2153 if (fw != NULL)
2154 firmware_put(fw, FIRMWARE_UNLOAD);
2155 if (default_cfg != NULL)
2156 firmware_put(default_cfg, FIRMWARE_UNLOAD);
2157
2158 return (rc);
2159}
2160
2161#define FW_PARAM_DEV(param) \
2162 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2163 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2164#define FW_PARAM_PFVF(param) \
2165 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2166 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
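
/*
 * Editor's sketch of a typical query built with these macros; the firmware
 * returns one 32-bit value per parameter through parallel param/val arrays,
 * exactly as get_params__pre_init() does below:
 *
 *	uint32_t param, val;
 *
 *	param = FW_PARAM_DEV(PORTVEC);
 *	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
 *	if (rc == 0)
 *		... val is the bitmap of ports assigned to this function ...
 */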
2167
2168/*
2169 * Partition chip resources for use between various PFs, VFs, etc.
2170 */
2171static int
2172partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2173 const char *name_prefix)
2174{
2175 const struct firmware *cfg = NULL;
2176 int rc = 0;
2177 struct fw_caps_config_cmd caps;
2178 uint32_t mtype, moff, finicsum, cfcsum;
2179
2180 /*
2181 * Figure out what configuration file to use. Pick the default config
2182 * file for the card if the user hasn't specified one explicitly.
2183 */
2184 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2185 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2186 /* Card specific overrides go here. */
2187 if (pci_get_device(sc->dev) == 0x440a)
2188 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2189 if (is_fpga(sc))
2190 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2191 }
2192
2193 /*
2194 * We need to load another module if the profile is anything except
2195 * "default" or "flash".
2196 */
2197 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2198 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2199 char s[32];
2200
2201 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2202 cfg = firmware_get(s);
2203 if (cfg == NULL) {
2204 if (default_cfg != NULL) {
2205 device_printf(sc->dev,
2206 "unable to load module \"%s\" for "
2207 "configuration profile \"%s\", will use "
2208 "the default config file instead.\n",
2209 s, sc->cfg_file);
2210 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2211 "%s", DEFAULT_CF);
2212 } else {
2213 device_printf(sc->dev,
2214 "unable to load module \"%s\" for "
2215 "configuration profile \"%s\", will use "
2216 "the config file on the card's flash "
2217 "instead.\n", s, sc->cfg_file);
2218 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2219 "%s", FLASH_CF);
2220 }
2221 }
2222 }
2223
2224 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2225 default_cfg == NULL) {
2226 device_printf(sc->dev,
2227 "default config file not available, will use the config "
2228 "file on the card's flash instead.\n");
2229 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2230 }
2231
2232 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2233 u_int cflen, i, n;
2234 const uint32_t *cfdata;
2235 uint32_t param, val, addr, off, mw_base, mw_aperture;
2236
2237 KASSERT(cfg != NULL || default_cfg != NULL,
2238 ("%s: no config to upload", __func__));
2239
2240 /*
2241 * Ask the firmware where it wants us to upload the config file.
2242 */
2243 param = FW_PARAM_DEV(CF);
2244 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2245 if (rc != 0) {
2246 /* No support for config file? Shouldn't happen. */
2247 device_printf(sc->dev,
2248 "failed to query config file location: %d.\n", rc);
2249 goto done;
2250 }
2251 mtype = G_FW_PARAMS_PARAM_Y(val);
2252 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2253
2254 /*
2255 * XXX: sheer laziness. We deliberately added 4 bytes of
2256 * useless stuffing/comments at the end of the config file so
2257 * it's ok to simply throw away the last remaining bytes when
2258 * the config file is not an exact multiple of 4. This also
2259 * helps with the validate_mt_off_len check.
2260 */
2261 if (cfg != NULL) {
2262 cflen = cfg->datasize & ~3;
2263 cfdata = cfg->data;
2264 } else {
2265 cflen = default_cfg->datasize & ~3;
2266 cfdata = default_cfg->data;
2267 }
2268
2269 if (cflen > FLASH_CFG_MAX_SIZE) {
2270 device_printf(sc->dev,
2271 "config file too long (%d, max allowed is %d). "
2272 "Will try to use the config on the card, if any.\n",
2273 cflen, FLASH_CFG_MAX_SIZE);
2274 goto use_config_on_flash;
2275 }
2276
2277 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2278 if (rc != 0) {
2279 device_printf(sc->dev,
2280 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
2281 "Will try to use the config on the card, if any.\n",
2282 __func__, mtype, moff, cflen, rc);
2283 goto use_config_on_flash;
2284 }
2285
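/*
 * Editor's note: the loop below copies the config file into card memory
 * through memory window 2 -- position the window over addr, write whatever
 * fits in the remaining aperture 32 bits at a time, then slide the window
 * forward and repeat until cflen is exhausted.
 */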
2286 memwin_info(sc, 2, &mw_base, &mw_aperture);
2287 while (cflen) {
2288 off = position_memwin(sc, 2, addr);
2289 n = min(cflen, mw_aperture - off);
2290 for (i = 0; i < n; i += 4)
2291 t4_write_reg(sc, mw_base + off + i, *cfdata++);
2292 cflen -= n;
2293 addr += n;
2294 }
2295 } else {
2296use_config_on_flash:
2297 mtype = FW_MEMTYPE_CF_FLASH;
2298 moff = t4_flash_cfg_addr(sc);
2299 }
2300
2301 bzero(&caps, sizeof(caps));
2302 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2303 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2304 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2305 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2306 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2307 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2308 if (rc != 0) {
2309 device_printf(sc->dev,
2310 "failed to pre-process config file: %d "
2311 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2312 goto done;
2313 }
2314
2315 finicsum = be32toh(caps.finicsum);
2316 cfcsum = be32toh(caps.cfcsum);
2317 if (finicsum != cfcsum) {
2318 device_printf(sc->dev,
2319 "WARNING: config file checksum mismatch: %08x %08x\n",
2320 finicsum, cfcsum);
2321 }
2322 sc->cfcsum = cfcsum;
2323
2324#define LIMIT_CAPS(x) do { \
2325 caps.x &= htobe16(t4_##x##_allowed); \
2326 sc->x = be16toh(caps.x); \
2327} while (0)
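
/*
 * Editor's note: caps.x arrives big-endian from the firmware, so the allowed
 * mask is converted with htobe16() before the AND, and the surviving bits are
 * converted back with be16toh() so the softc caches them in host order.
 */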
2328
2329 /*
2330 * Let the firmware know what features will (not) be used so it can tune
2331 * things accordingly.
2332 */
2333 LIMIT_CAPS(linkcaps);
2334 LIMIT_CAPS(niccaps);
2335 LIMIT_CAPS(toecaps);
2336 LIMIT_CAPS(rdmacaps);
2337 LIMIT_CAPS(iscsicaps);
2338 LIMIT_CAPS(fcoecaps);
2339#undef LIMIT_CAPS
2340
2341 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2342 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2343 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2344 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2345 if (rc != 0) {
2346 device_printf(sc->dev,
2347 "failed to process config file: %d.\n", rc);
2348 }
2349done:
2350 if (cfg != NULL)
2351 firmware_put(cfg, FIRMWARE_UNLOAD);
2352 return (rc);
2353}
2354
2355/*
2356 * Retrieve parameters that are needed (or nice to have) very early.
2357 */
2358static int
2359get_params__pre_init(struct adapter *sc)
2360{
2361 int rc;
2362 uint32_t param[2], val[2];
2363 struct fw_devlog_cmd cmd;
2364 struct devlog_params *dlog = &sc->params.devlog;
2365
2366 param[0] = FW_PARAM_DEV(PORTVEC);
2367 param[1] = FW_PARAM_DEV(CCLK);
2368 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2369 if (rc != 0) {
2370 device_printf(sc->dev,
2371 "failed to query parameters (pre_init): %d.\n", rc);
2372 return (rc);
2373 }
2374
2375 sc->params.portvec = val[0];
2376 sc->params.nports = bitcount32(val[0]);
2377 sc->params.vpd.cclk = val[1];
2378
2379 /* Read device log parameters. */
2380 bzero(&cmd, sizeof(cmd));
2381 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2382 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2383 cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2384 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2385 if (rc != 0) {
2386 device_printf(sc->dev,
2387 "failed to get devlog parameters: %d.\n", rc);
2388 bzero(dlog, sizeof (*dlog));
2389 rc = 0; /* devlog isn't critical for device operation */
2390 } else {
2391 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2392 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
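/* MEMADDR16 is in 16-byte units; shift to get a byte address. */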
2393 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2394 dlog->size = be32toh(cmd.memsize_devlog);
2395 }
2396
2397 return (rc);
2398}
2399
2400/*
2401 * Retrieve various parameters that are of interest to the driver. The device
2402 * has been initialized by the firmware at this point.
2403 */
2404static int
2405get_params__post_init(struct adapter *sc)
2406{
2407 int rc;
2408 uint32_t param[7], val[7];
2409 struct fw_caps_config_cmd caps;
2410
2411 param[0] = FW_PARAM_PFVF(IQFLINT_START);
2412 param[1] = FW_PARAM_PFVF(EQ_START);
2413 param[2] = FW_PARAM_PFVF(FILTER_START);
2414 param[3] = FW_PARAM_PFVF(FILTER_END);
2415 param[4] = FW_PARAM_PFVF(L2T_START);
2416 param[5] = FW_PARAM_PFVF(L2T_END);
2417 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2418 if (rc != 0) {
2419 device_printf(sc->dev,
2420 "failed to query parameters (post_init): %d.\n", rc);
2421 return (rc);
2422 }
2423
2424 sc->sge.iq_start = val[0];
2425 sc->sge.eq_start = val[1];
2426 sc->tids.ftid_base = val[2];
2427 sc->tids.nftids = val[3] - val[2] + 1;
2428 sc->vres.l2t.start = val[4];
2429 sc->vres.l2t.size = val[5] - val[4] + 1;
2430 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2431 ("%s: L2 table size (%u) larger than expected (%u)",
2432 __func__, sc->vres.l2t.size, L2T_SIZE));
2433
2434 /* get capabilities */
2435 bzero(&caps, sizeof(caps));
2436 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2437 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2438 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2439 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2440 if (rc != 0) {
2441 device_printf(sc->dev,
2442 "failed to get card capabilities: %d.\n", rc);
2443 return (rc);
2444 }
2445
2446 if (caps.toecaps) {
2447 /* query offload-related parameters */
2448 param[0] = FW_PARAM_DEV(NTID);
2449 param[1] = FW_PARAM_PFVF(SERVER_START);
2450 param[2] = FW_PARAM_PFVF(SERVER_END);
2451 param[3] = FW_PARAM_PFVF(TDDP_START);
2452 param[4] = FW_PARAM_PFVF(TDDP_END);
2453 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2454 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2455 if (rc != 0) {
2456 device_printf(sc->dev,
2457 "failed to query TOE parameters: %d.\n", rc);
2458 return (rc);
2459 }
2460 sc->tids.ntids = val[0];
2461 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2462 sc->tids.stid_base = val[1];
2463 sc->tids.nstids = val[2] - val[1] + 1;
2464 sc->vres.ddp.start = val[3];
2465 sc->vres.ddp.size = val[4] - val[3] + 1;
2466 sc->params.ofldq_wr_cred = val[5];
2467 sc->params.offload = 1;
2468 }
2469 if (caps.rdmacaps) {
2470 param[0] = FW_PARAM_PFVF(STAG_START);
2471 param[1] = FW_PARAM_PFVF(STAG_END);
2472 param[2] = FW_PARAM_PFVF(RQ_START);
2473 param[3] = FW_PARAM_PFVF(RQ_END);
2474 param[4] = FW_PARAM_PFVF(PBL_START);
2475 param[5] = FW_PARAM_PFVF(PBL_END);
2476 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2477 if (rc != 0) {
2478 device_printf(sc->dev,
2479 "failed to query RDMA parameters(1): %d.\n", rc);
2480 return (rc);
2481 }
2482 sc->vres.stag.start = val[0];
2483 sc->vres.stag.size = val[1] - val[0] + 1;
2484 sc->vres.rq.start = val[2];
2485 sc->vres.rq.size = val[3] - val[2] + 1;
2486 sc->vres.pbl.start = val[4];
2487 sc->vres.pbl.size = val[5] - val[4] + 1;
2488
2489 param[0] = FW_PARAM_PFVF(SQRQ_START);
2490 param[1] = FW_PARAM_PFVF(SQRQ_END);
2491 param[2] = FW_PARAM_PFVF(CQ_START);
2492 param[3] = FW_PARAM_PFVF(CQ_END);
2493 param[4] = FW_PARAM_PFVF(OCQ_START);
2494 param[5] = FW_PARAM_PFVF(OCQ_END);
2495 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2496 if (rc != 0) {
2497 device_printf(sc->dev,
2498 "failed to query RDMA parameters(2): %d.\n", rc);
2499 return (rc);
2500 }
2501 sc->vres.qp.start = val[0];
2502 sc->vres.qp.size = val[1] - val[0] + 1;
2503 sc->vres.cq.start = val[2];
2504 sc->vres.cq.size = val[3] - val[2] + 1;
2505 sc->vres.ocq.start = val[4];
2506 sc->vres.ocq.size = val[5] - val[4] + 1;
2507 }
2508 if (caps.iscsicaps) {
2509 param[0] = FW_PARAM_PFVF(ISCSI_START);
2510 param[1] = FW_PARAM_PFVF(ISCSI_END);
2511 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2512 if (rc != 0) {
2513 device_printf(sc->dev,
2514 "failed to query iSCSI parameters: %d.\n", rc);
2515 return (rc);
2516 }
2517 sc->vres.iscsi.start = val[0];
2518 sc->vres.iscsi.size = val[1] - val[0] + 1;
2519 }
2520
2521 /*
2522 * We've got the params we wanted to query via the firmware. Now grab
2523 * some others directly from the chip.
2524 */
2525 rc = t4_read_chip_settings(sc);
2526
2527 return (rc);
2528}
2529
2530static int
2531set_params__post_init(struct adapter *sc)
2532{
2533 uint32_t param, val;
2534
2535 /* ask for encapsulated CPLs */
2536 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2537 val = 1;
2538 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2539
2540 return (0);
2541}
2542
2543#undef FW_PARAM_PFVF
2544#undef FW_PARAM_DEV
2545
2546static void
2547t4_set_desc(struct adapter *sc)
2548{
2549 char buf[128];
2550 struct adapter_params *p = &sc->params;
2551
46#include <dev/pci/pci_private.h>
47#include <sys/firmware.h>
48#include <sys/sbuf.h>
49#include <sys/smp.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/sysctl.h>
53#include <net/ethernet.h>
54#include <net/if.h>
55#include <net/if_types.h>
56#include <net/if_dl.h>
57#include <net/if_vlan_var.h>
58#if defined(__i386__) || defined(__amd64__)
59#include <vm/vm.h>
60#include <vm/pmap.h>
61#endif
62
63#include "common/common.h"
64#include "common/t4_msg.h"
65#include "common/t4_regs.h"
66#include "common/t4_regs_values.h"
67#include "t4_ioctl.h"
68#include "t4_l2t.h"
69
70/* T4 bus driver interface */
71static int t4_probe(device_t);
72static int t4_attach(device_t);
73static int t4_detach(device_t);
74static device_method_t t4_methods[] = {
75 DEVMETHOD(device_probe, t4_probe),
76 DEVMETHOD(device_attach, t4_attach),
77 DEVMETHOD(device_detach, t4_detach),
78
79 DEVMETHOD_END
80};
81static driver_t t4_driver = {
82 "t4nex",
83 t4_methods,
84 sizeof(struct adapter)
85};
86
87
88/* T4 port (cxgbe) interface */
89static int cxgbe_probe(device_t);
90static int cxgbe_attach(device_t);
91static int cxgbe_detach(device_t);
92static device_method_t cxgbe_methods[] = {
93 DEVMETHOD(device_probe, cxgbe_probe),
94 DEVMETHOD(device_attach, cxgbe_attach),
95 DEVMETHOD(device_detach, cxgbe_detach),
96 DEVMETHOD_END
97};
98static driver_t cxgbe_driver = {
99 "cxgbe",
100 cxgbe_methods,
101 sizeof(struct port_info)
102};
103
104static d_ioctl_t t4_ioctl;
105static d_open_t t4_open;
106static d_close_t t4_close;
107
108static struct cdevsw t4_cdevsw = {
109 .d_version = D_VERSION,
110 .d_flags = 0,
111 .d_open = t4_open,
112 .d_close = t4_close,
113 .d_ioctl = t4_ioctl,
114 .d_name = "t4nex",
115};
116
117/* T5 bus driver interface */
118static int t5_probe(device_t);
119static device_method_t t5_methods[] = {
120 DEVMETHOD(device_probe, t5_probe),
121 DEVMETHOD(device_attach, t4_attach),
122 DEVMETHOD(device_detach, t4_detach),
123
124 DEVMETHOD_END
125};
126static driver_t t5_driver = {
127 "t5nex",
128 t5_methods,
129 sizeof(struct adapter)
130};
131
132
133/* T5 port (cxl) interface */
134static driver_t cxl_driver = {
135 "cxl",
136 cxgbe_methods,
137 sizeof(struct port_info)
138};
139
140static struct cdevsw t5_cdevsw = {
141 .d_version = D_VERSION,
142 .d_flags = 0,
143 .d_open = t4_open,
144 .d_close = t4_close,
145 .d_ioctl = t4_ioctl,
146 .d_name = "t5nex",
147};
148
149/* ifnet + media interface */
150static void cxgbe_init(void *);
151static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
152static int cxgbe_transmit(struct ifnet *, struct mbuf *);
153static void cxgbe_qflush(struct ifnet *);
154static int cxgbe_media_change(struct ifnet *);
155static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
156
157MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
158
159/*
160 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
161 * then ADAPTER_LOCK, then t4_uld_list_lock.
162 */
163static struct mtx t4_list_lock;
164static SLIST_HEAD(, adapter) t4_list;
165#ifdef TCP_OFFLOAD
166static struct mtx t4_uld_list_lock;
167static SLIST_HEAD(, uld_info) t4_uld_list;
168#endif
169
170/*
171 * Tunables. See tweak_tunables() too.
172 *
173 * Each tunable is set to a default value here if it's known at compile-time.
174 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
175 * provide a reasonable default when the driver is loaded.
176 *
177 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to
178 * T5 are under hw.cxl.
179 */
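
/*
 * Editor's example (hypothetical values): tunables are set in
 * /boot/loader.conf before the module is loaded, e.g.
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.nrxq10g="8"
 *	hw.cxgbe.config_file="uwire"
 *	hw.cxl.write_combine="1"
 */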
180
181/*
182 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
183 */
184#define NTXQ_10G 16
185static int t4_ntxq10g = -1;
186TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
187
188#define NRXQ_10G 8
189static int t4_nrxq10g = -1;
190TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
191
192#define NTXQ_1G 4
193static int t4_ntxq1g = -1;
194TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
195
196#define NRXQ_1G 2
197static int t4_nrxq1g = -1;
198TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
199
200#ifdef TCP_OFFLOAD
201#define NOFLDTXQ_10G 8
202static int t4_nofldtxq10g = -1;
203TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
204
205#define NOFLDRXQ_10G 2
206static int t4_nofldrxq10g = -1;
207TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
208
209#define NOFLDTXQ_1G 2
210static int t4_nofldtxq1g = -1;
211TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
212
213#define NOFLDRXQ_1G 1
214static int t4_nofldrxq1g = -1;
215TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
216#endif
217
218/*
219 * Holdoff parameters for 10G and 1G ports.
220 */
221#define TMR_IDX_10G 1
222static int t4_tmr_idx_10g = TMR_IDX_10G;
223TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
224
225#define PKTC_IDX_10G (-1)
226static int t4_pktc_idx_10g = PKTC_IDX_10G;
227TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
228
229#define TMR_IDX_1G 1
230static int t4_tmr_idx_1g = TMR_IDX_1G;
231TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
232
233#define PKTC_IDX_1G (-1)
234static int t4_pktc_idx_1g = PKTC_IDX_1G;
235TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
236
237/*
238 * Size (# of entries) of each tx and rx queue.
239 */
240static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
241TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
242
243static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
244TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
245
246/*
247 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
248 */
249static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
250TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
251
252/*
253 * Configuration file.
254 */
255#define DEFAULT_CF "default"
256#define FLASH_CF "flash"
257#define UWIRE_CF "uwire"
258#define FPGA_CF "fpga"
259static char t4_cfg_file[32] = DEFAULT_CF;
260TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
261
262/*
263 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
264 * encouraged respectively).
265 */
266static unsigned int t4_fw_install = 1;
267TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);
268
269/*
270 * ASIC features that will be used. Disable the ones you don't want so that the
271 * chip resources aren't wasted on features that will not be used.
272 */
273static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */
274TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
275
276static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
277TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
278
279static int t4_toecaps_allowed = -1;
280TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
281
282static int t4_rdmacaps_allowed = 0;
283TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
284
285static int t4_iscsicaps_allowed = 0;
286TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
287
288static int t4_fcoecaps_allowed = 0;
289TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
290
291static int t5_write_combine = 0;
292TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
293
294struct intrs_and_queues {
295 int intr_type; /* INTx, MSI, or MSI-X */
296 int nirq; /* Number of vectors */
297 int intr_flags;
298 int ntxq10g; /* # of NIC txq's for each 10G port */
299 int nrxq10g; /* # of NIC rxq's for each 10G port */
300 int ntxq1g; /* # of NIC txq's for each 1G port */
301 int nrxq1g; /* # of NIC rxq's for each 1G port */
302#ifdef TCP_OFFLOAD
303 int nofldtxq10g; /* # of TOE txq's for each 10G port */
304 int nofldrxq10g; /* # of TOE rxq's for each 10G port */
305 int nofldtxq1g; /* # of TOE txq's for each 1G port */
306 int nofldrxq1g; /* # of TOE rxq's for each 1G port */
307#endif
308};
309
310struct filter_entry {
311 uint32_t valid:1; /* filter allocated and valid */
312 uint32_t locked:1; /* filter is administratively locked */
313 uint32_t pending:1; /* filter action is pending firmware reply */
314 uint32_t smtidx:8; /* Source MAC Table index for smac */
315 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
316
317 struct t4_filter_specification fs;
318};
319
320enum {
321 XGMAC_MTU = (1 << 0),
322 XGMAC_PROMISC = (1 << 1),
323 XGMAC_ALLMULTI = (1 << 2),
324 XGMAC_VLANEX = (1 << 3),
325 XGMAC_UCADDR = (1 << 4),
326 XGMAC_MCADDRS = (1 << 5),
327
328 XGMAC_ALL = 0xffff
329};
330
331static int map_bars_0_and_4(struct adapter *);
332static int map_bar_2(struct adapter *);
333static void setup_memwin(struct adapter *);
334static int validate_mem_range(struct adapter *, uint32_t, int);
335static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
336 uint32_t *);
337static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
338static uint32_t position_memwin(struct adapter *, int, uint32_t);
339static int cfg_itype_and_nqueues(struct adapter *, int, int,
340 struct intrs_and_queues *);
341static int prep_firmware(struct adapter *);
342static int partition_resources(struct adapter *, const struct firmware *,
343 const char *);
344static int get_params__pre_init(struct adapter *);
345static int get_params__post_init(struct adapter *);
346static int set_params__post_init(struct adapter *);
347static void t4_set_desc(struct adapter *);
348static void build_medialist(struct port_info *);
349static int update_mac_settings(struct port_info *, int);
350static int cxgbe_init_synchronized(struct port_info *);
351static int cxgbe_uninit_synchronized(struct port_info *);
352static int setup_intr_handlers(struct adapter *);
353static int adapter_full_init(struct adapter *);
354static int adapter_full_uninit(struct adapter *);
355static int port_full_init(struct port_info *);
356static int port_full_uninit(struct port_info *);
357static void quiesce_eq(struct adapter *, struct sge_eq *);
358static void quiesce_iq(struct adapter *, struct sge_iq *);
359static void quiesce_fl(struct adapter *, struct sge_fl *);
360static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
361 driver_intr_t *, void *, char *);
362static int t4_free_irq(struct adapter *, struct irq *);
363static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
364 unsigned int);
365static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
366static void cxgbe_tick(void *);
367static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
368static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
369 struct mbuf *);
370static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
371static int fw_msg_not_handled(struct adapter *, const __be64 *);
372static int t4_sysctls(struct adapter *);
373static int cxgbe_sysctls(struct port_info *);
374static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
375static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
376static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
377static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
378static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
379static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
380static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
381static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
382static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
383#ifdef SBUF_DRAIN
384static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
385static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
386static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
387static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
388static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
389static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
390static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
391static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
392static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
393static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
394static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
395static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
396static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
397static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
398static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
399static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
400static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
401static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
402static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
403static int sysctl_tids(SYSCTL_HANDLER_ARGS);
404static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
405static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
406static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
407static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
408static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
409#endif
410static inline void txq_start(struct ifnet *, struct sge_txq *);
411static uint32_t fconf_to_mode(uint32_t);
412static uint32_t mode_to_fconf(uint32_t);
413static uint32_t fspec_to_fconf(struct t4_filter_specification *);
414static int get_filter_mode(struct adapter *, uint32_t *);
415static int set_filter_mode(struct adapter *, uint32_t);
416static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
417static int get_filter(struct adapter *, struct t4_filter *);
418static int set_filter(struct adapter *, struct t4_filter *);
419static int del_filter(struct adapter *, struct t4_filter *);
420static void clear_filter(struct filter_entry *);
421static int set_filter_wr(struct adapter *, int);
422static int del_filter_wr(struct adapter *, int);
423static int get_sge_context(struct adapter *, struct t4_sge_context *);
424static int load_fw(struct adapter *, struct t4_data *);
425static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
426static int read_i2c(struct adapter *, struct t4_i2c_data *);
427#ifdef TCP_OFFLOAD
428static int toe_capability(struct port_info *, int);
429#endif
430static int mod_event(module_t, int, void *);
431
432struct {
433 uint16_t device;
434 char *desc;
435} t4_pciids[] = {
436 {0xa000, "Chelsio Terminator 4 FPGA"},
437 {0x4400, "Chelsio T440-dbg"},
438 {0x4401, "Chelsio T420-CR"},
439 {0x4402, "Chelsio T422-CR"},
440 {0x4403, "Chelsio T440-CR"},
441 {0x4404, "Chelsio T420-BCH"},
442 {0x4405, "Chelsio T440-BCH"},
443 {0x4406, "Chelsio T440-CH"},
444 {0x4407, "Chelsio T420-SO"},
445 {0x4408, "Chelsio T420-CX"},
446 {0x4409, "Chelsio T420-BT"},
447 {0x440a, "Chelsio T404-BT"},
448 {0x440e, "Chelsio T440-LP-CR"},
449}, t5_pciids[] = {
450 {0xb000, "Chelsio Terminator 5 FPGA"},
451 {0x5400, "Chelsio T580-dbg"},
452 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */
453 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */
454 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */
455 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */
456 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */
457 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */
458 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */
459 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */
460 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */
461 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */
462 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */
463 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */
464#ifdef notyet
465 {0x5404, "Chelsio T520-BCH"},
466 {0x5405, "Chelsio T540-BCH"},
467 {0x5406, "Chelsio T540-CH"},
468 {0x5408, "Chelsio T520-CX"},
469 {0x540b, "Chelsio B520-SR"},
470 {0x540c, "Chelsio B504-BT"},
471 {0x540f, "Chelsio Amsterdam"},
472 {0x5413, "Chelsio T580-CHR"},
473#endif
474};
475
476#ifdef TCP_OFFLOAD
477/*
478 * service_iq() has an iq and needs the fl. Offset of fl from the iq should be
479 * exactly the same for both rxq and ofld_rxq.
480 */
481CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
482CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
483#endif
484
485/* No easy way to include t4_msg.h before adapter.h so we check this way */
486CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
487CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
488
489static int
490t4_probe(device_t dev)
491{
492 int i;
493 uint16_t v = pci_get_vendor(dev);
494 uint16_t d = pci_get_device(dev);
495 uint8_t f = pci_get_function(dev);
496
497 if (v != PCI_VENDOR_ID_CHELSIO)
498 return (ENXIO);
499
500 /* Attach only to PF0 of the FPGA */
501 if (d == 0xa000 && f != 0)
502 return (ENXIO);
503
504 for (i = 0; i < nitems(t4_pciids); i++) {
505 if (d == t4_pciids[i].device) {
506 device_set_desc(dev, t4_pciids[i].desc);
507 return (BUS_PROBE_DEFAULT);
508 }
509 }
510
511 return (ENXIO);
512}
513
514static int
515t5_probe(device_t dev)
516{
517 int i;
518 uint16_t v = pci_get_vendor(dev);
519 uint16_t d = pci_get_device(dev);
520 uint8_t f = pci_get_function(dev);
521
522 if (v != PCI_VENDOR_ID_CHELSIO)
523 return (ENXIO);
524
525 /* Attach only to PF0 of the FPGA */
526 if (d == 0xb000 && f != 0)
527 return (ENXIO);
528
529 for (i = 0; i < nitems(t5_pciids); i++) {
530 if (d == t5_pciids[i].device) {
531 device_set_desc(dev, t5_pciids[i].desc);
532 return (BUS_PROBE_DEFAULT);
533 }
534 }
535
536 return (ENXIO);
537}
538
539static int
540t4_attach(device_t dev)
541{
542 struct adapter *sc;
543 int rc = 0, i, n10g, n1g, rqidx, tqidx;
544 struct intrs_and_queues iaq;
545 struct sge *s;
546#ifdef TCP_OFFLOAD
547 int ofld_rqidx, ofld_tqidx;
548#endif
549
550 sc = device_get_softc(dev);
551 sc->dev = dev;
552
553 pci_enable_busmaster(dev);
554 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
555 uint32_t v;
556
557 pci_set_max_read_req(dev, 4096);
558 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
559 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
560 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
561 }
562
563 sc->traceq = -1;
564 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
565 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
566 device_get_nameunit(dev));
567
568 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
569 device_get_nameunit(dev));
570 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
571 mtx_lock(&t4_list_lock);
572 SLIST_INSERT_HEAD(&t4_list, sc, link);
573 mtx_unlock(&t4_list_lock);
574
575 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
576 TAILQ_INIT(&sc->sfl);
577 callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
578
579 rc = map_bars_0_and_4(sc);
580 if (rc != 0)
581 goto done; /* error message displayed already */
582
583 /*
584 * This is the real PF# to which we're attaching. Works from within PCI
585 * passthrough environments too, where pci_get_function() could return a
586 * different PF# depending on the passthrough configuration. We need to
587 * use the real PF# in all our communication with the firmware.
588 */
589 sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
590 sc->mbox = sc->pf;
591
592 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
593 sc->an_handler = an_not_handled;
594 for (i = 0; i < nitems(sc->cpl_handler); i++)
595 sc->cpl_handler[i] = cpl_not_handled;
596 for (i = 0; i < nitems(sc->fw_msg_handler); i++)
597 sc->fw_msg_handler[i] = fw_msg_not_handled;
598 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
599 t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
600 t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
601 t4_init_sge_cpl_handlers(sc);
602
603
604 /* Prepare the adapter for operation */
605 rc = -t4_prep_adapter(sc);
606 if (rc != 0) {
607 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
608 goto done;
609 }
610
611 /*
612 * Do this really early, with the memory windows set up even before the
613 * character device. The userland tool's register i/o and mem read
614 * will work even in "recovery mode".
615 */
616 setup_memwin(sc);
617 sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
618 device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
619 device_get_nameunit(dev));
620 if (sc->cdev == NULL)
621 device_printf(dev, "failed to create nexus char device.\n");
622 else
623 sc->cdev->si_drv1 = sc;
624
625 /* Go no further if recovery mode has been requested. */
626 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
627 device_printf(dev, "recovery mode.\n");
628 goto done;
629 }
630
631 /* Prepare the firmware for operation */
632 rc = prep_firmware(sc);
633 if (rc != 0)
634 goto done; /* error message displayed already */
635
636 rc = get_params__post_init(sc);
637 if (rc != 0)
638 goto done; /* error message displayed already */
639
640 rc = set_params__post_init(sc);
641 if (rc != 0)
642 goto done; /* error message displayed already */
643
644 rc = map_bar_2(sc);
645 if (rc != 0)
646 goto done; /* error message displayed already */
647
648 rc = t4_create_dma_tag(sc);
649 if (rc != 0)
650 goto done; /* error message displayed already */
651
652 /*
653 * First pass over all the ports - allocate VIs and initialize some
654 * basic parameters like mac address, port type, etc. We also figure
655 * out whether a port is 10G or 1G and use that information when
656 * calculating how many interrupts to attempt to allocate.
657 */
658 n10g = n1g = 0;
659 for_each_port(sc, i) {
660 struct port_info *pi;
661
662 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
663 sc->port[i] = pi;
664
665 /* These must be set before t4_port_init */
666 pi->adapter = sc;
667 pi->port_id = i;
668
669 /* Allocate the vi and initialize parameters like mac addr */
670 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
671 if (rc != 0) {
672 device_printf(dev, "unable to initialize port %d: %d\n",
673 i, rc);
674 free(pi, M_CXGBE);
675 sc->port[i] = NULL;
676 goto done;
677 }
678
679 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
680 device_get_nameunit(dev), i);
681 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
682 sc->chan_map[pi->tx_chan] = i;
683
684 if (is_10G_port(pi) || is_40G_port(pi)) {
685 n10g++;
686 pi->tmr_idx = t4_tmr_idx_10g;
687 pi->pktc_idx = t4_pktc_idx_10g;
688 } else {
689 n1g++;
690 pi->tmr_idx = t4_tmr_idx_1g;
691 pi->pktc_idx = t4_pktc_idx_1g;
692 }
693
694 pi->xact_addr_filt = -1;
695 pi->linkdnrc = -1;
696
697 pi->qsize_rxq = t4_qsize_rxq;
698 pi->qsize_txq = t4_qsize_txq;
699
700 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
701 if (pi->dev == NULL) {
702 device_printf(dev,
703 "failed to add device for port %d.\n", i);
704 rc = ENXIO;
705 goto done;
706 }
707 device_set_softc(pi->dev, pi);
708 }
709
710 /*
711 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
712 */
713 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
714 if (rc != 0)
715 goto done; /* error message displayed already */
716
717 sc->intr_type = iaq.intr_type;
718 sc->intr_count = iaq.nirq;
719 sc->flags |= iaq.intr_flags;
720
721 s = &sc->sge;
722 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
723 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
724 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
725 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
726 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
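/*
 * Editor's worked example (hypothetical two-port 10G card granted the
 * compiled-in defaults NTXQ_10G=16 and NRXQ_10G=8): nrxq = 2*8 = 16,
 * ntxq = 2*16 = 32, neq = 32 + 16 + (2 + 1) = 51, niq = 16 + 1 = 17,
 * before any offload queues are added.
 */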
727
728#ifdef TCP_OFFLOAD
729 if (is_offload(sc)) {
730
731 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
732 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
733 s->neq += s->nofldtxq + s->nofldrxq;
734 s->niq += s->nofldrxq;
735
736 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
737 M_CXGBE, M_ZERO | M_WAITOK);
738 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
739 M_CXGBE, M_ZERO | M_WAITOK);
740 }
741#endif
742
743 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
744 M_ZERO | M_WAITOK);
745 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
746 M_ZERO | M_WAITOK);
747 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
748 M_ZERO | M_WAITOK);
749 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
750 M_ZERO | M_WAITOK);
751 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
752 M_ZERO | M_WAITOK);
753
754 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
755 M_ZERO | M_WAITOK);
756
757 t4_init_l2t(sc, M_WAITOK);
758
759 /*
760 * Second pass over the ports. This time we know the number of rx and
761 * tx queues that each port should get.
762 */
763 rqidx = tqidx = 0;
764#ifdef TCP_OFFLOAD
765 ofld_rqidx = ofld_tqidx = 0;
766#endif
767 for_each_port(sc, i) {
768 struct port_info *pi = sc->port[i];
769
770 if (pi == NULL)
771 continue;
772
773 pi->first_rxq = rqidx;
774 pi->first_txq = tqidx;
775 if (is_10G_port(pi) || is_40G_port(pi)) {
776 pi->nrxq = iaq.nrxq10g;
777 pi->ntxq = iaq.ntxq10g;
778 } else {
779 pi->nrxq = iaq.nrxq1g;
780 pi->ntxq = iaq.ntxq1g;
781 }
782
783 rqidx += pi->nrxq;
784 tqidx += pi->ntxq;
785
786#ifdef TCP_OFFLOAD
787 if (is_offload(sc)) {
788 pi->first_ofld_rxq = ofld_rqidx;
789 pi->first_ofld_txq = ofld_tqidx;
790 if (is_10G_port(pi) || is_40G_port(pi)) {
791 pi->nofldrxq = iaq.nofldrxq10g;
792 pi->nofldtxq = iaq.nofldtxq10g;
793 } else {
794 pi->nofldrxq = iaq.nofldrxq1g;
795 pi->nofldtxq = iaq.nofldtxq1g;
796 }
797 ofld_rqidx += pi->nofldrxq;
798 ofld_tqidx += pi->nofldtxq;
799 }
800#endif
801 }
802
803 rc = setup_intr_handlers(sc);
804 if (rc != 0) {
805 device_printf(dev,
806 "failed to setup interrupt handlers: %d\n", rc);
807 goto done;
808 }
809
810 rc = bus_generic_attach(dev);
811 if (rc != 0) {
812 device_printf(dev,
813 "failed to attach all child ports: %d\n", rc);
814 goto done;
815 }
816
817 device_printf(dev,
818 "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
819 sc->params.pci.width, sc->params.nports, sc->intr_count,
820 sc->intr_type == INTR_MSIX ? "MSI-X" :
821 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
822 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
823
824 t4_set_desc(sc);
825
826done:
827 if (rc != 0 && sc->cdev) {
828 /* cdev was created and so cxgbetool works; recover that way. */
829 device_printf(dev,
830 "error during attach, adapter is now in recovery mode.\n");
831 rc = 0;
832 }
833
834 if (rc != 0)
835 t4_detach(dev);
836 else
837 t4_sysctls(sc);
838
839 return (rc);
840}
841
842/*
843 * Idempotent
844 */
845static int
846t4_detach(device_t dev)
847{
848 struct adapter *sc;
849 struct port_info *pi;
850 int i, rc;
851
852 sc = device_get_softc(dev);
853
854 if (sc->flags & FULL_INIT_DONE)
855 t4_intr_disable(sc);
856
857 if (sc->cdev) {
858 destroy_dev(sc->cdev);
859 sc->cdev = NULL;
860 }
861
862 rc = bus_generic_detach(dev);
863 if (rc) {
864 device_printf(dev,
865 "failed to detach child devices: %d\n", rc);
866 return (rc);
867 }
868
869 for (i = 0; i < sc->intr_count; i++)
870 t4_free_irq(sc, &sc->irq[i]);
871
872 for (i = 0; i < MAX_NPORTS; i++) {
873 pi = sc->port[i];
874 if (pi) {
875 t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
876 if (pi->dev)
877 device_delete_child(dev, pi->dev);
878
879 mtx_destroy(&pi->pi_lock);
880 free(pi, M_CXGBE);
881 }
882 }
883
884 if (sc->flags & FULL_INIT_DONE)
885 adapter_full_uninit(sc);
886
887 if (sc->flags & FW_OK)
888 t4_fw_bye(sc, sc->mbox);
889
890 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
891 pci_release_msi(dev);
892
893 if (sc->regs_res)
894 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
895 sc->regs_res);
896
897 if (sc->udbs_res)
898 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
899 sc->udbs_res);
900
901 if (sc->msix_res)
902 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
903 sc->msix_res);
904
905 if (sc->l2t)
906 t4_free_l2t(sc->l2t);
907
908#ifdef TCP_OFFLOAD
909 free(sc->sge.ofld_rxq, M_CXGBE);
910 free(sc->sge.ofld_txq, M_CXGBE);
911#endif
912 free(sc->irq, M_CXGBE);
913 free(sc->sge.rxq, M_CXGBE);
914 free(sc->sge.txq, M_CXGBE);
915 free(sc->sge.ctrlq, M_CXGBE);
916 free(sc->sge.iqmap, M_CXGBE);
917 free(sc->sge.eqmap, M_CXGBE);
918 free(sc->tids.ftid_tab, M_CXGBE);
919 t4_destroy_dma_tag(sc);
920 if (mtx_initialized(&sc->sc_lock)) {
921 mtx_lock(&t4_list_lock);
922 SLIST_REMOVE(&t4_list, sc, adapter, link);
923 mtx_unlock(&t4_list_lock);
924 mtx_destroy(&sc->sc_lock);
925 }
926
927 if (mtx_initialized(&sc->tids.ftid_lock))
928 mtx_destroy(&sc->tids.ftid_lock);
929 if (mtx_initialized(&sc->sfl_lock))
930 mtx_destroy(&sc->sfl_lock);
931 if (mtx_initialized(&sc->ifp_lock))
932 mtx_destroy(&sc->ifp_lock);
933
934 bzero(sc, sizeof(*sc));
935
936 return (0);
937}
938
939
940static int
941cxgbe_probe(device_t dev)
942{
943 char buf[128];
944 struct port_info *pi = device_get_softc(dev);
945
946 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
947 device_set_desc_copy(dev, buf);
948
949 return (BUS_PROBE_DEFAULT);
950}
951
952#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
953 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
954 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
955#define T4_CAP_ENABLE (T4_CAP)
956
957static int
958cxgbe_attach(device_t dev)
959{
960 struct port_info *pi = device_get_softc(dev);
961 struct ifnet *ifp;
962
963 /* Allocate an ifnet and set it up */
964 ifp = if_alloc(IFT_ETHER);
965 if (ifp == NULL) {
966 device_printf(dev, "Cannot allocate ifnet\n");
967 return (ENOMEM);
968 }
969 pi->ifp = ifp;
970 ifp->if_softc = pi;
971
972 callout_init(&pi->tick, CALLOUT_MPSAFE);
973
974 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
975 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
976
977 ifp->if_init = cxgbe_init;
978 ifp->if_ioctl = cxgbe_ioctl;
979 ifp->if_transmit = cxgbe_transmit;
980 ifp->if_qflush = cxgbe_qflush;
981
982 ifp->if_capabilities = T4_CAP;
983#ifdef TCP_OFFLOAD
984 if (is_offload(pi->adapter))
985 ifp->if_capabilities |= IFCAP_TOE;
986#endif
987 ifp->if_capenable = T4_CAP_ENABLE;
988 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
989 CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
990
991 /* Initialize ifmedia for this port */
992 ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
993 cxgbe_media_status);
994 build_medialist(pi);
995
996 pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
997 EVENTHANDLER_PRI_ANY);
998
999 ether_ifattach(ifp, pi->hw_addr);
1000
1001#ifdef TCP_OFFLOAD
1002 if (is_offload(pi->adapter)) {
1003 device_printf(dev,
1004 "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
1005 pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
1006 } else
1007#endif
1008 device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
1009
1010 cxgbe_sysctls(pi);
1011
1012 return (0);
1013}
1014
1015static int
1016cxgbe_detach(device_t dev)
1017{
1018 struct port_info *pi = device_get_softc(dev);
1019 struct adapter *sc = pi->adapter;
1020 struct ifnet *ifp = pi->ifp;
1021
1022 /* Tell if_ioctl and if_init that the port is going away */
1023 ADAPTER_LOCK(sc);
1024 SET_DOOMED(pi);
1025 wakeup(&sc->flags);
1026 while (IS_BUSY(sc))
1027 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
1028 SET_BUSY(sc);
1029#ifdef INVARIANTS
1030 sc->last_op = "t4detach";
1031 sc->last_op_thr = curthread;
1032#endif
1033 ADAPTER_UNLOCK(sc);
1034
1035 if (pi->flags & HAS_TRACEQ) {
1036 sc->traceq = -1; /* cloner should not create ifnet */
1037 t4_tracer_port_detach(sc);
1038 }
1039
1040 if (pi->vlan_c)
1041 EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);
1042
1043 PORT_LOCK(pi);
1044 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1045 callout_stop(&pi->tick);
1046 PORT_UNLOCK(pi);
1047 callout_drain(&pi->tick);
1048
1049 /* Let detach proceed even if these fail. */
1050 cxgbe_uninit_synchronized(pi);
1051 port_full_uninit(pi);
1052
1053 ifmedia_removeall(&pi->media);
1054 ether_ifdetach(pi->ifp);
1055 if_free(pi->ifp);
1056
1057 ADAPTER_LOCK(sc);
1058 CLR_BUSY(sc);
1059 wakeup(&sc->flags);
1060 ADAPTER_UNLOCK(sc);
1061
1062 return (0);
1063}
1064
1065static void
1066cxgbe_init(void *arg)
1067{
1068 struct port_info *pi = arg;
1069 struct adapter *sc = pi->adapter;
1070
1071 if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1072 return;
1073 cxgbe_init_synchronized(pi);
1074 end_synchronized_op(sc, 0);
1075}
1076
1077static int
1078cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
1079{
1080 int rc = 0, mtu, flags;
1081 struct port_info *pi = ifp->if_softc;
1082 struct adapter *sc = pi->adapter;
1083 struct ifreq *ifr = (struct ifreq *)data;
1084 uint32_t mask;
1085
1086 switch (cmd) {
1087 case SIOCSIFMTU:
1088 mtu = ifr->ifr_mtu;
1089 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
1090 return (EINVAL);
1091
1092 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
1093 if (rc)
1094 return (rc);
1095 ifp->if_mtu = mtu;
1096 if (pi->flags & PORT_INIT_DONE) {
1097 t4_update_fl_bufsize(ifp);
1098 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1099 rc = update_mac_settings(pi, XGMAC_MTU);
1100 }
1101 end_synchronized_op(sc, 0);
1102 break;
1103
1104 case SIOCSIFFLAGS:
1105 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
1106 if (rc)
1107 return (rc);
1108
1109 if (ifp->if_flags & IFF_UP) {
1110 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1111 flags = pi->if_flags;
1112 if ((ifp->if_flags ^ flags) &
1113 (IFF_PROMISC | IFF_ALLMULTI)) {
1114 rc = update_mac_settings(pi,
1115 XGMAC_PROMISC | XGMAC_ALLMULTI);
1116 }
1117 } else
1118 rc = cxgbe_init_synchronized(pi);
1119 pi->if_flags = ifp->if_flags;
1120 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1121 rc = cxgbe_uninit_synchronized(pi);
1122 end_synchronized_op(sc, 0);
1123 break;
1124
1125 case SIOCADDMULTI:
1126 case SIOCDELMULTI: /* these two are called with a mutex held :-( */
1127 rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
1128 if (rc)
1129 return (rc);
1130 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1131 rc = update_mac_settings(pi, XGMAC_MCADDRS);
1132 end_synchronized_op(sc, LOCK_HELD);
1133 break;
1134
1135 case SIOCSIFCAP:
1136 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
1137 if (rc)
1138 return (rc);
1139
1140 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1141 if (mask & IFCAP_TXCSUM) {
1142 ifp->if_capenable ^= IFCAP_TXCSUM;
1143 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
1144
1145 if (IFCAP_TSO4 & ifp->if_capenable &&
1146 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1147 ifp->if_capenable &= ~IFCAP_TSO4;
1148 if_printf(ifp,
1149 "tso4 disabled due to -txcsum.\n");
1150 }
1151 }
1152 if (mask & IFCAP_TXCSUM_IPV6) {
1153 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1154 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
1155
1156 if (IFCAP_TSO6 & ifp->if_capenable &&
1157 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1158 ifp->if_capenable &= ~IFCAP_TSO6;
1159 if_printf(ifp,
1160 "tso6 disabled due to -txcsum6.\n");
1161 }
1162 }
1163 if (mask & IFCAP_RXCSUM)
1164 ifp->if_capenable ^= IFCAP_RXCSUM;
1165 if (mask & IFCAP_RXCSUM_IPV6)
1166 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1167
1168 /*
1169 * Note that we leave CSUM_TSO alone (it is always set). The
1170 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
1171 * sending a TSO request our way, so it's sufficient to toggle
1172 * IFCAP_TSOx only.
1173 */
1174 if (mask & IFCAP_TSO4) {
1175 if (!(IFCAP_TSO4 & ifp->if_capenable) &&
1176 !(IFCAP_TXCSUM & ifp->if_capenable)) {
1177 if_printf(ifp, "enable txcsum first.\n");
1178 rc = EAGAIN;
1179 goto fail;
1180 }
1181 ifp->if_capenable ^= IFCAP_TSO4;
1182 }
1183 if (mask & IFCAP_TSO6) {
1184 if (!(IFCAP_TSO6 & ifp->if_capenable) &&
1185 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
1186 if_printf(ifp, "enable txcsum6 first.\n");
1187 rc = EAGAIN;
1188 goto fail;
1189 }
1190 ifp->if_capenable ^= IFCAP_TSO6;
1191 }
1192 if (mask & IFCAP_LRO) {
1193#if defined(INET) || defined(INET6)
1194 int i;
1195 struct sge_rxq *rxq;
1196
1197 ifp->if_capenable ^= IFCAP_LRO;
1198 for_each_rxq(pi, i, rxq) {
1199 if (ifp->if_capenable & IFCAP_LRO)
1200 rxq->iq.flags |= IQ_LRO_ENABLED;
1201 else
1202 rxq->iq.flags &= ~IQ_LRO_ENABLED;
1203 }
1204#endif
1205 }
1206#ifdef TCP_OFFLOAD
1207 if (mask & IFCAP_TOE) {
1208 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
1209
1210 rc = toe_capability(pi, enable);
1211 if (rc != 0)
1212 goto fail;
1213
1214			ifp->if_capenable ^= IFCAP_TOE;
1215 }
1216#endif
1217 if (mask & IFCAP_VLAN_HWTAGGING) {
1218 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1219 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1220 rc = update_mac_settings(pi, XGMAC_VLANEX);
1221 }
1222 if (mask & IFCAP_VLAN_MTU) {
1223 ifp->if_capenable ^= IFCAP_VLAN_MTU;
1224
1225 /* Need to find out how to disable auto-mtu-inflation */
1226 }
1227 if (mask & IFCAP_VLAN_HWTSO)
1228 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1229 if (mask & IFCAP_VLAN_HWCSUM)
1230 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1231
1232#ifdef VLAN_CAPABILITIES
1233 VLAN_CAPABILITIES(ifp);
1234#endif
1235fail:
1236 end_synchronized_op(sc, 0);
1237 break;
1238
1239 case SIOCSIFMEDIA:
1240 case SIOCGIFMEDIA:
1241 ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
1242 break;
1243
1244 default:
1245 rc = ether_ioctl(ifp, cmd, data);
1246 }
1247
1248 return (rc);
1249}
1250
1251static int
1252cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
1253{
1254 struct port_info *pi = ifp->if_softc;
1255 struct adapter *sc = pi->adapter;
1256 struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
1257 struct buf_ring *br;
1258 int rc;
1259
1260 M_ASSERTPKTHDR(m);
1261
1262 if (__predict_false(pi->link_cfg.link_ok == 0)) {
1263 m_freem(m);
1264 return (ENETDOWN);
1265 }
1266
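	/*
	 * A flow ID, when present, picks the tx queue so that all packets
	 * of a flow go out on the same queue and stay ordered.
	 */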
1267 if (m->m_flags & M_FLOWID)
1268 txq += (m->m_pkthdr.flowid % pi->ntxq);
1269 br = txq->br;
1270
1271 if (TXQ_TRYLOCK(txq) == 0) {
1272 struct sge_eq *eq = &txq->eq;
1273
1274 /*
1275 * It is possible that t4_eth_tx finishes up and releases the
1276 * lock between the TRYLOCK above and the drbr_enqueue here. We
1277 * need to make sure that this mbuf doesn't just sit there in
1278 * the drbr.
1279 */
1280
1281 rc = drbr_enqueue(ifp, br, m);
1282 if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
1283 !(eq->flags & EQ_DOOMED))
1284 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
1285 return (rc);
1286 }
1287
1288 /*
1289 * txq->m is the mbuf that is held up due to a temporary shortage of
1290 * resources and it should be put on the wire first. Then what's in
1291 * drbr and finally the mbuf that was just passed in to us.
1292 *
1293 * Return code should indicate the fate of the mbuf that was passed in
1294 * this time.
1295 */
1296
1297 TXQ_LOCK_ASSERT_OWNED(txq);
1298 if (drbr_needs_enqueue(ifp, br) || txq->m) {
1299
1300 /* Queued for transmission. */
1301
1302 rc = drbr_enqueue(ifp, br, m);
1303 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
1304 (void) t4_eth_tx(ifp, txq, m);
1305 TXQ_UNLOCK(txq);
1306 return (rc);
1307 }
1308
1309 /* Direct transmission. */
1310 rc = t4_eth_tx(ifp, txq, m);
1311 if (rc != 0 && txq->m)
1312 rc = 0; /* held, will be transmitted soon (hopefully) */
1313
1314 TXQ_UNLOCK(txq);
1315 return (rc);
1316}
1317
1318static void
1319cxgbe_qflush(struct ifnet *ifp)
1320{
1321 struct port_info *pi = ifp->if_softc;
1322 struct sge_txq *txq;
1323 int i;
1324 struct mbuf *m;
1325
1326 /* queues do not exist if !PORT_INIT_DONE. */
1327 if (pi->flags & PORT_INIT_DONE) {
1328 for_each_txq(pi, i, txq) {
1329 TXQ_LOCK(txq);
1330 m_freem(txq->m);
1331 txq->m = NULL;
1332 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1333 m_freem(m);
1334 TXQ_UNLOCK(txq);
1335 }
1336 }
1337 if_qflush(ifp);
1338}
1339
1340static int
1341cxgbe_media_change(struct ifnet *ifp)
1342{
1343 struct port_info *pi = ifp->if_softc;
1344
1345 device_printf(pi->dev, "%s unimplemented.\n", __func__);
1346
1347 return (EOPNOTSUPP);
1348}
1349
1350static void
1351cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1352{
1353 struct port_info *pi = ifp->if_softc;
1354 struct ifmedia_entry *cur = pi->media.ifm_cur;
1355 int speed = pi->link_cfg.speed;
1356 int data = (pi->port_type << 8) | pi->mod_type;
1357
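	/*
	 * ifm_data encodes the port and module type the media list was
	 * built for; rebuild the list if the transceiver has changed.
	 */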
1358 if (cur->ifm_data != data) {
1359 build_medialist(pi);
1360 cur = pi->media.ifm_cur;
1361 }
1362
1363 ifmr->ifm_status = IFM_AVALID;
1364 if (!pi->link_cfg.link_ok)
1365 return;
1366
1367 ifmr->ifm_status |= IFM_ACTIVE;
1368
1369 /* active and current will differ iff current media is autoselect. */
1370 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1371 return;
1372
1373 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1374 if (speed == SPEED_10000)
1375 ifmr->ifm_active |= IFM_10G_T;
1376 else if (speed == SPEED_1000)
1377 ifmr->ifm_active |= IFM_1000_T;
1378 else if (speed == SPEED_100)
1379 ifmr->ifm_active |= IFM_100_TX;
1380 else if (speed == SPEED_10)
1381 ifmr->ifm_active |= IFM_10_T;
1382 else
1383 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1384 speed));
1385}
1386
1387void
1388t4_fatal_err(struct adapter *sc)
1389{
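	/* Stop the SGE (this halts all DMA) and disable interrupts. */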
1390 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
1391 t4_intr_disable(sc);
1392 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
1393 device_get_nameunit(sc->dev));
1394}
1395
1396static int
1397map_bars_0_and_4(struct adapter *sc)
1398{
1399 sc->regs_rid = PCIR_BAR(0);
1400 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1401 &sc->regs_rid, RF_ACTIVE);
1402 if (sc->regs_res == NULL) {
1403 device_printf(sc->dev, "cannot map registers.\n");
1404 return (ENXIO);
1405 }
1406 sc->bt = rman_get_bustag(sc->regs_res);
1407 sc->bh = rman_get_bushandle(sc->regs_res);
1408 sc->mmio_len = rman_get_size(sc->regs_res);
1409 setbit(&sc->doorbells, DOORBELL_KDB);
1410
1411 sc->msix_rid = PCIR_BAR(4);
1412 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1413 &sc->msix_rid, RF_ACTIVE);
1414 if (sc->msix_res == NULL) {
1415 device_printf(sc->dev, "cannot map MSI-X BAR.\n");
1416 return (ENXIO);
1417 }
1418
1419 return (0);
1420}
1421
1422static int
1423map_bar_2(struct adapter *sc)
1424{
1425
1426 /*
1427 * T4: only iWARP driver uses the userspace doorbells. There is no need
1428 * to map it if RDMA is disabled.
1429 */
1430 if (is_t4(sc) && sc->rdmacaps == 0)
1431 return (0);
1432
1433 sc->udbs_rid = PCIR_BAR(2);
1434 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
1435 &sc->udbs_rid, RF_ACTIVE);
1436 if (sc->udbs_res == NULL) {
1437 device_printf(sc->dev, "cannot map doorbell BAR.\n");
1438 return (ENXIO);
1439 }
1440 sc->udbs_base = rman_get_virtual(sc->udbs_res);
1441
1442 if (is_t5(sc)) {
1443 setbit(&sc->doorbells, DOORBELL_UDB);
1444#if defined(__i386__) || defined(__amd64__)
1445 if (t5_write_combine) {
1446 int rc;
1447
1448 /*
1449 * Enable write combining on BAR2. This is the
1450 * userspace doorbell BAR and is split into 128B
1451 * (UDBS_SEG_SIZE) doorbell regions, each associated
1452 * with an egress queue. The first 64B has the doorbell
1453 * and the second 64B can be used to submit a tx work
1454 * request with an implicit doorbell.
1455 */
1456
1457 rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
1458 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
1459 if (rc == 0) {
1460 clrbit(&sc->doorbells, DOORBELL_UDB);
1461 setbit(&sc->doorbells, DOORBELL_WCWR);
1462 setbit(&sc->doorbells, DOORBELL_UDBWC);
1463 } else {
1464 device_printf(sc->dev,
1465 "couldn't enable write combining: %d\n",
1466 rc);
1467 }
1468
1469 t4_write_reg(sc, A_SGE_STAT_CFG,
1470 V_STATSOURCE_T5(7) | V_STATMODE(0));
1471 }
1472#endif
1473 }
1474
1475 return (0);
1476}
1477
1478static const struct memwin t4_memwin[] = {
1479 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1480 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1481 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
1482};
1483
1484static const struct memwin t5_memwin[] = {
1485 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1486 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1487 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1488};
1489
1490static void
1491setup_memwin(struct adapter *sc)
1492{
1493 const struct memwin *mw;
1494 int i, n;
1495 uint32_t bar0;
1496
1497 if (is_t4(sc)) {
1498 /*
1499 * Read low 32b of bar0 indirectly via the hardware backdoor
1500 * mechanism. Works from within PCI passthrough environments
1501 * too, where rman_get_start() can return a different value. We
1502 * need to program the T4 memory window decoders with the actual
1503 * addresses that will be coming across the PCIe link.
1504 */
1505 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
1506 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
1507
1508 mw = &t4_memwin[0];
1509 n = nitems(t4_memwin);
1510 } else {
1511 /* T5 uses the relative offset inside the PCIe BAR */
1512 bar0 = 0;
1513
1514 mw = &t5_memwin[0];
1515 n = nitems(t5_memwin);
1516 }
1517
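	/*
	 * The WINDOW field encodes log2(aperture) - 10, i.e. each window
	 * covers 2^(WINDOW + 10) bytes.
	 */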
1518 for (i = 0; i < n; i++, mw++) {
1519 t4_write_reg(sc,
1520 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
1521 (mw->base + bar0) | V_BIR(0) |
1522 V_WINDOW(ilog2(mw->aperture) - 10));
1523 }
1524
1525 /* flush */
1526 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1527}
1528
1529/*
1530 * Verify that the memory range specified by the addr/len pair is valid and lies
1531 * entirely within a single region (EDCx or MCx).
1532 */
1533static int
1534validate_mem_range(struct adapter *sc, uint32_t addr, int len)
1535{
1536 uint32_t em, addr_len, maddr, mlen;
1537
1538 /* Memory can only be accessed in naturally aligned 4 byte units */
1539 if (addr & 3 || len & 3 || len == 0)
1540 return (EINVAL);
1541
1542 /* Enabled memories */
1543 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
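	/* The BASE and SIZE fields in the BAR registers below are in MB. */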
1544 if (em & F_EDRAM0_ENABLE) {
1545 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1546 maddr = G_EDRAM0_BASE(addr_len) << 20;
1547 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1548 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1549 addr + len <= maddr + mlen)
1550 return (0);
1551 }
1552 if (em & F_EDRAM1_ENABLE) {
1553 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1554 maddr = G_EDRAM1_BASE(addr_len) << 20;
1555 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1556 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1557 addr + len <= maddr + mlen)
1558 return (0);
1559 }
1560 if (em & F_EXT_MEM_ENABLE) {
1561 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1562 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1563 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1564 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1565 addr + len <= maddr + mlen)
1566 return (0);
1567 }
1568 if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
1569 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1570 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1571 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1572 if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
1573 addr + len <= maddr + mlen)
1574 return (0);
1575 }
1576
1577 return (EFAULT);
1578}
1579
1580/*
1581 * Verify that the memory range specified by the memtype/offset/len pair is
1582 * valid and lies entirely within the memtype specified. The global address of
1583 * the start of the range is returned in addr.
1584 */
1585static int
1586validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1587 uint32_t *addr)
1588{
1589 uint32_t em, addr_len, maddr, mlen;
1590
1591 /* Memory can only be accessed in naturally aligned 4 byte units */
1592 if (off & 3 || len & 3 || len == 0)
1593 return (EINVAL);
1594
1595 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1596 switch (mtype) {
1597 case MEM_EDC0:
1598 if (!(em & F_EDRAM0_ENABLE))
1599 return (EINVAL);
1600 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1601 maddr = G_EDRAM0_BASE(addr_len) << 20;
1602 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1603 break;
1604 case MEM_EDC1:
1605 if (!(em & F_EDRAM1_ENABLE))
1606 return (EINVAL);
1607 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1608 maddr = G_EDRAM1_BASE(addr_len) << 20;
1609 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1610 break;
1611 case MEM_MC:
1612 if (!(em & F_EXT_MEM_ENABLE))
1613 return (EINVAL);
1614 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1615 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1616 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1617 break;
1618 case MEM_MC1:
1619 if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
1620 return (EINVAL);
1621 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1622 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1623 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1624 break;
1625 default:
1626 return (EINVAL);
1627 }
1628
1629 if (mlen > 0 && off < mlen && off + len <= mlen) {
1630 *addr = maddr + off; /* global address */
1631 return (0);
1632 }
1633
1634 return (EFAULT);
1635}
1636
1637static void
1638memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1639{
1640 const struct memwin *mw;
1641
1642 if (is_t4(sc)) {
1643 KASSERT(win >= 0 && win < nitems(t4_memwin),
1644 ("%s: incorrect memwin# (%d)", __func__, win));
1645 mw = &t4_memwin[win];
1646 } else {
1647 KASSERT(win >= 0 && win < nitems(t5_memwin),
1648 ("%s: incorrect memwin# (%d)", __func__, win));
1649 mw = &t5_memwin[win];
1650 }
1651
1652 if (base != NULL)
1653 *base = mw->base;
1654 if (aperture != NULL)
1655 *aperture = mw->aperture;
1656}
1657
1658/*
1659 * Positions the memory window such that it can be used to access the specified
1660 * address in the chip's address space. The return value is the offset of addr
1661 * from the start of the window.
1662 */
1663static uint32_t
1664position_memwin(struct adapter *sc, int n, uint32_t addr)
1665{
1666 uint32_t start, pf;
1667 uint32_t reg;
1668
1669 KASSERT(n >= 0 && n <= 3,
1670 ("%s: invalid window %d.", __func__, n));
1671 KASSERT((addr & 3) == 0,
1672 ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));
1673
1674 if (is_t4(sc)) {
1675 pf = 0;
1676 start = addr & ~0xf; /* start must be 16B aligned */
1677 } else {
1678 pf = V_PFNUM(sc->pf);
1679 start = addr & ~0x7f; /* start must be 128B aligned */
1680 }
1681 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1682
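	/* Program the new offset and read it back to flush the write. */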
1683 t4_write_reg(sc, reg, start | pf);
1684 t4_read_reg(sc, reg);
1685
1686 return (addr - start);
1687}
1688
1689static int
1690cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
1691 struct intrs_and_queues *iaq)
1692{
1693 int rc, itype, navail, nrxq10g, nrxq1g, n;
1694 int nofldrxq10g = 0, nofldrxq1g = 0;
1695
1696 bzero(iaq, sizeof(*iaq));
1697
1698 iaq->ntxq10g = t4_ntxq10g;
1699 iaq->ntxq1g = t4_ntxq1g;
1700 iaq->nrxq10g = nrxq10g = t4_nrxq10g;
1701 iaq->nrxq1g = nrxq1g = t4_nrxq1g;
1702#ifdef TCP_OFFLOAD
1703 if (is_offload(sc)) {
1704 iaq->nofldtxq10g = t4_nofldtxq10g;
1705 iaq->nofldtxq1g = t4_nofldtxq1g;
1706 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
1707 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
1708 }
1709#endif
1710
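	/*
	 * Interrupt types are tried in decreasing order of preference:
	 * MSI-X first, then MSI, then INTx.  MSI vector counts must be
	 * powers of 2, hence the powerof2() checks below.
	 */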
1711 for (itype = INTR_MSIX; itype; itype >>= 1) {
1712
1713 if ((itype & t4_intr_types) == 0)
1714 continue; /* not allowed */
1715
1716 if (itype == INTR_MSIX)
1717 navail = pci_msix_count(sc->dev);
1718 else if (itype == INTR_MSI)
1719 navail = pci_msi_count(sc->dev);
1720 else
1721 navail = 1;
1722restart:
1723 if (navail == 0)
1724 continue;
1725
1726 iaq->intr_type = itype;
1727 iaq->intr_flags = 0;
1728
1729 /*
1730 * Best option: an interrupt vector for errors, one for the
1731		 * firmware event queue, and one for each rxq (NIC as well
1732 * as offload).
1733 */
1734 iaq->nirq = T4_EXTRA_INTR;
1735 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
1736 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
1737 if (iaq->nirq <= navail &&
1738 (itype != INTR_MSI || powerof2(iaq->nirq))) {
1739 iaq->intr_flags |= INTR_DIRECT;
1740 goto allocate;
1741 }
1742
1743 /*
1744 * Second best option: an interrupt vector for errors, one for
1745 * the firmware event queue, and one each for either NIC or
1746 * offload rxq's.
1747 */
1748 iaq->nirq = T4_EXTRA_INTR;
1749 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
1750 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
1751 if (iaq->nirq <= navail &&
1752 (itype != INTR_MSI || powerof2(iaq->nirq)))
1753 goto allocate;
1754
1755 /*
1756 * Next best option: an interrupt vector for errors, one for the
1757 * firmware event queue, and at least one per port. At this
1758 * point we know we'll have to downsize nrxq or nofldrxq to fit
1759 * what's available to us.
1760 */
1761 iaq->nirq = T4_EXTRA_INTR;
1762 iaq->nirq += n10g + n1g;
1763 if (iaq->nirq <= navail) {
1764 int leftover = navail - iaq->nirq;
1765
1766 if (n10g > 0) {
1767 int target = max(nrxq10g, nofldrxq10g);
1768
1769 n = 1;
1770 while (n < target && leftover >= n10g) {
1771 leftover -= n10g;
1772 iaq->nirq += n10g;
1773 n++;
1774 }
1775 iaq->nrxq10g = min(n, nrxq10g);
1776#ifdef TCP_OFFLOAD
1777 if (is_offload(sc))
1778 iaq->nofldrxq10g = min(n, nofldrxq10g);
1779#endif
1780 }
1781
1782 if (n1g > 0) {
1783 int target = max(nrxq1g, nofldrxq1g);
1784
1785 n = 1;
1786 while (n < target && leftover >= n1g) {
1787 leftover -= n1g;
1788 iaq->nirq += n1g;
1789 n++;
1790 }
1791 iaq->nrxq1g = min(n, nrxq1g);
1792#ifdef TCP_OFFLOAD
1793 if (is_offload(sc))
1794 iaq->nofldrxq1g = min(n, nofldrxq1g);
1795#endif
1796 }
1797
1798 if (itype != INTR_MSI || powerof2(iaq->nirq))
1799 goto allocate;
1800 }
1801
1802 /*
1803 * Least desirable option: one interrupt vector for everything.
1804 */
1805 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
1806#ifdef TCP_OFFLOAD
1807 if (is_offload(sc))
1808 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
1809#endif
1810
1811allocate:
1812 navail = iaq->nirq;
1813 rc = 0;
1814 if (itype == INTR_MSIX)
1815 rc = pci_alloc_msix(sc->dev, &navail);
1816 else if (itype == INTR_MSI)
1817 rc = pci_alloc_msi(sc->dev, &navail);
1818
1819 if (rc == 0) {
1820 if (navail == iaq->nirq)
1821 return (0);
1822
1823 /*
1824 * Didn't get the number requested. Use whatever number
1825 * the kernel is willing to allocate (it's in navail).
1826 */
1827 device_printf(sc->dev, "fewer vectors than requested, "
1828 "type=%d, req=%d, rcvd=%d; will downshift req.\n",
1829 itype, iaq->nirq, navail);
1830 pci_release_msi(sc->dev);
1831 goto restart;
1832 }
1833
1834 device_printf(sc->dev,
1835 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
1836		    rc, itype, iaq->nirq, navail);
1837 }
1838
1839 device_printf(sc->dev,
1840 "failed to find a usable interrupt type. "
1841	    "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types,
1842 pci_msix_count(sc->dev), pci_msi_count(sc->dev));
1843
1844 return (ENXIO);
1845}
1846
1847#define FW_VERSION(chip) ( \
1848 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
1849 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
1850 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
1851 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
1852#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
1853
1854struct fw_info {
1855 uint8_t chip;
1856 char *kld_name;
1857 char *fw_mod_name;
1858 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */
1859} fw_info[] = {
1860 {
1861 .chip = CHELSIO_T4,
1862 .kld_name = "t4fw_cfg",
1863 .fw_mod_name = "t4fw",
1864 .fw_hdr = {
1865 .chip = FW_HDR_CHIP_T4,
1866 .fw_ver = htobe32_const(FW_VERSION(T4)),
1867 .intfver_nic = FW_INTFVER(T4, NIC),
1868 .intfver_vnic = FW_INTFVER(T4, VNIC),
1869 .intfver_ofld = FW_INTFVER(T4, OFLD),
1870 .intfver_ri = FW_INTFVER(T4, RI),
1871 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
1872 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
1873 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
1874 .intfver_fcoe = FW_INTFVER(T4, FCOE),
1875 },
1876 }, {
1877 .chip = CHELSIO_T5,
1878 .kld_name = "t5fw_cfg",
1879 .fw_mod_name = "t5fw",
1880 .fw_hdr = {
1881 .chip = FW_HDR_CHIP_T5,
1882 .fw_ver = htobe32_const(FW_VERSION(T5)),
1883 .intfver_nic = FW_INTFVER(T5, NIC),
1884 .intfver_vnic = FW_INTFVER(T5, VNIC),
1885 .intfver_ofld = FW_INTFVER(T5, OFLD),
1886 .intfver_ri = FW_INTFVER(T5, RI),
1887 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
1888 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
1889 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
1890 .intfver_fcoe = FW_INTFVER(T5, FCOE),
1891 },
1892 }
1893};
1894
1895static struct fw_info *
1896find_fw_info(int chip)
1897{
1898 int i;
1899
1900 for (i = 0; i < nitems(fw_info); i++) {
1901 if (fw_info[i].chip == chip)
1902 return (&fw_info[i]);
1903 }
1904 return (NULL);
1905}
1906
1907/*
1908 * Is the given firmware API compatible with the one the driver was compiled
1909 * with?
1910 */
1911static int
1912fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1913{
1914
1915 /* short circuit if it's the exact same firmware version */
1916 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1917 return (1);
1918
1919 /*
1920 * XXX: Is this too conservative? Perhaps I should limit this to the
1921 * features that are supported in the driver.
1922 */
1923#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1924 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1925 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1926 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1927 return (1);
1928#undef SAME_INTF
1929
1930 return (0);
1931}
1932
1933/*
1934 * The firmware in the KLD is usable, but should it be installed? This routine
1935 * explains itself in detail if it indicates the KLD firmware should be
1936 * installed.
1937 */
1938static int
1939should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1940{
1941 const char *reason;
1942
1943 if (!card_fw_usable) {
1944 reason = "incompatible or unusable";
1945 goto install;
1946 }
1947
1948 if (k > c) {
1949 reason = "older than the version bundled with this driver";
1950 goto install;
1951 }
1952
1953 if (t4_fw_install == 2 && k != c) {
1954 reason = "different than the version bundled with this driver";
1955 goto install;
1956 }
1957
1958 return (0);
1959
1960install:
1961 if (t4_fw_install == 0) {
1962 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1963 "but the driver is prohibited from installing a different "
1964 "firmware on the card.\n",
1965 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1966 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1967
1968 return (0);
1969 }
1970
1971 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1972 "installing firmware %u.%u.%u.%u on card.\n",
1973 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1974 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1975 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1976 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1977
1978 return (1);
1979}
1980/*
1981 * Establish contact with the firmware and determine if we are the master driver
1982 * or not, and whether we are responsible for chip initialization.
1983 */
1984static int
1985prep_firmware(struct adapter *sc)
1986{
1987 const struct firmware *fw = NULL, *default_cfg;
1988 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
1989 enum dev_state state;
1990 struct fw_info *fw_info;
1991 struct fw_hdr *card_fw; /* fw on the card */
1992 const struct fw_hdr *kld_fw; /* fw in the KLD */
1993 const struct fw_hdr *drv_fw; /* fw header the driver was compiled
1994 against */
1995
1996 /* Contact firmware. */
1997 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
1998 if (rc < 0 || state == DEV_STATE_ERR) {
1999 rc = -rc;
2000 device_printf(sc->dev,
2001 "failed to connect to the firmware: %d, %d.\n", rc, state);
2002 return (rc);
2003 }
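	/* A non-negative return value is the mbox/PF of the master driver. */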
2004 pf = rc;
2005 if (pf == sc->mbox)
2006 sc->flags |= MASTER_PF;
2007 else if (state == DEV_STATE_UNINIT) {
2008 /*
2009 * We didn't get to be the master so we definitely won't be
2010 * configuring the chip. It's a bug if someone else hasn't
2011 * configured it already.
2012 */
2013 device_printf(sc->dev, "couldn't be master(%d), "
2014 "device not already initialized either(%d).\n", rc, state);
2015 return (EDOOFUS);
2016 }
2017
2018 /* This is the firmware whose headers the driver was compiled against */
2019 fw_info = find_fw_info(chip_id(sc));
2020 if (fw_info == NULL) {
2021 device_printf(sc->dev,
2022 "unable to look up firmware information for chip %d.\n",
2023 chip_id(sc));
2024 return (EINVAL);
2025 }
2026 drv_fw = &fw_info->fw_hdr;
2027
2028 /*
2029 * The firmware KLD contains many modules. The KLD name is also the
2030 * name of the module that contains the default config file.
2031 */
2032 default_cfg = firmware_get(fw_info->kld_name);
2033
2034 /* Read the header of the firmware on the card */
2035 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
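	/* The shared t4_* routines return -errno on failure, hence the
	 * negation here and elsewhere in this file. */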
2036 rc = -t4_read_flash(sc, FLASH_FW_START,
2037 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
2038 if (rc == 0)
2039 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
2040 else {
2041 device_printf(sc->dev,
2042 "Unable to read card's firmware header: %d\n", rc);
2043 card_fw_usable = 0;
2044 }
2045
2046 /* This is the firmware in the KLD */
2047 fw = firmware_get(fw_info->fw_mod_name);
2048 if (fw != NULL) {
2049 kld_fw = (const void *)fw->data;
2050 kld_fw_usable = fw_compatible(drv_fw, kld_fw);
2051 } else {
2052 kld_fw = NULL;
2053 kld_fw_usable = 0;
2054 }
2055
2056 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
2057 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
2058 /*
2059 * Common case: the firmware on the card is an exact match and
2060 * the KLD is an exact match too, or the KLD is
2061 * absent/incompatible. Note that t4_fw_install = 2 is ignored
2062 * here -- use cxgbetool loadfw if you want to reinstall the
2063 * same firmware as the one on the card.
2064 */
2065 } else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
2066 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
2067 be32toh(card_fw->fw_ver))) {
2068
2069 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
2070 if (rc != 0) {
2071 device_printf(sc->dev,
2072 "failed to install firmware: %d\n", rc);
2073 goto done;
2074 }
2075
2076 /* Installed successfully, update the cached header too. */
2077 memcpy(card_fw, kld_fw, sizeof(*card_fw));
2078 card_fw_usable = 1;
2079 need_fw_reset = 0; /* already reset as part of load_fw */
2080 }
2081
2082 if (!card_fw_usable) {
2083 uint32_t d, c, k;
2084
2085 d = ntohl(drv_fw->fw_ver);
2086 c = ntohl(card_fw->fw_ver);
2087 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
2088
2089 device_printf(sc->dev, "Cannot find a usable firmware: "
2090 "fw_install %d, chip state %d, "
2091 "driver compiled with %d.%d.%d.%d, "
2092 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
2093 t4_fw_install, state,
2094 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
2095 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
2096 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
2097 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
2098 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
2099 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
2100 rc = EINVAL;
2101 goto done;
2102 }
2103
2104 /* We're using whatever's on the card and it's known to be good. */
2105 sc->params.fw_vers = ntohl(card_fw->fw_ver);
2106 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
2107 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2108 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2109 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2110 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2111 t4_get_tp_version(sc, &sc->params.tp_vers);
2112
2113 /* Reset device */
2114 if (need_fw_reset &&
2115 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
2116 device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
2117 if (rc != ETIMEDOUT && rc != EIO)
2118 t4_fw_bye(sc, sc->mbox);
2119 goto done;
2120 }
2121 sc->flags |= FW_OK;
2122
2123 rc = get_params__pre_init(sc);
2124 if (rc != 0)
2125 goto done; /* error message displayed already */
2126
2127 /* Partition adapter resources as specified in the config file. */
2128 if (state == DEV_STATE_UNINIT) {
2129
2130 KASSERT(sc->flags & MASTER_PF,
2131 ("%s: trying to change chip settings when not master.",
2132 __func__));
2133
2134 rc = partition_resources(sc, default_cfg, fw_info->kld_name);
2135 if (rc != 0)
2136 goto done; /* error message displayed already */
2137
2138 t4_tweak_chip_settings(sc);
2139
2140 /* get basic stuff going */
2141 rc = -t4_fw_initialize(sc, sc->mbox);
2142 if (rc != 0) {
2143 device_printf(sc->dev, "fw init failed: %d.\n", rc);
2144 goto done;
2145 }
2146 } else {
2147 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
2148 sc->cfcsum = 0;
2149 }
2150
2151done:
2152 free(card_fw, M_CXGBE);
2153 if (fw != NULL)
2154 firmware_put(fw, FIRMWARE_UNLOAD);
2155 if (default_cfg != NULL)
2156 firmware_put(default_cfg, FIRMWARE_UNLOAD);
2157
2158 return (rc);
2159}
2160
2161#define FW_PARAM_DEV(param) \
2162 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2163 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2164#define FW_PARAM_PFVF(param) \
2165 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
2166 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2167
2168/*
2169 * Partition chip resources for use between various PFs, VFs, etc.
2170 */
2171static int
2172partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2173 const char *name_prefix)
2174{
2175 const struct firmware *cfg = NULL;
2176 int rc = 0;
2177 struct fw_caps_config_cmd caps;
2178 uint32_t mtype, moff, finicsum, cfcsum;
2179
2180 /*
2181 * Figure out what configuration file to use. Pick the default config
2182 * file for the card if the user hasn't specified one explicitly.
2183 */
2184 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2185 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2186 /* Card specific overrides go here. */
2187 if (pci_get_device(sc->dev) == 0x440a)
2188 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2189 if (is_fpga(sc))
2190 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2191 }
2192
2193 /*
2194 * We need to load another module if the profile is anything except
2195 * "default" or "flash".
2196 */
2197 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2198 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2199 char s[32];
2200
2201 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2202 cfg = firmware_get(s);
2203 if (cfg == NULL) {
2204 if (default_cfg != NULL) {
2205 device_printf(sc->dev,
2206 "unable to load module \"%s\" for "
2207 "configuration profile \"%s\", will use "
2208 "the default config file instead.\n",
2209 s, sc->cfg_file);
2210 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2211 "%s", DEFAULT_CF);
2212 } else {
2213 device_printf(sc->dev,
2214 "unable to load module \"%s\" for "
2215 "configuration profile \"%s\", will use "
2216 "the config file on the card's flash "
2217 "instead.\n", s, sc->cfg_file);
2218 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2219 "%s", FLASH_CF);
2220 }
2221 }
2222 }
2223
2224 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2225 default_cfg == NULL) {
2226 device_printf(sc->dev,
2227 "default config file not available, will use the config "
2228 "file on the card's flash instead.\n");
2229 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2230 }
2231
2232 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2233 u_int cflen, i, n;
2234 const uint32_t *cfdata;
2235 uint32_t param, val, addr, off, mw_base, mw_aperture;
2236
2237 KASSERT(cfg != NULL || default_cfg != NULL,
2238 ("%s: no config to upload", __func__));
2239
2240 /*
2241 * Ask the firmware where it wants us to upload the config file.
2242 */
2243 param = FW_PARAM_DEV(CF);
2244 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2245 if (rc != 0) {
2246 /* No support for config file? Shouldn't happen. */
2247 device_printf(sc->dev,
2248 "failed to query config file location: %d.\n", rc);
2249 goto done;
2250 }
2251 mtype = G_FW_PARAMS_PARAM_Y(val);
2252 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2253
2254 /*
2255 * XXX: sheer laziness. We deliberately added 4 bytes of
2256 * useless stuffing/comments at the end of the config file so
2257 * it's ok to simply throw away the last remaining bytes when
2258 * the config file is not an exact multiple of 4. This also
2259 * helps with the validate_mt_off_len check.
2260 */
2261 if (cfg != NULL) {
2262 cflen = cfg->datasize & ~3;
2263 cfdata = cfg->data;
2264 } else {
2265 cflen = default_cfg->datasize & ~3;
2266 cfdata = default_cfg->data;
2267 }
2268
2269 if (cflen > FLASH_CFG_MAX_SIZE) {
2270 device_printf(sc->dev,
2271 "config file too long (%d, max allowed is %d). "
2272 "Will try to use the config on the card, if any.\n",
2273 cflen, FLASH_CFG_MAX_SIZE);
2274 goto use_config_on_flash;
2275 }
2276
2277 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2278 if (rc != 0) {
2279 device_printf(sc->dev,
2280 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
2281 "Will try to use the config on the card, if any.\n",
2282 __func__, mtype, moff, cflen, rc);
2283 goto use_config_on_flash;
2284 }
2285
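		/*
		 * Copy the config file into card memory 4B at a time through
		 * memory window 2, moving the window along as needed.
		 */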
2286 memwin_info(sc, 2, &mw_base, &mw_aperture);
2287 while (cflen) {
2288 off = position_memwin(sc, 2, addr);
2289 n = min(cflen, mw_aperture - off);
2290 for (i = 0; i < n; i += 4)
2291 t4_write_reg(sc, mw_base + off + i, *cfdata++);
2292 cflen -= n;
2293 addr += n;
2294 }
2295 } else {
2296use_config_on_flash:
2297 mtype = FW_MEMTYPE_CF_FLASH;
2298 moff = t4_flash_cfg_addr(sc);
2299 }
2300
2301 bzero(&caps, sizeof(caps));
2302 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2303 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2304 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2305 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2306 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2307 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2308 if (rc != 0) {
2309 device_printf(sc->dev,
2310 "failed to pre-process config file: %d "
2311 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2312 goto done;
2313 }
2314
2315 finicsum = be32toh(caps.finicsum);
2316 cfcsum = be32toh(caps.cfcsum);
2317 if (finicsum != cfcsum) {
2318 device_printf(sc->dev,
2319 "WARNING: config file checksum mismatch: %08x %08x\n",
2320 finicsum, cfcsum);
2321 }
2322 sc->cfcsum = cfcsum;
2323
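/*
 * Mask each capability with its t4_*_allowed tunable and cache the result
 * in the softc.
 */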
2324#define LIMIT_CAPS(x) do { \
2325 caps.x &= htobe16(t4_##x##_allowed); \
2326	sc->x = be16toh(caps.x); \
2327} while (0)
2328
2329 /*
2330 * Let the firmware know what features will (not) be used so it can tune
2331 * things accordingly.
2332 */
2333 LIMIT_CAPS(linkcaps);
2334 LIMIT_CAPS(niccaps);
2335 LIMIT_CAPS(toecaps);
2336 LIMIT_CAPS(rdmacaps);
2337 LIMIT_CAPS(iscsicaps);
2338 LIMIT_CAPS(fcoecaps);
2339#undef LIMIT_CAPS
2340
2341 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2342 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2343 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2344 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2345 if (rc != 0) {
2346 device_printf(sc->dev,
2347 "failed to process config file: %d.\n", rc);
2348 }
2349done:
2350 if (cfg != NULL)
2351 firmware_put(cfg, FIRMWARE_UNLOAD);
2352 return (rc);
2353}
2354
2355/*
2356 * Retrieve parameters that are needed (or nice to have) very early.
2357 */
2358static int
2359get_params__pre_init(struct adapter *sc)
2360{
2361 int rc;
2362 uint32_t param[2], val[2];
2363 struct fw_devlog_cmd cmd;
2364 struct devlog_params *dlog = &sc->params.devlog;
2365
2366 param[0] = FW_PARAM_DEV(PORTVEC);
2367 param[1] = FW_PARAM_DEV(CCLK);
2368 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2369 if (rc != 0) {
2370 device_printf(sc->dev,
2371 "failed to query parameters (pre_init): %d.\n", rc);
2372 return (rc);
2373 }
2374
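	/* PORTVEC is a bitmap of the ports assigned to this function. */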
2375 sc->params.portvec = val[0];
2376 sc->params.nports = bitcount32(val[0]);
2377 sc->params.vpd.cclk = val[1];
2378
2379 /* Read device log parameters. */
2380 bzero(&cmd, sizeof(cmd));
2381 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2382 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2383 cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2384 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2385 if (rc != 0) {
2386 device_printf(sc->dev,
2387 "failed to get devlog parameters: %d.\n", rc);
2388 bzero(dlog, sizeof (*dlog));
2389 rc = 0; /* devlog isn't critical for device operation */
2390 } else {
2391 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2392 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2393 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2394 dlog->size = be32toh(cmd.memsize_devlog);
2395 }
2396
2397 return (rc);
2398}
2399
2400/*
2401 * Retrieve various parameters that are of interest to the driver. The device
2402 * has been initialized by the firmware at this point.
2403 */
2404static int
2405get_params__post_init(struct adapter *sc)
2406{
2407 int rc;
2408 uint32_t param[7], val[7];
2409 struct fw_caps_config_cmd caps;
2410
2411 param[0] = FW_PARAM_PFVF(IQFLINT_START);
2412 param[1] = FW_PARAM_PFVF(EQ_START);
2413 param[2] = FW_PARAM_PFVF(FILTER_START);
2414 param[3] = FW_PARAM_PFVF(FILTER_END);
2415 param[4] = FW_PARAM_PFVF(L2T_START);
2416 param[5] = FW_PARAM_PFVF(L2T_END);
2417 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2418 if (rc != 0) {
2419 device_printf(sc->dev,
2420 "failed to query parameters (post_init): %d.\n", rc);
2421 return (rc);
2422 }
2423
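	/* The *_START/*_END values describe inclusive ranges, hence the +1. */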
2424 sc->sge.iq_start = val[0];
2425 sc->sge.eq_start = val[1];
2426 sc->tids.ftid_base = val[2];
2427 sc->tids.nftids = val[3] - val[2] + 1;
2428 sc->vres.l2t.start = val[4];
2429 sc->vres.l2t.size = val[5] - val[4] + 1;
2430 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2431 ("%s: L2 table size (%u) larger than expected (%u)",
2432 __func__, sc->vres.l2t.size, L2T_SIZE));
2433
2434	/* get capabilities */
2435 bzero(&caps, sizeof(caps));
2436 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2437 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2438 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2439 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2440 if (rc != 0) {
2441 device_printf(sc->dev,
2442 "failed to get card capabilities: %d.\n", rc);
2443 return (rc);
2444 }
2445
2446 if (caps.toecaps) {
2447 /* query offload-related parameters */
2448 param[0] = FW_PARAM_DEV(NTID);
2449 param[1] = FW_PARAM_PFVF(SERVER_START);
2450 param[2] = FW_PARAM_PFVF(SERVER_END);
2451 param[3] = FW_PARAM_PFVF(TDDP_START);
2452 param[4] = FW_PARAM_PFVF(TDDP_END);
2453 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2454 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2455 if (rc != 0) {
2456 device_printf(sc->dev,
2457 "failed to query TOE parameters: %d.\n", rc);
2458 return (rc);
2459 }
2460 sc->tids.ntids = val[0];
2461 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2462 sc->tids.stid_base = val[1];
2463 sc->tids.nstids = val[2] - val[1] + 1;
2464 sc->vres.ddp.start = val[3];
2465 sc->vres.ddp.size = val[4] - val[3] + 1;
2466 sc->params.ofldq_wr_cred = val[5];
2467 sc->params.offload = 1;
2468 }
2469 if (caps.rdmacaps) {
2470 param[0] = FW_PARAM_PFVF(STAG_START);
2471 param[1] = FW_PARAM_PFVF(STAG_END);
2472 param[2] = FW_PARAM_PFVF(RQ_START);
2473 param[3] = FW_PARAM_PFVF(RQ_END);
2474 param[4] = FW_PARAM_PFVF(PBL_START);
2475 param[5] = FW_PARAM_PFVF(PBL_END);
2476 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2477 if (rc != 0) {
2478 device_printf(sc->dev,
2479 "failed to query RDMA parameters(1): %d.\n", rc);
2480 return (rc);
2481 }
2482 sc->vres.stag.start = val[0];
2483 sc->vres.stag.size = val[1] - val[0] + 1;
2484 sc->vres.rq.start = val[2];
2485 sc->vres.rq.size = val[3] - val[2] + 1;
2486 sc->vres.pbl.start = val[4];
2487 sc->vres.pbl.size = val[5] - val[4] + 1;
2488
2489 param[0] = FW_PARAM_PFVF(SQRQ_START);
2490 param[1] = FW_PARAM_PFVF(SQRQ_END);
2491 param[2] = FW_PARAM_PFVF(CQ_START);
2492 param[3] = FW_PARAM_PFVF(CQ_END);
2493 param[4] = FW_PARAM_PFVF(OCQ_START);
2494 param[5] = FW_PARAM_PFVF(OCQ_END);
2495		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2496 if (rc != 0) {
2497 device_printf(sc->dev,
2498 "failed to query RDMA parameters(2): %d.\n", rc);
2499 return (rc);
2500 }
2501 sc->vres.qp.start = val[0];
2502 sc->vres.qp.size = val[1] - val[0] + 1;
2503 sc->vres.cq.start = val[2];
2504 sc->vres.cq.size = val[3] - val[2] + 1;
2505 sc->vres.ocq.start = val[4];
2506 sc->vres.ocq.size = val[5] - val[4] + 1;
2507 }
2508 if (caps.iscsicaps) {
2509 param[0] = FW_PARAM_PFVF(ISCSI_START);
2510 param[1] = FW_PARAM_PFVF(ISCSI_END);
2511 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2512 if (rc != 0) {
2513 device_printf(sc->dev,
2514 "failed to query iSCSI parameters: %d.\n", rc);
2515 return (rc);
2516 }
2517 sc->vres.iscsi.start = val[0];
2518 sc->vres.iscsi.size = val[1] - val[0] + 1;
2519 }
2520
2521 /*
2522 * We've got the params we wanted to query via the firmware. Now grab
2523 * some others directly from the chip.
2524 */
2525 rc = t4_read_chip_settings(sc);
2526
2527 return (rc);
2528}
2529
2530static int
2531set_params__post_init(struct adapter *sc)
2532{
2533 uint32_t param, val;
2534
2535 /* ask for encapsulated CPLs */
2536 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2537 val = 1;
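	/* Best effort; the (void) cast makes ignoring failure explicit. */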
2538 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2539
2540 return (0);
2541}
2542
2543#undef FW_PARAM_PFVF
2544#undef FW_PARAM_DEV
2545
2546static void
2547t4_set_desc(struct adapter *sc)
2548{
2549 char buf[128];
2550 struct adapter_params *p = &sc->params;
2551
2552 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2553 "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2554 chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2555
2556 device_set_desc_copy(sc->dev, buf);
2557}
2558
2559static void
2560build_medialist(struct port_info *pi)
2561{
2562 struct ifmedia *media = &pi->media;
2563 int data, m;
2564
2565 PORT_LOCK(pi);
2566
2567 ifmedia_removeall(media);
2568
2569 m = IFM_ETHER | IFM_FDX;
2570 data = (pi->port_type << 8) | pi->mod_type;
2571
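	/*
	 * Each entry's ifm_data is set to (port_type << 8) | mod_type so
	 * that cxgbe_media_status can detect port/module changes.
	 */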
2572 switch(pi->port_type) {
2573 case FW_PORT_TYPE_BT_XFI:
2574 ifmedia_add(media, m | IFM_10G_T, data, NULL);
2575 break;
2576
2577 case FW_PORT_TYPE_BT_XAUI:
2578 ifmedia_add(media, m | IFM_10G_T, data, NULL);
2579 /* fall through */
2580
2581 case FW_PORT_TYPE_BT_SGMII:
2582 ifmedia_add(media, m | IFM_1000_T, data, NULL);
2583 ifmedia_add(media, m | IFM_100_TX, data, NULL);
2584 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2585 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2586 break;
2587
2588 case FW_PORT_TYPE_CX4:
2589 ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2590 ifmedia_set(media, m | IFM_10G_CX4);
2591 break;
2592
2593 case FW_PORT_TYPE_SFP:
2594 case FW_PORT_TYPE_FIBER_XFI:
2595 case FW_PORT_TYPE_FIBER_XAUI:
2596 switch (pi->mod_type) {
2597
2598 case FW_PORT_MOD_TYPE_LR:
2599 ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2600 ifmedia_set(media, m | IFM_10G_LR);
2601 break;
2602
2603 case FW_PORT_MOD_TYPE_SR:
2604 ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2605 ifmedia_set(media, m | IFM_10G_SR);
2606 break;
2607
2608 case FW_PORT_MOD_TYPE_LRM:
2609 ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2610 ifmedia_set(media, m | IFM_10G_LRM);
2611 break;
2612
2613 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2614 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2615 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2616 ifmedia_set(media, m | IFM_10G_TWINAX);
2617 break;
2618
2619 case FW_PORT_MOD_TYPE_NONE:
2620 m &= ~IFM_FDX;
2621 ifmedia_add(media, m | IFM_NONE, data, NULL);
2622 ifmedia_set(media, m | IFM_NONE);
2623 break;
2624
2625 case FW_PORT_MOD_TYPE_NA:
2626 case FW_PORT_MOD_TYPE_ER:
2627 default:
2628 device_printf(pi->dev,
2629 "unknown port_type (%d), mod_type (%d)\n",
2630 pi->port_type, pi->mod_type);
2631 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2632 ifmedia_set(media, m | IFM_UNKNOWN);
2633 break;
2634 }
2635 break;
2636
2637 case FW_PORT_TYPE_QSFP:
2638 switch (pi->mod_type) {
2639
2640 case FW_PORT_MOD_TYPE_LR:
2641 ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2642 ifmedia_set(media, m | IFM_40G_LR4);
2643 break;
2644
2645 case FW_PORT_MOD_TYPE_SR:
2646 ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2647 ifmedia_set(media, m | IFM_40G_SR4);
2648 break;
2649
2650 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2651 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2652 ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2653 ifmedia_set(media, m | IFM_40G_CR4);
2654 break;
2655
2656 case FW_PORT_MOD_TYPE_NONE:
2657 m &= ~IFM_FDX;
2658 ifmedia_add(media, m | IFM_NONE, data, NULL);
2659 ifmedia_set(media, m | IFM_NONE);
2660 break;
2661
2662 default:
2663 device_printf(pi->dev,
2664 "unknown port_type (%d), mod_type (%d)\n",
2665 pi->port_type, pi->mod_type);
2666 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2667 ifmedia_set(media, m | IFM_UNKNOWN);
2668 break;
2669 }
2670 break;
2671
2672 default:
2673 device_printf(pi->dev,
2674 "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2675 pi->mod_type);
2676 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2677 ifmedia_set(media, m | IFM_UNKNOWN);
2678 break;
2679 }
2680
2681 PORT_UNLOCK(pi);
2682}
2683
2684#define FW_MAC_EXACT_CHUNK 7
2685
2686/*
2687 * Program the port's XGMAC based on parameters in ifnet. The caller also
2688 * indicates which parameters should be programmed (the rest are left alone).
2689 */
2690static int
2691update_mac_settings(struct port_info *pi, int flags)
2692{
2693 int rc;
2694 struct ifnet *ifp = pi->ifp;
2695 struct adapter *sc = pi->adapter;
2696 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2697
2698 ASSERT_SYNCHRONIZED_OP(sc);
2699 KASSERT(flags, ("%s: not told what to update.", __func__));
2700
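	/* A value of -1 leaves the corresponding setting unchanged. */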
2701 if (flags & XGMAC_MTU)
2702 mtu = ifp->if_mtu;
2703
2704 if (flags & XGMAC_PROMISC)
2705 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2706
2707 if (flags & XGMAC_ALLMULTI)
2708 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2709
2710 if (flags & XGMAC_VLANEX)
2711 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2712
2713 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2714 vlanex, false);
2715 if (rc) {
2716 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2717 return (rc);
2718 }
2719
2720 if (flags & XGMAC_UCADDR) {
2721 uint8_t ucaddr[ETHER_ADDR_LEN];
2722
2723 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2724 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2725 ucaddr, true, true);
2726 if (rc < 0) {
2727 rc = -rc;
2728 if_printf(ifp, "change_mac failed: %d\n", rc);
2729 return (rc);
2730 } else {
2731 pi->xact_addr_filt = rc;
2732 rc = 0;
2733 }
2734 }
2735
2736 if (flags & XGMAC_MCADDRS) {
2737 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2738 int del = 1;
2739 uint64_t hash = 0;
2740 struct ifmultiaddr *ifma;
2741 int i = 0, j;
2742
2743 if_maddr_rlock(ifp);
2744 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2745 if (ifma->ifma_addr->sa_family != AF_LINK)
2746 continue;
2747 mcaddr[i++] =
2748 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2749
2750 if (i == FW_MAC_EXACT_CHUNK) {
2751 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2752 del, i, mcaddr, NULL, &hash, 0);
2753 if (rc < 0) {
2754 rc = -rc;
2755 for (j = 0; j < i; j++) {
2756 if_printf(ifp,
2757 "failed to add mc address"
2758 " %02x:%02x:%02x:"
2759 "%02x:%02x:%02x rc=%d\n",
2760 mcaddr[j][0], mcaddr[j][1],
2761 mcaddr[j][2], mcaddr[j][3],
2762 mcaddr[j][4], mcaddr[j][5],
2763 rc);
2764 }
2765 goto mcfail;
2766 }
2767 del = 0;
2768 i = 0;
2769 }
2770 }
2771 if (i > 0) {
2772 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2773 del, i, mcaddr, NULL, &hash, 0);
2774 if (rc < 0) {
2775 rc = -rc;
2776 for (j = 0; j < i; j++) {
2777 if_printf(ifp,
2778 "failed to add mc address"
2779 " %02x:%02x:%02x:"
2780 "%02x:%02x:%02x rc=%d\n",
2781 mcaddr[j][0], mcaddr[j][1],
2782 mcaddr[j][2], mcaddr[j][3],
2783 mcaddr[j][4], mcaddr[j][5],
2784 rc);
2785 }
2786 goto mcfail;
2787 }
2788 }
2789
2790 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2791 if (rc != 0)
2792			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
2793mcfail:
2794 if_maddr_runlock(ifp);
2795 }
2796
2797 return (rc);
2798}
2799
2800int
2801begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2802 char *wmesg)
2803{
2804 int rc, pri;
2805
2806#ifdef WITNESS
2807 /* the caller thinks it's ok to sleep, but is it really? */
2808 if (flags & SLEEP_OK)
2809 pause("t4slptst", 1);
2810#endif
2811
2812	if (flags & INTR_OK)
2813 pri = PCATCH;
2814 else
2815 pri = 0;
2816
2817 ADAPTER_LOCK(sc);
2818 for (;;) {
2819
2820 if (pi && IS_DOOMED(pi)) {
2821 rc = ENXIO;
2822 goto done;
2823 }
2824
2825 if (!IS_BUSY(sc)) {
2826 rc = 0;
2827 break;
2828 }
2829
2830 if (!(flags & SLEEP_OK)) {
2831 rc = EBUSY;
2832 goto done;
2833 }
2834
2835 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2836 rc = EINTR;
2837 goto done;
2838 }
2839 }
2840
2841 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2842 SET_BUSY(sc);
2843#ifdef INVARIANTS
2844 sc->last_op = wmesg;
2845 sc->last_op_thr = curthread;
2846#endif
2847
2848done:
2849 if (!(flags & HOLD_LOCK) || rc)
2850 ADAPTER_UNLOCK(sc);
2851
2852 return (rc);
2853}
2854
2855void
2856end_synchronized_op(struct adapter *sc, int flags)
2857{
2858
2859 if (flags & LOCK_HELD)
2860 ADAPTER_LOCK_ASSERT_OWNED(sc);
2861 else
2862 ADAPTER_LOCK(sc);
2863
2864 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2865 CLR_BUSY(sc);
2866 wakeup(&sc->flags);
2867 ADAPTER_UNLOCK(sc);
2868}
2869
2870static int
2871cxgbe_init_synchronized(struct port_info *pi)
2872{
2873 struct adapter *sc = pi->adapter;
2874 struct ifnet *ifp = pi->ifp;
2875 int rc = 0;
2876
2877 ASSERT_SYNCHRONIZED_OP(sc);
2878
2879 if (isset(&sc->open_device_map, pi->port_id)) {
2880 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2881 ("mismatch between open_device_map and if_drv_flags"));
2882 return (0); /* already running */
2883 }
2884
2885 if (!(sc->flags & FULL_INIT_DONE) &&
2886 ((rc = adapter_full_init(sc)) != 0))
2887 return (rc); /* error message displayed already */
2888
2889 if (!(pi->flags & PORT_INIT_DONE) &&
2890 ((rc = port_full_init(pi)) != 0))
2891 return (rc); /* error message displayed already */
2892
2893 rc = update_mac_settings(pi, XGMAC_ALL);
2894 if (rc)
2895 goto done; /* error message displayed already */
2896
2897 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2898 if (rc != 0) {
2899 if_printf(ifp, "start_link failed: %d\n", rc);
2900 goto done;
2901 }
2902
2903 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2904 if (rc != 0) {
2905 if_printf(ifp, "enable_vi failed: %d\n", rc);
2906 goto done;
2907 }
2908
2909 /*
2910 * The first iq of the first port to come up is used for tracing.
2911 */
2912 if (sc->traceq < 0) {
2913 sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
2914 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
2915 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
2916 V_QUEUENUMBER(sc->traceq));
2917 pi->flags |= HAS_TRACEQ;
2918 }
2919
2920 /* all ok */
2921 setbit(&sc->open_device_map, pi->port_id);
2922 PORT_LOCK(pi);
2923 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2924 PORT_UNLOCK(pi);
2925
2926 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2927done:
2928 if (rc != 0)
2929 cxgbe_uninit_synchronized(pi);
2930
2931 return (rc);
2932}
2933
2934/*
2935 * Idempotent.
2936 */
2937static int
2938cxgbe_uninit_synchronized(struct port_info *pi)
2939{
2940 struct adapter *sc = pi->adapter;
2941 struct ifnet *ifp = pi->ifp;
2942 int rc;
2943
2944 ASSERT_SYNCHRONIZED_OP(sc);
2945
2946 /*
2947 * Disable the VI so that all its data in either direction is discarded
2948 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
2949 * tick) intact as the TP can deliver negative advice or data that it's
2950 * holding in its RAM (for an offloaded connection) even after the VI is
2951 * disabled.
2952 */
2953 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2954 if (rc) {
2955 if_printf(ifp, "disable_vi failed: %d\n", rc);
2956 return (rc);
2957 }
2958
2959 clrbit(&sc->open_device_map, pi->port_id);
2960 PORT_LOCK(pi);
2961 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2962 PORT_UNLOCK(pi);
2963
2964 pi->link_cfg.link_ok = 0;
2965 pi->link_cfg.speed = 0;
2966 pi->linkdnrc = -1;
2967 t4_os_link_changed(sc, pi->port_id, 0, -1);
2968
2969 return (0);
2970}
2971
2972/*
2973 * It is ok for this function to fail midway and return right away. t4_detach
2974 * will walk the entire sc->irq list and clean up whatever is valid.
2975 */
2976static int
2977setup_intr_handlers(struct adapter *sc)
2978{
2979 int rc, rid, p, q;
2980 char s[8];
2981 struct irq *irq;
2982 struct port_info *pi;
2983 struct sge_rxq *rxq;
2984#ifdef TCP_OFFLOAD
2985 struct sge_ofld_rxq *ofld_rxq;
2986#endif
2987
2988 /*
2989 * Setup interrupts.
2990 */
2991 irq = &sc->irq[0];
2992 rid = sc->intr_type == INTR_INTX ? 0 : 1;
2993 if (sc->intr_count == 1) {
2994 KASSERT(!(sc->flags & INTR_DIRECT),
2995 ("%s: single interrupt && INTR_DIRECT?", __func__));
2996
2997 rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
2998 if (rc != 0)
2999 return (rc);
3000 } else {
3001 /* Multiple interrupts. */
3002 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3003 ("%s: too few intr.", __func__));
3004
3005 /* The first one is always error intr */
3006 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3007 if (rc != 0)
3008 return (rc);
3009 irq++;
3010 rid++;
3011
3012 /* The second one is always the firmware event queue */
3013 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
3014 "evt");
3015 if (rc != 0)
3016 return (rc);
3017 irq++;
3018 rid++;
3019
3020 /*
3021 * Note that if INTR_DIRECT is not set then either the NIC rx
3022		 * queues or (exclusive or) the TOE rx queues will be taking
3023 * direct interrupts.
3024 *
3025 * There is no need to check for is_offload(sc) as nofldrxq
3026 * will be 0 if offload is disabled.
3027 */
3028 for_each_port(sc, p) {
3029 pi = sc->port[p];
3030
3031#ifdef TCP_OFFLOAD
3032 /*
3033 * Skip over the NIC queues if they aren't taking direct
3034 * interrupts.
3035 */
3036 if (!(sc->flags & INTR_DIRECT) &&
3037 pi->nofldrxq > pi->nrxq)
3038 goto ofld_queues;
3039#endif
3040 rxq = &sc->sge.rxq[pi->first_rxq];
3041 for (q = 0; q < pi->nrxq; q++, rxq++) {
3042 snprintf(s, sizeof(s), "%d.%d", p, q);
3043 rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3044 s);
3045 if (rc != 0)
3046 return (rc);
3047 irq++;
3048 rid++;
3049 }
3050
3051#ifdef TCP_OFFLOAD
3052 /*
3053 * Skip over the offload queues if they aren't taking
3054 * direct interrupts.
3055 */
3056 if (!(sc->flags & INTR_DIRECT))
3057 continue;
3058ofld_queues:
3059 ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3060 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3061 snprintf(s, sizeof(s), "%d,%d", p, q);
3062 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3063 ofld_rxq, s);
3064 if (rc != 0)
3065 return (rc);
3066 irq++;
3067 rid++;
3068 }
3069#endif
3070 }
3071 }
3072
3073 return (0);
3074}
3075
3076static int
3077adapter_full_init(struct adapter *sc)
3078{
3079 int rc, i;
3080
3081 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3082 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3083 ("%s: FULL_INIT_DONE already", __func__));
3084
3085 /*
3086	 * Set up queues that belong to the adapter (not to any particular port).
3087 */
3088 rc = t4_setup_adapter_queues(sc);
3089 if (rc != 0)
3090 goto done;
3091
3092 for (i = 0; i < nitems(sc->tq); i++) {
3093 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3094 taskqueue_thread_enqueue, &sc->tq[i]);
3095 if (sc->tq[i] == NULL) {
3096 device_printf(sc->dev,
3097 "failed to allocate task queue %d\n", i);
3098 rc = ENOMEM;
3099 goto done;
3100 }
3101 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3102 device_get_nameunit(sc->dev), i);
3103 }
3104
3105 t4_intr_enable(sc);
3106 sc->flags |= FULL_INIT_DONE;
3107done:
3108 if (rc != 0)
3109 adapter_full_uninit(sc);
3110
3111 return (rc);
3112}
3113
3114static int
3115adapter_full_uninit(struct adapter *sc)
3116{
3117 int i;
3118
3119 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3120
3121 t4_teardown_adapter_queues(sc);
3122
3123 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3124 taskqueue_free(sc->tq[i]);
3125 sc->tq[i] = NULL;
3126 }
3127
3128 sc->flags &= ~FULL_INIT_DONE;
3129
3130 return (0);
3131}
3132
3133static int
3134port_full_init(struct port_info *pi)
3135{
3136 struct adapter *sc = pi->adapter;
3137 struct ifnet *ifp = pi->ifp;
3138 uint16_t *rss;
3139 struct sge_rxq *rxq;
3140 int rc, i;
3141
3142 ASSERT_SYNCHRONIZED_OP(sc);
3143 KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3144 ("%s: PORT_INIT_DONE already", __func__));
3145
3146 sysctl_ctx_init(&pi->ctx);
3147 pi->flags |= PORT_SYSCTL_CTX;
3148
3149 /*
3150 * Allocate tx/rx/fl queues for this port.
3151 */
3152 rc = t4_setup_port_queues(pi);
3153 if (rc != 0)
3154 goto done; /* error message displayed already */
3155
3156 /*
3157 * Setup RSS for this port.
3158 */
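	/*
	 * Only nrxq entries are filled in here; t4_config_rss_range cycles
	 * through the array to populate all pi->rss_size slots.  With
	 * illustrative numbers, rss_size = 128 and nrxq = 4 would place
	 * each rx queue in every 4th slot.
	 */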
3159 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
3160 M_ZERO | M_WAITOK);
3161 for_each_rxq(pi, i, rxq) {
3162 rss[i] = rxq->iq.abs_id;
3163 }
3164 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
3165 pi->rss_size, rss, pi->nrxq);
3166 free(rss, M_CXGBE);
3167 if (rc != 0) {
3168 if_printf(ifp, "rss_config failed: %d\n", rc);
3169 goto done;
3170 }
3171
3172 pi->flags |= PORT_INIT_DONE;
3173done:
3174 if (rc != 0)
3175 port_full_uninit(pi);
3176
3177 return (rc);
3178}
3179
3180/*
3181 * Idempotent.
3182 */
3183static int
3184port_full_uninit(struct port_info *pi)
3185{
3186 struct adapter *sc = pi->adapter;
3187 int i;
3188 struct sge_rxq *rxq;
3189 struct sge_txq *txq;
3190#ifdef TCP_OFFLOAD
3191 struct sge_ofld_rxq *ofld_rxq;
3192 struct sge_wrq *ofld_txq;
3193#endif
3194
3195 if (pi->flags & PORT_INIT_DONE) {
3196
3197 /* Need to quiesce queues. XXX: ctrl queues? */
3198
3199 for_each_txq(pi, i, txq) {
3200 quiesce_eq(sc, &txq->eq);
3201 }
3202
3203#ifdef TCP_OFFLOAD
3204 for_each_ofld_txq(pi, i, ofld_txq) {
3205 quiesce_eq(sc, &ofld_txq->eq);
3206 }
3207#endif
3208
3209 for_each_rxq(pi, i, rxq) {
3210 quiesce_iq(sc, &rxq->iq);
3211 quiesce_fl(sc, &rxq->fl);
3212 }
3213
3214#ifdef TCP_OFFLOAD
3215 for_each_ofld_rxq(pi, i, ofld_rxq) {
3216 quiesce_iq(sc, &ofld_rxq->iq);
3217 quiesce_fl(sc, &ofld_rxq->fl);
3218 }
3219#endif
3220 }
3221
3222 t4_teardown_port_queues(pi);
3223 pi->flags &= ~PORT_INIT_DONE;
3224
3225 return (0);
3226}
3227
3228static void
3229quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3230{
3231 EQ_LOCK(eq);
3232 eq->flags |= EQ_DOOMED;
3233
3234 /*
3235 * Wait for the response to a credit flush if one's
3236 * pending.
3237 */
3238 while (eq->flags & EQ_CRFLUSHED)
3239 mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3240 EQ_UNLOCK(eq);
3241
3242 callout_drain(&eq->tx_callout); /* XXX: iffy */
3243 pause("callout", 10); /* Still iffy */
3244
3245 taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3246}
3247
3248static void
3249quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3250{
3251 (void) sc; /* unused */
3252
3253 /* Synchronize with the interrupt handler */
3254 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3255 pause("iqfree", 1);
3256}
3257
3258static void
3259quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3260{
3261 mtx_lock(&sc->sfl_lock);
3262 FL_LOCK(fl);
3263 fl->flags |= FL_DOOMED;
3264 FL_UNLOCK(fl);
3265 mtx_unlock(&sc->sfl_lock);
3266
3267 callout_drain(&sc->sfl_callout);
3268 KASSERT((fl->flags & FL_STARVING) == 0,
3269 ("%s: still starving", __func__));
3270}
3271
3272static int
3273t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3274 driver_intr_t *handler, void *arg, char *name)
3275{
3276 int rc;
3277
3278 irq->rid = rid;
3279 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3280 RF_SHAREABLE | RF_ACTIVE);
3281 if (irq->res == NULL) {
3282 device_printf(sc->dev,
3283 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3284 return (ENOMEM);
3285 }
3286
3287 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3288 NULL, handler, arg, &irq->tag);
3289 if (rc != 0) {
3290 device_printf(sc->dev,
3291 "failed to setup interrupt for rid %d, name %s: %d\n",
3292 rid, name, rc);
3293 } else if (name)
3294 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3295
3296 return (rc);
3297}
3298
3299static int
3300t4_free_irq(struct adapter *sc, struct irq *irq)
3301{
3302 if (irq->tag)
3303 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3304 if (irq->res)
3305 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3306
3307 bzero(irq, sizeof(*irq));
3308
3309 return (0);
3310}
3311
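/*
 * Copy the inclusive register range [start, end] into the dump buffer, 32
 * bits at a time, at the same offset that each register has in the chip's
 * address space.
 */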
3312static void
3313reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3314 unsigned int end)
3315{
3316 uint32_t *p = (uint32_t *)(buf + start);
3317
3318 for ( ; start <= end; start += sizeof(uint32_t))
3319 *p++ = t4_read_reg(sc, start);
3320}
3321
3322static void
3323t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3324{
3325 int i, n;
3326 const unsigned int *reg_ranges;
3327 static const unsigned int t4_reg_ranges[] = {
3328 0x1008, 0x1108,
3329 0x1180, 0x11b4,
3330 0x11fc, 0x123c,
3331 0x1300, 0x173c,
3332 0x1800, 0x18fc,
3333 0x3000, 0x30d8,
3334 0x30e0, 0x5924,
3335 0x5960, 0x59d4,
3336 0x5a00, 0x5af8,
3337 0x6000, 0x6098,
3338 0x6100, 0x6150,
3339 0x6200, 0x6208,
3340 0x6240, 0x6248,
3341 0x6280, 0x6338,
3342 0x6370, 0x638c,
3343 0x6400, 0x643c,
3344 0x6500, 0x6524,
3345 0x6a00, 0x6a38,
3346 0x6a60, 0x6a78,
3347 0x6b00, 0x6b84,
3348 0x6bf0, 0x6c84,
3349 0x6cf0, 0x6d84,
3350 0x6df0, 0x6e84,
3351 0x6ef0, 0x6f84,
3352 0x6ff0, 0x7084,
3353 0x70f0, 0x7184,
3354 0x71f0, 0x7284,
3355 0x72f0, 0x7384,
3356 0x73f0, 0x7450,
3357 0x7500, 0x7530,
3358 0x7600, 0x761c,
3359 0x7680, 0x76cc,
3360 0x7700, 0x7798,
3361 0x77c0, 0x77fc,
3362 0x7900, 0x79fc,
3363 0x7b00, 0x7c38,
3364 0x7d00, 0x7efc,
3365 0x8dc0, 0x8e1c,
3366 0x8e30, 0x8e78,
3367 0x8ea0, 0x8f6c,
3368 0x8fc0, 0x9074,
3369 0x90fc, 0x90fc,
3370 0x9400, 0x9458,
3371 0x9600, 0x96bc,
3372 0x9800, 0x9808,
3373 0x9820, 0x983c,
3374 0x9850, 0x9864,
3375 0x9c00, 0x9c6c,
3376 0x9c80, 0x9cec,
3377 0x9d00, 0x9d6c,
3378 0x9d80, 0x9dec,
3379 0x9e00, 0x9e6c,
3380 0x9e80, 0x9eec,
3381 0x9f00, 0x9f6c,
3382 0x9f80, 0x9fec,
3383 0xd004, 0xd03c,
3384 0xdfc0, 0xdfe0,
3385 0xe000, 0xea7c,
3386 0xf000, 0x11190,
3387 0x19040, 0x1906c,
3388 0x19078, 0x19080,
3389 0x1908c, 0x19124,
3390 0x19150, 0x191b0,
3391 0x191d0, 0x191e8,
3392 0x19238, 0x1924c,
3393 0x193f8, 0x19474,
3394 0x19490, 0x194f8,
3395 0x19800, 0x19f30,
3396 0x1a000, 0x1a06c,
3397 0x1a0b0, 0x1a120,
3398 0x1a128, 0x1a138,
3399 0x1a190, 0x1a1c4,
3400 0x1a1fc, 0x1a1fc,
3401 0x1e040, 0x1e04c,
3402 0x1e284, 0x1e28c,
3403 0x1e2c0, 0x1e2c0,
3404 0x1e2e0, 0x1e2e0,
3405 0x1e300, 0x1e384,
3406 0x1e3c0, 0x1e3c8,
3407 0x1e440, 0x1e44c,
3408 0x1e684, 0x1e68c,
3409 0x1e6c0, 0x1e6c0,
3410 0x1e6e0, 0x1e6e0,
3411 0x1e700, 0x1e784,
3412 0x1e7c0, 0x1e7c8,
3413 0x1e840, 0x1e84c,
3414 0x1ea84, 0x1ea8c,
3415 0x1eac0, 0x1eac0,
3416 0x1eae0, 0x1eae0,
3417 0x1eb00, 0x1eb84,
3418 0x1ebc0, 0x1ebc8,
3419 0x1ec40, 0x1ec4c,
3420 0x1ee84, 0x1ee8c,
3421 0x1eec0, 0x1eec0,
3422 0x1eee0, 0x1eee0,
3423 0x1ef00, 0x1ef84,
3424 0x1efc0, 0x1efc8,
3425 0x1f040, 0x1f04c,
3426 0x1f284, 0x1f28c,
3427 0x1f2c0, 0x1f2c0,
3428 0x1f2e0, 0x1f2e0,
3429 0x1f300, 0x1f384,
3430 0x1f3c0, 0x1f3c8,
3431 0x1f440, 0x1f44c,
3432 0x1f684, 0x1f68c,
3433 0x1f6c0, 0x1f6c0,
3434 0x1f6e0, 0x1f6e0,
3435 0x1f700, 0x1f784,
3436 0x1f7c0, 0x1f7c8,
3437 0x1f840, 0x1f84c,
3438 0x1fa84, 0x1fa8c,
3439 0x1fac0, 0x1fac0,
3440 0x1fae0, 0x1fae0,
3441 0x1fb00, 0x1fb84,
3442 0x1fbc0, 0x1fbc8,
3443 0x1fc40, 0x1fc4c,
3444 0x1fe84, 0x1fe8c,
3445 0x1fec0, 0x1fec0,
3446 0x1fee0, 0x1fee0,
3447 0x1ff00, 0x1ff84,
3448 0x1ffc0, 0x1ffc8,
3449 0x20000, 0x2002c,
3450 0x20100, 0x2013c,
3451 0x20190, 0x201c8,
3452 0x20200, 0x20318,
3453 0x20400, 0x20528,
3454 0x20540, 0x20614,
3455 0x21000, 0x21040,
3456 0x2104c, 0x21060,
3457 0x210c0, 0x210ec,
3458 0x21200, 0x21268,
3459 0x21270, 0x21284,
3460 0x212fc, 0x21388,
3461 0x21400, 0x21404,
3462 0x21500, 0x21518,
3463 0x2152c, 0x2153c,
3464 0x21550, 0x21554,
3465 0x21600, 0x21600,
3466 0x21608, 0x21628,
3467 0x21630, 0x2163c,
3468 0x21700, 0x2171c,
3469 0x21780, 0x2178c,
3470 0x21800, 0x21c38,
3471 0x21c80, 0x21d7c,
3472 0x21e00, 0x21e04,
3473 0x22000, 0x2202c,
3474 0x22100, 0x2213c,
3475 0x22190, 0x221c8,
3476 0x22200, 0x22318,
3477 0x22400, 0x22528,
3478 0x22540, 0x22614,
3479 0x23000, 0x23040,
3480 0x2304c, 0x23060,
3481 0x230c0, 0x230ec,
3482 0x23200, 0x23268,
3483 0x23270, 0x23284,
3484 0x232fc, 0x23388,
3485 0x23400, 0x23404,
3486 0x23500, 0x23518,
3487 0x2352c, 0x2353c,
3488 0x23550, 0x23554,
3489 0x23600, 0x23600,
3490 0x23608, 0x23628,
3491 0x23630, 0x2363c,
3492 0x23700, 0x2371c,
3493 0x23780, 0x2378c,
3494 0x23800, 0x23c38,
3495 0x23c80, 0x23d7c,
3496 0x23e00, 0x23e04,
3497 0x24000, 0x2402c,
3498 0x24100, 0x2413c,
3499 0x24190, 0x241c8,
3500 0x24200, 0x24318,
3501 0x24400, 0x24528,
3502 0x24540, 0x24614,
3503 0x25000, 0x25040,
3504 0x2504c, 0x25060,
3505 0x250c0, 0x250ec,
3506 0x25200, 0x25268,
3507 0x25270, 0x25284,
3508 0x252fc, 0x25388,
3509 0x25400, 0x25404,
3510 0x25500, 0x25518,
3511 0x2552c, 0x2553c,
3512 0x25550, 0x25554,
3513 0x25600, 0x25600,
3514 0x25608, 0x25628,
3515 0x25630, 0x2563c,
3516 0x25700, 0x2571c,
3517 0x25780, 0x2578c,
3518 0x25800, 0x25c38,
3519 0x25c80, 0x25d7c,
3520 0x25e00, 0x25e04,
3521 0x26000, 0x2602c,
3522 0x26100, 0x2613c,
3523 0x26190, 0x261c8,
3524 0x26200, 0x26318,
3525 0x26400, 0x26528,
3526 0x26540, 0x26614,
3527 0x27000, 0x27040,
3528 0x2704c, 0x27060,
3529 0x270c0, 0x270ec,
3530 0x27200, 0x27268,
3531 0x27270, 0x27284,
3532 0x272fc, 0x27388,
3533 0x27400, 0x27404,
3534 0x27500, 0x27518,
3535 0x2752c, 0x2753c,
3536 0x27550, 0x27554,
3537 0x27600, 0x27600,
3538 0x27608, 0x27628,
3539 0x27630, 0x2763c,
3540 0x27700, 0x2771c,
3541 0x27780, 0x2778c,
3542 0x27800, 0x27c38,
3543 0x27c80, 0x27d7c,
3544 0x27e00, 0x27e04
3545 };
3546 static const unsigned int t5_reg_ranges[] = {
3547 0x1008, 0x1148,
3548 0x1180, 0x11b4,
3549 0x11fc, 0x123c,
3550 0x1280, 0x173c,
3551 0x1800, 0x18fc,
3552 0x3000, 0x3028,
3553 0x3060, 0x30d8,
3554 0x30e0, 0x30fc,
3555 0x3140, 0x357c,
3556 0x35a8, 0x35cc,
3557 0x35ec, 0x35ec,
3558 0x3600, 0x5624,
3559 0x56cc, 0x575c,
3560 0x580c, 0x5814,
3561 0x5890, 0x58bc,
3562 0x5940, 0x59dc,
3563 0x59fc, 0x5a18,
3564 0x5a60, 0x5a9c,
3565 0x5b94, 0x5bfc,
3566 0x6000, 0x6040,
3567 0x6058, 0x614c,
3568 0x7700, 0x7798,
3569 0x77c0, 0x78fc,
3570 0x7b00, 0x7c54,
3571 0x7d00, 0x7efc,
3572 0x8dc0, 0x8de0,
3573 0x8df8, 0x8e84,
3574 0x8ea0, 0x8f84,
3575 0x8fc0, 0x90f8,
3576 0x9400, 0x9470,
3577 0x9600, 0x96f4,
3578 0x9800, 0x9808,
3579 0x9820, 0x983c,
3580 0x9850, 0x9864,
3581 0x9c00, 0x9c6c,
3582 0x9c80, 0x9cec,
3583 0x9d00, 0x9d6c,
3584 0x9d80, 0x9dec,
3585 0x9e00, 0x9e6c,
3586 0x9e80, 0x9eec,
3587 0x9f00, 0x9f6c,
3588 0x9f80, 0xa020,
3589 0xd004, 0xd03c,
3590 0xdfc0, 0xdfe0,
3591 0xe000, 0x11088,
3592 0x1109c, 0x1117c,
3593 0x11190, 0x11204,
3594 0x19040, 0x1906c,
3595 0x19078, 0x19080,
3596 0x1908c, 0x19124,
3597 0x19150, 0x191b0,
3598 0x191d0, 0x191e8,
3599 0x19238, 0x19290,
3600 0x193f8, 0x19474,
3601 0x19490, 0x194cc,
3602 0x194f0, 0x194f8,
3603 0x19c00, 0x19c60,
3604 0x19c94, 0x19e10,
3605 0x19e50, 0x19f34,
3606 0x19f40, 0x19f50,
3607 0x19f90, 0x19fe4,
3608 0x1a000, 0x1a06c,
3609 0x1a0b0, 0x1a120,
3610 0x1a128, 0x1a138,
3611 0x1a190, 0x1a1c4,
3612 0x1a1fc, 0x1a1fc,
3613 0x1e008, 0x1e00c,
3614 0x1e040, 0x1e04c,
3615 0x1e284, 0x1e290,
3616 0x1e2c0, 0x1e2c0,
3617 0x1e2e0, 0x1e2e0,
3618 0x1e300, 0x1e384,
3619 0x1e3c0, 0x1e3c8,
3620 0x1e408, 0x1e40c,
3621 0x1e440, 0x1e44c,
3622 0x1e684, 0x1e690,
3623 0x1e6c0, 0x1e6c0,
3624 0x1e6e0, 0x1e6e0,
3625 0x1e700, 0x1e784,
3626 0x1e7c0, 0x1e7c8,
3627 0x1e808, 0x1e80c,
3628 0x1e840, 0x1e84c,
3629 0x1ea84, 0x1ea90,
3630 0x1eac0, 0x1eac0,
3631 0x1eae0, 0x1eae0,
3632 0x1eb00, 0x1eb84,
3633 0x1ebc0, 0x1ebc8,
3634 0x1ec08, 0x1ec0c,
3635 0x1ec40, 0x1ec4c,
3636 0x1ee84, 0x1ee90,
3637 0x1eec0, 0x1eec0,
3638 0x1eee0, 0x1eee0,
3639 0x1ef00, 0x1ef84,
3640 0x1efc0, 0x1efc8,
3641 0x1f008, 0x1f00c,
3642 0x1f040, 0x1f04c,
3643 0x1f284, 0x1f290,
3644 0x1f2c0, 0x1f2c0,
3645 0x1f2e0, 0x1f2e0,
3646 0x1f300, 0x1f384,
3647 0x1f3c0, 0x1f3c8,
3648 0x1f408, 0x1f40c,
3649 0x1f440, 0x1f44c,
3650 0x1f684, 0x1f690,
3651 0x1f6c0, 0x1f6c0,
3652 0x1f6e0, 0x1f6e0,
3653 0x1f700, 0x1f784,
3654 0x1f7c0, 0x1f7c8,
3655 0x1f808, 0x1f80c,
3656 0x1f840, 0x1f84c,
3657 0x1fa84, 0x1fa90,
3658 0x1fac0, 0x1fac0,
3659 0x1fae0, 0x1fae0,
3660 0x1fb00, 0x1fb84,
3661 0x1fbc0, 0x1fbc8,
3662 0x1fc08, 0x1fc0c,
3663 0x1fc40, 0x1fc4c,
3664 0x1fe84, 0x1fe90,
3665 0x1fec0, 0x1fec0,
3666 0x1fee0, 0x1fee0,
3667 0x1ff00, 0x1ff84,
3668 0x1ffc0, 0x1ffc8,
3669 0x30000, 0x30030,
3670 0x30100, 0x30144,
3671 0x30190, 0x301d0,
3672 0x30200, 0x30318,
3673 0x30400, 0x3052c,
3674 0x30540, 0x3061c,
3675 0x30800, 0x30834,
3676 0x308c0, 0x30908,
3677 0x30910, 0x309ac,
3678 0x30a00, 0x30a2c,
3679 0x30a44, 0x30a50,
3680 0x30a74, 0x30c24,
3681 0x30d00, 0x30d00,
3682 0x30d08, 0x30d14,
3683 0x30d1c, 0x30d20,
3684 0x30d3c, 0x30d50,
3685 0x31200, 0x3120c,
3686 0x31220, 0x31220,
3687 0x31240, 0x31240,
3688 0x31600, 0x3160c,
3689 0x31a00, 0x31a1c,
3690 0x31e00, 0x31e20,
3691 0x31e38, 0x31e3c,
3692 0x31e80, 0x31e80,
3693 0x31e88, 0x31ea8,
3694 0x31eb0, 0x31eb4,
3695 0x31ec8, 0x31ed4,
3696 0x31fb8, 0x32004,
3697 0x32200, 0x32200,
3698 0x32208, 0x32240,
3699 0x32248, 0x32280,
3700 0x32288, 0x322c0,
3701 0x322c8, 0x322fc,
3702 0x32600, 0x32630,
3703 0x32a00, 0x32abc,
3704 0x32b00, 0x32b70,
3705 0x33000, 0x33048,
3706 0x33060, 0x3309c,
3707 0x330f0, 0x33148,
3708 0x33160, 0x3319c,
3709 0x331f0, 0x332e4,
3710 0x332f8, 0x333e4,
3711 0x333f8, 0x33448,
3712 0x33460, 0x3349c,
3713 0x334f0, 0x33548,
3714 0x33560, 0x3359c,
3715 0x335f0, 0x336e4,
3716 0x336f8, 0x337e4,
3717 0x337f8, 0x337fc,
3718 0x33814, 0x33814,
3719 0x3382c, 0x3382c,
3720 0x33880, 0x3388c,
3721 0x338e8, 0x338ec,
3722 0x33900, 0x33948,
3723 0x33960, 0x3399c,
3724 0x339f0, 0x33ae4,
3725 0x33af8, 0x33b10,
3726 0x33b28, 0x33b28,
3727 0x33b3c, 0x33b50,
3728 0x33bf0, 0x33c10,
3729 0x33c28, 0x33c28,
3730 0x33c3c, 0x33c50,
3731 0x33cf0, 0x33cfc,
3732 0x34000, 0x34030,
3733 0x34100, 0x34144,
3734 0x34190, 0x341d0,
3735 0x34200, 0x34318,
3736 0x34400, 0x3452c,
3737 0x34540, 0x3461c,
3738 0x34800, 0x34834,
3739 0x348c0, 0x34908,
3740 0x34910, 0x349ac,
3741 0x34a00, 0x34a2c,
3742 0x34a44, 0x34a50,
3743 0x34a74, 0x34c24,
3744 0x34d00, 0x34d00,
3745 0x34d08, 0x34d14,
3746 0x34d1c, 0x34d20,
3747 0x34d3c, 0x34d50,
3748 0x35200, 0x3520c,
3749 0x35220, 0x35220,
3750 0x35240, 0x35240,
3751 0x35600, 0x3560c,
3752 0x35a00, 0x35a1c,
3753 0x35e00, 0x35e20,
3754 0x35e38, 0x35e3c,
3755 0x35e80, 0x35e80,
3756 0x35e88, 0x35ea8,
3757 0x35eb0, 0x35eb4,
3758 0x35ec8, 0x35ed4,
3759 0x35fb8, 0x36004,
3760 0x36200, 0x36200,
3761 0x36208, 0x36240,
3762 0x36248, 0x36280,
3763 0x36288, 0x362c0,
3764 0x362c8, 0x362fc,
3765 0x36600, 0x36630,
3766 0x36a00, 0x36abc,
3767 0x36b00, 0x36b70,
3768 0x37000, 0x37048,
3769 0x37060, 0x3709c,
3770 0x370f0, 0x37148,
3771 0x37160, 0x3719c,
3772 0x371f0, 0x372e4,
3773 0x372f8, 0x373e4,
3774 0x373f8, 0x37448,
3775 0x37460, 0x3749c,
3776 0x374f0, 0x37548,
3777 0x37560, 0x3759c,
3778 0x375f0, 0x376e4,
3779 0x376f8, 0x377e4,
3780 0x377f8, 0x377fc,
3781 0x37814, 0x37814,
3782 0x3782c, 0x3782c,
3783 0x37880, 0x3788c,
3784 0x378e8, 0x378ec,
3785 0x37900, 0x37948,
3786 0x37960, 0x3799c,
3787 0x379f0, 0x37ae4,
3788 0x37af8, 0x37b10,
3789 0x37b28, 0x37b28,
3790 0x37b3c, 0x37b50,
3791 0x37bf0, 0x37c10,
3792 0x37c28, 0x37c28,
3793 0x37c3c, 0x37c50,
3794 0x37cf0, 0x37cfc,
3795 0x38000, 0x38030,
3796 0x38100, 0x38144,
3797 0x38190, 0x381d0,
3798 0x38200, 0x38318,
3799 0x38400, 0x3852c,
3800 0x38540, 0x3861c,
3801 0x38800, 0x38834,
3802 0x388c0, 0x38908,
3803 0x38910, 0x389ac,
3804 0x38a00, 0x38a2c,
3805 0x38a44, 0x38a50,
3806 0x38a74, 0x38c24,
3807 0x38d00, 0x38d00,
3808 0x38d08, 0x38d14,
3809 0x38d1c, 0x38d20,
3810 0x38d3c, 0x38d50,
3811 0x39200, 0x3920c,
3812 0x39220, 0x39220,
3813 0x39240, 0x39240,
3814 0x39600, 0x3960c,
3815 0x39a00, 0x39a1c,
3816 0x39e00, 0x39e20,
3817 0x39e38, 0x39e3c,
3818 0x39e80, 0x39e80,
3819 0x39e88, 0x39ea8,
3820 0x39eb0, 0x39eb4,
3821 0x39ec8, 0x39ed4,
3822 0x39fb8, 0x3a004,
3823 0x3a200, 0x3a200,
3824 0x3a208, 0x3a240,
3825 0x3a248, 0x3a280,
3826 0x3a288, 0x3a2c0,
3827 0x3a2c8, 0x3a2fc,
3828 0x3a600, 0x3a630,
3829 0x3aa00, 0x3aabc,
3830 0x3ab00, 0x3ab70,
3831 0x3b000, 0x3b048,
3832 0x3b060, 0x3b09c,
3833 0x3b0f0, 0x3b148,
3834 0x3b160, 0x3b19c,
3835 0x3b1f0, 0x3b2e4,
3836 0x3b2f8, 0x3b3e4,
3837 0x3b3f8, 0x3b448,
3838 0x3b460, 0x3b49c,
3839 0x3b4f0, 0x3b548,
3840 0x3b560, 0x3b59c,
3841 0x3b5f0, 0x3b6e4,
3842 0x3b6f8, 0x3b7e4,
3843 0x3b7f8, 0x3b7fc,
3844 0x3b814, 0x3b814,
3845 0x3b82c, 0x3b82c,
3846 0x3b880, 0x3b88c,
3847 0x3b8e8, 0x3b8ec,
3848 0x3b900, 0x3b948,
3849 0x3b960, 0x3b99c,
3850 0x3b9f0, 0x3bae4,
3851 0x3baf8, 0x3bb10,
3852 0x3bb28, 0x3bb28,
3853 0x3bb3c, 0x3bb50,
3854 0x3bbf0, 0x3bc10,
3855 0x3bc28, 0x3bc28,
3856 0x3bc3c, 0x3bc50,
3857 0x3bcf0, 0x3bcfc,
3858 0x3c000, 0x3c030,
3859 0x3c100, 0x3c144,
3860 0x3c190, 0x3c1d0,
3861 0x3c200, 0x3c318,
3862 0x3c400, 0x3c52c,
3863 0x3c540, 0x3c61c,
3864 0x3c800, 0x3c834,
3865 0x3c8c0, 0x3c908,
3866 0x3c910, 0x3c9ac,
3867 0x3ca00, 0x3ca2c,
3868 0x3ca44, 0x3ca50,
3869 0x3ca74, 0x3cc24,
3870 0x3cd00, 0x3cd00,
3871 0x3cd08, 0x3cd14,
3872 0x3cd1c, 0x3cd20,
3873 0x3cd3c, 0x3cd50,
3874 0x3d200, 0x3d20c,
3875 0x3d220, 0x3d220,
3876 0x3d240, 0x3d240,
3877 0x3d600, 0x3d60c,
3878 0x3da00, 0x3da1c,
3879 0x3de00, 0x3de20,
3880 0x3de38, 0x3de3c,
3881 0x3de80, 0x3de80,
3882 0x3de88, 0x3dea8,
3883 0x3deb0, 0x3deb4,
3884 0x3dec8, 0x3ded4,
3885 0x3dfb8, 0x3e004,
3886 0x3e200, 0x3e200,
3887 0x3e208, 0x3e240,
3888 0x3e248, 0x3e280,
3889 0x3e288, 0x3e2c0,
3890 0x3e2c8, 0x3e2fc,
3891 0x3e600, 0x3e630,
3892 0x3ea00, 0x3eabc,
3893 0x3eb00, 0x3eb70,
3894 0x3f000, 0x3f048,
3895 0x3f060, 0x3f09c,
3896 0x3f0f0, 0x3f148,
3897 0x3f160, 0x3f19c,
3898 0x3f1f0, 0x3f2e4,
3899 0x3f2f8, 0x3f3e4,
3900 0x3f3f8, 0x3f448,
3901 0x3f460, 0x3f49c,
3902 0x3f4f0, 0x3f548,
3903 0x3f560, 0x3f59c,
3904 0x3f5f0, 0x3f6e4,
3905 0x3f6f8, 0x3f7e4,
3906 0x3f7f8, 0x3f7fc,
3907 0x3f814, 0x3f814,
3908 0x3f82c, 0x3f82c,
3909 0x3f880, 0x3f88c,
3910 0x3f8e8, 0x3f8ec,
3911 0x3f900, 0x3f948,
3912 0x3f960, 0x3f99c,
3913 0x3f9f0, 0x3fae4,
3914 0x3faf8, 0x3fb10,
3915 0x3fb28, 0x3fb28,
3916 0x3fb3c, 0x3fb50,
3917 0x3fbf0, 0x3fc10,
3918 0x3fc28, 0x3fc28,
3919 0x3fc3c, 0x3fc50,
3920 0x3fcf0, 0x3fcfc,
3921 0x40000, 0x4000c,
3922 0x40040, 0x40068,
3923 0x4007c, 0x40144,
3924 0x40180, 0x4018c,
3925 0x40200, 0x40298,
3926 0x402ac, 0x4033c,
3927 0x403f8, 0x403fc,
3928 0x41304, 0x413c4,
3929 0x41400, 0x4141c,
3930 0x41480, 0x414d0,
3931 0x44000, 0x44078,
3932 0x440c0, 0x44278,
3933 0x442c0, 0x44478,
3934 0x444c0, 0x44678,
3935 0x446c0, 0x44878,
3936 0x448c0, 0x449fc,
3937 0x45000, 0x45068,
3938 0x45080, 0x45084,
3939 0x450a0, 0x450b0,
3940 0x45200, 0x45268,
3941 0x45280, 0x45284,
3942 0x452a0, 0x452b0,
3943 0x460c0, 0x460e4,
3944 0x47000, 0x4708c,
3945 0x47200, 0x47250,
3946 0x47400, 0x47420,
3947 0x47600, 0x47618,
3948 0x47800, 0x47814,
3949 0x48000, 0x4800c,
3950 0x48040, 0x48068,
3951 0x4807c, 0x48144,
3952 0x48180, 0x4818c,
3953 0x48200, 0x48298,
3954 0x482ac, 0x4833c,
3955 0x483f8, 0x483fc,
3956 0x49304, 0x493c4,
3957 0x49400, 0x4941c,
3958 0x49480, 0x494d0,
3959 0x4c000, 0x4c078,
3960 0x4c0c0, 0x4c278,
3961 0x4c2c0, 0x4c478,
3962 0x4c4c0, 0x4c678,
3963 0x4c6c0, 0x4c878,
3964 0x4c8c0, 0x4c9fc,
3965 0x4d000, 0x4d068,
3966 0x4d080, 0x4d084,
3967 0x4d0a0, 0x4d0b0,
3968 0x4d200, 0x4d268,
3969 0x4d280, 0x4d284,
3970 0x4d2a0, 0x4d2b0,
3971 0x4e0c0, 0x4e0e4,
3972 0x4f000, 0x4f08c,
3973 0x4f200, 0x4f250,
3974 0x4f400, 0x4f420,
3975 0x4f600, 0x4f618,
3976 0x4f800, 0x4f814,
3977 0x50000, 0x500cc,
3978 0x50400, 0x50400,
3979 0x50800, 0x508cc,
3980 0x50c00, 0x50c00,
3981 0x51000, 0x5101c,
3982 0x51300, 0x51308,
3983 };
3984
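	/*
	 * Each pair of values in the tables above is an inclusive
	 * [start, end] register range; the loop at the end dumps one range
	 * per reg_block_dump call.
	 */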
3985 if (is_t4(sc)) {
3986 reg_ranges = &t4_reg_ranges[0];
3987 n = nitems(t4_reg_ranges);
3988 } else {
3989 reg_ranges = &t5_reg_ranges[0];
3990 n = nitems(t5_reg_ranges);
3991 }
3992
3993 regs->version = chip_id(sc) | chip_rev(sc) << 10;
3994 for (i = 0; i < n; i += 2)
3995 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
3996}
3997
3998static void
3999cxgbe_tick(void *arg)
4000{
4001 struct port_info *pi = arg;
4002 struct ifnet *ifp = pi->ifp;
4003 struct sge_txq *txq;
4004 int i, drops;
4005 struct port_stats *s = &pi->stats;
4006
4007 PORT_LOCK(pi);
4008 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4009 PORT_UNLOCK(pi);
4010 return; /* without scheduling another callout */
4011 }
4012
4013 t4_get_port_stats(pi->adapter, pi->tx_chan, s);
4014
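	/*
	 * 802.3x pause frames are counted by the MPS but aren't data
	 * traffic, so they are subtracted back out.  Pause frames are
	 * minimum-sized (64 octets), which is presumably where the * 64
	 * comes from, and they go to a multicast MAC address, hence the
	 * adjustment to the mcast counters as well.
	 */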
4015 ifp->if_opackets = s->tx_frames - s->tx_pause;
4016 ifp->if_ipackets = s->rx_frames - s->rx_pause;
4017 ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
4018 ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
4019 ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
4020 ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
4021 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
4022 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
4023 s->rx_trunc3;
4024
4025 drops = s->tx_drop;
4026 for_each_txq(pi, i, txq)
4027 drops += txq->br->br_drops;
4028 ifp->if_snd.ifq_drops = drops;
4029
4030 ifp->if_oerrors = s->tx_error_frames;
4031 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4032 s->rx_fcs_err + s->rx_len_err;
4033
4034 callout_schedule(&pi->tick, hz);
4035 PORT_UNLOCK(pi);
4036}
4037
4038static void
4039cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4040{
4041 struct ifnet *vlan;
4042
4043 if (arg != ifp || ifp->if_type != IFT_ETHER)
4044 return;
4045
4046 vlan = VLAN_DEVAT(ifp, vid);
4047 VLAN_SETCOOKIE(vlan, ifp);
4048}
4049
4050static int
4051cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4052{
4053
4054#ifdef INVARIANTS
4055 panic("%s: opcode 0x%02x on iq %p with payload %p",
4056 __func__, rss->opcode, iq, m);
4057#else
4058 log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4059 __func__, rss->opcode, iq, m);
4060 m_freem(m);
4061#endif
4062 return (EDOOFUS);
4063}
4064
4065int
4066t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4067{
4068 uintptr_t *loc, new;
4069
4070 if (opcode >= nitems(sc->cpl_handler))
4071 return (EINVAL);
4072
4073 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4074 loc = (uintptr_t *) &sc->cpl_handler[opcode];
4075 atomic_store_rel_ptr(loc, new);
4076
4077 return (0);
4078}
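/*
 * Sketch of intended use (hypothetical opcode and handler names): a TOE
 * module would install its CPL handlers at load time, e.g.
 *	t4_register_cpl_handler(sc, CPL_ACT_OPEN_RPL, do_act_open_rpl);
 * and pass h = NULL on unload to restore cpl_not_handled as the handler.
 */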
4079
4080static int
4081an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4082{
4083
4084#ifdef INVARIANTS
4085 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4086#else
4087 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4088 __func__, iq, ctrl);
4089#endif
4090 return (EDOOFUS);
4091}
4092
4093int
4094t4_register_an_handler(struct adapter *sc, an_handler_t h)
4095{
4096 uintptr_t *loc, new;
4097
4098 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4099 loc = (uintptr_t *) &sc->an_handler;
4100 atomic_store_rel_ptr(loc, new);
4101
4102 return (0);
4103}
4104
4105static int
4106fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4107{
4108 const struct cpl_fw6_msg *cpl =
4109 __containerof(rpl, struct cpl_fw6_msg, data[0]);
4110
4111#ifdef INVARIANTS
4112 panic("%s: fw_msg type %d", __func__, cpl->type);
4113#else
4114 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4115#endif
4116 return (EDOOFUS);
4117}
4118
4119int
4120t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4121{
4122 uintptr_t *loc, new;
4123
4124 if (type >= nitems(sc->fw_msg_handler))
4125 return (EINVAL);
4126
4127 /*
4128 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4129 * handler dispatch table. Reject any attempt to install a handler for
4130 * this subtype.
4131 */
4132 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4133 return (EINVAL);
4134
4135 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4136 loc = (uintptr_t *) &sc->fw_msg_handler[type];
4137 atomic_store_rel_ptr(loc, new);
4138
4139 return (0);
4140}
4141
4142static int
4143t4_sysctls(struct adapter *sc)
4144{
4145 struct sysctl_ctx_list *ctx;
4146 struct sysctl_oid *oid;
4147 struct sysctl_oid_list *children, *c0;
4148 static char *caps[] = {
4149 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */
4150 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL", /* caps[1] niccaps */
4151 "\20\1TOE", /* caps[2] toecaps */
4152 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */
4153 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */
4154 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4155 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4156 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */
4157 };
4158 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
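	/*
	 * These strings feed sysctl_bitfield's "%b" conversion: the leading
	 * \20 (decimal 16) selects hex output and each bit-number/name pair
	 * labels one bit.  E.g. doorbells = 0x5 would render as
	 * "0x5<UDB,UDBWC>".
	 */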
4159
4160 ctx = device_get_sysctl_ctx(sc->dev);
4161
4162 /*
4163 * dev.t4nex.X.
4164 */
4165 oid = device_get_sysctl_tree(sc->dev);
4166 c0 = children = SYSCTL_CHILDREN(oid);
4167
4168 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4169 sc->params.nports, "# of ports");
4170
4171 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4172 NULL, chip_rev(sc), "chip hardware revision");
4173
4174 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4175 CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4176
4177 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4178 CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4179
4180 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4181 sc->cfcsum, "config file checksum");
4182
4183 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4184 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4185 sysctl_bitfield, "A", "available doorbells");
4186
4187 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4188 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4189 sysctl_bitfield, "A", "available link capabilities");
4190
4191 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4192 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4193 sysctl_bitfield, "A", "available NIC capabilities");
4194
4195 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4196 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4197 sysctl_bitfield, "A", "available TCP offload capabilities");
4198
4199 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4200 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4201 sysctl_bitfield, "A", "available RDMA capabilities");
4202
4203 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4204 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4205 sysctl_bitfield, "A", "available iSCSI capabilities");
4206
4207 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4208 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4209 sysctl_bitfield, "A", "available FCoE capabilities");
4210
4211 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4212	    sc->params.vpd.cclk, "core clock frequency (in kHz)");
4213
4214 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4215 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4216 sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4217 "interrupt holdoff timer values (us)");
4218
4219 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4220 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4221 sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4222 "interrupt holdoff packet counter values");
4223
4224 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4225 NULL, sc->tids.nftids, "number of filters");
4226
4227 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4228 CTLFLAG_RD, sc, 0, sysctl_temperature, "A",
4229 "chip temperature (in Celsius)");
4230
4231 t4_sge_sysctls(sc, ctx, children);
4232
4233#ifdef SBUF_DRAIN
4234 /*
4235 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
4236 */
4237 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4238 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4239 "logs and miscellaneous information");
4240 children = SYSCTL_CHILDREN(oid);
4241
4242 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4243 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4244 sysctl_cctrl, "A", "congestion control");
4245
4246 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4247 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4248 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4249
4250 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4251 CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4252 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4253
4254 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4255 CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4256 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4257
4258 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4259 CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4260 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4261
4262 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4263 CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4264 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4265
4266 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4267 CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4268 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4269
4270 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4271 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4272 sysctl_cim_la, "A", "CIM logic analyzer");
4273
4274 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4275 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4276 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4277
4278 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4279 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4280 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4281
4282 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4283 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4284 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4285
4286 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4287 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4288 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4289
4290 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4291 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4292 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4293
4294 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4295 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4296 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4297
4298 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4299 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4300 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4301
4302 if (is_t5(sc)) {
4303 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4304 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4305 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4306
4307 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4308 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4309 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4310 }
4311
4312 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4313 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4314 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4315
4316 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4317 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4318 sysctl_cim_qcfg, "A", "CIM queue configuration");
4319
4320 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4321 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4322 sysctl_cpl_stats, "A", "CPL statistics");
4323
4324 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4325 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4326 sysctl_ddp_stats, "A", "DDP statistics");
4327
4328 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4329 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4330 sysctl_devlog, "A", "firmware's device log");
4331
4332 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4333 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4334 sysctl_fcoe_stats, "A", "FCoE statistics");
4335
4336 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4337 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4338	    sysctl_hw_sched, "A", "hardware scheduler");
4339
4340 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4341 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4342 sysctl_l2t, "A", "hardware L2 table");
4343
4344 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4345 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4346 sysctl_lb_stats, "A", "loopback statistics");
4347
4348 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4349 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4350 sysctl_meminfo, "A", "memory regions");
4351
4352 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4353 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4354 sysctl_mps_tcam, "A", "MPS TCAM entries");
4355
4356 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4357 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4358 sysctl_path_mtus, "A", "path MTUs");
4359
4360 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4361 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4362 sysctl_pm_stats, "A", "PM statistics");
4363
4364 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4365 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4366 sysctl_rdma_stats, "A", "RDMA statistics");
4367
4368 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4369 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4370 sysctl_tcp_stats, "A", "TCP statistics");
4371
4372 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4373 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4374 sysctl_tids, "A", "TID information");
4375
4376 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4377 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4378 sysctl_tp_err_stats, "A", "TP error statistics");
4379
4380 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4381 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4382 sysctl_tp_la, "A", "TP logic analyzer");
4383
4384 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4385 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4386 sysctl_tx_rate, "A", "Tx rate");
4387
4388 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4389 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4390 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4391
4392 if (is_t5(sc)) {
4393 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4394 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4395 sysctl_wcwr_stats, "A", "write combined work requests");
4396 }
4397#endif
4398
4399#ifdef TCP_OFFLOAD
4400 if (is_offload(sc)) {
4401 /*
4402 * dev.t4nex.X.toe.
4403 */
4404 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4405 NULL, "TOE parameters");
4406 children = SYSCTL_CHILDREN(oid);
4407
4408 sc->tt.sndbuf = 256 * 1024;
4409 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4410 &sc->tt.sndbuf, 0, "max hardware send buffer size");
4411
4412 sc->tt.ddp = 0;
4413 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4414 &sc->tt.ddp, 0, "DDP allowed");
4415
4416 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4417 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4418 &sc->tt.indsz, 0, "DDP max indicate size allowed");
4419
4420 sc->tt.ddp_thres =
4421 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4422 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4423 &sc->tt.ddp_thres, 0, "DDP threshold");
4424
4425 sc->tt.rx_coalesce = 1;
4426 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4427 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4428 }
4429#endif
4430
4432 return (0);
4433}
4434
4435static int
4436cxgbe_sysctls(struct port_info *pi)
4437{
4438 struct sysctl_ctx_list *ctx;
4439 struct sysctl_oid *oid;
4440 struct sysctl_oid_list *children;
4441
4442 ctx = device_get_sysctl_ctx(pi->dev);
4443
4444 /*
4445 * dev.cxgbe.X.
4446 */
4447 oid = device_get_sysctl_tree(pi->dev);
4448 children = SYSCTL_CHILDREN(oid);
4449
4450 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4451 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4452 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4453 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4454 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4455 "PHY temperature (in Celsius)");
4456 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4457 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4458 "PHY firmware version");
4459 }
4460 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4461 &pi->nrxq, 0, "# of rx queues");
4462 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4463 &pi->ntxq, 0, "# of tx queues");
4464 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4465 &pi->first_rxq, 0, "index of first rx queue");
4466 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4467 &pi->first_txq, 0, "index of first tx queue");
4468
4469#ifdef TCP_OFFLOAD
4470 if (is_offload(pi->adapter)) {
4471 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4472 &pi->nofldrxq, 0,
4473 "# of rx queues for offloaded TCP connections");
4474 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4475 &pi->nofldtxq, 0,
4476 "# of tx queues for offloaded TCP connections");
4477 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4478 CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4479 "index of first TOE rx queue");
4480 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4481 CTLFLAG_RD, &pi->first_ofld_txq, 0,
4482 "index of first TOE tx queue");
4483 }
4484#endif
4485
4486 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4487 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4488 "holdoff timer index");
4489 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4490 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4491 "holdoff packet counter index");
4492
4493 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4494 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4495 "rx queue size");
4496 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4497 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4498 "tx queue size");
4499
4500 /*
4501 * dev.cxgbe.X.stats.
4502 */
4503 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4504 NULL, "port statistics");
4505 children = SYSCTL_CHILDREN(oid);
4506
4507#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4508 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4509 CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
4510 sysctl_handle_t4_reg64, "QU", desc)
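	/*
	 * Each stat below reads its 64-bit MPS counter straight from the
	 * hardware on every sysctl access (e.g. dev.cxgbe.0.stats.tx_octets),
	 * so unlike the port_stats-derived values at the end these are
	 * never stale.
	 */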
4511
4512 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4513 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4514 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4515 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4516 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4517 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4518 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4519 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4520 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4521 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4522 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4523 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4524 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4525 "# of tx frames in this range",
4526 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4527 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4528 "# of tx frames in this range",
4529 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4530 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4531 "# of tx frames in this range",
4532 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4533 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4534 "# of tx frames in this range",
4535 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4536 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4537 "# of tx frames in this range",
4538 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4539 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4540 "# of tx frames in this range",
4541 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4542 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4543 "# of tx frames in this range",
4544 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4545 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4546 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4547 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4548 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4549 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4550 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4551 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4552 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4553 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4554 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4555 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4556 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4557 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4558 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4559 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4560 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4561 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4562 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4563 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4564 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4565
4566 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4567 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4568 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4569 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4570 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4571 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4572 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4573 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4574 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4575 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4576 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4577 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4578 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4579 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4580 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4581 "# of frames received with bad FCS",
4582 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4583 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4584 "# of frames received with length error",
4585 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4586 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4587 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4588 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4589 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4590 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4591 "# of rx frames in this range",
4592 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4593 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4594 "# of rx frames in this range",
4595 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4596 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4597 "# of rx frames in this range",
4598 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4599 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4600 "# of rx frames in this range",
4601 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4602 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4603 "# of rx frames in this range",
4604 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4605 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4606 "# of rx frames in this range",
4607 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4608 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4609 "# of rx frames in this range",
4610 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4611 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4612 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4613 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4614 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4615 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4616 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4617 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4618 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4619 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4620 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4621 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4622 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4623 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4624 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4625 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4626 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4627 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4628 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4629
4630#undef SYSCTL_ADD_T4_REG64
4631
4632#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4633 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4634 &pi->stats.name, desc)
4635
4636	/* We get these from port_stats and they may be stale by up to 1s */
4637 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4638 "# drops due to buffer-group 0 overflows");
4639 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4640 "# drops due to buffer-group 1 overflows");
4641 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4642 "# drops due to buffer-group 2 overflows");
4643 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4644 "# drops due to buffer-group 3 overflows");
4645 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4646 "# of buffer-group 0 truncated packets");
4647 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4648 "# of buffer-group 1 truncated packets");
4649 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4650 "# of buffer-group 2 truncated packets");
4651 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4652 "# of buffer-group 3 truncated packets");
4653
4654#undef SYSCTL_ADD_T4_PORTSTAT
4655
4656 return (0);
4657}
4658
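/*
 * Render the array of ints at arg1 (arg2 bytes long) as a space-separated
 * string; used for the holdoff timer and packet counter tables above.
 */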
4659static int
4660sysctl_int_array(SYSCTL_HANDLER_ARGS)
4661{
4662 int rc, *i;
4663 struct sbuf sb;
4664
4665 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4666 for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4667 sbuf_printf(&sb, "%d ", *i);
4668 sbuf_trim(&sb);
4669 sbuf_finish(&sb);
4670 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4671 sbuf_delete(&sb);
4672 return (rc);
4673}
4674
4675static int
4676sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4677{
4678 int rc;
4679 struct sbuf *sb;
4680
4681 rc = sysctl_wire_old_buffer(req, 0);
4682 if (rc != 0)
4683		return (rc);
4684
4685 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4686 if (sb == NULL)
4687 return (ENOMEM);
4688
4689 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4690 rc = sbuf_finish(sb);
4691 sbuf_delete(sb);
4692
4693 return (rc);
4694}
4695
4696static int
4697sysctl_btphy(SYSCTL_HANDLER_ARGS)
4698{
4699 struct port_info *pi = arg1;
4700 int op = arg2;
4701 struct adapter *sc = pi->adapter;
4702 u_int v;
4703 int rc;
4704
4705 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
4706 if (rc)
4707 return (rc);
4708 /* XXX: magic numbers */
4709 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
4710 &v);
4711 end_synchronized_op(sc, 0);
4712 if (rc)
4713 return (rc);
4714 if (op == 0)
4715 v /= 256;
4716
4717 rc = sysctl_handle_int(oidp, &v, 0, req);
4718 return (rc);
4719}
4720
4721static int
4722sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
4723{
4724 struct port_info *pi = arg1;
4725 struct adapter *sc = pi->adapter;
4726 int idx, rc, i;
4727 struct sge_rxq *rxq;
4728#ifdef TCP_OFFLOAD
4729 struct sge_ofld_rxq *ofld_rxq;
4730#endif
4731 uint8_t v;
4732
4733 idx = pi->tmr_idx;
4734
4735 rc = sysctl_handle_int(oidp, &idx, 0, req);
4736 if (rc != 0 || req->newptr == NULL)
4737 return (rc);
4738
4739 if (idx < 0 || idx >= SGE_NTIMERS)
4740 return (EINVAL);
4741
4742 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4743 "t4tmr");
4744 if (rc)
4745 return (rc);
4746
4747 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
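	/*
	 * intr_params is read by the interrupt handler without any lock, so
	 * the update must be a single store.  Platforms without
	 * atomic_store_rel_8 fall back to a plain byte write, which is
	 * still atomic on its own, just without the release ordering.
	 */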
4748 for_each_rxq(pi, i, rxq) {
4749#ifdef atomic_store_rel_8
4750 atomic_store_rel_8(&rxq->iq.intr_params, v);
4751#else
4752 rxq->iq.intr_params = v;
4753#endif
4754 }
4755#ifdef TCP_OFFLOAD
4756 for_each_ofld_rxq(pi, i, ofld_rxq) {
4757#ifdef atomic_store_rel_8
4758 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
4759#else
4760 ofld_rxq->iq.intr_params = v;
4761#endif
4762 }
4763#endif
4764 pi->tmr_idx = idx;
4765
4766 end_synchronized_op(sc, LOCK_HELD);
4767 return (0);
4768}
4769
4770static int
4771sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4772{
4773 struct port_info *pi = arg1;
4774 struct adapter *sc = pi->adapter;
4775 int idx, rc;
4776
4777 idx = pi->pktc_idx;
4778
4779 rc = sysctl_handle_int(oidp, &idx, 0, req);
4780 if (rc != 0 || req->newptr == NULL)
4781 return (rc);
4782
4783 if (idx < -1 || idx >= SGE_NCOUNTERS)
4784 return (EINVAL);
4785
4786 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4787 "t4pktc");
4788 if (rc)
4789 return (rc);
4790
4791 if (pi->flags & PORT_INIT_DONE)
4792 rc = EBUSY; /* cannot be changed once the queues are created */
4793 else
4794 pi->pktc_idx = idx;
4795
4796 end_synchronized_op(sc, LOCK_HELD);
4797 return (rc);
4798}
4799
4800static int
4801sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4802{
4803 struct port_info *pi = arg1;
4804 struct adapter *sc = pi->adapter;
4805 int qsize, rc;
4806
4807 qsize = pi->qsize_rxq;
4808
4809 rc = sysctl_handle_int(oidp, &qsize, 0, req);
4810 if (rc != 0 || req->newptr == NULL)
4811 return (rc);
4812
4813 if (qsize < 128 || (qsize & 7))
4814 return (EINVAL);
4815
4816 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4817 "t4rxqs");
4818 if (rc)
4819 return (rc);
4820
4821 if (pi->flags & PORT_INIT_DONE)
4822 rc = EBUSY; /* cannot be changed once the queues are created */
4823 else
4824 pi->qsize_rxq = qsize;
4825
4826 end_synchronized_op(sc, LOCK_HELD);
4827 return (rc);
4828}
4829
4830static int
4831sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4832{
4833 struct port_info *pi = arg1;
4834 struct adapter *sc = pi->adapter;
4835 int qsize, rc;
4836
4837 qsize = pi->qsize_txq;
4838
4839 rc = sysctl_handle_int(oidp, &qsize, 0, req);
4840 if (rc != 0 || req->newptr == NULL)
4841 return (rc);
4842
4843	/* bufring size must be a power of 2 */
4844 if (qsize < 128 || !powerof2(qsize))
4845 return (EINVAL);
4846
4847 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4848 "t4txqs");
4849 if (rc)
4850 return (rc);
4851
4852 if (pi->flags & PORT_INIT_DONE)
4853 rc = EBUSY; /* cannot be changed once the queues are created */
4854 else
4855 pi->qsize_txq = qsize;
4856
4857 end_synchronized_op(sc, LOCK_HELD);
4858 return (rc);
4859}
4860
4861static int
4862sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4863{
4864 struct adapter *sc = arg1;
4865 int reg = arg2;
4866 uint64_t val;
4867
4868 val = t4_read_reg64(sc, reg);
4869
4870 return (sysctl_handle_64(oidp, &val, 0, req));
4871}
4872
4873static int
4874sysctl_temperature(SYSCTL_HANDLER_ARGS)
4875{
4876 struct adapter *sc = arg1;
4877 int rc, t;
4878 uint32_t param, val;
4879
4880 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
4881 if (rc)
4882 return (rc);
4883 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4884 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4885 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
4886 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4887 end_synchronized_op(sc, 0);
4888 if (rc)
4889 return (rc);
4890
4891 /* unknown is returned as 0 but we display -1 in that case */
4892 t = val == 0 ? -1 : val;
4893
4894 rc = sysctl_handle_int(oidp, &t, 0, req);
4895 return (rc);
4896}
4897
4898#ifdef SBUF_DRAIN
4899static int
4900sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4901{
4902 struct adapter *sc = arg1;
4903 struct sbuf *sb;
4904 int rc, i;
4905 uint16_t incr[NMTUS][NCCTRL_WIN];
4906 static const char *dec_fac[] = {
4907 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4908 "0.9375"
4909 };
4910
4911 rc = sysctl_wire_old_buffer(req, 0);
4912 if (rc != 0)
4913 return (rc);
4914
4915 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4916 if (sb == NULL)
4917 return (ENOMEM);
4918
4919 t4_read_cong_tbl(sc, incr);
4920
4921 for (i = 0; i < NCCTRL_WIN; ++i) {
4922 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4923 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4924 incr[5][i], incr[6][i], incr[7][i]);
4925 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4926 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4927 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
4928 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
4929 }
4930
4931 rc = sbuf_finish(sb);
4932 sbuf_delete(sb);
4933
4934 return (rc);
4935}
4936
4937static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
4938 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
4939 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
4940 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
4941};
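/*
 * arg2 of sysctl_cim_ibq_obq is a combined qid: 0 .. CIM_NUM_IBQ - 1 for
 * the inbound queues and CIM_NUM_IBQ onwards for the outbound queues.
 * qname[] above is indexed by that same combined qid.
 */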
4942
4943static int
4944sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
4945{
4946 struct adapter *sc = arg1;
4947 struct sbuf *sb;
4948 int rc, i, n, qid = arg2;
4949 uint32_t *buf, *p;
4950 char *qtype;
4951 u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4952
4953 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
4954 ("%s: bad qid %d\n", __func__, qid));
4955
4956 if (qid < CIM_NUM_IBQ) {
4957 /* inbound queue */
4958 qtype = "IBQ";
4959 n = 4 * CIM_IBQ_SIZE;
4960 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4961 rc = t4_read_cim_ibq(sc, qid, buf, n);
4962 } else {
4963 /* outbound queue */
4964 qtype = "OBQ";
4965 qid -= CIM_NUM_IBQ;
4966 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
4967 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4968 rc = t4_read_cim_obq(sc, qid, buf, n);
4969 }
4970
4971 if (rc < 0) {
4972 rc = -rc;
4973 goto done;
4974 }
4975 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
4976
4977 rc = sysctl_wire_old_buffer(req, 0);
4978 if (rc != 0)
4979 goto done;
4980
4981 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
4982 if (sb == NULL) {
4983 rc = ENOMEM;
4984 goto done;
4985 }
4986
4987	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
4988 for (i = 0, p = buf; i < n; i += 16, p += 4)
4989 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
4990 p[2], p[3]);
4991
4992 rc = sbuf_finish(sb);
4993 sbuf_delete(sb);
4994done:
4995 free(buf, M_CXGBE);
4996 return (rc);
4997}
4998
4999static int
5000sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5001{
5002 struct adapter *sc = arg1;
5003 u_int cfg;
5004 struct sbuf *sb;
5005 uint32_t *buf, *p;
5006 int rc;
5007
5008 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5009 if (rc != 0)
5010 return (rc);
5011
5012 rc = sysctl_wire_old_buffer(req, 0);
5013 if (rc != 0)
5014 return (rc);
5015
5016 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5017 if (sb == NULL)
5018 return (ENOMEM);
5019
5020 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5021 M_ZERO | M_WAITOK);
5022
5023 rc = -t4_cim_read_la(sc, buf, NULL);
5024 if (rc != 0) {
 sbuf_delete(sb); /* sb was never finished; don't leak it */
5025 goto done;
 }
5026
5027 sbuf_printf(sb, "Status Data PC%s",
5028 cfg & F_UPDBGLACAPTPCONLY ? "" :
5029 " LS0Stat LS0Addr LS0Data");
5030
5031 KASSERT((sc->params.cim_la_size & 7) == 0,
5032 ("%s: p will walk off the end of buf", __func__));
5033
5034 for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5035 if (cfg & F_UPDBGLACAPTPCONLY) {
5036 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
5037 p[6], p[7]);
5038 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
5039 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5040 p[4] & 0xff, p[5] >> 8);
5041 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
5042 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5043 p[1] & 0xf, p[2] >> 4);
5044 } else {
5045 sbuf_printf(sb,
5046 "\n %02x %x%07x %x%07x %08x %08x "
5047 "%08x%08x%08x%08x",
5048 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5049 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5050 p[6], p[7]);
5051 }
5052 }
5053
5054 rc = sbuf_finish(sb);
5055 sbuf_delete(sb);
5056done:
5057 free(buf, M_CXGBE);
5058 return (rc);
5059}
5060
5061static int
5062sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5063{
5064 struct adapter *sc = arg1;
5065 u_int i;
5066 struct sbuf *sb;
5067 uint32_t *buf, *p;
5068 int rc;
5069
5070 rc = sysctl_wire_old_buffer(req, 0);
5071 if (rc != 0)
5072 return (rc);
5073
5074 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5075 if (sb == NULL)
5076 return (ENOMEM);
5077
5078 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5079 M_ZERO | M_WAITOK);
5080
5081 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
5082 p = buf;
5083
5084 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5085 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5086 p[1], p[0]);
5087 }
5088
5089 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
5090 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5091 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
5092 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5093 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5094 (p[1] >> 2) | ((p[2] & 3) << 30),
5095 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5096 p[0] & 1);
5097 }
5098
5099 rc = sbuf_finish(sb);
5100 sbuf_delete(sb);
5101 free(buf, M_CXGBE);
5102 return (rc);
5103}
5104
5105static int
5106sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5107{
5108 struct adapter *sc = arg1;
5109 u_int i;
5110 struct sbuf *sb;
5111 uint32_t *buf, *p;
5112 int rc;
5113
5114 rc = sysctl_wire_old_buffer(req, 0);
5115 if (rc != 0)
5116 return (rc);
5117
5118 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5119 if (sb == NULL)
5120 return (ENOMEM);
5121
5122 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5123 M_ZERO | M_WAITOK);
5124
5125 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5126 p = buf;
5127
5128 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
5129 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5130 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
5131 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5132 p[4], p[3], p[2], p[1], p[0]);
5133 }
5134
5135 sbuf_printf(sb, "\n\nCntl ID Data");
5136 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5137 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
5138 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5139 }
5140
5141 rc = sbuf_finish(sb);
5142 sbuf_delete(sb);
5143 free(buf, M_CXGBE);
5144 return (rc);
5145}
5146
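/*
 * Show the configuration of the CIM queues: base, size, threshold (IBQs
 * only), plus the read/write pointers and occupancy pulled from the
 * generation-specific (T4 vs. T5 shadow) registers.
 */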
5147static int
5148sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5149{
5150 struct adapter *sc = arg1;
5151 struct sbuf *sb;
5152 int rc, i;
5153 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5154 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5155 uint16_t thres[CIM_NUM_IBQ];
5156 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5157 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5158 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5159
5160 if (is_t4(sc)) {
5161 cim_num_obq = CIM_NUM_OBQ;
5162 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5163 obq_rdaddr = A_UP_OBQ_0_REALADDR;
5164 } else {
5165 cim_num_obq = CIM_NUM_OBQ_T5;
5166 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5167 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5168 }
5169 nq = CIM_NUM_IBQ + cim_num_obq;
5170
5171 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5172 if (rc == 0)
5173 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5174 if (rc != 0)
5175 return (rc);
5176
5177 t4_read_cimq_cfg(sc, base, size, thres);
5178
5179 rc = sysctl_wire_old_buffer(req, 0);
5180 if (rc != 0)
5181 return (rc);
5182
5183 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5184 if (sb == NULL)
5185 return (ENOMEM);
5186
5187 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
5188
5189 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5190 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
5191 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5192 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5193 G_QUEREMFLITS(p[2]) * 16);
5194 for ( ; i < nq; i++, p += 4, wr += 2)
5195 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
5196 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5197 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5198 G_QUEREMFLITS(p[2]) * 16);
5199
5200 rc = sbuf_finish(sb);
5201 sbuf_delete(sb);
5202
5203 return (rc);
5204}
5205
5206static int
5207sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5208{
5209 struct adapter *sc = arg1;
5210 struct sbuf *sb;
5211 int rc;
5212 struct tp_cpl_stats stats;
5213
5214 rc = sysctl_wire_old_buffer(req, 0);
5215 if (rc != 0)
5216 return (rc);
5217
5218 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5219 if (sb == NULL)
5220 return (ENOMEM);
5221
5222 t4_tp_get_cpl_stats(sc, &stats);
5223
5224 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
5225 "channel 3\n");
5226 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n",
5227 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5228 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u",
5229 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5230
5231 rc = sbuf_finish(sb);
5232 sbuf_delete(sb);
5233
5234 return (rc);
5235}
5236
5237static int
5238sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5239{
5240 struct adapter *sc = arg1;
5241 struct sbuf *sb;
5242 int rc;
5243 struct tp_usm_stats stats;
5244
5245 rc = sysctl_wire_old_buffer(req, 0);
5246 if (rc != 0)
5247 return (rc);
5248
5249 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5250 if (sb == NULL)
5251 return (ENOMEM);
5252
5253 t4_get_usm_stats(sc, &stats);
5254
5255 sbuf_printf(sb, "Frames: %u\n", stats.frames);
5256 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5257 sbuf_printf(sb, "Drops: %u", stats.drops);
5258
5259 rc = sbuf_finish(sb);
5260 sbuf_delete(sb);
5261
5262 return (rc);
5263}
5264
5265const char *devlog_level_strings[] = {
5266 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
5267 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
5268 [FW_DEVLOG_LEVEL_ERR] = "ERR",
5269 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
5270 [FW_DEVLOG_LEVEL_INFO] = "INFO",
5271 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
5272};
5273
5274const char *devlog_facility_strings[] = {
5275 [FW_DEVLOG_FACILITY_CORE] = "CORE",
5276 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
5277 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
5278 [FW_DEVLOG_FACILITY_RES] = "RES",
5279 [FW_DEVLOG_FACILITY_HW] = "HW",
5280 [FW_DEVLOG_FACILITY_FLR] = "FLR",
5281 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
5282 [FW_DEVLOG_FACILITY_PHY] = "PHY",
5283 [FW_DEVLOG_FACILITY_MAC] = "MAC",
5284 [FW_DEVLOG_FACILITY_PORT] = "PORT",
5285 [FW_DEVLOG_FACILITY_VI] = "VI",
5286 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
5287 [FW_DEVLOG_FACILITY_ACL] = "ACL",
5288 [FW_DEVLOG_FACILITY_TM] = "TM",
5289 [FW_DEVLOG_FACILITY_QFC] = "QFC",
5290 [FW_DEVLOG_FACILITY_DCB] = "DCB",
5291 [FW_DEVLOG_FACILITY_ETH] = "ETH",
5292 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
5293 [FW_DEVLOG_FACILITY_RI] = "RI",
5294 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
5295 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
5296 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
5297 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
5298};
5299
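/*
 * Dump the firmware's device log: a circular buffer of fixed-size,
 * big-endian entries in adapter memory.  The entry with the lowest
 * timestamp is the oldest, so the dump starts there and wraps around.
 * If the log's location was never discovered, fall back to a default
 * spot in EDC0.
 */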
5300static int
5301sysctl_devlog(SYSCTL_HANDLER_ARGS)
5302{
5303 struct adapter *sc = arg1;
5304 struct devlog_params *dparams = &sc->params.devlog;
5305 struct fw_devlog_e *buf, *e;
5306 int i, j, rc, nentries, first = 0;
5307 struct sbuf *sb;
5308 uint64_t ftstamp = UINT64_MAX;
5309
5310 if (dparams->start == 0) {
5311 dparams->memtype = 0; /* EDC0 */
5312 dparams->start = 0x84000; /* assumed default devlog location */
5313 dparams->size = 32768; /* assumed default devlog size */
5314 }
5315
5316 nentries = dparams->size / sizeof(struct fw_devlog_e);
5317
5318 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5319 if (buf == NULL)
5320 return (ENOMEM);
5321
5322 rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
5323 (void *)buf);
5324 if (rc != 0)
5325 goto done;
5326
5327 for (i = 0; i < nentries; i++) {
5328 e = &buf[i];
5329
5330 if (e->timestamp == 0)
5331 break; /* end */
5332
5333 e->timestamp = be64toh(e->timestamp);
5334 e->seqno = be32toh(e->seqno);
5335 for (j = 0; j < 8; j++)
5336 e->params[j] = be32toh(e->params[j]);
5337
5338 if (e->timestamp < ftstamp) {
5339 ftstamp = e->timestamp;
5340 first = i;
5341 }
5342 }
5343
5344 if (buf[first].timestamp == 0)
5345 goto done; /* nothing in the log */
5346
5347 rc = sysctl_wire_old_buffer(req, 0);
5348 if (rc != 0)
5349 goto done;
5350
5351 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5352 if (sb == NULL) {
5353 rc = ENOMEM;
5354 goto done;
5355 }
5356 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
5357 "Seq#", "Tstamp", "Level", "Facility", "Message");
5358
5359 i = first;
5360 do {
5361 e = &buf[i];
5362 if (e->timestamp == 0)
5363 break; /* end */
5364
5365 sbuf_printf(sb, "%10d %15ju %8s %8s ",
5366 e->seqno, e->timestamp,
5367 (e->level < nitems(devlog_level_strings) ?
5368 devlog_level_strings[e->level] : "UNKNOWN"),
5369 (e->facility < nitems(devlog_facility_strings) ?
5370 devlog_facility_strings[e->facility] : "UNKNOWN"));
5371 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5372 e->params[2], e->params[3], e->params[4],
5373 e->params[5], e->params[6], e->params[7]);
5374
5375 if (++i == nentries)
5376 i = 0;
5377 } while (i != first);
5378
5379 rc = sbuf_finish(sb);
5380 sbuf_delete(sb);
5381done:
5382 free(buf, M_CXGBE);
5383 return (rc);
5384}
5385
5386static int
5387sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5388{
5389 struct adapter *sc = arg1;
5390 struct sbuf *sb;
5391 int rc;
5392 struct tp_fcoe_stats stats[4];
5393
5394 rc = sysctl_wire_old_buffer(req, 0);
5395 if (rc != 0)
5396 return (rc);
5397
5398 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5399 if (sb == NULL)
5400 return (ENOMEM);
5401
5402 t4_get_fcoe_stats(sc, 0, &stats[0]);
5403 t4_get_fcoe_stats(sc, 1, &stats[1]);
5404 t4_get_fcoe_stats(sc, 2, &stats[2]);
5405 t4_get_fcoe_stats(sc, 3, &stats[3]);
5406
5407 sbuf_printf(sb, " channel 0 channel 1 "
5408 "channel 2 channel 3\n");
5409 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n",
5410 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5411 stats[3].octetsDDP);
5412 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP,
5413 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5414 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5415 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5416 stats[3].framesDrop);
5417
5418 rc = sbuf_finish(sb);
5419 sbuf_delete(sb);
5420
5421 return (rc);
5422}
5423
5424static int
5425sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5426{
5427 struct adapter *sc = arg1;
5428 struct sbuf *sb;
5429 int rc, i;
5430 unsigned int map, kbps, ipg, mode;
5431 unsigned int pace_tab[NTX_SCHED];
5432
5433 rc = sysctl_wire_old_buffer(req, 0);
5434 if (rc != 0)
5435 return (rc);
5436
5437 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5438 if (sb == NULL)
5439 return (ENOMEM);
5440
5441 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5442 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5443 t4_read_pace_tbl(sc, pace_tab);
5444
5445 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
5446 "Class IPG (0.1 ns) Flow IPG (us)");
5447
5448 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5449 t4_get_tx_sched(sc, i, &kbps, &ipg);
5450 sbuf_printf(sb, "\n %u %-5s %u ", i,
5451 (mode & (1 << i)) ? "flow" : "class", map & 3);
5452 if (kbps)
5453 sbuf_printf(sb, "%9u ", kbps);
5454 else
5455 sbuf_printf(sb, " disabled ");
5456
5457 if (ipg)
5458 sbuf_printf(sb, "%13u ", ipg);
5459 else
5460 sbuf_printf(sb, " disabled ");
5461
5462 if (pace_tab[i])
5463 sbuf_printf(sb, "%10u", pace_tab[i]);
5464 else
5465 sbuf_printf(sb, " disabled");
5466 }
5467
5468 rc = sbuf_finish(sb);
5469 sbuf_delete(sb);
5470
5471 return (rc);
5472}
5473
5474static int
5475sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5476{
5477 struct adapter *sc = arg1;
5478 struct sbuf *sb;
5479 int rc, i, j;
5480 uint64_t *p0, *p1;
5481 struct lb_port_stats s[2];
5482 static const char *stat_name[] = {
5483 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5484 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5485 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
5486 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5487 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5488 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5489 "BG2FramesTrunc:", "BG3FramesTrunc:"
5490 };
5491
5492 rc = sysctl_wire_old_buffer(req, 0);
5493 if (rc != 0)
5494 return (rc);
5495
5496 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5497 if (sb == NULL)
5498 return (ENOMEM);
5499
5500 memset(s, 0, sizeof(s));
5501
5502 for (i = 0; i < 4; i += 2) {
5503 t4_get_lb_stats(sc, i, &s[0]);
5504 t4_get_lb_stats(sc, i + 1, &s[1]);
5505
5506 p0 = &s[0].octets;
5507 p1 = &s[1].octets;
5508 sbuf_printf(sb, "%s Loopback %u"
5509 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5510
5511 for (j = 0; j < nitems(stat_name); j++)
5512 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5513 *p0++, *p1++);
5514 }
5515
5516 rc = sbuf_finish(sb);
5517 sbuf_delete(sb);
5518
5519 return (rc);
5520}
5521
5522static int
5523sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5524{
5525 int rc = 0;
5526 struct port_info *pi = arg1;
5527 struct sbuf *sb;
5528 static const char *linkdnreasons[] = {
5529 "non-specific", "remote fault", "autoneg failed", "reserved3",
5530 "PHY overheated", "unknown", "rx los", "reserved7"
5531 };
5532
5533 rc = sysctl_wire_old_buffer(req, 0);
5534 if (rc != 0)
5535 return (rc);
5536 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5537 if (sb == NULL)
5538 return (ENOMEM);
5539
5540 if (pi->linkdnrc < 0)
5541 sbuf_printf(sb, "n/a");
5542 else if (pi->linkdnrc < nitems(linkdnreasons))
5543 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5544 else
5545 sbuf_printf(sb, "%d", pi->linkdnrc);
5546
5547 rc = sbuf_finish(sb);
5548 sbuf_delete(sb);
5549
5550 return (rc);
5551}
5552
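/*
 * One memory region for sysctl_meminfo below: base address, inclusive
 * upper limit (0 when it must be inferred from the next region), and an
 * index into the corresponding name table.
 */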
5553struct mem_desc {
5554 unsigned int base;
5555 unsigned int limit;
5556 unsigned int idx;
5557};
5558
5559static int
5560mem_desc_cmp(const void *a, const void *b)
5561{
5562 return ((const struct mem_desc *)a)->base -
5563 ((const struct mem_desc *)b)->base;
5564}
5565
5566static void
5567mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
5568 unsigned int to)
5569{
5570 unsigned int size;
5571
5572 size = to - from + 1;
5573 if (size == 0)
5574 return;
5575
5576 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
5577 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
5578}
5579
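/*
 * Summarize the adapter's memory map: which memories are enabled (EDC0/1
 * and MC, or MC0/MC1 on T5), the regions carved out of them sorted by
 * base address with holes elided, and the Rx/Tx payload page usage per
 * port and loopback channel.
 */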
5580static int
5581sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5582{
5583 struct adapter *sc = arg1;
5584 struct sbuf *sb;
5585 int rc, i, n;
5586 uint32_t lo, hi, used, alloc;
5587 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5588 static const char *region[] = {
5589 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5590 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5591 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5592 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5593 "RQUDP region:", "PBL region:", "TXPBL region:",
5594 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5595 "On-chip queues:"
5596 };
5597 struct mem_desc avail[4];
5598 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
5599 struct mem_desc *md = mem;
5600
5601 rc = sysctl_wire_old_buffer(req, 0);
5602 if (rc != 0)
5603 return (rc);
5604
5605 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5606 if (sb == NULL)
5607 return (ENOMEM);
5608
5609 for (i = 0; i < nitems(mem); i++) {
5610 mem[i].limit = 0;
5611 mem[i].idx = i;
5612 }
5613
5614 /* Find and sort the populated memory ranges */
5615 i = 0;
5616 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5617 if (lo & F_EDRAM0_ENABLE) {
5618 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5619 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5620 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5621 avail[i].idx = 0;
5622 i++;
5623 }
5624 if (lo & F_EDRAM1_ENABLE) {
5625 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5626 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5627 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5628 avail[i].idx = 1;
5629 i++;
5630 }
5631 if (lo & F_EXT_MEM_ENABLE) {
5632 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5633 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5634 avail[i].limit = avail[i].base +
5635 (G_EXT_MEM_SIZE(hi) << 20);
5636 avail[i].idx = is_t4(sc) ? 2 : 3; /* Call it MC for T4 */
5637 i++;
5638 }
5639 if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5640 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5641 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5642 avail[i].limit = avail[i].base +
5643 (G_EXT_MEM1_SIZE(hi) << 20);
5644 avail[i].idx = 4;
5645 i++;
5646 }
5647 if (!i) { /* no memory available */
 sbuf_delete(sb); /* nothing to report; don't leak the sbuf */
5648 return (0);
 }
5649 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5650
5651 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5652 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5653 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5654 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5655 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5656 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5657 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5658 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5659 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5660
5661 /* the next few have explicit upper bounds */
5662 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5663 md->limit = md->base - 1 +
5664 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5665 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5666 md++;
5667
5668 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5669 md->limit = md->base - 1 +
5670 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5671 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5672 md++;
5673
5674 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5675 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5676 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5677 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5678 } else {
5679 md->base = 0;
5680 md->idx = nitems(region); /* hide it */
5681 }
5682 md++;
5683
5684#define ulp_region(reg) \
5685 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5686 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5687
5688 ulp_region(RX_ISCSI);
5689 ulp_region(RX_TDDP);
5690 ulp_region(TX_TPT);
5691 ulp_region(RX_STAG);
5692 ulp_region(RX_RQ);
5693 ulp_region(RX_RQUDP);
5694 ulp_region(RX_PBL);
5695 ulp_region(TX_PBL);
5696#undef ulp_region
5697
5698 md->base = 0;
5699 md->idx = nitems(region);
5700 if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5701 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5702 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5703 A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5704 }
5705 md++;
5706
5707 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5708 md->limit = md->base + sc->tids.ntids - 1;
5709 md++;
5710 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5711 md->limit = md->base + sc->tids.ntids - 1;
5712 md++;
5713
5714 md->base = sc->vres.ocq.start;
5715 if (sc->vres.ocq.size)
5716 md->limit = md->base + sc->vres.ocq.size - 1;
5717 else
5718 md->idx = nitems(region); /* hide it */
5719 md++;
5720
5721 /* add any address-space holes, there can be up to 3 */
5722 for (n = 0; n < i - 1; n++)
5723 if (avail[n].limit < avail[n + 1].base)
5724 (md++)->base = avail[n].limit;
5725 if (avail[n].limit)
5726 (md++)->base = avail[n].limit;
5727
5728 n = md - mem;
5729 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5730
5731 for (lo = 0; lo < i; lo++)
5732 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5733 avail[lo].limit - 1);
5734
5735 sbuf_printf(sb, "\n");
5736 for (i = 0; i < n; i++) {
5737 if (mem[i].idx >= nitems(region))
5738 continue; /* skip holes */
5739 if (!mem[i].limit)
5740 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5741 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5742 mem[i].limit);
5743 }
5744
5745 sbuf_printf(sb, "\n");
5746 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5747 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5748 mem_region_show(sb, "uP RAM:", lo, hi);
5749
5750 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5751 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5752 mem_region_show(sb, "uP Extmem2:", lo, hi);
5753
5754 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5755 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5756 G_PMRXMAXPAGE(lo),
5757 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5758 (lo & F_PMRXNUMCHN) ? 2 : 1);
5759
5760 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5761 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5762 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5763 G_PMTXMAXPAGE(lo),
5764 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5765 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5766 sbuf_printf(sb, "%u p-structs\n",
5767 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5768
5769 for (i = 0; i < 4; i++) {
5770 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5771 if (is_t4(sc)) {
5772 used = G_USED(lo);
5773 alloc = G_ALLOC(lo);
5774 } else {
5775 used = G_T5_USED(lo);
5776 alloc = G_T5_ALLOC(lo);
5777 }
5778 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5779 i, used, alloc);
5780 }
5781 for (i = 0; i < 4; i++) {
5782 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5783 if (is_t4(sc)) {
5784 used = G_USED(lo);
5785 alloc = G_ALLOC(lo);
5786 } else {
5787 used = G_T5_USED(lo);
5788 alloc = G_T5_ALLOC(lo);
5789 }
5790 sbuf_printf(sb,
5791 "\nLoopback %d using %u pages out of %u allocated",
5792 i, used, alloc);
5793 }
5794
5795 rc = sbuf_finish(sb);
5796 sbuf_delete(sb);
5797
5798 return (rc);
5799}
5800
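/*
 * Convert a TCAM (x, y) pair to a value/mask.  Bits where x == y == 0 are
 * wildcards, so mask = x | y; for the bits that do participate the value
 * appears to be carried in y, whose low 48 bits hold the Ethernet address
 * in network byte order.
 */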
5801static inline void
5802tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5803{
5804 *mask = x | y;
5805 y = htobe64(y);
5806 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5807}
5808
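/*
 * Dump the MPS TCAM.  Entries with overlapping x/y bits are invalid and
 * skipped.  Replication bitmaps can't be read directly; they are fetched
 * from the firmware with an LDST command, which needs the adapter lock
 * and can therefore fail partway through the listing.
 */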
5809static int
5810sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
5811{
5812 struct adapter *sc = arg1;
5813 struct sbuf *sb;
5814 int rc, i, n;
5815
5816 rc = sysctl_wire_old_buffer(req, 0);
5817 if (rc != 0)
5818 return (rc);
5819
5820 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5821 if (sb == NULL)
5822 return (ENOMEM);
5823
5824 sbuf_printf(sb,
5825 "Idx Ethernet address Mask Vld Ports PF"
5826 " VF Replication P0 P1 P2 P3 ML");
5827 n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
5828 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5829 for (i = 0; i < n; i++) {
5830 uint64_t tcamx, tcamy, mask;
5831 uint32_t cls_lo, cls_hi;
5832 uint8_t addr[ETHER_ADDR_LEN];
5833
5834 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
5835 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
5836 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
5837 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
5838
5839 if (tcamx & tcamy)
5840 continue;
5841
5842 tcamxy2valmask(tcamx, tcamy, addr, &mask);
5843 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
5844 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
5845 addr[3], addr[4], addr[5], (uintmax_t)mask,
5846 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
5847 G_PORTMAP(cls_hi), G_PF(cls_lo),
5848 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
5849
5850 if (cls_lo & F_REPLICATE) {
5851 struct fw_ldst_cmd ldst_cmd;
5852
5853 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
5854 ldst_cmd.op_to_addrspace =
5855 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
5856 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5857 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
5858 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
5859 ldst_cmd.u.mps.fid_ctl =
5860 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
5861 V_FW_LDST_CMD_CTL(i));
5862
5863 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
5864 "t4mps");
5865 if (rc)
5866 break;
5867 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
5868 sizeof(ldst_cmd), &ldst_cmd);
5869 end_synchronized_op(sc, 0);
5870
5871 if (rc != 0) {
5872 sbuf_printf(sb,
5873 " ------------ error %3u ------------", rc);
5874 rc = 0;
5875 } else {
5876 sbuf_printf(sb, " %08x %08x %08x %08x",
5877 be32toh(ldst_cmd.u.mps.rplc127_96),
5878 be32toh(ldst_cmd.u.mps.rplc95_64),
5879 be32toh(ldst_cmd.u.mps.rplc63_32),
5880 be32toh(ldst_cmd.u.mps.rplc31_0));
5881 }
5882 } else
5883 sbuf_printf(sb, "%36s", "");
5884
5885 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
5886 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
5887 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
5888 }
5889
5890 if (rc)
5891 (void) sbuf_finish(sb);
5892 else
5893 rc = sbuf_finish(sb);
5894 sbuf_delete(sb);
5895
5896 return (rc);
5897}
5898
5899static int
5900sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5901{
5902 struct adapter *sc = arg1;
5903 struct sbuf *sb;
5904 int rc;
5905 uint16_t mtus[NMTUS];
5906
5907 rc = sysctl_wire_old_buffer(req, 0);
5908 if (rc != 0)
5909 return (rc);
5910
5911 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5912 if (sb == NULL)
5913 return (ENOMEM);
5914
5915 t4_read_mtu_tbl(sc, mtus, NULL);
5916
5917 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5918 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5919 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5920 mtus[14], mtus[15]);
5921
5922 rc = sbuf_finish(sb);
5923 sbuf_delete(sb);
5924
5925 return (rc);
5926}
5927
5928static int
5929sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5930{
5931 struct adapter *sc = arg1;
5932 struct sbuf *sb;
5933 int rc, i;
5934 uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
5935 uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
5936 static const char *pm_stats[] = {
5937 "Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
5938 };
5939
5940 rc = sysctl_wire_old_buffer(req, 0);
5941 if (rc != 0)
5942 return (rc);
5943
5944 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5945 if (sb == NULL)
5946 return (ENOMEM);
5947
5948 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
5949 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
5950
5951 sbuf_printf(sb, " Tx count Tx cycles "
5952 "Rx count Rx cycles");
5953 for (i = 0; i < PM_NSTATS; i++)
5954 sbuf_printf(sb, "\n%-13s %10u %20ju %10u %20ju",
5955 pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
5956
5957 rc = sbuf_finish(sb);
5958 sbuf_delete(sb);
5959
5960 return (rc);
5961}
5962
5963static int
5964sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5965{
5966 struct adapter *sc = arg1;
5967 struct sbuf *sb;
5968 int rc;
5969 struct tp_rdma_stats stats;
5970
5971 rc = sysctl_wire_old_buffer(req, 0);
5972 if (rc != 0)
5973 return (rc);
5974
5975 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5976 if (sb == NULL)
5977 return (ENOMEM);
5978
5979 t4_tp_get_rdma_stats(sc, &stats);
5980 sbuf_printf(sb, "NoRQEModDeferrals: %u\n", stats.rqe_dfr_mod);
5981 sbuf_printf(sb, "NoRQEPktDeferrals: %u", stats.rqe_dfr_pkt);
5982
5983 rc = sbuf_finish(sb);
5984 sbuf_delete(sb);
5985
5986 return (rc);
5987}
5988
5989static int
5990sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
5991{
5992 struct adapter *sc = arg1;
5993 struct sbuf *sb;
5994 int rc;
5995 struct tp_tcp_stats v4, v6;
5996
5997 rc = sysctl_wire_old_buffer(req, 0);
5998 if (rc != 0)
5999 return (rc);
6000
6001 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6002 if (sb == NULL)
6003 return (ENOMEM);
6004
6005 t4_tp_get_tcp_stats(sc, &v4, &v6);
6006 sbuf_printf(sb,
6007 " IP IPv6\n");
6008 sbuf_printf(sb, "OutRsts: %20u %20u\n",
6009 v4.tcpOutRsts, v6.tcpOutRsts);
6010 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
6011 v4.tcpInSegs, v6.tcpInSegs);
6012 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
6013 v4.tcpOutSegs, v6.tcpOutSegs);
6014 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
6015 v4.tcpRetransSegs, v6.tcpRetransSegs);
6016
6017 rc = sbuf_finish(sb);
6018 sbuf_delete(sb);
6019
6020 return (rc);
6021}
6022
6023static int
6024sysctl_tids(SYSCTL_HANDLER_ARGS)
6025{
6026 struct adapter *sc = arg1;
6027 struct sbuf *sb;
6028 int rc;
6029 struct tid_info *t = &sc->tids;
6030
6031 rc = sysctl_wire_old_buffer(req, 0);
6032 if (rc != 0)
6033 return (rc);
6034
6035 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6036 if (sb == NULL)
6037 return (ENOMEM);
6038
6039 if (t->natids) {
6040 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
6041 t->atids_in_use);
6042 }
6043
6044 if (t->ntids) {
6045 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6046 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
6047
6048 if (b) {
6049 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
6050 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6051 t->ntids - 1);
6052 } else {
6053 sbuf_printf(sb, "TID range: %u-%u",
6054 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6055 t->ntids - 1);
6056 }
6057 } else
6058 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
6059 sbuf_printf(sb, ", in use: %u\n",
6060 atomic_load_acq_int(&t->tids_in_use));
6061 }
6062
6063 if (t->nstids) {
6064 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
6065 t->stid_base + t->nstids - 1, t->stids_in_use);
6066 }
6067
6068 if (t->nftids) {
6069 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
6070 t->ftid_base + t->nftids - 1);
6071 }
6072
6073 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
6074 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
6075 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
6076
6077 rc = sbuf_finish(sb);
6078 sbuf_delete(sb);
6079
6080 return (rc);
6081}
6082
6083static int
6084sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6085{
6086 struct adapter *sc = arg1;
6087 struct sbuf *sb;
6088 int rc;
6089 struct tp_err_stats stats;
6090
6091 rc = sysctl_wire_old_buffer(req, 0);
6092 if (rc != 0)
6093 return (rc);
6094
6095 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6096 if (sb == NULL)
6097 return (ENOMEM);
6098
6099 t4_tp_get_err_stats(sc, &stats);
6100
6101 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
6102 "channel 3\n");
6103 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
6104 stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6105 stats.macInErrs[3]);
6106 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
6107 stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6108 stats.hdrInErrs[3]);
6109 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
6110 stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6111 stats.tcpInErrs[3]);
6112 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
6113 stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6114 stats.tcp6InErrs[3]);
6115 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
6116 stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6117 stats.tnlCongDrops[3]);
6118 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
6119 stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6120 stats.tnlTxDrops[3]);
6121 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
6122 stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6123 stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6124 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
6125 stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6126 stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6127 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
6128 stats.ofldNoNeigh, stats.ofldCongDefer);
6129
6130 rc = sbuf_finish(sb);
6131 sbuf_delete(sb);
6132
6133 return (rc);
6134}
6135
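/*
 * Layout of one bit field within a logic-analyzer word: name, starting
 * bit, and width.  field_desc_show() decodes a 64-bit word against a
 * NULL-terminated array of these, wrapping the output near column 79.
 */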
6136struct field_desc {
6137 const char *name;
6138 u_int start;
6139 u_int width;
6140};
6141
6142static void
6143field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6144{
6145 char buf[32];
6146 int line_size = 0;
6147
6148 while (f->name) {
6149 uint64_t mask = (1ULL << f->width) - 1;
6150 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6151 ((uintmax_t)v >> f->start) & mask);
6152
6153 if (line_size + len >= 79) {
6154 line_size = 8;
6155 sbuf_printf(sb, "\n ");
6156 }
6157 sbuf_printf(sb, "%s ", buf);
6158 line_size += len + 1;
6159 f++;
6160 }
6161 sbuf_printf(sb, "\n");
6162}
6163
6164static struct field_desc tp_la0[] = {
6165 { "RcfOpCodeOut", 60, 4 },
6166 { "State", 56, 4 },
6167 { "WcfState", 52, 4 },
6168 { "RcfOpcSrcOut", 50, 2 },
6169 { "CRxError", 49, 1 },
6170 { "ERxError", 48, 1 },
6171 { "SanityFailed", 47, 1 },
6172 { "SpuriousMsg", 46, 1 },
6173 { "FlushInputMsg", 45, 1 },
6174 { "FlushInputCpl", 44, 1 },
6175 { "RssUpBit", 43, 1 },
6176 { "RssFilterHit", 42, 1 },
6177 { "Tid", 32, 10 },
6178 { "InitTcb", 31, 1 },
6179 { "LineNumber", 24, 7 },
6180 { "Emsg", 23, 1 },
6181 { "EdataOut", 22, 1 },
6182 { "Cmsg", 21, 1 },
6183 { "CdataOut", 20, 1 },
6184 { "EreadPdu", 19, 1 },
6185 { "CreadPdu", 18, 1 },
6186 { "TunnelPkt", 17, 1 },
6187 { "RcfPeerFin", 16, 1 },
6188 { "RcfReasonOut", 12, 4 },
6189 { "TxCchannel", 10, 2 },
6190 { "RcfTxChannel", 8, 2 },
6191 { "RxEchannel", 6, 2 },
6192 { "RcfRxChannel", 5, 1 },
6193 { "RcfDataOutSrdy", 4, 1 },
6194 { "RxDvld", 3, 1 },
6195 { "RxOoDvld", 2, 1 },
6196 { "RxCongestion", 1, 1 },
6197 { "TxCongestion", 0, 1 },
6198 { NULL }
6199};
6200
6201static struct field_desc tp_la1[] = {
6202 { "CplCmdIn", 56, 8 },
6203 { "CplCmdOut", 48, 8 },
6204 { "ESynOut", 47, 1 },
6205 { "EAckOut", 46, 1 },
6206 { "EFinOut", 45, 1 },
6207 { "ERstOut", 44, 1 },
6208 { "SynIn", 43, 1 },
6209 { "AckIn", 42, 1 },
6210 { "FinIn", 41, 1 },
6211 { "RstIn", 40, 1 },
6212 { "DataIn", 39, 1 },
6213 { "DataInVld", 38, 1 },
6214 { "PadIn", 37, 1 },
6215 { "RxBufEmpty", 36, 1 },
6216 { "RxDdp", 35, 1 },
6217 { "RxFbCongestion", 34, 1 },
6218 { "TxFbCongestion", 33, 1 },
6219 { "TxPktSumSrdy", 32, 1 },
6220 { "RcfUlpType", 28, 4 },
6221 { "Eread", 27, 1 },
6222 { "Ebypass", 26, 1 },
6223 { "Esave", 25, 1 },
6224 { "Static0", 24, 1 },
6225 { "Cread", 23, 1 },
6226 { "Cbypass", 22, 1 },
6227 { "Csave", 21, 1 },
6228 { "CPktOut", 20, 1 },
6229 { "RxPagePoolFull", 18, 2 },
6230 { "RxLpbkPkt", 17, 1 },
6231 { "TxLpbkPkt", 16, 1 },
6232 { "RxVfValid", 15, 1 },
6233 { "SynLearned", 14, 1 },
6234 { "SetDelEntry", 13, 1 },
6235 { "SetInvEntry", 12, 1 },
6236 { "CpcmdDvld", 11, 1 },
6237 { "CpcmdSave", 10, 1 },
6238 { "RxPstructsFull", 8, 2 },
6239 { "EpcmdDvld", 7, 1 },
6240 { "EpcmdFlush", 6, 1 },
6241 { "EpcmdTrimPrefix", 5, 1 },
6242 { "EpcmdTrimPostfix", 4, 1 },
6243 { "ERssIp4Pkt", 3, 1 },
6244 { "ERssIp6Pkt", 2, 1 },
6245 { "ERssTcpUdpPkt", 1, 1 },
6246 { "ERssFceFipPkt", 0, 1 },
6247 { NULL }
6248};
6249
6250static struct field_desc tp_la2[] = {
6251 { "CplCmdIn", 56, 8 },
6252 { "MpsVfVld", 55, 1 },
6253 { "MpsPf", 52, 3 },
6254 { "MpsVf", 44, 8 },
6255 { "SynIn", 43, 1 },
6256 { "AckIn", 42, 1 },
6257 { "FinIn", 41, 1 },
6258 { "RstIn", 40, 1 },
6259 { "DataIn", 39, 1 },
6260 { "DataInVld", 38, 1 },
6261 { "PadIn", 37, 1 },
6262 { "RxBufEmpty", 36, 1 },
6263 { "RxDdp", 35, 1 },
6264 { "RxFbCongestion", 34, 1 },
6265 { "TxFbCongestion", 33, 1 },
6266 { "TxPktSumSrdy", 32, 1 },
6267 { "RcfUlpType", 28, 4 },
6268 { "Eread", 27, 1 },
6269 { "Ebypass", 26, 1 },
6270 { "Esave", 25, 1 },
6271 { "Static0", 24, 1 },
6272 { "Cread", 23, 1 },
6273 { "Cbypass", 22, 1 },
6274 { "Csave", 21, 1 },
6275 { "CPktOut", 20, 1 },
6276 { "RxPagePoolFull", 18, 2 },
6277 { "RxLpbkPkt", 17, 1 },
6278 { "TxLpbkPkt", 16, 1 },
6279 { "RxVfValid", 15, 1 },
6280 { "SynLearned", 14, 1 },
6281 { "SetDelEntry", 13, 1 },
6282 { "SetInvEntry", 12, 1 },
6283 { "CpcmdDvld", 11, 1 },
6284 { "CpcmdSave", 10, 1 },
6285 { "RxPstructsFull", 8, 2 },
6286 { "EpcmdDvld", 7, 1 },
6287 { "EpcmdFlush", 6, 1 },
6288 { "EpcmdTrimPrefix", 5, 1 },
6289 { "EpcmdTrimPostfix", 4, 1 },
6290 { "ERssIp4Pkt", 3, 1 },
6291 { "ERssIp6Pkt", 2, 1 },
6292 { "ERssTcpUdpPkt", 1, 1 },
6293 { "ERssFceFipPkt", 0, 1 },
6294 { NULL }
6295};
6296
6297static void
6298tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6299{
6300
6301 field_desc_show(sb, *p, tp_la0);
6302}
6303
6304static void
6305tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6306{
6307
6308 if (idx)
6309 sbuf_printf(sb, "\n");
6310 field_desc_show(sb, p[0], tp_la0);
6311 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6312 field_desc_show(sb, p[1], tp_la0);
6313}
6314
6315static void
6316tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6317{
6318
6319 if (idx)
6320 sbuf_printf(sb, "\n");
6321 field_desc_show(sb, p[0], tp_la0);
6322 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6323 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6324}
6325
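/*
 * Dump the TP logic analyzer.  The capture mode (DBGLAMODE) determines
 * whether an entry is one or two 64-bit words and which of the field
 * tables above (tp_la0/1/2) is used to decode each word.
 */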
6326static int
6327sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6328{
6329 struct adapter *sc = arg1;
6330 struct sbuf *sb;
6331 uint64_t *buf, *p;
6332 int rc;
6333 u_int i, inc;
6334 void (*show_func)(struct sbuf *, uint64_t *, int);
6335
6336 rc = sysctl_wire_old_buffer(req, 0);
6337 if (rc != 0)
6338 return (rc);
6339
6340 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6341 if (sb == NULL)
6342 return (ENOMEM);
6343
6344 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6345
6346 t4_tp_read_la(sc, buf, NULL);
6347 p = buf;
6348
6349 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6350 case 2:
6351 inc = 2;
6352 show_func = tp_la_show2;
6353 break;
6354 case 3:
6355 inc = 2;
6356 show_func = tp_la_show3;
6357 break;
6358 default:
6359 inc = 1;
6360 show_func = tp_la_show;
6361 }
6362
6363 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6364 (*show_func)(sb, p, i);
6365
6366 rc = sbuf_finish(sb);
6367 sbuf_delete(sb);
6368 free(buf, M_CXGBE);
6369 return (rc);
6370}
6371
6372static int
6373sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6374{
6375 struct adapter *sc = arg1;
6376 struct sbuf *sb;
6377 int rc;
6378 u64 nrate[NCHAN], orate[NCHAN];
6379
6380 rc = sysctl_wire_old_buffer(req, 0);
6381 if (rc != 0)
6382 return (rc);
6383
6384 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6385 if (sb == NULL)
6386 return (ENOMEM);
6387
6388 t4_get_chan_txrate(sc, nrate, orate);
6389 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
6390 "channel 3\n");
6391 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
6392 nrate[0], nrate[1], nrate[2], nrate[3]);
6393 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
6394 orate[0], orate[1], orate[2], orate[3]);
6395
6396 rc = sbuf_finish(sb);
6397 sbuf_delete(sb);
6398
6399 return (rc);
6400}
6401
6402static int
6403sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6404{
6405 struct adapter *sc = arg1;
6406 struct sbuf *sb;
6407 uint32_t *buf, *p;
6408 int rc, i;
6409
6410 rc = sysctl_wire_old_buffer(req, 0);
6411 if (rc != 0)
6412 return (rc);
6413
6414 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6415 if (sb == NULL)
6416 return (ENOMEM);
6417
6418 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6419 M_ZERO | M_WAITOK);
6420
6421 t4_ulprx_read_la(sc, buf);
6422 p = buf;
6423
6424 sbuf_printf(sb, " Pcmd Type Message"
6425 " Data");
6426 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6427 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
6428 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6429 }
6430
6431 rc = sbuf_finish(sb);
6432 sbuf_delete(sb);
6433 free(buf, M_CXGBE);
6434 return (rc);
6435}
6436
6437static int
6438sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6439{
6440 struct adapter *sc = arg1;
6441 struct sbuf *sb;
6442 int rc, v;
6443
6444 rc = sysctl_wire_old_buffer(req, 0);
6445 if (rc != 0)
6446 return (rc);
6447
6448 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6449 if (sb == NULL)
6450 return (ENOMEM);
6451
6452 v = t4_read_reg(sc, A_SGE_STAT_CFG);
6453 if (G_STATSOURCE_T5(v) == 7) {
6454 if (G_STATMODE(v) == 0) {
6455 sbuf_printf(sb, "total %d, incomplete %d",
6456 t4_read_reg(sc, A_SGE_STAT_TOTAL),
6457 t4_read_reg(sc, A_SGE_STAT_MATCH));
6458 } else if (G_STATMODE(v) == 1) {
6459 sbuf_printf(sb, "total %d, data overflow %d",
6460 t4_read_reg(sc, A_SGE_STAT_TOTAL),
6461 t4_read_reg(sc, A_SGE_STAT_MATCH));
6462 }
6463 }
6464 rc = sbuf_finish(sb);
6465 sbuf_delete(sb);
6466
6467 return (rc);
6468}
6469#endif
6470
6471static inline void
6472txq_start(struct ifnet *ifp, struct sge_txq *txq)
6473{
6474 struct buf_ring *br;
6475 struct mbuf *m;
6476
6477 TXQ_LOCK_ASSERT_OWNED(txq);
6478
6479 br = txq->br;
6480 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6481 if (m)
6482 t4_eth_tx(ifp, txq, m);
6483}
6484
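/*
 * Callout that tries to restart a stalled egress queue.  If the queue
 * still can't make progress the callout reschedules itself; otherwise the
 * actual transmit work is punted to a taskqueue so that it doesn't run
 * from softclock context.
 */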
6485void
6486t4_tx_callout(void *arg)
6487{
6488 struct sge_eq *eq = arg;
6489 struct adapter *sc;
6490
6491 if (EQ_TRYLOCK(eq) == 0)
6492 goto reschedule;
6493
6494 if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6495 EQ_UNLOCK(eq);
6496reschedule:
6497 if (__predict_true(!(eq->flags & EQ_DOOMED)))
6498 callout_schedule(&eq->tx_callout, 1);
6499 return;
6500 }
6501
6502 EQ_LOCK_ASSERT_OWNED(eq);
6503
6504 if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6505
6506 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6507 struct sge_txq *txq = arg;
6508 struct port_info *pi = txq->ifp->if_softc;
6509
6510 sc = pi->adapter;
6511 } else {
6512 struct sge_wrq *wrq = arg;
6513
6514 sc = wrq->adapter;
6515 }
6516
6517 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6518 }
6519
6520 EQ_UNLOCK(eq);
6521}
6522
6523void
6524t4_tx_task(void *arg, int count)
6525{
6526 struct sge_eq *eq = arg;
6527
6528 EQ_LOCK(eq);
6529 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6530 struct sge_txq *txq = arg;
6531 txq_start(txq->ifp, txq);
6532 } else {
6533 struct sge_wrq *wrq = arg;
6534 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6535 }
6536 EQ_UNLOCK(eq);
6537}
6538
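/*
 * Helpers that translate between the hardware filter configuration (the
 * TP_VLAN_PRI_MAP bits, "fconf") and the driver's T4_FILTER_* mode flags.
 * fspec_to_fconf() computes the configuration a particular filter
 * specification would require, for validation against the global mode.
 */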
6539static uint32_t
6540fconf_to_mode(uint32_t fconf)
6541{
6542 uint32_t mode;
6543
6544 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6545 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6546
6547 if (fconf & F_FRAGMENTATION)
6548 mode |= T4_FILTER_IP_FRAGMENT;
6549
6550 if (fconf & F_MPSHITTYPE)
6551 mode |= T4_FILTER_MPS_HIT_TYPE;
6552
6553 if (fconf & F_MACMATCH)
6554 mode |= T4_FILTER_MAC_IDX;
6555
6556 if (fconf & F_ETHERTYPE)
6557 mode |= T4_FILTER_ETH_TYPE;
6558
6559 if (fconf & F_PROTOCOL)
6560 mode |= T4_FILTER_IP_PROTO;
6561
6562 if (fconf & F_TOS)
6563 mode |= T4_FILTER_IP_TOS;
6564
6565 if (fconf & F_VLAN)
6566 mode |= T4_FILTER_VLAN;
6567
6568 if (fconf & F_VNIC_ID)
6569 mode |= T4_FILTER_VNIC;
6570
6571 if (fconf & F_PORT)
6572 mode |= T4_FILTER_PORT;
6573
6574 if (fconf & F_FCOE)
6575 mode |= T4_FILTER_FCoE;
6576
6577 return (mode);
6578}
6579
6580static uint32_t
6581mode_to_fconf(uint32_t mode)
6582{
6583 uint32_t fconf = 0;
6584
6585 if (mode & T4_FILTER_IP_FRAGMENT)
6586 fconf |= F_FRAGMENTATION;
6587
6588 if (mode & T4_FILTER_MPS_HIT_TYPE)
6589 fconf |= F_MPSHITTYPE;
6590
6591 if (mode & T4_FILTER_MAC_IDX)
6592 fconf |= F_MACMATCH;
6593
6594 if (mode & T4_FILTER_ETH_TYPE)
6595 fconf |= F_ETHERTYPE;
6596
6597 if (mode & T4_FILTER_IP_PROTO)
6598 fconf |= F_PROTOCOL;
6599
6600 if (mode & T4_FILTER_IP_TOS)
6601 fconf |= F_TOS;
6602
6603 if (mode & T4_FILTER_VLAN)
6604 fconf |= F_VLAN;
6605
6606 if (mode & T4_FILTER_VNIC)
6607 fconf |= F_VNIC_ID;
6608
6609 if (mode & T4_FILTER_PORT)
6610 fconf |= F_PORT;
6611
6612 if (mode & T4_FILTER_FCoE)
6613 fconf |= F_FCOE;
6614
6615 return (fconf);
6616}
6617
6618static uint32_t
6619fspec_to_fconf(struct t4_filter_specification *fs)
6620{
6621 uint32_t fconf = 0;
6622
6623 if (fs->val.frag || fs->mask.frag)
6624 fconf |= F_FRAGMENTATION;
6625
6626 if (fs->val.matchtype || fs->mask.matchtype)
6627 fconf |= F_MPSHITTYPE;
6628
6629 if (fs->val.macidx || fs->mask.macidx)
6630 fconf |= F_MACMATCH;
6631
6632 if (fs->val.ethtype || fs->mask.ethtype)
6633 fconf |= F_ETHERTYPE;
6634
6635 if (fs->val.proto || fs->mask.proto)
6636 fconf |= F_PROTOCOL;
6637
6638 if (fs->val.tos || fs->mask.tos)
6639 fconf |= F_TOS;
6640
6641 if (fs->val.vlan_vld || fs->mask.vlan_vld)
6642 fconf |= F_VLAN;
6643
6644 if (fs->val.vnic_vld || fs->mask.vnic_vld)
6645 fconf |= F_VNIC_ID;
6646
6647 if (fs->val.iport || fs->mask.iport)
6648 fconf |= F_PORT;
6649
6650 if (fs->val.fcoe || fs->mask.fcoe)
6651 fconf |= F_FCOE;
6652
6653 return (fconf);
6654}
6655
6656static int
6657get_filter_mode(struct adapter *sc, uint32_t *mode)
6658{
6659 int rc;
6660 uint32_t fconf;
6661
6662 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6663 "t4getfm");
6664 if (rc)
6665 return (rc);
6666
6667 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
6668 A_TP_VLAN_PRI_MAP);
6669
6670 if (sc->params.tp.vlan_pri_map != fconf) {
6671 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6672 device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
6673 fconf);
6674 sc->params.tp.vlan_pri_map = fconf;
6675 }
6676
6677 *mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
6678
6679 end_synchronized_op(sc, LOCK_HELD);
6680 return (0);
6681}
6682
6683static int
6684set_filter_mode(struct adapter *sc, uint32_t mode)
6685{
6686 uint32_t fconf;
6687 int rc;
6688
6689 fconf = mode_to_fconf(mode);
6690
6691 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6692 "t4setfm");
6693 if (rc)
6694 return (rc);
6695
6696 if (sc->tids.ftids_in_use > 0) {
6697 rc = EBUSY;
6698 goto done;
6699 }
6700
6701#ifdef TCP_OFFLOAD
6702 if (sc->offload_map) {
6703 rc = EBUSY;
6704 goto done;
6705 }
6706#endif
6707
6708#ifdef notyet
6709 rc = -t4_set_filter_mode(sc, fconf);
6710 if (rc == 0)
6711 sc->filter_mode = fconf;
6712#else
6713 rc = ENOTSUP;
6714#endif
6715
6716done:
6717 end_synchronized_op(sc, LOCK_HELD);
6718 return (rc);
6719}
6720
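/*
 * Read a filter's hit count directly out of its TCB through memory
 * window 0.  The count is kept big-endian in the TCB; T4 reads it as a
 * 64-bit value and T5 as a 32-bit one, at slightly different offsets.
 */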
6721static inline uint64_t
6722get_filter_hits(struct adapter *sc, uint32_t fid)
6723{
6724 uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6725 uint64_t hits;
6726
6727 memwin_info(sc, 0, &mw_base, NULL);
6728 off = position_memwin(sc, 0,
6729 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6730 if (is_t4(sc)) {
6731 hits = t4_read_reg64(sc, mw_base + off + 16);
6732 hits = be64toh(hits);
6733 } else {
6734 hits = t4_read_reg(sc, mw_base + off + 24);
6735 hits = be32toh(hits);
6736 }
6737
6738 return (hits);
6739}
6740
6741static int
6742get_filter(struct adapter *sc, struct t4_filter *t)
6743{
6744 int i, rc, nfilters = sc->tids.nftids;
6745 struct filter_entry *f;
6746
6747 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6748 "t4getf");
6749 if (rc)
6750 return (rc);
6751
6752 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6753 t->idx >= nfilters) {
6754 t->idx = 0xffffffff;
6755 goto done;
6756 }
6757
6758 f = &sc->tids.ftid_tab[t->idx];
6759 for (i = t->idx; i < nfilters; i++, f++) {
6760 if (f->valid) {
6761 t->idx = i;
6762 t->l2tidx = f->l2t ? f->l2t->idx : 0;
6763 t->smtidx = f->smtidx;
6764 if (f->fs.hitcnts)
6765 t->hits = get_filter_hits(sc, t->idx);
6766 else
6767 t->hits = UINT64_MAX;
6768 t->fs = f->fs;
6769
6770 goto done;
6771 }
6772 }
6773
6774 t->idx = 0xffffffff;
6775done:
6776 end_synchronized_op(sc, LOCK_HELD);
6777 return (0);
6778}
6779
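/*
 * Program a hardware filter.  The request is validated against the global
 * filter mode and adapter resources, a filter work request is sent to the
 * firmware, and the thread then sleeps until t4_filter_rpl() reports the
 * result (or the sleep is interrupted, in which case the operation keeps
 * going asynchronously and EINPROGRESS is returned).
 */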
6780static int
6781set_filter(struct adapter *sc, struct t4_filter *t)
6782{
6783 unsigned int nfilters, nports;
6784 struct filter_entry *f;
6785 int i, rc;
6786
6787 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
6788 if (rc)
6789 return (rc);
6790
6791 nfilters = sc->tids.nftids;
6792 nports = sc->params.nports;
6793
6794 if (nfilters == 0) {
6795 rc = ENOTSUP;
6796 goto done;
6797 }
6798
6799 if (!(sc->flags & FULL_INIT_DONE)) {
6800 rc = EAGAIN;
6801 goto done;
6802 }
6803
6804 if (t->idx >= nfilters) {
6805 rc = EINVAL;
6806 goto done;
6807 }
6808
6809 /* Validate against the global filter mode */
6810 if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
6811 sc->params.tp.vlan_pri_map) {
6812 rc = E2BIG;
6813 goto done;
6814 }
6815
6816 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
6817 rc = EINVAL;
6818 goto done;
6819 }
6820
6821 if (t->fs.val.iport >= nports) {
6822 rc = EINVAL;
6823 goto done;
6824 }
6825
6826 /* Can't specify an iq if not steering to it */
6827 if (!t->fs.dirsteer && t->fs.iq) {
6828 rc = EINVAL;
6829 goto done;
6830 }
6831
6832 /* An IPv6 filter occupies four slots; idx must be 4-aligned */
6833 if (t->fs.type == 1 &&
6834 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
6835 rc = EINVAL;
6836 goto done;
6837 }
6838
6839 if (sc->tids.ftid_tab == NULL) {
6840 KASSERT(sc->tids.ftids_in_use == 0,
6841 ("%s: no memory allocated but filters_in_use > 0",
6842 __func__));
6843
6844 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
6845 nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
6846 if (sc->tids.ftid_tab == NULL) {
6847 rc = ENOMEM;
6848 goto done;
6849 }
6850 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
6851 }
6852
6853 for (i = 0; i < 4; i++) {
6854 f = &sc->tids.ftid_tab[t->idx + i];
6855
6856 if (f->pending || f->valid) {
6857 rc = EBUSY;
6858 goto done;
6859 }
6860 if (f->locked) {
6861 rc = EPERM;
6862 goto done;
6863 }
6864
6865 if (t->fs.type == 0)
6866 break;
6867 }
6868
6869 f = &sc->tids.ftid_tab[t->idx];
6870 f->fs = t->fs;
6871
6872 rc = set_filter_wr(sc, t->idx);
6873done:
6874 end_synchronized_op(sc, 0);
6875
6876 if (rc == 0) {
6877 mtx_lock(&sc->tids.ftid_lock);
6878 for (;;) {
6879 if (f->pending == 0) {
6880 rc = f->valid ? 0 : EIO;
6881 break;
6882 }
6883
6884 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6885 PCATCH, "t4setfw", 0)) {
6886 rc = EINPROGRESS;
6887 break;
6888 }
6889 }
6890 mtx_unlock(&sc->tids.ftid_lock);
6891 }
6892 return (rc);
6893}
6894
6895static int
6896del_filter(struct adapter *sc, struct t4_filter *t)
6897{
6898 unsigned int nfilters;
6899 struct filter_entry *f;
6900 int rc;
6901
6902 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
6903 if (rc)
6904 return (rc);
6905
6906 nfilters = sc->tids.nftids;
6907
6908 if (nfilters == 0) {
6909 rc = ENOTSUP;
6910 goto done;
6911 }
6912
6913 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
6914 t->idx >= nfilters) {
6915 rc = EINVAL;
6916 goto done;
6917 }
6918
6919 if (!(sc->flags & FULL_INIT_DONE)) {
6920 rc = EAGAIN;
6921 goto done;
6922 }
6923
6924 f = &sc->tids.ftid_tab[t->idx];
6925
6926 if (f->pending) {
6927 rc = EBUSY;
6928 goto done;
6929 }
6930 if (f->locked) {
6931 rc = EPERM;
6932 goto done;
6933 }
6934
6935 if (f->valid) {
6936 t->fs = f->fs; /* extra info for the caller */
6937 rc = del_filter_wr(sc, t->idx);
6938 }
6939
6940done:
6941 end_synchronized_op(sc, 0);
6942
6943 if (rc == 0) {
6944 mtx_lock(&sc->tids.ftid_lock);
6945 for (;;) {
6946 if (f->pending == 0) {
6947 rc = f->valid ? EIO : 0;
6948 break;
6949 }
6950
6951 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6952 PCATCH, "t4delfw", 0)) {
6953 rc = EINPROGRESS;
6954 break;
6955 }
6956 }
6957 mtx_unlock(&sc->tids.ftid_lock);
6958 }
6959
6960 return (rc);
6961}
6962
6963static void
6964clear_filter(struct filter_entry *f)
6965{
6966 if (f->l2t)
6967 t4_l2t_release(f->l2t);
6968
6969 bzero(f, sizeof (*f));
6970}
6971
6972static int
6973set_filter_wr(struct adapter *sc, int fidx)
6974{
6975 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
6976 struct wrqe *wr;
6977 struct fw_filter_wr *fwr;
6978 unsigned int ftid;
6979
6980 ASSERT_SYNCHRONIZED_OP(sc);
6981
6982 if (f->fs.newdmac || f->fs.newvlan) {
6983 /* This filter needs an L2T entry; allocate one. */
6984 f->l2t = t4_l2t_alloc_switching(sc->l2t);
6985 if (f->l2t == NULL)
6986 return (EAGAIN);
6987 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
6988 f->fs.dmac)) {
6989 t4_l2t_release(f->l2t);
6990 f->l2t = NULL;
6991 return (ENOMEM);
6992 }
6993 }
6994
6995 ftid = sc->tids.ftid_base + fidx;
6996
6997 wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
6998 if (wr == NULL)
6999 return (ENOMEM);
7000
7001 fwr = wrtod(wr);
7002 bzero(fwr, sizeof (*fwr));
7003
7004 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
7005 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
7006 fwr->tid_to_iq =
7007 htobe32(V_FW_FILTER_WR_TID(ftid) |
7008 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
7009 V_FW_FILTER_WR_NOREPLY(0) |
7010 V_FW_FILTER_WR_IQ(f->fs.iq));
7011 fwr->del_filter_to_l2tix =
7012 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
7013 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
7014 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
7015 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
7016 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
7017 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
7018 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
7019 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
7020 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
7021 f->fs.newvlan == VLAN_REWRITE) |
7022 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
7023 f->fs.newvlan == VLAN_REWRITE) |
7024 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
7025 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
7026 V_FW_FILTER_WR_PRIO(f->fs.prio) |
7027 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
7028 fwr->ethtype = htobe16(f->fs.val.ethtype);
7029 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
7030 fwr->frag_to_ovlan_vldm =
7031 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
7032 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
7033 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
7034 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
7035 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
7036 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
7037 fwr->smac_sel = 0;
7038 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
7039 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
7040 fwr->maci_to_matchtypem =
7041 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
7042 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
7043 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
7044 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
7045 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
7046 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
7047 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
7048 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
7049 fwr->ptcl = f->fs.val.proto;
7050 fwr->ptclm = f->fs.mask.proto;
7051 fwr->ttyp = f->fs.val.tos;
7052 fwr->ttypm = f->fs.mask.tos;
7053 fwr->ivlan = htobe16(f->fs.val.vlan);
7054 fwr->ivlanm = htobe16(f->fs.mask.vlan);
7055 fwr->ovlan = htobe16(f->fs.val.vnic);
7056 fwr->ovlanm = htobe16(f->fs.mask.vnic);
7057 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
7058 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
7059 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
7060 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
7061 fwr->lp = htobe16(f->fs.val.dport);
7062 fwr->lpm = htobe16(f->fs.mask.dport);
7063 fwr->fp = htobe16(f->fs.val.sport);
7064 fwr->fpm = htobe16(f->fs.mask.sport);
7065 if (f->fs.newsmac)
7066 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
7067
7068 f->pending = 1;
7069 sc->tids.ftids_in_use++;
7070
7071 t4_wrq_tx(sc, wr);
7072 return (0);
7073}
7074
7075static int
7076del_filter_wr(struct adapter *sc, int fidx)
7077{
7078 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7079 struct wrqe *wr;
7080 struct fw_filter_wr *fwr;
7081 unsigned int ftid;
7082
7083 ftid = sc->tids.ftid_base + fidx;
7084
7085 wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7086 if (wr == NULL)
7087 return (ENOMEM);
7088 fwr = wrtod(wr);
7089 bzero(fwr, sizeof (*fwr));
7090
7091 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7092
7093 f->pending = 1;
7094 t4_wrq_tx(sc, wr);
7095 return (0);
7096}
7097
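/*
 * Handler for the firmware's reply to a filter work request.  Completes
 * the pending add/delete for the filter's tid and wakes up any thread
 * sleeping in set_filter() or del_filter().
 */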
7098int
7099t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7100{
7101 struct adapter *sc = iq->adapter;
7102 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7103 unsigned int idx = GET_TID(rpl);
7104
7105 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
7106 rss->opcode));
7107
7108 if (idx >= sc->tids.ftid_base &&
7109 (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
7110 unsigned int rc = G_COOKIE(rpl->cookie);
7111 struct filter_entry *f = &sc->tids.ftid_tab[idx];
7112
7113 mtx_lock(&sc->tids.ftid_lock);
7114 if (rc == FW_FILTER_WR_FLT_ADDED) {
7115 KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7116 __func__, idx));
7117 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7118 f->pending = 0; /* asynchronous setup completed */
7119 f->valid = 1;
7120 } else {
7121 if (rc != FW_FILTER_WR_FLT_DELETED) {
7122 /* Add or delete failed, display an error */
7123 log(LOG_ERR,
7124 "filter %u setup failed with error %u\n",
7125 idx, rc);
7126 }
7127
7128 clear_filter(f);
7129 sc->tids.ftids_in_use--;
7130 }
7131 wakeup(&sc->tids.ftid_tab);
7132 mtx_unlock(&sc->tids.ftid_lock);
7133 }
7134
7135 return (0);
7136}
7137
7138static int
7139get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7140{
7141 int rc;
7142
7143 if (cntxt->cid > M_CTXTQID)
7144 return (EINVAL);
7145
7146 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7147 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7148 return (EINVAL);
7149
7150 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7151 if (rc)
7152 return (rc);
7153
7154 if (sc->flags & FW_OK) {
7155 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7156 &cntxt->data[0]);
7157 if (rc == 0)
7158 goto done;
7159 }
7160
7161 /*
7162 * Read via firmware failed or wasn't even attempted. Read directly via
7163 * the backdoor.
7164 */
7165 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7166done:
7167 end_synchronized_op(sc, 0);
7168 return (rc);
7169}
7170
7171static int
7172load_fw(struct adapter *sc, struct t4_data *fw)
7173{
7174 int rc;
7175 uint8_t *fw_data;
7176
7177 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7178 if (rc)
7179 return (rc);
7180
7181 if (sc->flags & FULL_INIT_DONE) {
7182 rc = EBUSY;
7183 goto done;
7184 }
7185
7186 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7187 if (fw_data == NULL) {
7188 rc = ENOMEM;
7189 goto done;
7190 }
7191
7192 rc = copyin(fw->data, fw_data, fw->len);
7193 if (rc == 0)
7194 rc = -t4_load_fw(sc, fw_data, fw->len);
7195
7196 free(fw_data, M_CXGBE);
7197done:
7198 end_synchronized_op(sc, 0);
7199 return (rc);
7200}
7201
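/*
 * Copy a range of adapter memory out to the caller through a PCIe memory
 * window: a fixed-size aperture that position_memwin aims at the target
 * address, returning the offset of that address within the window.  The
 * inner loop reads 32 bits at a time until the aperture is exhausted, then
 * the window is repositioned and the process repeats until the whole range
 * has been copied out.
 */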
7202static int
7203read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7204{
7205 uint32_t addr, off, remaining, i, n;
7206 uint32_t *buf, *b;
7207 uint32_t mw_base, mw_aperture;
7208 int rc;
7209 uint8_t *dst;
7210
7211 rc = validate_mem_range(sc, mr->addr, mr->len);
7212 if (rc != 0)
7213 return (rc);
7214
7215 memwin_info(sc, win, &mw_base, &mw_aperture);
7216 buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7217 addr = mr->addr;
7218 remaining = mr->len;
7219 dst = (void *)mr->data;
7220
7221 while (remaining) {
7222 off = position_memwin(sc, win, addr);
7223
7224 /* number of bytes that we'll copy in the inner loop */
7225 n = min(remaining, mw_aperture - off);
7226 for (i = 0; i < n; i += 4)
7227 *b++ = t4_read_reg(sc, mw_base + off + i);
7228
7229 rc = copyout(buf, dst, n);
7230 if (rc != 0)
7231 break;
7232
7233 b = buf;
7234 dst += n;
7235 remaining -= n;
7236 addr += n;
7237 }
7238
7239 free(buf, M_CXGBE);
7240 return (rc);
7241}
7242
7243static int
7244read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7245{
7246 int rc;
7247
7248 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7249 return (EINVAL);
7250
7251 if (i2cd->len > 1) {
7252 /* XXX: need fw support for longer reads in one go */
7253 return (ENOTSUP);
7254 }
7255
7256 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7257 if (rc)
7258 return (rc);
7259 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7260 i2cd->offset, &i2cd->data[0]);
7261 end_synchronized_op(sc, 0);
7262
7263 return (rc);
7264}
7265
7266int
7267t4_os_find_pci_capability(struct adapter *sc, int cap)
7268{
7269 int i;
7270
7271 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7272}
7273
7274int
7275t4_os_pci_save_state(struct adapter *sc)
7276{
7277 device_t dev;
7278 struct pci_devinfo *dinfo;
7279
7280 dev = sc->dev;
7281 dinfo = device_get_ivars(dev);
7282
7283 pci_cfg_save(dev, dinfo, 0);
7284 return (0);
7285}
7286
7287int
7288t4_os_pci_restore_state(struct adapter *sc)
7289{
7290 device_t dev;
7291 struct pci_devinfo *dinfo;
7292
7293 dev = sc->dev;
7294 dinfo = device_get_ivars(dev);
7295
7296 pci_cfg_restore(dev, dinfo);
7297 return (0);
7298}
7299
7300void
7301t4_os_portmod_changed(const struct adapter *sc, int idx)
7302{
7303 struct port_info *pi = sc->port[idx];
7304 static const char *mod_str[] = {
7305 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7306 };
7307
7308 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7309 if_printf(pi->ifp, "transceiver unplugged.\n");
7310 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7311 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7312 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7313 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7314 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7315 if_printf(pi->ifp, "%s transceiver inserted.\n",
7316 mod_str[pi->mod_type]);
7317 } else {
7318 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7319 pi->mod_type);
7320 }
7321}
7322
7323void
7324t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7325{
7326 struct port_info *pi = sc->port[idx];
7327 struct ifnet *ifp = pi->ifp;
7328
7329 if (link_stat) {
7330 pi->linkdnrc = -1;
7331 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7332 if_link_state_change(ifp, LINK_STATE_UP);
7333 } else {
7334 if (reason >= 0)
7335 pi->linkdnrc = reason;
7336 if_link_state_change(ifp, LINK_STATE_DOWN);
7337 }
7338}
7339
7340void
7341t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7342{
7343 struct adapter *sc;
7344
7345 mtx_lock(&t4_list_lock);
7346 SLIST_FOREACH(sc, &t4_list, link) {
7347 /*
7348 * func should not make any assumptions about what state sc is
7349 * in - the only guarantee is that sc->sc_lock is a valid lock.
7350 */
7351 func(sc, arg);
7352 }
7353 mtx_unlock(&t4_list_lock);
7354}
7355
7356static int
7357t4_open(struct cdev *dev, int flags, int type, struct thread *td)
7358{
7359 return (0);
7360}
7361
7362static int
7363t4_close(struct cdev *dev, int flags, int type, struct thread *td)
7364{
7365 return (0);
7366}
7367
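/*
 * ioctl handler for the adapter's control device.  A minimal userland
 * sketch of the register-read ioctl follows; the device node name is an
 * assumption (the cdev is named after the nexus device, e.g. t4nex0), and
 * the header that defines CHELSIO_T4_GETREG and struct t4_reg must be
 * included from the driver sources:
 *
 *	struct t4_reg reg = { .addr = 0x1008, .size = 4 };
 *	int fd = open("/dev/t4nex0", O_RDWR);	// assumed node name
 *	if (fd >= 0 && ioctl(fd, CHELSIO_T4_GETREG, &reg) == 0)
 *		printf("0x%x: 0x%jx\n", reg.addr, (uintmax_t)reg.val);
 */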
7368static int
7369t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
7370 struct thread *td)
7371{
7372 int rc;
7373 struct adapter *sc = dev->si_drv1;
7374
7375 rc = priv_check(td, PRIV_DRIVER);
7376 if (rc != 0)
7377 return (rc);
7378
7379 switch (cmd) {
7380 case CHELSIO_T4_GETREG: {
7381 struct t4_reg *edata = (struct t4_reg *)data;
7382
7383 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7384 return (EFAULT);
7385
7386 if (edata->size == 4)
7387 edata->val = t4_read_reg(sc, edata->addr);
7388 else if (edata->size == 8)
7389 edata->val = t4_read_reg64(sc, edata->addr);
7390 else
7391 return (EINVAL);
7392
7393 break;
7394 }
7395 case CHELSIO_T4_SETREG: {
7396 struct t4_reg *edata = (struct t4_reg *)data;
7397
7398 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7399 return (EFAULT);
7400
7401 if (edata->size == 4) {
7402 if (edata->val & 0xffffffff00000000)
7403 return (EINVAL);
7404 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
7405 } else if (edata->size == 8)
7406 t4_write_reg64(sc, edata->addr, edata->val);
7407 else
7408 return (EINVAL);
7409 break;
7410 }
7411 case CHELSIO_T4_REGDUMP: {
7412 struct t4_regdump *regs = (struct t4_regdump *)data;
7413 int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
7414 uint8_t *buf;
7415
7416 if (regs->len < reglen) {
7417 regs->len = reglen; /* hint to the caller */
7418 return (ENOBUFS);
7419 }
7420
7421 regs->len = reglen;
7422 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
7423 t4_get_regs(sc, regs, buf);
7424 rc = copyout(buf, regs->data, reglen);
7425 free(buf, M_CXGBE);
7426 break;
7427 }
7428 case CHELSIO_T4_GET_FILTER_MODE:
7429 rc = get_filter_mode(sc, (uint32_t *)data);
7430 break;
7431 case CHELSIO_T4_SET_FILTER_MODE:
7432 rc = set_filter_mode(sc, *(uint32_t *)data);
7433 break;
7434 case CHELSIO_T4_GET_FILTER:
7435 rc = get_filter(sc, (struct t4_filter *)data);
7436 break;
7437 case CHELSIO_T4_SET_FILTER:
7438 rc = set_filter(sc, (struct t4_filter *)data);
7439 break;
7440 case CHELSIO_T4_DEL_FILTER:
7441 rc = del_filter(sc, (struct t4_filter *)data);
7442 break;
7443 case CHELSIO_T4_GET_SGE_CONTEXT:
7444 rc = get_sge_context(sc, (struct t4_sge_context *)data);
7445 break;
7446 case CHELSIO_T4_LOAD_FW:
7447 rc = load_fw(sc, (struct t4_data *)data);
7448 break;
7449 case CHELSIO_T4_GET_MEM:
7450 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
7451 break;
7452 case CHELSIO_T4_GET_I2C:
7453 rc = read_i2c(sc, (struct t4_i2c_data *)data);
7454 break;
7455 case CHELSIO_T4_CLEAR_STATS: {
7456 int i;
7457 u_int port_id = *(uint32_t *)data;
7458 struct port_info *pi;
7459
7460 if (port_id >= sc->params.nports)
7461 return (EINVAL);
7462
7463 /* MAC stats */
7464 t4_clr_port_stats(sc, port_id);
7465
7466 pi = sc->port[port_id];
7467 if (pi->flags & PORT_INIT_DONE) {
7468 struct sge_rxq *rxq;
7469 struct sge_txq *txq;
7470 struct sge_wrq *wrq;
7471
7472 for_each_rxq(pi, i, rxq) {
7473#if defined(INET) || defined(INET6)
7474 rxq->lro.lro_queued = 0;
7475 rxq->lro.lro_flushed = 0;
7476#endif
7477 rxq->rxcsum = 0;
7478 rxq->vlan_extraction = 0;
7479 }
7480
7481 for_each_txq(pi, i, txq) {
7482 txq->txcsum = 0;
7483 txq->tso_wrs = 0;
7484 txq->vlan_insertion = 0;
7485 txq->imm_wrs = 0;
7486 txq->sgl_wrs = 0;
7487 txq->txpkt_wrs = 0;
7488 txq->txpkts_wrs = 0;
7489 txq->txpkts_pkts = 0;
7490 txq->br->br_drops = 0;
7491 txq->no_dmamap = 0;
7492 txq->no_desc = 0;
7493 }
7494
7495#ifdef TCP_OFFLOAD
7496 /* nothing to clear for each ofld_rxq */
7497
7498 for_each_ofld_txq(pi, i, wrq) {
7499 wrq->tx_wrs = 0;
7500 wrq->no_desc = 0;
7501 }
7502#endif
7503 wrq = &sc->sge.ctrlq[pi->port_id];
7504 wrq->tx_wrs = 0;
7505 wrq->no_desc = 0;
7506 }
7507 break;
7508 }
7509 case CHELSIO_T4_GET_TRACER:
7510 rc = t4_get_tracer(sc, (struct t4_tracer *)data);
7511 break;
7512 case CHELSIO_T4_SET_TRACER:
7513 rc = t4_set_tracer(sc, (struct t4_tracer *)data);
7514 break;
7515 default:
7516 rc = EINVAL;
7517 }
7518
7519 return (rc);
7520}
7521
7522#ifdef TCP_OFFLOAD
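/*
 * Enable or disable TOE on a port.  Enabling brings the adapter and port
 * fully up if they aren't already, activates the TOM upper layer driver on
 * first use (t4_tom.ko must be loaded), and then flips the port's bit in
 * sc->offload_map; disabling only clears that bit.
 */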
7523static int
7524toe_capability(struct port_info *pi, int enable)
7525{
7526 int rc;
7527 struct adapter *sc = pi->adapter;
7528
7529 ASSERT_SYNCHRONIZED_OP(sc);
7530
7531 if (!is_offload(sc))
7532 return (ENODEV);
7533
7534 if (enable) {
7535 if (!(sc->flags & FULL_INIT_DONE)) {
7536 rc = cxgbe_init_synchronized(pi);
7537 if (rc)
7538 return (rc);
7539 }
7540
7541 if (isset(&sc->offload_map, pi->port_id))
7542 return (0);
7543
7544 if (!(sc->flags & TOM_INIT_DONE)) {
7545 rc = t4_activate_uld(sc, ULD_TOM);
7546 if (rc == EAGAIN) {
7547 log(LOG_WARNING,
7548 "You must kldload t4_tom.ko before trying "
7549 "to enable TOE on a cxgbe interface.\n");
7550 }
7551 if (rc != 0)
7552 return (rc);
7553 KASSERT(sc->tom_softc != NULL,
7554 ("%s: TOM activated but softc NULL", __func__));
7555 KASSERT(sc->flags & TOM_INIT_DONE,
7556 ("%s: TOM activated but flag not set", __func__));
7557 }
7558
7559 setbit(&sc->offload_map, pi->port_id);
7560 } else {
7561 if (!isset(&sc->offload_map, pi->port_id))
7562 return (0);
7563
7564 KASSERT(sc->flags & TOM_INIT_DONE,
7565 ("%s: TOM never initialized?", __func__));
7566 clrbit(&sc->offload_map, pi->port_id);
7567 }
7568
7569 return (0);
7570}
7571
7572/*
7573 * Add an upper layer driver to the global list.
7574 */
7575int
7576t4_register_uld(struct uld_info *ui)
7577{
7578 int rc = 0;
7579 struct uld_info *u;
7580
7581 mtx_lock(&t4_uld_list_lock);
7582 SLIST_FOREACH(u, &t4_uld_list, link) {
7583 if (u->uld_id == ui->uld_id) {
7584 rc = EEXIST;
7585 goto done;
7586 }
7587 }
7588
7589 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7590 ui->refcount = 0;
7591done:
7592 mtx_unlock(&t4_uld_list_lock);
7593 return (rc);
7594}
7595
7596int
7597t4_unregister_uld(struct uld_info *ui)
7598{
7599 int rc = EINVAL;
7600 struct uld_info *u;
7601
7602 mtx_lock(&t4_uld_list_lock);
7603
7604 SLIST_FOREACH(u, &t4_uld_list, link) {
7605 if (u == ui) {
7606 if (ui->refcount > 0) {
7607 rc = EBUSY;
7608 goto done;
7609 }
7610
7611 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7612 rc = 0;
7613 goto done;
7614 }
7615 }
7616done:
7617 mtx_unlock(&t4_uld_list_lock);
7618 return (rc);
7619}
7620
7621int
7622t4_activate_uld(struct adapter *sc, int id)
7623{
7624 int rc = EAGAIN;
7625 struct uld_info *ui;
7626
7627 ASSERT_SYNCHRONIZED_OP(sc);
7628
7629 mtx_lock(&t4_uld_list_lock);
7630
7631 SLIST_FOREACH(ui, &t4_uld_list, link) {
7632 if (ui->uld_id == id) {
7633 rc = ui->activate(sc);
7634 if (rc == 0)
7635 ui->refcount++;
7636 goto done;
7637 }
7638 }
7639done:
7640 mtx_unlock(&t4_uld_list_lock);
7641
7642 return (rc);
7643}
7644
7645int
7646t4_deactivate_uld(struct adapter *sc, int id)
7647{
7648 int rc = EINVAL;
7649 struct uld_info *ui;
7650
7651 ASSERT_SYNCHRONIZED_OP(sc);
7652
7653 mtx_lock(&t4_uld_list_lock);
7654
7655 SLIST_FOREACH(ui, &t4_uld_list, link) {
7656 if (ui->uld_id == id) {
7657 rc = ui->deactivate(sc);
7658 if (rc == 0)
7659 ui->refcount--;
7660 goto done;
7661 }
7662 }
7663done:
7664 mtx_unlock(&t4_uld_list_lock);
7665
7666 return (rc);
7667}
7668#endif
7669
7670/*
7671 * Come up with reasonable defaults for any tunables that were not set by
7672 * the user (values set by the user are used as-is).
7673 */
7674static void
7675tweak_tunables(void)
7676{
7677 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
7678
7679 if (t4_ntxq10g < 1)
7680 t4_ntxq10g = min(nc, NTXQ_10G);
7681
7682 if (t4_ntxq1g < 1)
7683 t4_ntxq1g = min(nc, NTXQ_1G);
7684
7685 if (t4_nrxq10g < 1)
7686 t4_nrxq10g = min(nc, NRXQ_10G);
7687
7688 if (t4_nrxq1g < 1)
7689 t4_nrxq1g = min(nc, NRXQ_1G);
7690
7691#ifdef TCP_OFFLOAD
7692 if (t4_nofldtxq10g < 1)
7693 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7694
7695 if (t4_nofldtxq1g < 1)
7696 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7697
7698 if (t4_nofldrxq10g < 1)
7699 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7700
7701 if (t4_nofldrxq1g < 1)
7702 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
7703
7704 if (t4_toecaps_allowed == -1)
7705 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7706#else
7707 if (t4_toecaps_allowed == -1)
7708 t4_toecaps_allowed = 0;
7709#endif
7710
7711 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7712 t4_tmr_idx_10g = TMR_IDX_10G;
7713
7714 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7715 t4_pktc_idx_10g = PKTC_IDX_10G;
7716
7717 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7718 t4_tmr_idx_1g = TMR_IDX_1G;
7719
7720 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7721 t4_pktc_idx_1g = PKTC_IDX_1G;
7722
7723 if (t4_qsize_txq < 128)
7724 t4_qsize_txq = 128;
7725
7726 if (t4_qsize_rxq < 128)
7727 t4_qsize_rxq = 128;
7728 while (t4_qsize_rxq & 7)
7729 t4_qsize_rxq++;
7730
7731 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
7732}
7733
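/*
 * Module event handler shared by t4nex and t5nex (see the DRIVER_MODULE
 * declarations below).  The 'loaded' count ensures the global lists and
 * locks are set up only on the first load and torn down only on the last
 * unload, and unload is refused while any adapter or ULD is still around.
 */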
7734static int
7735mod_event(module_t mod, int cmd, void *arg)
7736{
7737 int rc = 0;
7738 static int loaded = 0;
7739
7740 switch (cmd) {
7741 case MOD_LOAD:
7742 if (atomic_fetchadd_int(&loaded, 1))
7743 break;
7744 t4_sge_modload();
7745 mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
7746 SLIST_INIT(&t4_list);
7747#ifdef TCP_OFFLOAD
7748 mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
7749 SLIST_INIT(&t4_uld_list);
7750#endif
7751 t4_tracer_modload();
7752 tweak_tunables();
7753 break;
7754
7755 case MOD_UNLOAD:
7756 if (atomic_fetchadd_int(&loaded, -1) > 1)
7757 break;
7758 t4_tracer_modunload();
7759#ifdef TCP_OFFLOAD
7760 mtx_lock(&t4_uld_list_lock);
7761 if (!SLIST_EMPTY(&t4_uld_list)) {
7762 rc = EBUSY;
7763 mtx_unlock(&t4_uld_list_lock);
7764 break;
7765 }
7766 mtx_unlock(&t4_uld_list_lock);
7767 mtx_destroy(&t4_uld_list_lock);
7768#endif
7769 mtx_lock(&t4_list_lock);
7770 if (!SLIST_EMPTY(&t4_list)) {
7771 rc = EBUSY;
7772 mtx_unlock(&t4_list_lock);
7773 break;
7774 }
7775 mtx_unlock(&t4_list_lock);
7776 mtx_destroy(&t4_list_lock);
7777 break;
7778 }
7779
7780 return (rc);
7781}
7782
7783static devclass_t t4_devclass, t5_devclass;
7784static devclass_t cxgbe_devclass, cxl_devclass;
7785
7786DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
7787MODULE_VERSION(t4nex, 1);
7788MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
7789
7790DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
7791MODULE_VERSION(t5nex, 1);
7792MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
7793
7794DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
7795MODULE_VERSION(cxgbe, 1);
7796
7797DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
7798MODULE_VERSION(cxl, 1);
2555
2556 device_set_desc_copy(sc->dev, buf);
2557}
2558
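/*
 * Build the ifmedia list for a port from the firmware-reported port and
 * module types.  The (port_type << 8) | mod_type word is stored as each
 * entry's data, presumably so later media handling can tell which
 * port/module combination an entry was created for.
 */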
2559static void
2560build_medialist(struct port_info *pi)
2561{
2562 struct ifmedia *media = &pi->media;
2563 int data, m;
2564
2565 PORT_LOCK(pi);
2566
2567 ifmedia_removeall(media);
2568
2569 m = IFM_ETHER | IFM_FDX;
2570 data = (pi->port_type << 8) | pi->mod_type;
2571
2572 switch(pi->port_type) {
2573 case FW_PORT_TYPE_BT_XFI:
2574 ifmedia_add(media, m | IFM_10G_T, data, NULL);
2575 break;
2576
2577 case FW_PORT_TYPE_BT_XAUI:
2578 ifmedia_add(media, m | IFM_10G_T, data, NULL);
2579 /* fall through */
2580
2581 case FW_PORT_TYPE_BT_SGMII:
2582 ifmedia_add(media, m | IFM_1000_T, data, NULL);
2583 ifmedia_add(media, m | IFM_100_TX, data, NULL);
2584 ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
2585 ifmedia_set(media, IFM_ETHER | IFM_AUTO);
2586 break;
2587
2588 case FW_PORT_TYPE_CX4:
2589 ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
2590 ifmedia_set(media, m | IFM_10G_CX4);
2591 break;
2592
2593 case FW_PORT_TYPE_SFP:
2594 case FW_PORT_TYPE_FIBER_XFI:
2595 case FW_PORT_TYPE_FIBER_XAUI:
2596 switch (pi->mod_type) {
2597
2598 case FW_PORT_MOD_TYPE_LR:
2599 ifmedia_add(media, m | IFM_10G_LR, data, NULL);
2600 ifmedia_set(media, m | IFM_10G_LR);
2601 break;
2602
2603 case FW_PORT_MOD_TYPE_SR:
2604 ifmedia_add(media, m | IFM_10G_SR, data, NULL);
2605 ifmedia_set(media, m | IFM_10G_SR);
2606 break;
2607
2608 case FW_PORT_MOD_TYPE_LRM:
2609 ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
2610 ifmedia_set(media, m | IFM_10G_LRM);
2611 break;
2612
2613 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2614 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2615 ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
2616 ifmedia_set(media, m | IFM_10G_TWINAX);
2617 break;
2618
2619 case FW_PORT_MOD_TYPE_NONE:
2620 m &= ~IFM_FDX;
2621 ifmedia_add(media, m | IFM_NONE, data, NULL);
2622 ifmedia_set(media, m | IFM_NONE);
2623 break;
2624
2625 case FW_PORT_MOD_TYPE_NA:
2626 case FW_PORT_MOD_TYPE_ER:
2627 default:
2628 device_printf(pi->dev,
2629 "unknown port_type (%d), mod_type (%d)\n",
2630 pi->port_type, pi->mod_type);
2631 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2632 ifmedia_set(media, m | IFM_UNKNOWN);
2633 break;
2634 }
2635 break;
2636
2637 case FW_PORT_TYPE_QSFP:
2638 switch (pi->mod_type) {
2639
2640 case FW_PORT_MOD_TYPE_LR:
2641 ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
2642 ifmedia_set(media, m | IFM_40G_LR4);
2643 break;
2644
2645 case FW_PORT_MOD_TYPE_SR:
2646 ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
2647 ifmedia_set(media, m | IFM_40G_SR4);
2648 break;
2649
2650 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
2651 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
2652 ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
2653 ifmedia_set(media, m | IFM_40G_CR4);
2654 break;
2655
2656 case FW_PORT_MOD_TYPE_NONE:
2657 m &= ~IFM_FDX;
2658 ifmedia_add(media, m | IFM_NONE, data, NULL);
2659 ifmedia_set(media, m | IFM_NONE);
2660 break;
2661
2662 default:
2663 device_printf(pi->dev,
2664 "unknown port_type (%d), mod_type (%d)\n",
2665 pi->port_type, pi->mod_type);
2666 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2667 ifmedia_set(media, m | IFM_UNKNOWN);
2668 break;
2669 }
2670 break;
2671
2672 default:
2673 device_printf(pi->dev,
2674 "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
2675 pi->mod_type);
2676 ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
2677 ifmedia_set(media, m | IFM_UNKNOWN);
2678 break;
2679 }
2680
2681 PORT_UNLOCK(pi);
2682}
2683
2684#define FW_MAC_EXACT_CHUNK 7
2685
2686/*
2687 * Program the port's XGMAC based on parameters in ifnet. The caller also
2688 * indicates which parameters should be programmed (the rest are left alone).
2689 */
2690static int
2691update_mac_settings(struct port_info *pi, int flags)
2692{
2693 int rc;
2694 struct ifnet *ifp = pi->ifp;
2695 struct adapter *sc = pi->adapter;
2696 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2697
2698 ASSERT_SYNCHRONIZED_OP(sc);
2699 KASSERT(flags, ("%s: not told what to update.", __func__));
2700
2701 if (flags & XGMAC_MTU)
2702 mtu = ifp->if_mtu;
2703
2704 if (flags & XGMAC_PROMISC)
2705 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2706
2707 if (flags & XGMAC_ALLMULTI)
2708 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2709
2710 if (flags & XGMAC_VLANEX)
2711 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2712
2713 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2714 vlanex, false);
2715 if (rc) {
2716 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2717 return (rc);
2718 }
2719
2720 if (flags & XGMAC_UCADDR) {
2721 uint8_t ucaddr[ETHER_ADDR_LEN];
2722
2723 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2724 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2725 ucaddr, true, true);
2726 if (rc < 0) {
2727 rc = -rc;
2728 if_printf(ifp, "change_mac failed: %d\n", rc);
2729 return (rc);
2730 } else {
2731 pi->xact_addr_filt = rc;
2732 rc = 0;
2733 }
2734 }
2735
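	/*
	 * Multicast addresses are programmed into the exact-match filter in
	 * chunks of FW_MAC_EXACT_CHUNK; the first chunk is sent with del=1
	 * so the firmware replaces the old set.  Addresses that don't fit in
	 * the exact-match table are folded into the 64-bit hash by
	 * t4_alloc_mac_filt, and the hash is written out at the end.
	 */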
2736 if (flags & XGMAC_MCADDRS) {
2737 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2738 int del = 1;
2739 uint64_t hash = 0;
2740 struct ifmultiaddr *ifma;
2741 int i = 0, j;
2742
2743 if_maddr_rlock(ifp);
2744 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2745 if (ifma->ifma_addr->sa_family != AF_LINK)
2746 continue;
2747 mcaddr[i++] =
2748 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2749
2750 if (i == FW_MAC_EXACT_CHUNK) {
2751 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2752 del, i, mcaddr, NULL, &hash, 0);
2753 if (rc < 0) {
2754 rc = -rc;
2755 for (j = 0; j < i; j++) {
2756 if_printf(ifp,
2757 "failed to add mc address"
2758 " %02x:%02x:%02x:"
2759 "%02x:%02x:%02x rc=%d\n",
2760 mcaddr[j][0], mcaddr[j][1],
2761 mcaddr[j][2], mcaddr[j][3],
2762 mcaddr[j][4], mcaddr[j][5],
2763 rc);
2764 }
2765 goto mcfail;
2766 }
2767 del = 0;
2768 i = 0;
2769 }
2770 }
2771 if (i > 0) {
2772 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2773 del, i, mcaddr, NULL, &hash, 0);
2774 if (rc < 0) {
2775 rc = -rc;
2776 for (j = 0; j < i; j++) {
2777 if_printf(ifp,
2778 "failed to add mc address"
2779 " %02x:%02x:%02x:"
2780 "%02x:%02x:%02x rc=%d\n",
2781 mcaddr[j][0], mcaddr[j][1],
2782 mcaddr[j][2], mcaddr[j][3],
2783 mcaddr[j][4], mcaddr[j][5],
2784 rc);
2785 }
2786 goto mcfail;
2787 }
2788 }
2789
2790 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2791 if (rc != 0)
2792			if_printf(ifp, "failed to set mc address hash: %d\n", rc);
2793mcfail:
2794 if_maddr_runlock(ifp);
2795 }
2796
2797 return (rc);
2798}
2799
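/*
 * Serialization for slow, sleepable operations on the adapter.  IS_BUSY
 * (guarded by the adapter lock) marks the adapter busy; depending on the
 * flags passed in, a would-be entrant sleeps on &sc->flags until the flag
 * clears, fails with EBUSY, or backs out with EINTR/ENXIO.  Typical usage,
 * as in load_fw() and the other ioctl helpers above:
 *
 *	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4xxxx");
 *	if (rc)
 *		return (rc);
 *	... sleepable work ...
 *	end_synchronized_op(sc, 0);
 */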
2800int
2801begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2802 char *wmesg)
2803{
2804 int rc, pri;
2805
2806#ifdef WITNESS
2807 /* the caller thinks it's ok to sleep, but is it really? */
2808 if (flags & SLEEP_OK)
2809 pause("t4slptst", 1);
2810#endif
2811
2812	if (flags & INTR_OK)
2813 pri = PCATCH;
2814 else
2815 pri = 0;
2816
2817 ADAPTER_LOCK(sc);
2818 for (;;) {
2819
2820 if (pi && IS_DOOMED(pi)) {
2821 rc = ENXIO;
2822 goto done;
2823 }
2824
2825 if (!IS_BUSY(sc)) {
2826 rc = 0;
2827 break;
2828 }
2829
2830 if (!(flags & SLEEP_OK)) {
2831 rc = EBUSY;
2832 goto done;
2833 }
2834
2835 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2836 rc = EINTR;
2837 goto done;
2838 }
2839 }
2840
2841 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2842 SET_BUSY(sc);
2843#ifdef INVARIANTS
2844 sc->last_op = wmesg;
2845 sc->last_op_thr = curthread;
2846#endif
2847
2848done:
2849 if (!(flags & HOLD_LOCK) || rc)
2850 ADAPTER_UNLOCK(sc);
2851
2852 return (rc);
2853}
2854
2855void
2856end_synchronized_op(struct adapter *sc, int flags)
2857{
2858
2859 if (flags & LOCK_HELD)
2860 ADAPTER_LOCK_ASSERT_OWNED(sc);
2861 else
2862 ADAPTER_LOCK(sc);
2863
2864 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
2865 CLR_BUSY(sc);
2866 wakeup(&sc->flags);
2867 ADAPTER_UNLOCK(sc);
2868}
2869
2870static int
2871cxgbe_init_synchronized(struct port_info *pi)
2872{
2873 struct adapter *sc = pi->adapter;
2874 struct ifnet *ifp = pi->ifp;
2875 int rc = 0;
2876
2877 ASSERT_SYNCHRONIZED_OP(sc);
2878
2879 if (isset(&sc->open_device_map, pi->port_id)) {
2880 KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
2881 ("mismatch between open_device_map and if_drv_flags"));
2882 return (0); /* already running */
2883 }
2884
2885 if (!(sc->flags & FULL_INIT_DONE) &&
2886 ((rc = adapter_full_init(sc)) != 0))
2887 return (rc); /* error message displayed already */
2888
2889 if (!(pi->flags & PORT_INIT_DONE) &&
2890 ((rc = port_full_init(pi)) != 0))
2891 return (rc); /* error message displayed already */
2892
2893 rc = update_mac_settings(pi, XGMAC_ALL);
2894 if (rc)
2895 goto done; /* error message displayed already */
2896
2897 rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
2898 if (rc != 0) {
2899 if_printf(ifp, "start_link failed: %d\n", rc);
2900 goto done;
2901 }
2902
2903 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
2904 if (rc != 0) {
2905 if_printf(ifp, "enable_vi failed: %d\n", rc);
2906 goto done;
2907 }
2908
2909 /*
2910 * The first iq of the first port to come up is used for tracing.
2911 */
2912 if (sc->traceq < 0) {
2913 sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
2914 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
2915 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
2916 V_QUEUENUMBER(sc->traceq));
2917 pi->flags |= HAS_TRACEQ;
2918 }
2919
2920 /* all ok */
2921 setbit(&sc->open_device_map, pi->port_id);
2922 PORT_LOCK(pi);
2923 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2924 PORT_UNLOCK(pi);
2925
2926 callout_reset(&pi->tick, hz, cxgbe_tick, pi);
2927done:
2928 if (rc != 0)
2929 cxgbe_uninit_synchronized(pi);
2930
2931 return (rc);
2932}
2933
2934/*
2935 * Idempotent.
2936 */
2937static int
2938cxgbe_uninit_synchronized(struct port_info *pi)
2939{
2940 struct adapter *sc = pi->adapter;
2941 struct ifnet *ifp = pi->ifp;
2942 int rc;
2943
2944 ASSERT_SYNCHRONIZED_OP(sc);
2945
2946 /*
2947 * Disable the VI so that all its data in either direction is discarded
2948 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
2949 * tick) intact as the TP can deliver negative advice or data that it's
2950 * holding in its RAM (for an offloaded connection) even after the VI is
2951 * disabled.
2952 */
2953 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2954 if (rc) {
2955 if_printf(ifp, "disable_vi failed: %d\n", rc);
2956 return (rc);
2957 }
2958
2959 clrbit(&sc->open_device_map, pi->port_id);
2960 PORT_LOCK(pi);
2961 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2962 PORT_UNLOCK(pi);
2963
2964 pi->link_cfg.link_ok = 0;
2965 pi->link_cfg.speed = 0;
2966 pi->linkdnrc = -1;
2967 t4_os_link_changed(sc, pi->port_id, 0, -1);
2968
2969 return (0);
2970}
2971
2972/*
2973 * It is ok for this function to fail midway and return right away. t4_detach
2974 * will walk the entire sc->irq list and clean up whatever is valid.
2975 */
2976static int
2977setup_intr_handlers(struct adapter *sc)
2978{
2979 int rc, rid, p, q;
2980 char s[8];
2981 struct irq *irq;
2982 struct port_info *pi;
2983 struct sge_rxq *rxq;
2984#ifdef TCP_OFFLOAD
2985 struct sge_ofld_rxq *ofld_rxq;
2986#endif
2987
2988 /*
2989 * Setup interrupts.
2990 */
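	/*
	 * Vector layout when more than one vector is available: vector 0 is
	 * the error interrupt and vector 1 services the firmware event
	 * queue; the remaining vectors are handed out to the ports' NIC rx
	 * queues and/or (with TCP_OFFLOAD) TOE rx queues below.
	 */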
2991 irq = &sc->irq[0];
2992 rid = sc->intr_type == INTR_INTX ? 0 : 1;
2993 if (sc->intr_count == 1) {
2994 KASSERT(!(sc->flags & INTR_DIRECT),
2995 ("%s: single interrupt && INTR_DIRECT?", __func__));
2996
2997 rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
2998 if (rc != 0)
2999 return (rc);
3000 } else {
3001 /* Multiple interrupts. */
3002 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3003 ("%s: too few intr.", __func__));
3004
3005 /* The first one is always error intr */
3006 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3007 if (rc != 0)
3008 return (rc);
3009 irq++;
3010 rid++;
3011
3012 /* The second one is always the firmware event queue */
3013 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
3014 "evt");
3015 if (rc != 0)
3016 return (rc);
3017 irq++;
3018 rid++;
3019
3020 /*
3021		 * Note that if INTR_DIRECT is not set then either the NIC rx
3022		 * queues or the TOE rx queues (exactly one of the two, never
3023		 * both) will be taking direct interrupts.
3024 *
3025 * There is no need to check for is_offload(sc) as nofldrxq
3026 * will be 0 if offload is disabled.
3027 */
3028 for_each_port(sc, p) {
3029 pi = sc->port[p];
3030
3031#ifdef TCP_OFFLOAD
3032 /*
3033 * Skip over the NIC queues if they aren't taking direct
3034 * interrupts.
3035 */
3036 if (!(sc->flags & INTR_DIRECT) &&
3037 pi->nofldrxq > pi->nrxq)
3038 goto ofld_queues;
3039#endif
3040 rxq = &sc->sge.rxq[pi->first_rxq];
3041 for (q = 0; q < pi->nrxq; q++, rxq++) {
3042 snprintf(s, sizeof(s), "%d.%d", p, q);
3043 rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3044 s);
3045 if (rc != 0)
3046 return (rc);
3047 irq++;
3048 rid++;
3049 }
3050
3051#ifdef TCP_OFFLOAD
3052 /*
3053 * Skip over the offload queues if they aren't taking
3054 * direct interrupts.
3055 */
3056 if (!(sc->flags & INTR_DIRECT))
3057 continue;
3058ofld_queues:
3059 ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3060 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3061 snprintf(s, sizeof(s), "%d,%d", p, q);
3062 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3063 ofld_rxq, s);
3064 if (rc != 0)
3065 return (rc);
3066 irq++;
3067 rid++;
3068 }
3069#endif
3070 }
3071 }
3072
3073 return (0);
3074}
3075
3076static int
3077adapter_full_init(struct adapter *sc)
3078{
3079 int rc, i;
3080
3081 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3082 KASSERT((sc->flags & FULL_INIT_DONE) == 0,
3083 ("%s: FULL_INIT_DONE already", __func__));
3084
3085 /*
3086 * queues that belong to the adapter (not any particular port).
3087 */
3088 rc = t4_setup_adapter_queues(sc);
3089 if (rc != 0)
3090 goto done;
3091
3092 for (i = 0; i < nitems(sc->tq); i++) {
3093 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
3094 taskqueue_thread_enqueue, &sc->tq[i]);
3095 if (sc->tq[i] == NULL) {
3096 device_printf(sc->dev,
3097 "failed to allocate task queue %d\n", i);
3098 rc = ENOMEM;
3099 goto done;
3100 }
3101 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
3102 device_get_nameunit(sc->dev), i);
3103 }
3104
3105 t4_intr_enable(sc);
3106 sc->flags |= FULL_INIT_DONE;
3107done:
3108 if (rc != 0)
3109 adapter_full_uninit(sc);
3110
3111 return (rc);
3112}
3113
3114static int
3115adapter_full_uninit(struct adapter *sc)
3116{
3117 int i;
3118
3119 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3120
3121 t4_teardown_adapter_queues(sc);
3122
3123 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3124 taskqueue_free(sc->tq[i]);
3125 sc->tq[i] = NULL;
3126 }
3127
3128 sc->flags &= ~FULL_INIT_DONE;
3129
3130 return (0);
3131}
3132
3133static int
3134port_full_init(struct port_info *pi)
3135{
3136 struct adapter *sc = pi->adapter;
3137 struct ifnet *ifp = pi->ifp;
3138 uint16_t *rss;
3139 struct sge_rxq *rxq;
3140 int rc, i;
3141
3142 ASSERT_SYNCHRONIZED_OP(sc);
3143 KASSERT((pi->flags & PORT_INIT_DONE) == 0,
3144 ("%s: PORT_INIT_DONE already", __func__));
3145
3146 sysctl_ctx_init(&pi->ctx);
3147 pi->flags |= PORT_SYSCTL_CTX;
3148
3149 /*
3150 * Allocate tx/rx/fl queues for this port.
3151 */
3152 rc = t4_setup_port_queues(pi);
3153 if (rc != 0)
3154 goto done; /* error message displayed already */
3155
3156 /*
3157 * Setup RSS for this port.
3158 */
3159 rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
3160 M_ZERO | M_WAITOK);
3161 for_each_rxq(pi, i, rxq) {
3162 rss[i] = rxq->iq.abs_id;
3163 }
3164 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
3165 pi->rss_size, rss, pi->nrxq);
3166 free(rss, M_CXGBE);
3167 if (rc != 0) {
3168 if_printf(ifp, "rss_config failed: %d\n", rc);
3169 goto done;
3170 }
3171
3172 pi->flags |= PORT_INIT_DONE;
3173done:
3174 if (rc != 0)
3175 port_full_uninit(pi);
3176
3177 return (rc);
3178}
3179
3180/*
3181 * Idempotent.
3182 */
3183static int
3184port_full_uninit(struct port_info *pi)
3185{
3186 struct adapter *sc = pi->adapter;
3187 int i;
3188 struct sge_rxq *rxq;
3189 struct sge_txq *txq;
3190#ifdef TCP_OFFLOAD
3191 struct sge_ofld_rxq *ofld_rxq;
3192 struct sge_wrq *ofld_txq;
3193#endif
3194
3195 if (pi->flags & PORT_INIT_DONE) {
3196
3197 /* Need to quiesce queues. XXX: ctrl queues? */
3198
3199 for_each_txq(pi, i, txq) {
3200 quiesce_eq(sc, &txq->eq);
3201 }
3202
3203#ifdef TCP_OFFLOAD
3204 for_each_ofld_txq(pi, i, ofld_txq) {
3205 quiesce_eq(sc, &ofld_txq->eq);
3206 }
3207#endif
3208
3209 for_each_rxq(pi, i, rxq) {
3210 quiesce_iq(sc, &rxq->iq);
3211 quiesce_fl(sc, &rxq->fl);
3212 }
3213
3214#ifdef TCP_OFFLOAD
3215 for_each_ofld_rxq(pi, i, ofld_rxq) {
3216 quiesce_iq(sc, &ofld_rxq->iq);
3217 quiesce_fl(sc, &ofld_rxq->fl);
3218 }
3219#endif
3220 }
3221
3222 t4_teardown_port_queues(pi);
3223 pi->flags &= ~PORT_INIT_DONE;
3224
3225 return (0);
3226}
3227
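/*
 * The quiesce_* helpers below let in-flight work on a queue finish before
 * the queue is freed: quiesce_eq dooms the eq, waits out any pending
 * credit flush, and drains its tx task; quiesce_iq spins until it can
 * atomically move the iq from IQS_IDLE to IQS_DISABLED (i.e. until the
 * interrupt handler is done with it); quiesce_fl dooms the fl and drains
 * the starvation callout.
 */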
3228static void
3229quiesce_eq(struct adapter *sc, struct sge_eq *eq)
3230{
3231 EQ_LOCK(eq);
3232 eq->flags |= EQ_DOOMED;
3233
3234 /*
3235 * Wait for the response to a credit flush if one's
3236 * pending.
3237 */
3238 while (eq->flags & EQ_CRFLUSHED)
3239 mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
3240 EQ_UNLOCK(eq);
3241
3242 callout_drain(&eq->tx_callout); /* XXX: iffy */
3243 pause("callout", 10); /* Still iffy */
3244
3245 taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
3246}
3247
3248static void
3249quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3250{
3251 (void) sc; /* unused */
3252
3253 /* Synchronize with the interrupt handler */
3254 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3255 pause("iqfree", 1);
3256}
3257
3258static void
3259quiesce_fl(struct adapter *sc, struct sge_fl *fl)
3260{
3261 mtx_lock(&sc->sfl_lock);
3262 FL_LOCK(fl);
3263 fl->flags |= FL_DOOMED;
3264 FL_UNLOCK(fl);
3265 mtx_unlock(&sc->sfl_lock);
3266
3267 callout_drain(&sc->sfl_callout);
3268 KASSERT((fl->flags & FL_STARVING) == 0,
3269 ("%s: still starving", __func__));
3270}
3271
3272static int
3273t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3274 driver_intr_t *handler, void *arg, char *name)
3275{
3276 int rc;
3277
3278 irq->rid = rid;
3279 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3280 RF_SHAREABLE | RF_ACTIVE);
3281 if (irq->res == NULL) {
3282 device_printf(sc->dev,
3283 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3284 return (ENOMEM);
3285 }
3286
3287 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3288 NULL, handler, arg, &irq->tag);
3289 if (rc != 0) {
3290 device_printf(sc->dev,
3291 "failed to setup interrupt for rid %d, name %s: %d\n",
3292 rid, name, rc);
3293 } else if (name)
3294 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3295
3296 return (rc);
3297}
3298
3299static int
3300t4_free_irq(struct adapter *sc, struct irq *irq)
3301{
3302 if (irq->tag)
3303 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3304 if (irq->res)
3305 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3306
3307 bzero(irq, sizeof(*irq));
3308
3309 return (0);
3310}
3311
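/*
 * Register dump support.  The t4/t5_reg_ranges tables below are flat
 * arrays of inclusive [start, end] address pairs; t4_get_regs walks them
 * two entries at a time and reg_block_dump copies each 32-bit register in
 * a range into the caller's buffer at the offset matching its address.
 */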
3312static void
3313reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
3314 unsigned int end)
3315{
3316 uint32_t *p = (uint32_t *)(buf + start);
3317
3318 for ( ; start <= end; start += sizeof(uint32_t))
3319 *p++ = t4_read_reg(sc, start);
3320}
3321
3322static void
3323t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
3324{
3325 int i, n;
3326 const unsigned int *reg_ranges;
3327 static const unsigned int t4_reg_ranges[] = {
3328 0x1008, 0x1108,
3329 0x1180, 0x11b4,
3330 0x11fc, 0x123c,
3331 0x1300, 0x173c,
3332 0x1800, 0x18fc,
3333 0x3000, 0x30d8,
3334 0x30e0, 0x5924,
3335 0x5960, 0x59d4,
3336 0x5a00, 0x5af8,
3337 0x6000, 0x6098,
3338 0x6100, 0x6150,
3339 0x6200, 0x6208,
3340 0x6240, 0x6248,
3341 0x6280, 0x6338,
3342 0x6370, 0x638c,
3343 0x6400, 0x643c,
3344 0x6500, 0x6524,
3345 0x6a00, 0x6a38,
3346 0x6a60, 0x6a78,
3347 0x6b00, 0x6b84,
3348 0x6bf0, 0x6c84,
3349 0x6cf0, 0x6d84,
3350 0x6df0, 0x6e84,
3351 0x6ef0, 0x6f84,
3352 0x6ff0, 0x7084,
3353 0x70f0, 0x7184,
3354 0x71f0, 0x7284,
3355 0x72f0, 0x7384,
3356 0x73f0, 0x7450,
3357 0x7500, 0x7530,
3358 0x7600, 0x761c,
3359 0x7680, 0x76cc,
3360 0x7700, 0x7798,
3361 0x77c0, 0x77fc,
3362 0x7900, 0x79fc,
3363 0x7b00, 0x7c38,
3364 0x7d00, 0x7efc,
3365 0x8dc0, 0x8e1c,
3366 0x8e30, 0x8e78,
3367 0x8ea0, 0x8f6c,
3368 0x8fc0, 0x9074,
3369 0x90fc, 0x90fc,
3370 0x9400, 0x9458,
3371 0x9600, 0x96bc,
3372 0x9800, 0x9808,
3373 0x9820, 0x983c,
3374 0x9850, 0x9864,
3375 0x9c00, 0x9c6c,
3376 0x9c80, 0x9cec,
3377 0x9d00, 0x9d6c,
3378 0x9d80, 0x9dec,
3379 0x9e00, 0x9e6c,
3380 0x9e80, 0x9eec,
3381 0x9f00, 0x9f6c,
3382 0x9f80, 0x9fec,
3383 0xd004, 0xd03c,
3384 0xdfc0, 0xdfe0,
3385 0xe000, 0xea7c,
3386 0xf000, 0x11190,
3387 0x19040, 0x1906c,
3388 0x19078, 0x19080,
3389 0x1908c, 0x19124,
3390 0x19150, 0x191b0,
3391 0x191d0, 0x191e8,
3392 0x19238, 0x1924c,
3393 0x193f8, 0x19474,
3394 0x19490, 0x194f8,
3395 0x19800, 0x19f30,
3396 0x1a000, 0x1a06c,
3397 0x1a0b0, 0x1a120,
3398 0x1a128, 0x1a138,
3399 0x1a190, 0x1a1c4,
3400 0x1a1fc, 0x1a1fc,
3401 0x1e040, 0x1e04c,
3402 0x1e284, 0x1e28c,
3403 0x1e2c0, 0x1e2c0,
3404 0x1e2e0, 0x1e2e0,
3405 0x1e300, 0x1e384,
3406 0x1e3c0, 0x1e3c8,
3407 0x1e440, 0x1e44c,
3408 0x1e684, 0x1e68c,
3409 0x1e6c0, 0x1e6c0,
3410 0x1e6e0, 0x1e6e0,
3411 0x1e700, 0x1e784,
3412 0x1e7c0, 0x1e7c8,
3413 0x1e840, 0x1e84c,
3414 0x1ea84, 0x1ea8c,
3415 0x1eac0, 0x1eac0,
3416 0x1eae0, 0x1eae0,
3417 0x1eb00, 0x1eb84,
3418 0x1ebc0, 0x1ebc8,
3419 0x1ec40, 0x1ec4c,
3420 0x1ee84, 0x1ee8c,
3421 0x1eec0, 0x1eec0,
3422 0x1eee0, 0x1eee0,
3423 0x1ef00, 0x1ef84,
3424 0x1efc0, 0x1efc8,
3425 0x1f040, 0x1f04c,
3426 0x1f284, 0x1f28c,
3427 0x1f2c0, 0x1f2c0,
3428 0x1f2e0, 0x1f2e0,
3429 0x1f300, 0x1f384,
3430 0x1f3c0, 0x1f3c8,
3431 0x1f440, 0x1f44c,
3432 0x1f684, 0x1f68c,
3433 0x1f6c0, 0x1f6c0,
3434 0x1f6e0, 0x1f6e0,
3435 0x1f700, 0x1f784,
3436 0x1f7c0, 0x1f7c8,
3437 0x1f840, 0x1f84c,
3438 0x1fa84, 0x1fa8c,
3439 0x1fac0, 0x1fac0,
3440 0x1fae0, 0x1fae0,
3441 0x1fb00, 0x1fb84,
3442 0x1fbc0, 0x1fbc8,
3443 0x1fc40, 0x1fc4c,
3444 0x1fe84, 0x1fe8c,
3445 0x1fec0, 0x1fec0,
3446 0x1fee0, 0x1fee0,
3447 0x1ff00, 0x1ff84,
3448 0x1ffc0, 0x1ffc8,
3449 0x20000, 0x2002c,
3450 0x20100, 0x2013c,
3451 0x20190, 0x201c8,
3452 0x20200, 0x20318,
3453 0x20400, 0x20528,
3454 0x20540, 0x20614,
3455 0x21000, 0x21040,
3456 0x2104c, 0x21060,
3457 0x210c0, 0x210ec,
3458 0x21200, 0x21268,
3459 0x21270, 0x21284,
3460 0x212fc, 0x21388,
3461 0x21400, 0x21404,
3462 0x21500, 0x21518,
3463 0x2152c, 0x2153c,
3464 0x21550, 0x21554,
3465 0x21600, 0x21600,
3466 0x21608, 0x21628,
3467 0x21630, 0x2163c,
3468 0x21700, 0x2171c,
3469 0x21780, 0x2178c,
3470 0x21800, 0x21c38,
3471 0x21c80, 0x21d7c,
3472 0x21e00, 0x21e04,
3473 0x22000, 0x2202c,
3474 0x22100, 0x2213c,
3475 0x22190, 0x221c8,
3476 0x22200, 0x22318,
3477 0x22400, 0x22528,
3478 0x22540, 0x22614,
3479 0x23000, 0x23040,
3480 0x2304c, 0x23060,
3481 0x230c0, 0x230ec,
3482 0x23200, 0x23268,
3483 0x23270, 0x23284,
3484 0x232fc, 0x23388,
3485 0x23400, 0x23404,
3486 0x23500, 0x23518,
3487 0x2352c, 0x2353c,
3488 0x23550, 0x23554,
3489 0x23600, 0x23600,
3490 0x23608, 0x23628,
3491 0x23630, 0x2363c,
3492 0x23700, 0x2371c,
3493 0x23780, 0x2378c,
3494 0x23800, 0x23c38,
3495 0x23c80, 0x23d7c,
3496 0x23e00, 0x23e04,
3497 0x24000, 0x2402c,
3498 0x24100, 0x2413c,
3499 0x24190, 0x241c8,
3500 0x24200, 0x24318,
3501 0x24400, 0x24528,
3502 0x24540, 0x24614,
3503 0x25000, 0x25040,
3504 0x2504c, 0x25060,
3505 0x250c0, 0x250ec,
3506 0x25200, 0x25268,
3507 0x25270, 0x25284,
3508 0x252fc, 0x25388,
3509 0x25400, 0x25404,
3510 0x25500, 0x25518,
3511 0x2552c, 0x2553c,
3512 0x25550, 0x25554,
3513 0x25600, 0x25600,
3514 0x25608, 0x25628,
3515 0x25630, 0x2563c,
3516 0x25700, 0x2571c,
3517 0x25780, 0x2578c,
3518 0x25800, 0x25c38,
3519 0x25c80, 0x25d7c,
3520 0x25e00, 0x25e04,
3521 0x26000, 0x2602c,
3522 0x26100, 0x2613c,
3523 0x26190, 0x261c8,
3524 0x26200, 0x26318,
3525 0x26400, 0x26528,
3526 0x26540, 0x26614,
3527 0x27000, 0x27040,
3528 0x2704c, 0x27060,
3529 0x270c0, 0x270ec,
3530 0x27200, 0x27268,
3531 0x27270, 0x27284,
3532 0x272fc, 0x27388,
3533 0x27400, 0x27404,
3534 0x27500, 0x27518,
3535 0x2752c, 0x2753c,
3536 0x27550, 0x27554,
3537 0x27600, 0x27600,
3538 0x27608, 0x27628,
3539 0x27630, 0x2763c,
3540 0x27700, 0x2771c,
3541 0x27780, 0x2778c,
3542 0x27800, 0x27c38,
3543 0x27c80, 0x27d7c,
3544 0x27e00, 0x27e04
3545 };
3546 static const unsigned int t5_reg_ranges[] = {
3547 0x1008, 0x1148,
3548 0x1180, 0x11b4,
3549 0x11fc, 0x123c,
3550 0x1280, 0x173c,
3551 0x1800, 0x18fc,
3552 0x3000, 0x3028,
3553 0x3060, 0x30d8,
3554 0x30e0, 0x30fc,
3555 0x3140, 0x357c,
3556 0x35a8, 0x35cc,
3557 0x35ec, 0x35ec,
3558 0x3600, 0x5624,
3559 0x56cc, 0x575c,
3560 0x580c, 0x5814,
3561 0x5890, 0x58bc,
3562 0x5940, 0x59dc,
3563 0x59fc, 0x5a18,
3564 0x5a60, 0x5a9c,
3565 0x5b94, 0x5bfc,
3566 0x6000, 0x6040,
3567 0x6058, 0x614c,
3568 0x7700, 0x7798,
3569 0x77c0, 0x78fc,
3570 0x7b00, 0x7c54,
3571 0x7d00, 0x7efc,
3572 0x8dc0, 0x8de0,
3573 0x8df8, 0x8e84,
3574 0x8ea0, 0x8f84,
3575 0x8fc0, 0x90f8,
3576 0x9400, 0x9470,
3577 0x9600, 0x96f4,
3578 0x9800, 0x9808,
3579 0x9820, 0x983c,
3580 0x9850, 0x9864,
3581 0x9c00, 0x9c6c,
3582 0x9c80, 0x9cec,
3583 0x9d00, 0x9d6c,
3584 0x9d80, 0x9dec,
3585 0x9e00, 0x9e6c,
3586 0x9e80, 0x9eec,
3587 0x9f00, 0x9f6c,
3588 0x9f80, 0xa020,
3589 0xd004, 0xd03c,
3590 0xdfc0, 0xdfe0,
3591 0xe000, 0x11088,
3592 0x1109c, 0x1117c,
3593 0x11190, 0x11204,
3594 0x19040, 0x1906c,
3595 0x19078, 0x19080,
3596 0x1908c, 0x19124,
3597 0x19150, 0x191b0,
3598 0x191d0, 0x191e8,
3599 0x19238, 0x19290,
3600 0x193f8, 0x19474,
3601 0x19490, 0x194cc,
3602 0x194f0, 0x194f8,
3603 0x19c00, 0x19c60,
3604 0x19c94, 0x19e10,
3605 0x19e50, 0x19f34,
3606 0x19f40, 0x19f50,
3607 0x19f90, 0x19fe4,
3608 0x1a000, 0x1a06c,
3609 0x1a0b0, 0x1a120,
3610 0x1a128, 0x1a138,
3611 0x1a190, 0x1a1c4,
3612 0x1a1fc, 0x1a1fc,
3613 0x1e008, 0x1e00c,
3614 0x1e040, 0x1e04c,
3615 0x1e284, 0x1e290,
3616 0x1e2c0, 0x1e2c0,
3617 0x1e2e0, 0x1e2e0,
3618 0x1e300, 0x1e384,
3619 0x1e3c0, 0x1e3c8,
3620 0x1e408, 0x1e40c,
3621 0x1e440, 0x1e44c,
3622 0x1e684, 0x1e690,
3623 0x1e6c0, 0x1e6c0,
3624 0x1e6e0, 0x1e6e0,
3625 0x1e700, 0x1e784,
3626 0x1e7c0, 0x1e7c8,
3627 0x1e808, 0x1e80c,
3628 0x1e840, 0x1e84c,
3629 0x1ea84, 0x1ea90,
3630 0x1eac0, 0x1eac0,
3631 0x1eae0, 0x1eae0,
3632 0x1eb00, 0x1eb84,
3633 0x1ebc0, 0x1ebc8,
3634 0x1ec08, 0x1ec0c,
3635 0x1ec40, 0x1ec4c,
3636 0x1ee84, 0x1ee90,
3637 0x1eec0, 0x1eec0,
3638 0x1eee0, 0x1eee0,
3639 0x1ef00, 0x1ef84,
3640 0x1efc0, 0x1efc8,
3641 0x1f008, 0x1f00c,
3642 0x1f040, 0x1f04c,
3643 0x1f284, 0x1f290,
3644 0x1f2c0, 0x1f2c0,
3645 0x1f2e0, 0x1f2e0,
3646 0x1f300, 0x1f384,
3647 0x1f3c0, 0x1f3c8,
3648 0x1f408, 0x1f40c,
3649 0x1f440, 0x1f44c,
3650 0x1f684, 0x1f690,
3651 0x1f6c0, 0x1f6c0,
3652 0x1f6e0, 0x1f6e0,
3653 0x1f700, 0x1f784,
3654 0x1f7c0, 0x1f7c8,
3655 0x1f808, 0x1f80c,
3656 0x1f840, 0x1f84c,
3657 0x1fa84, 0x1fa90,
3658 0x1fac0, 0x1fac0,
3659 0x1fae0, 0x1fae0,
3660 0x1fb00, 0x1fb84,
3661 0x1fbc0, 0x1fbc8,
3662 0x1fc08, 0x1fc0c,
3663 0x1fc40, 0x1fc4c,
3664 0x1fe84, 0x1fe90,
3665 0x1fec0, 0x1fec0,
3666 0x1fee0, 0x1fee0,
3667 0x1ff00, 0x1ff84,
3668 0x1ffc0, 0x1ffc8,
3669 0x30000, 0x30030,
3670 0x30100, 0x30144,
3671 0x30190, 0x301d0,
3672 0x30200, 0x30318,
3673 0x30400, 0x3052c,
3674 0x30540, 0x3061c,
3675 0x30800, 0x30834,
3676 0x308c0, 0x30908,
3677 0x30910, 0x309ac,
3678 0x30a00, 0x30a2c,
3679 0x30a44, 0x30a50,
3680 0x30a74, 0x30c24,
3681 0x30d00, 0x30d00,
3682 0x30d08, 0x30d14,
3683 0x30d1c, 0x30d20,
3684 0x30d3c, 0x30d50,
3685 0x31200, 0x3120c,
3686 0x31220, 0x31220,
3687 0x31240, 0x31240,
3688 0x31600, 0x3160c,
3689 0x31a00, 0x31a1c,
3690 0x31e00, 0x31e20,
3691 0x31e38, 0x31e3c,
3692 0x31e80, 0x31e80,
3693 0x31e88, 0x31ea8,
3694 0x31eb0, 0x31eb4,
3695 0x31ec8, 0x31ed4,
3696 0x31fb8, 0x32004,
3697 0x32200, 0x32200,
3698 0x32208, 0x32240,
3699 0x32248, 0x32280,
3700 0x32288, 0x322c0,
3701 0x322c8, 0x322fc,
3702 0x32600, 0x32630,
3703 0x32a00, 0x32abc,
3704 0x32b00, 0x32b70,
3705 0x33000, 0x33048,
3706 0x33060, 0x3309c,
3707 0x330f0, 0x33148,
3708 0x33160, 0x3319c,
3709 0x331f0, 0x332e4,
3710 0x332f8, 0x333e4,
3711 0x333f8, 0x33448,
3712 0x33460, 0x3349c,
3713 0x334f0, 0x33548,
3714 0x33560, 0x3359c,
3715 0x335f0, 0x336e4,
3716 0x336f8, 0x337e4,
3717 0x337f8, 0x337fc,
3718 0x33814, 0x33814,
3719 0x3382c, 0x3382c,
3720 0x33880, 0x3388c,
3721 0x338e8, 0x338ec,
3722 0x33900, 0x33948,
3723 0x33960, 0x3399c,
3724 0x339f0, 0x33ae4,
3725 0x33af8, 0x33b10,
3726 0x33b28, 0x33b28,
3727 0x33b3c, 0x33b50,
3728 0x33bf0, 0x33c10,
3729 0x33c28, 0x33c28,
3730 0x33c3c, 0x33c50,
3731 0x33cf0, 0x33cfc,
3732 0x34000, 0x34030,
3733 0x34100, 0x34144,
3734 0x34190, 0x341d0,
3735 0x34200, 0x34318,
3736 0x34400, 0x3452c,
3737 0x34540, 0x3461c,
3738 0x34800, 0x34834,
3739 0x348c0, 0x34908,
3740 0x34910, 0x349ac,
3741 0x34a00, 0x34a2c,
3742 0x34a44, 0x34a50,
3743 0x34a74, 0x34c24,
3744 0x34d00, 0x34d00,
3745 0x34d08, 0x34d14,
3746 0x34d1c, 0x34d20,
3747 0x34d3c, 0x34d50,
3748 0x35200, 0x3520c,
3749 0x35220, 0x35220,
3750 0x35240, 0x35240,
3751 0x35600, 0x3560c,
3752 0x35a00, 0x35a1c,
3753 0x35e00, 0x35e20,
3754 0x35e38, 0x35e3c,
3755 0x35e80, 0x35e80,
3756 0x35e88, 0x35ea8,
3757 0x35eb0, 0x35eb4,
3758 0x35ec8, 0x35ed4,
3759 0x35fb8, 0x36004,
3760 0x36200, 0x36200,
3761 0x36208, 0x36240,
3762 0x36248, 0x36280,
3763 0x36288, 0x362c0,
3764 0x362c8, 0x362fc,
3765 0x36600, 0x36630,
3766 0x36a00, 0x36abc,
3767 0x36b00, 0x36b70,
3768 0x37000, 0x37048,
3769 0x37060, 0x3709c,
3770 0x370f0, 0x37148,
3771 0x37160, 0x3719c,
3772 0x371f0, 0x372e4,
3773 0x372f8, 0x373e4,
3774 0x373f8, 0x37448,
3775 0x37460, 0x3749c,
3776 0x374f0, 0x37548,
3777 0x37560, 0x3759c,
3778 0x375f0, 0x376e4,
3779 0x376f8, 0x377e4,
3780 0x377f8, 0x377fc,
3781 0x37814, 0x37814,
3782 0x3782c, 0x3782c,
3783 0x37880, 0x3788c,
3784 0x378e8, 0x378ec,
3785 0x37900, 0x37948,
3786 0x37960, 0x3799c,
3787 0x379f0, 0x37ae4,
3788 0x37af8, 0x37b10,
3789 0x37b28, 0x37b28,
3790 0x37b3c, 0x37b50,
3791 0x37bf0, 0x37c10,
3792 0x37c28, 0x37c28,
3793 0x37c3c, 0x37c50,
3794 0x37cf0, 0x37cfc,
3795 0x38000, 0x38030,
3796 0x38100, 0x38144,
3797 0x38190, 0x381d0,
3798 0x38200, 0x38318,
3799 0x38400, 0x3852c,
3800 0x38540, 0x3861c,
3801 0x38800, 0x38834,
3802 0x388c0, 0x38908,
3803 0x38910, 0x389ac,
3804 0x38a00, 0x38a2c,
3805 0x38a44, 0x38a50,
3806 0x38a74, 0x38c24,
3807 0x38d00, 0x38d00,
3808 0x38d08, 0x38d14,
3809 0x38d1c, 0x38d20,
3810 0x38d3c, 0x38d50,
3811 0x39200, 0x3920c,
3812 0x39220, 0x39220,
3813 0x39240, 0x39240,
3814 0x39600, 0x3960c,
3815 0x39a00, 0x39a1c,
3816 0x39e00, 0x39e20,
3817 0x39e38, 0x39e3c,
3818 0x39e80, 0x39e80,
3819 0x39e88, 0x39ea8,
3820 0x39eb0, 0x39eb4,
3821 0x39ec8, 0x39ed4,
3822 0x39fb8, 0x3a004,
3823 0x3a200, 0x3a200,
3824 0x3a208, 0x3a240,
3825 0x3a248, 0x3a280,
3826 0x3a288, 0x3a2c0,
3827 0x3a2c8, 0x3a2fc,
3828 0x3a600, 0x3a630,
3829 0x3aa00, 0x3aabc,
3830 0x3ab00, 0x3ab70,
3831 0x3b000, 0x3b048,
3832 0x3b060, 0x3b09c,
3833 0x3b0f0, 0x3b148,
3834 0x3b160, 0x3b19c,
3835 0x3b1f0, 0x3b2e4,
3836 0x3b2f8, 0x3b3e4,
3837 0x3b3f8, 0x3b448,
3838 0x3b460, 0x3b49c,
3839 0x3b4f0, 0x3b548,
3840 0x3b560, 0x3b59c,
3841 0x3b5f0, 0x3b6e4,
3842 0x3b6f8, 0x3b7e4,
3843 0x3b7f8, 0x3b7fc,
3844 0x3b814, 0x3b814,
3845 0x3b82c, 0x3b82c,
3846 0x3b880, 0x3b88c,
3847 0x3b8e8, 0x3b8ec,
3848 0x3b900, 0x3b948,
3849 0x3b960, 0x3b99c,
3850 0x3b9f0, 0x3bae4,
3851 0x3baf8, 0x3bb10,
3852 0x3bb28, 0x3bb28,
3853 0x3bb3c, 0x3bb50,
3854 0x3bbf0, 0x3bc10,
3855 0x3bc28, 0x3bc28,
3856 0x3bc3c, 0x3bc50,
3857 0x3bcf0, 0x3bcfc,
3858 0x3c000, 0x3c030,
3859 0x3c100, 0x3c144,
3860 0x3c190, 0x3c1d0,
3861 0x3c200, 0x3c318,
3862 0x3c400, 0x3c52c,
3863 0x3c540, 0x3c61c,
3864 0x3c800, 0x3c834,
3865 0x3c8c0, 0x3c908,
3866 0x3c910, 0x3c9ac,
3867 0x3ca00, 0x3ca2c,
3868 0x3ca44, 0x3ca50,
3869 0x3ca74, 0x3cc24,
3870 0x3cd00, 0x3cd00,
3871 0x3cd08, 0x3cd14,
3872 0x3cd1c, 0x3cd20,
3873 0x3cd3c, 0x3cd50,
3874 0x3d200, 0x3d20c,
3875 0x3d220, 0x3d220,
3876 0x3d240, 0x3d240,
3877 0x3d600, 0x3d60c,
3878 0x3da00, 0x3da1c,
3879 0x3de00, 0x3de20,
3880 0x3de38, 0x3de3c,
3881 0x3de80, 0x3de80,
3882 0x3de88, 0x3dea8,
3883 0x3deb0, 0x3deb4,
3884 0x3dec8, 0x3ded4,
3885 0x3dfb8, 0x3e004,
3886 0x3e200, 0x3e200,
3887 0x3e208, 0x3e240,
3888 0x3e248, 0x3e280,
3889 0x3e288, 0x3e2c0,
3890 0x3e2c8, 0x3e2fc,
3891 0x3e600, 0x3e630,
3892 0x3ea00, 0x3eabc,
3893 0x3eb00, 0x3eb70,
3894 0x3f000, 0x3f048,
3895 0x3f060, 0x3f09c,
3896 0x3f0f0, 0x3f148,
3897 0x3f160, 0x3f19c,
3898 0x3f1f0, 0x3f2e4,
3899 0x3f2f8, 0x3f3e4,
3900 0x3f3f8, 0x3f448,
3901 0x3f460, 0x3f49c,
3902 0x3f4f0, 0x3f548,
3903 0x3f560, 0x3f59c,
3904 0x3f5f0, 0x3f6e4,
3905 0x3f6f8, 0x3f7e4,
3906 0x3f7f8, 0x3f7fc,
3907 0x3f814, 0x3f814,
3908 0x3f82c, 0x3f82c,
3909 0x3f880, 0x3f88c,
3910 0x3f8e8, 0x3f8ec,
3911 0x3f900, 0x3f948,
3912 0x3f960, 0x3f99c,
3913 0x3f9f0, 0x3fae4,
3914 0x3faf8, 0x3fb10,
3915 0x3fb28, 0x3fb28,
3916 0x3fb3c, 0x3fb50,
3917 0x3fbf0, 0x3fc10,
3918 0x3fc28, 0x3fc28,
3919 0x3fc3c, 0x3fc50,
3920 0x3fcf0, 0x3fcfc,
3921 0x40000, 0x4000c,
3922 0x40040, 0x40068,
3923 0x4007c, 0x40144,
3924 0x40180, 0x4018c,
3925 0x40200, 0x40298,
3926 0x402ac, 0x4033c,
3927 0x403f8, 0x403fc,
3928 0x41304, 0x413c4,
3929 0x41400, 0x4141c,
3930 0x41480, 0x414d0,
3931 0x44000, 0x44078,
3932 0x440c0, 0x44278,
3933 0x442c0, 0x44478,
3934 0x444c0, 0x44678,
3935 0x446c0, 0x44878,
3936 0x448c0, 0x449fc,
3937 0x45000, 0x45068,
3938 0x45080, 0x45084,
3939 0x450a0, 0x450b0,
3940 0x45200, 0x45268,
3941 0x45280, 0x45284,
3942 0x452a0, 0x452b0,
3943 0x460c0, 0x460e4,
3944 0x47000, 0x4708c,
3945 0x47200, 0x47250,
3946 0x47400, 0x47420,
3947 0x47600, 0x47618,
3948 0x47800, 0x47814,
3949 0x48000, 0x4800c,
3950 0x48040, 0x48068,
3951 0x4807c, 0x48144,
3952 0x48180, 0x4818c,
3953 0x48200, 0x48298,
3954 0x482ac, 0x4833c,
3955 0x483f8, 0x483fc,
3956 0x49304, 0x493c4,
3957 0x49400, 0x4941c,
3958 0x49480, 0x494d0,
3959 0x4c000, 0x4c078,
3960 0x4c0c0, 0x4c278,
3961 0x4c2c0, 0x4c478,
3962 0x4c4c0, 0x4c678,
3963 0x4c6c0, 0x4c878,
3964 0x4c8c0, 0x4c9fc,
3965 0x4d000, 0x4d068,
3966 0x4d080, 0x4d084,
3967 0x4d0a0, 0x4d0b0,
3968 0x4d200, 0x4d268,
3969 0x4d280, 0x4d284,
3970 0x4d2a0, 0x4d2b0,
3971 0x4e0c0, 0x4e0e4,
3972 0x4f000, 0x4f08c,
3973 0x4f200, 0x4f250,
3974 0x4f400, 0x4f420,
3975 0x4f600, 0x4f618,
3976 0x4f800, 0x4f814,
3977 0x50000, 0x500cc,
3978 0x50400, 0x50400,
3979 0x50800, 0x508cc,
3980 0x50c00, 0x50c00,
3981 0x51000, 0x5101c,
3982 0x51300, 0x51308,
3983 };
3984
3985 if (is_t4(sc)) {
3986 reg_ranges = &t4_reg_ranges[0];
3987 n = nitems(t4_reg_ranges);
3988 } else {
3989 reg_ranges = &t5_reg_ranges[0];
3990 n = nitems(t5_reg_ranges);
3991 }
3992
3993 regs->version = chip_id(sc) | chip_rev(sc) << 10;
3994 for (i = 0; i < n; i += 2)
3995 reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
3996}
3997
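/*
 * Per-port 1 Hz callout.  Reads the MAC statistics and translates them
 * into ifnet counters; pause frames (counted as 64 octets each) are
 * subtracted so the packet and octet counts cover data frames only.
 */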
3998static void
3999cxgbe_tick(void *arg)
4000{
4001 struct port_info *pi = arg;
4002 struct ifnet *ifp = pi->ifp;
4003 struct sge_txq *txq;
4004 int i, drops;
4005 struct port_stats *s = &pi->stats;
4006
4007 PORT_LOCK(pi);
4008 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
4009 PORT_UNLOCK(pi);
4010 return; /* without scheduling another callout */
4011 }
4012
4013 t4_get_port_stats(pi->adapter, pi->tx_chan, s);
4014
4015 ifp->if_opackets = s->tx_frames - s->tx_pause;
4016 ifp->if_ipackets = s->rx_frames - s->rx_pause;
4017 ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
4018 ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
4019 ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
4020 ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
4021 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
4022 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
4023 s->rx_trunc3;
4024
4025 drops = s->tx_drop;
4026 for_each_txq(pi, i, txq)
4027 drops += txq->br->br_drops;
4028 ifp->if_snd.ifq_drops = drops;
4029
4030 ifp->if_oerrors = s->tx_error_frames;
4031 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
4032 s->rx_fcs_err + s->rx_len_err;
4033
4034 callout_schedule(&pi->tick, hz);
4035 PORT_UNLOCK(pi);
4036}
4037
4038static void
4039cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4040{
4041 struct ifnet *vlan;
4042
4043 if (arg != ifp || ifp->if_type != IFT_ETHER)
4044 return;
4045
4046 vlan = VLAN_DEVAT(ifp, vid);
4047 VLAN_SETCOOKIE(vlan, ifp);
4048}
4049
4050static int
4051cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4052{
4053
4054#ifdef INVARIANTS
4055 panic("%s: opcode 0x%02x on iq %p with payload %p",
4056 __func__, rss->opcode, iq, m);
4057#else
4058 log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
4059 __func__, rss->opcode, iq, m);
4060 m_freem(m);
4061#endif
4062 return (EDOOFUS);
4063}
4064
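/*
 * Handler registration for CPL messages, async notifications, and
 * firmware messages.  Each is a pointer (or table of pointers) that the
 * rx path dispatches through; handlers are installed with an atomic
 * release store so no lock is needed, and passing h == NULL reinstates
 * the corresponding *_not_handled default.
 */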
4065int
4066t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4067{
4068 uintptr_t *loc, new;
4069
4070 if (opcode >= nitems(sc->cpl_handler))
4071 return (EINVAL);
4072
4073 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4074 loc = (uintptr_t *) &sc->cpl_handler[opcode];
4075 atomic_store_rel_ptr(loc, new);
4076
4077 return (0);
4078}
4079
4080static int
4081an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
4082{
4083
4084#ifdef INVARIANTS
4085 panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
4086#else
4087 log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
4088 __func__, iq, ctrl);
4089#endif
4090 return (EDOOFUS);
4091}
4092
4093int
4094t4_register_an_handler(struct adapter *sc, an_handler_t h)
4095{
4096 uintptr_t *loc, new;
4097
4098 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4099 loc = (uintptr_t *) &sc->an_handler;
4100 atomic_store_rel_ptr(loc, new);
4101
4102 return (0);
4103}
4104
4105static int
4106fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
4107{
4108 const struct cpl_fw6_msg *cpl =
4109 __containerof(rpl, struct cpl_fw6_msg, data[0]);
4110
4111#ifdef INVARIANTS
4112 panic("%s: fw_msg type %d", __func__, cpl->type);
4113#else
4114 log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
4115#endif
4116 return (EDOOFUS);
4117}
4118
4119int
4120t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4121{
4122 uintptr_t *loc, new;
4123
4124 if (type >= nitems(sc->fw_msg_handler))
4125 return (EINVAL);
4126
4127 /*
4128 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4129	 * handler dispatch table.  Reject any attempt to install a handler for
4130	 * these subtypes.
4131 */
4132 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4133 return (EINVAL);
4134
4135 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4136 loc = (uintptr_t *) &sc->fw_msg_handler[type];
4137 atomic_store_rel_ptr(loc, new);
4138
4139 return (0);
4140}
4141
4142static int
4143t4_sysctls(struct adapter *sc)
4144{
4145 struct sysctl_ctx_list *ctx;
4146 struct sysctl_oid *oid;
4147 struct sysctl_oid_list *children, *c0;
4148 static char *caps[] = {
4149 "\20\1PPP\2QFC\3DCBX", /* caps[0] linkcaps */
4150 "\20\1NIC\2VM\3IDS\4UM\5UM_ISGL", /* caps[1] niccaps */
4151 "\20\1TOE", /* caps[2] toecaps */
4152 "\20\1RDDP\2RDMAC", /* caps[3] rdmacaps */
4153 "\20\1INITIATOR_PDU\2TARGET_PDU" /* caps[4] iscsicaps */
4154 "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
4155 "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
4156 "\20\1INITIATOR\2TARGET\3CTRL_OFLD" /* caps[5] fcoecaps */
4157 };
4158 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
4159
4160 ctx = device_get_sysctl_ctx(sc->dev);
4161
4162 /*
4163 * dev.t4nex.X.
4164 */
4165 oid = device_get_sysctl_tree(sc->dev);
4166 c0 = children = SYSCTL_CHILDREN(oid);
4167
4168 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
4169 sc->params.nports, "# of ports");
4170
4171 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
4172 NULL, chip_rev(sc), "chip hardware revision");
4173
4174 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
4175 CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
4176
4177 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
4178 CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
4179
4180 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
4181 sc->cfcsum, "config file checksum");
4182
4183 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
4184 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
4185 sysctl_bitfield, "A", "available doorbells");
4186
4187 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
4188 CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
4189 sysctl_bitfield, "A", "available link capabilities");
4190
4191 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
4192 CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
4193 sysctl_bitfield, "A", "available NIC capabilities");
4194
4195 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
4196 CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
4197 sysctl_bitfield, "A", "available TCP offload capabilities");
4198
4199 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
4200 CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
4201 sysctl_bitfield, "A", "available RDMA capabilities");
4202
4203 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
4204 CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
4205 sysctl_bitfield, "A", "available iSCSI capabilities");
4206
4207 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
4208 CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
4209 sysctl_bitfield, "A", "available FCoE capabilities");
4210
4211 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
4212 sc->params.vpd.cclk, "core clock frequency (in kHz)");
4213
4214 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
4215 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
4216 sizeof(sc->sge.timer_val), sysctl_int_array, "A",
4217 "interrupt holdoff timer values (us)");
4218
4219 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
4220 CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
4221 sizeof(sc->sge.counter_val), sysctl_int_array, "A",
4222 "interrupt holdoff packet counter values");
4223
4224 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
4225 NULL, sc->tids.nftids, "number of filters");
4226
4227 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
4228 CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
4229 "chip temperature (in Celsius)");
4230
4231 t4_sge_sysctls(sc, ctx, children);
4232
4233#ifdef SBUF_DRAIN
4234 /*
4235 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload.
4236 */
4237 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
4238 CTLFLAG_RD | CTLFLAG_SKIP, NULL,
4239 "logs and miscellaneous information");
4240 children = SYSCTL_CHILDREN(oid);
4241
4242 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
4243 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4244 sysctl_cctrl, "A", "congestion control");
4245
4246 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
4247 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4248 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
4249
4250 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
4251 CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
4252 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
4253
4254 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
4255 CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
4256 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
4257
4258 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
4259 CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
4260 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
4261
4262 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
4263 CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
4264 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
4265
4266 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
4267 CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
4268 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
4269
4270 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
4271 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4272 sysctl_cim_la, "A", "CIM logic analyzer");
4273
4274 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
4275 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4276 sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
4277
4278 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
4279 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
4280 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
4281
4282 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
4283 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
4284 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
4285
4286 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
4287 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
4288 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
4289
4290 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
4291 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
4292 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
4293
4294 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
4295 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
4296 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
4297
4298 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
4299 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
4300 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
4301
4302 if (is_t5(sc)) {
4303 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
4304 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
4305 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
4306
4307 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
4308 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
4309 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
4310 }
4311
4312 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
4313 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4314 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
4315
4316 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
4317 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4318 sysctl_cim_qcfg, "A", "CIM queue configuration");
4319
4320 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
4321 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4322 sysctl_cpl_stats, "A", "CPL statistics");
4323
4324 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
4325 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4326 sysctl_ddp_stats, "A", "DDP statistics");
4327
4328 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
4329 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4330 sysctl_devlog, "A", "firmware's device log");
4331
4332 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
4333 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4334 sysctl_fcoe_stats, "A", "FCoE statistics");
4335
4336 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
4337 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4338 sysctl_hw_sched, "A", "hardware scheduler");
4339
4340 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
4341 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4342 sysctl_l2t, "A", "hardware L2 table");
4343
4344 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
4345 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4346 sysctl_lb_stats, "A", "loopback statistics");
4347
4348 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
4349 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4350 sysctl_meminfo, "A", "memory regions");
4351
4352 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
4353 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4354 sysctl_mps_tcam, "A", "MPS TCAM entries");
4355
4356 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
4357 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4358 sysctl_path_mtus, "A", "path MTUs");
4359
4360 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
4361 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4362 sysctl_pm_stats, "A", "PM statistics");
4363
4364 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
4365 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4366 sysctl_rdma_stats, "A", "RDMA statistics");
4367
4368 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
4369 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4370 sysctl_tcp_stats, "A", "TCP statistics");
4371
4372 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
4373 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4374 sysctl_tids, "A", "TID information");
4375
4376 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
4377 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4378 sysctl_tp_err_stats, "A", "TP error statistics");
4379
4380 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
4381 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4382 sysctl_tp_la, "A", "TP logic analyzer");
4383
4384 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
4385 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4386 sysctl_tx_rate, "A", "Tx rate");
4387
4388 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
4389 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4390 sysctl_ulprx_la, "A", "ULPRX logic analyzer");
4391
4392 if (is_t5(sc)) {
4393 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
4394 CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
4395 sysctl_wcwr_stats, "A", "write combined work requests");
4396 }
4397#endif
4398
4399#ifdef TCP_OFFLOAD
4400 if (is_offload(sc)) {
4401 /*
4402 * dev.t4nex.X.toe.
4403 */
4404 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
4405 NULL, "TOE parameters");
4406 children = SYSCTL_CHILDREN(oid);
4407
4408 sc->tt.sndbuf = 256 * 1024;
4409 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
4410 &sc->tt.sndbuf, 0, "max hardware send buffer size");
4411
4412 sc->tt.ddp = 0;
4413 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
4414 &sc->tt.ddp, 0, "DDP allowed");
4415
4416 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
4417 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
4418 &sc->tt.indsz, 0, "DDP max indicate size allowed");
4419
4420 sc->tt.ddp_thres =
4421 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
4422 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
4423 &sc->tt.ddp_thres, 0, "DDP threshold");
4424
4425 sc->tt.rx_coalesce = 1;
4426 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
4427 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
4428 }
4429#endif
4430
4431
4432 return (0);
4433}
4434
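/*
 * Create the per-port sysctl tree (dev.cxgbe.X): queue counts and sizing,
 * holdoff settings, and a dev.cxgbe.X.stats node whose MPS port statistics
 * are read straight from hardware registers.
 */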
4435static int
4436cxgbe_sysctls(struct port_info *pi)
4437{
4438 struct sysctl_ctx_list *ctx;
4439 struct sysctl_oid *oid;
4440 struct sysctl_oid_list *children;
4441
4442 ctx = device_get_sysctl_ctx(pi->dev);
4443
4444 /*
4445 * dev.cxgbe.X.
4446 */
4447 oid = device_get_sysctl_tree(pi->dev);
4448 children = SYSCTL_CHILDREN(oid);
4449
4450 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
4451 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
4452 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
4453 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
4454 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
4455 "PHY temperature (in Celsius)");
4456 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
4457 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
4458 "PHY firmware version");
4459 }
4460 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
4461 &pi->nrxq, 0, "# of rx queues");
4462 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
4463 &pi->ntxq, 0, "# of tx queues");
4464 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
4465 &pi->first_rxq, 0, "index of first rx queue");
4466 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
4467 &pi->first_txq, 0, "index of first tx queue");
4468
4469#ifdef TCP_OFFLOAD
4470 if (is_offload(pi->adapter)) {
4471 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
4472 &pi->nofldrxq, 0,
4473 "# of rx queues for offloaded TCP connections");
4474 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
4475 &pi->nofldtxq, 0,
4476 "# of tx queues for offloaded TCP connections");
4477 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
4478 CTLFLAG_RD, &pi->first_ofld_rxq, 0,
4479 "index of first TOE rx queue");
4480 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
4481 CTLFLAG_RD, &pi->first_ofld_txq, 0,
4482 "index of first TOE tx queue");
4483 }
4484#endif
4485
4486 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
4487 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
4488 "holdoff timer index");
4489 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
4490 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
4491 "holdoff packet counter index");
4492
4493 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
4494 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
4495 "rx queue size");
4496 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
4497 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
4498 "tx queue size");
4499
4500 /*
4501 * dev.cxgbe.X.stats.
4502 */
4503 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
4504 NULL, "port statistics");
4505 children = SYSCTL_CHILDREN(oid);
4506
4507#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
4508 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
4509 CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
4510 sysctl_handle_t4_reg64, "QU", desc)
4511
4512 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
4513 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
4514 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
4515 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
4516 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
4517 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
4518 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
4519 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
4520 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
4521 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
4522 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
4523 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
4524 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
4525 "# of tx frames in this range",
4526 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
4527 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
4528 "# of tx frames in this range",
4529 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
4530 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
4531 "# of tx frames in this range",
4532 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
4533 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
4534 "# of tx frames in this range",
4535 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
4536 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
4537 "# of tx frames in this range",
4538 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
4539 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
4540 "# of tx frames in this range",
4541 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
4542 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
4543 "# of tx frames in this range",
4544 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
4545 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
4546 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
4547 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
4548 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
4549 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
4550 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
4551 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
4552 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
4553 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
4554 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
4555 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
4556 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
4557 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
4558 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
4559 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
4560 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
4561 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
4562 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
4563 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
4564 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));
4565
4566 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
4567 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
4568 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
4569 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
4570 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
4571 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
4572 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
4573 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
4574 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
4575 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
4576 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
4577 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
4578 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
4579 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
4580 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
4581 "# of frames received with bad FCS",
4582 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
4583 SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
4584 "# of frames received with length error",
4585 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
4586 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
4587 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
4588 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
4589 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
4590 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
4591 "# of rx frames in this range",
4592 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
4593 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
4594 "# of rx frames in this range",
4595 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
4596 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
4597 "# of rx frames in this range",
4598 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
4599 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
4600 "# of rx frames in this range",
4601 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
4602 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
4603 "# of rx frames in this range",
4604 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
4605 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
4606 "# of rx frames in this range",
4607 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
4608 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
4609 "# of rx frames in this range",
4610 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
4611 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
4612 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
4613 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
4614 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
4615 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
4616 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
4617 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
4618 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
4619 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
4620 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
4621 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
4622 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
4623 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
4624 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
4625 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
4626 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
4627 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
4628 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));
4629
4630#undef SYSCTL_ADD_T4_REG64
4631
4632#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
4633 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
4634 &pi->stats.name, desc)
4635
4636 /* We get these from port_stats and they may be stale by up to 1s */
4637 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
4638 "# drops due to buffer-group 0 overflows");
4639 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
4640 "# drops due to buffer-group 1 overflows");
4641 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
4642 "# drops due to buffer-group 2 overflows");
4643 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
4644 "# drops due to buffer-group 3 overflows");
4645 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
4646 "# of buffer-group 0 truncated packets");
4647 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
4648 "# of buffer-group 1 truncated packets");
4649 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
4650 "# of buffer-group 2 truncated packets");
4651 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
4652 "# of buffer-group 3 truncated packets");
4653
4654#undef SYSCTL_ADD_T4_PORTSTAT
4655
4656 return (0);
4657}
4658
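/*
 * Handler for an array of ints.  arg1 points at the array and arg2 is its
 * size in bytes; the values are reported as one space-separated string.
 */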
4659static int
4660sysctl_int_array(SYSCTL_HANDLER_ARGS)
4661{
4662 int rc, *i;
4663 struct sbuf sb;
4664
4665 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4666 for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4667 sbuf_printf(&sb, "%d ", *i);
4668 sbuf_trim(&sb);
4669 sbuf_finish(&sb);
4670 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4671 sbuf_delete(&sb);
4672 return (rc);
4673}
4674
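/*
 * Handler for a bitfield.  arg2 carries the value and arg1 the "\20..."
 * description string that %b uses to name the individual bits.
 */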
4675static int
4676sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4677{
4678 int rc;
4679 struct sbuf *sb;
4680
4681 rc = sysctl_wire_old_buffer(req, 0);
4682 if (rc != 0)
4683 return (rc);
4684
4685 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4686 if (sb == NULL)
4687 return (ENOMEM);
4688
4689 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4690 rc = sbuf_finish(sb);
4691 sbuf_delete(sb);
4692
4693 return (rc);
4694}
4695
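/*
 * Read the temperature (arg2 == 0) or firmware version (arg2 == 1) of a
 * port's BT (10GBASE-T) PHY over MDIO.
 */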
4696static int
4697sysctl_btphy(SYSCTL_HANDLER_ARGS)
4698{
4699 struct port_info *pi = arg1;
4700 int op = arg2;
4701 struct adapter *sc = pi->adapter;
4702 u_int v;
4703 int rc;
4704
4705 rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
4706 if (rc)
4707 return (rc);
4708 /* XXX: magic numbers */
4709 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
4710 &v);
4711 end_synchronized_op(sc, 0);
4712 if (rc)
4713 return (rc);
4714 if (op == 0)
4715 v /= 256;
4716
4717 rc = sysctl_handle_int(oidp, &v, 0, req);
4718 return (rc);
4719}
4720
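/*
 * Change the holdoff timer index for all of the port's rx queues.  Unlike
 * the packet counter index below, this may be changed while the queues are
 * in use; intr_params is updated atomically where the platform provides
 * atomic_store_rel_8.
 */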
4721static int
4722sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
4723{
4724 struct port_info *pi = arg1;
4725 struct adapter *sc = pi->adapter;
4726 int idx, rc, i;
4727 struct sge_rxq *rxq;
4728#ifdef TCP_OFFLOAD
4729 struct sge_ofld_rxq *ofld_rxq;
4730#endif
4731 uint8_t v;
4732
4733 idx = pi->tmr_idx;
4734
4735 rc = sysctl_handle_int(oidp, &idx, 0, req);
4736 if (rc != 0 || req->newptr == NULL)
4737 return (rc);
4738
4739 if (idx < 0 || idx >= SGE_NTIMERS)
4740 return (EINVAL);
4741
4742 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4743 "t4tmr");
4744 if (rc)
4745 return (rc);
4746
4747 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
4748 for_each_rxq(pi, i, rxq) {
4749#ifdef atomic_store_rel_8
4750 atomic_store_rel_8(&rxq->iq.intr_params, v);
4751#else
4752 rxq->iq.intr_params = v;
4753#endif
4754 }
4755#ifdef TCP_OFFLOAD
4756 for_each_ofld_rxq(pi, i, ofld_rxq) {
4757#ifdef atomic_store_rel_8
4758 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
4759#else
4760 ofld_rxq->iq.intr_params = v;
4761#endif
4762 }
4763#endif
4764 pi->tmr_idx = idx;
4765
4766 end_synchronized_op(sc, LOCK_HELD);
4767 return (0);
4768}
4769
4770static int
4771sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4772{
4773 struct port_info *pi = arg1;
4774 struct adapter *sc = pi->adapter;
4775 int idx, rc;
4776
4777 idx = pi->pktc_idx;
4778
4779 rc = sysctl_handle_int(oidp, &idx, 0, req);
4780 if (rc != 0 || req->newptr == NULL)
4781 return (rc);
4782
4783 if (idx < -1 || idx >= SGE_NCOUNTERS)
4784 return (EINVAL);
4785
4786 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4787 "t4pktc");
4788 if (rc)
4789 return (rc);
4790
4791 if (pi->flags & PORT_INIT_DONE)
4792 rc = EBUSY; /* cannot be changed once the queues are created */
4793 else
4794 pi->pktc_idx = idx;
4795
4796 end_synchronized_op(sc, LOCK_HELD);
4797 return (rc);
4798}
4799
4800static int
4801sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4802{
4803 struct port_info *pi = arg1;
4804 struct adapter *sc = pi->adapter;
4805 int qsize, rc;
4806
4807 qsize = pi->qsize_rxq;
4808
4809 rc = sysctl_handle_int(oidp, &qsize, 0, req);
4810 if (rc != 0 || req->newptr == NULL)
4811 return (rc);
4812
4813 if (qsize < 128 || (qsize & 7))
4814 return (EINVAL);
4815
4816 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4817 "t4rxqs");
4818 if (rc)
4819 return (rc);
4820
4821 if (pi->flags & PORT_INIT_DONE)
4822 rc = EBUSY; /* cannot be changed once the queues are created */
4823 else
4824 pi->qsize_rxq = qsize;
4825
4826 end_synchronized_op(sc, LOCK_HELD);
4827 return (rc);
4828}
4829
4830static int
4831sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4832{
4833 struct port_info *pi = arg1;
4834 struct adapter *sc = pi->adapter;
4835 int qsize, rc;
4836
4837 qsize = pi->qsize_txq;
4838
4839 rc = sysctl_handle_int(oidp, &qsize, 0, req);
4840 if (rc != 0 || req->newptr == NULL)
4841 return (rc);
4842
4843 /* bufring size must be a power of 2 */
4844 if (qsize < 128 || !powerof2(qsize))
4845 return (EINVAL);
4846
4847 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4848 "t4txqs");
4849 if (rc)
4850 return (rc);
4851
4852 if (pi->flags & PORT_INIT_DONE)
4853 rc = EBUSY; /* cannot be changed once the queues are created */
4854 else
4855 pi->qsize_txq = qsize;
4856
4857 end_synchronized_op(sc, LOCK_HELD);
4858 return (rc);
4859}
4860
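/* Handler for a 64-bit hardware register; arg2 is the register address. */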
4861static int
4862sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4863{
4864 struct adapter *sc = arg1;
4865 int reg = arg2;
4866 uint64_t val;
4867
4868 val = t4_read_reg64(sc, reg);
4869
4870 return (sysctl_handle_64(oidp, &val, 0, req));
4871}
4872
4873static int
4874sysctl_temperature(SYSCTL_HANDLER_ARGS)
4875{
4876 struct adapter *sc = arg1;
4877 int rc, t;
4878 uint32_t param, val;
4879
4880 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
4881 if (rc)
4882 return (rc);
4883 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4884 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
4885 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
4886 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
4887 end_synchronized_op(sc, 0);
4888 if (rc)
4889 return (rc);
4890
4891 /* unknown is returned as 0 but we display -1 in that case */
4892 t = val == 0 ? -1 : val;
4893
4894 rc = sysctl_handle_int(oidp, &t, 0, req);
4895 return (rc);
4896}
4897
4898#ifdef SBUF_DRAIN
4899static int
4900sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4901{
4902 struct adapter *sc = arg1;
4903 struct sbuf *sb;
4904 int rc, i;
4905 uint16_t incr[NMTUS][NCCTRL_WIN];
4906 static const char *dec_fac[] = {
4907 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4908 "0.9375"
4909 };
4910
4911 rc = sysctl_wire_old_buffer(req, 0);
4912 if (rc != 0)
4913 return (rc);
4914
4915 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4916 if (sb == NULL)
4917 return (ENOMEM);
4918
4919 t4_read_cong_tbl(sc, incr);
4920
4921 for (i = 0; i < NCCTRL_WIN; ++i) {
4922 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4923 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4924 incr[5][i], incr[6][i], incr[7][i]);
4925 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4926 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4927 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
4928 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
4929 }
4930
4931 rc = sbuf_finish(sb);
4932 sbuf_delete(sb);
4933
4934 return (rc);
4935}
4936
4937static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
4938 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
4939 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
4940 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
4941};
4942
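/*
 * Dump a CIM queue.  arg2 selects the queue: 0 to CIM_NUM_IBQ - 1 are the
 * inbound queues and the rest are outbound queues, in the same order as
 * qname[] above.
 */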
4943static int
4944sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
4945{
4946 struct adapter *sc = arg1;
4947 struct sbuf *sb;
4948 int rc, i, n, qid = arg2;
4949 uint32_t *buf, *p;
4950 char *qtype;
4951 u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
4952
4953 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
4954 ("%s: bad qid %d\n", __func__, qid));
4955
4956 if (qid < CIM_NUM_IBQ) {
4957 /* inbound queue */
4958 qtype = "IBQ";
4959 n = 4 * CIM_IBQ_SIZE;
4960 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4961 rc = t4_read_cim_ibq(sc, qid, buf, n);
4962 } else {
4963 /* outbound queue */
4964 qtype = "OBQ";
4965 qid -= CIM_NUM_IBQ;
4966 n = 4 * cim_num_obq * CIM_OBQ_SIZE;
4967 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
4968 rc = t4_read_cim_obq(sc, qid, buf, n);
4969 }
4970
4971 if (rc < 0) {
4972 rc = -rc;
4973 goto done;
4974 }
4975 n = rc * sizeof(uint32_t); /* rc has # of words actually read */
4976
4977 rc = sysctl_wire_old_buffer(req, 0);
4978 if (rc != 0)
4979 goto done;
4980
4981 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
4982 if (sb == NULL) {
4983 rc = ENOMEM;
4984 goto done;
4985 }
4986
4987 sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
4988 for (i = 0, p = buf; i < n; i += 16, p += 4)
4989 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
4990 p[2], p[3]);
4991
4992 rc = sbuf_finish(sb);
4993 sbuf_delete(sb);
4994done:
4995 free(buf, M_CXGBE);
4996 return (rc);
4997}
4998
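/*
 * Dump the CIM logic analyzer.  The layout of each 8-word entry depends on
 * whether the LA was configured to capture PC values only
 * (F_UPDBGLACAPTPCONLY).
 */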
4999static int
5000sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5001{
5002 struct adapter *sc = arg1;
5003 u_int cfg;
5004 struct sbuf *sb;
5005 uint32_t *buf, *p;
5006 int rc;
5007
5008 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5009 if (rc != 0)
5010 return (rc);
5011
5012 rc = sysctl_wire_old_buffer(req, 0);
5013 if (rc != 0)
5014 return (rc);
5015
5016 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5017 if (sb == NULL)
5018 return (ENOMEM);
5019
5020 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5021 M_ZERO | M_WAITOK);
5022
5023 rc = -t4_cim_read_la(sc, buf, NULL);
5024 if (rc != 0)
5025 goto done;
5026
5027 sbuf_printf(sb, "Status Data PC%s",
5028 cfg & F_UPDBGLACAPTPCONLY ? "" :
5029 " LS0Stat LS0Addr LS0Data");
5030
5031 KASSERT((sc->params.cim_la_size & 7) == 0,
5032 ("%s: p will walk off the end of buf", __func__));
5033
5034 for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5035 if (cfg & F_UPDBGLACAPTPCONLY) {
5036 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
5037 p[6], p[7]);
5038 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
5039 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5040 p[4] & 0xff, p[5] >> 8);
5041 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
5042 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5043 p[1] & 0xf, p[2] >> 4);
5044 } else {
5045 sbuf_printf(sb,
5046 "\n %02x %x%07x %x%07x %08x %08x "
5047 "%08x%08x%08x%08x",
5048 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5049 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5050 p[6], p[7]);
5051 }
5052 }
5053
5054 rc = sbuf_finish(sb);
5055 sbuf_delete(sb);
5056done:
5057 free(buf, M_CXGBE);
5058 return (rc);
5059}
5060
5061static int
5062sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
5063{
5064 struct adapter *sc = arg1;
5065 u_int i;
5066 struct sbuf *sb;
5067 uint32_t *buf, *p;
5068 int rc;
5069
5070 rc = sysctl_wire_old_buffer(req, 0);
5071 if (rc != 0)
5072 return (rc);
5073
5074 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5075 if (sb == NULL)
5076 return (ENOMEM);
5077
5078 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
5079 M_ZERO | M_WAITOK);
5080
5081 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
5082 p = buf;
5083
5084 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5085 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
5086 p[1], p[0]);
5087 }
5088
5089 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
5090 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
5091 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
5092 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
5093 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
5094 (p[1] >> 2) | ((p[2] & 3) << 30),
5095 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
5096 p[0] & 1);
5097 }
5098
5099 rc = sbuf_finish(sb);
5100 sbuf_delete(sb);
5101 free(buf, M_CXGBE);
5102 return (rc);
5103}
5104
5105static int
5106sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5107{
5108 struct adapter *sc = arg1;
5109 u_int i;
5110 struct sbuf *sb;
5111 uint32_t *buf, *p;
5112 int rc;
5113
5114 rc = sysctl_wire_old_buffer(req, 0);
5115 if (rc != 0)
5116 return (rc);
5117
5118 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5119 if (sb == NULL)
5120 return (ENOMEM);
5121
5122 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5123 M_ZERO | M_WAITOK);
5124
5125 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5126 p = buf;
5127
5128 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
5129 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5130 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
5131 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5132 p[4], p[3], p[2], p[1], p[0]);
5133 }
5134
5135 sbuf_printf(sb, "\n\nCntl ID Data");
5136 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
5137 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
5138 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5139 }
5140
5141 rc = sbuf_finish(sb);
5142 sbuf_delete(sb);
5143 free(buf, M_CXGBE);
5144 return (rc);
5145}
5146
5147static int
5148sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
5149{
5150 struct adapter *sc = arg1;
5151 struct sbuf *sb;
5152 int rc, i;
5153 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5154 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
5155 uint16_t thres[CIM_NUM_IBQ];
5156 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
5157 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
5158 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
5159
5160 if (is_t4(sc)) {
5161 cim_num_obq = CIM_NUM_OBQ;
5162 ibq_rdaddr = A_UP_IBQ_0_RDADDR;
5163 obq_rdaddr = A_UP_OBQ_0_REALADDR;
5164 } else {
5165 cim_num_obq = CIM_NUM_OBQ_T5;
5166 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
5167 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
5168 }
5169 nq = CIM_NUM_IBQ + cim_num_obq;
5170
5171 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
5172 if (rc == 0)
5173 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
5174 if (rc != 0)
5175 return (rc);
5176
5177 t4_read_cimq_cfg(sc, base, size, thres);
5178
5179 rc = sysctl_wire_old_buffer(req, 0);
5180 if (rc != 0)
5181 return (rc);
5182
5183 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
5184 if (sb == NULL)
5185 return (ENOMEM);
5186
5187 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
5188
5189 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
5190 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
5191 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
5192 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5193 G_QUEREMFLITS(p[2]) * 16);
5194 for ( ; i < nq; i++, p += 4, wr += 2)
5195 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
5196 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
5197 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
5198 G_QUEREMFLITS(p[2]) * 16);
5199
5200 rc = sbuf_finish(sb);
5201 sbuf_delete(sb);
5202
5203 return (rc);
5204}
5205
5206static int
5207sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5208{
5209 struct adapter *sc = arg1;
5210 struct sbuf *sb;
5211 int rc;
5212 struct tp_cpl_stats stats;
5213
5214 rc = sysctl_wire_old_buffer(req, 0);
5215 if (rc != 0)
5216 return (rc);
5217
5218 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5219 if (sb == NULL)
5220 return (ENOMEM);
5221
5222 t4_tp_get_cpl_stats(sc, &stats);
5223
5224 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
5225 "channel 3\n");
5226 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n",
5227 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5228 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u",
5229 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5230
5231 rc = sbuf_finish(sb);
5232 sbuf_delete(sb);
5233
5234 return (rc);
5235}
5236
5237static int
5238sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5239{
5240 struct adapter *sc = arg1;
5241 struct sbuf *sb;
5242 int rc;
5243 struct tp_usm_stats stats;
5244
5245 rc = sysctl_wire_old_buffer(req, 0);
5246 if (rc != 0)
5247 return (rc);
5248
5249 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5250 if (sb == NULL)
5251 return (ENOMEM);
5252
5253 t4_get_usm_stats(sc, &stats);
5254
5255 sbuf_printf(sb, "Frames: %u\n", stats.frames);
5256 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5257 sbuf_printf(sb, "Drops: %u", stats.drops);
5258
5259 rc = sbuf_finish(sb);
5260 sbuf_delete(sb);
5261
5262 return (rc);
5263}
5264
5265const char *devlog_level_strings[] = {
5266 [FW_DEVLOG_LEVEL_EMERG] = "EMERG",
5267 [FW_DEVLOG_LEVEL_CRIT] = "CRIT",
5268 [FW_DEVLOG_LEVEL_ERR] = "ERR",
5269 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE",
5270 [FW_DEVLOG_LEVEL_INFO] = "INFO",
5271 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG"
5272};
5273
5274const char *devlog_facility_strings[] = {
5275 [FW_DEVLOG_FACILITY_CORE] = "CORE",
5276 [FW_DEVLOG_FACILITY_SCHED] = "SCHED",
5277 [FW_DEVLOG_FACILITY_TIMER] = "TIMER",
5278 [FW_DEVLOG_FACILITY_RES] = "RES",
5279 [FW_DEVLOG_FACILITY_HW] = "HW",
5280 [FW_DEVLOG_FACILITY_FLR] = "FLR",
5281 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ",
5282 [FW_DEVLOG_FACILITY_PHY] = "PHY",
5283 [FW_DEVLOG_FACILITY_MAC] = "MAC",
5284 [FW_DEVLOG_FACILITY_PORT] = "PORT",
5285 [FW_DEVLOG_FACILITY_VI] = "VI",
5286 [FW_DEVLOG_FACILITY_FILTER] = "FILTER",
5287 [FW_DEVLOG_FACILITY_ACL] = "ACL",
5288 [FW_DEVLOG_FACILITY_TM] = "TM",
5289 [FW_DEVLOG_FACILITY_QFC] = "QFC",
5290 [FW_DEVLOG_FACILITY_DCB] = "DCB",
5291 [FW_DEVLOG_FACILITY_ETH] = "ETH",
5292 [FW_DEVLOG_FACILITY_OFLD] = "OFLD",
5293 [FW_DEVLOG_FACILITY_RI] = "RI",
5294 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI",
5295 [FW_DEVLOG_FACILITY_FCOE] = "FCOE",
5296 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI",
5297 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE"
5298};
5299
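/*
 * Dump the firmware's device log, a circular buffer in adapter memory.
 * Scan it once to locate the entry with the oldest timestamp, then print
 * the entries in order starting from there.
 */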
5300static int
5301sysctl_devlog(SYSCTL_HANDLER_ARGS)
5302{
5303 struct adapter *sc = arg1;
5304 struct devlog_params *dparams = &sc->params.devlog;
5305 struct fw_devlog_e *buf, *e;
5306 int i, j, rc, nentries, first = 0;
5307 struct sbuf *sb;
5308 uint64_t ftstamp = UINT64_MAX;
5309
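     /*
      * The firmware didn't report the log's location; fall back to the
      * usual defaults: memtype 0 (EDC0), offset 0x84000, 32KB.
      */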
5310 if (dparams->start == 0) {
5311 dparams->memtype = 0;
5312 dparams->start = 0x84000;
5313 dparams->size = 32768;
5314 }
5315
5316 nentries = dparams->size / sizeof(struct fw_devlog_e);
5317
5318 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
5319 if (buf == NULL)
5320 return (ENOMEM);
5321
5322 rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
5323 (void *)buf);
5324 if (rc != 0)
5325 goto done;
5326
5327 for (i = 0; i < nentries; i++) {
5328 e = &buf[i];
5329
5330 if (e->timestamp == 0)
5331 break; /* end */
5332
5333 e->timestamp = be64toh(e->timestamp);
5334 e->seqno = be32toh(e->seqno);
5335 for (j = 0; j < 8; j++)
5336 e->params[j] = be32toh(e->params[j]);
5337
5338 if (e->timestamp < ftstamp) {
5339 ftstamp = e->timestamp;
5340 first = i;
5341 }
5342 }
5343
5344 if (buf[first].timestamp == 0)
5345 goto done; /* nothing in the log */
5346
5347 rc = sysctl_wire_old_buffer(req, 0);
5348 if (rc != 0)
5349 goto done;
5350
5351 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5352 if (sb == NULL) {
5353 rc = ENOMEM;
5354 goto done;
5355 }
5356 sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
5357 "Seq#", "Tstamp", "Level", "Facility", "Message");
5358
5359 i = first;
5360 do {
5361 e = &buf[i];
5362 if (e->timestamp == 0)
5363 break; /* end */
5364
5365 sbuf_printf(sb, "%10d %15ju %8s %8s ",
5366 e->seqno, e->timestamp,
5367 (e->level < nitems(devlog_level_strings) ?
5368 devlog_level_strings[e->level] : "UNKNOWN"),
5369 (e->facility < nitems(devlog_facility_strings) ?
5370 devlog_facility_strings[e->facility] : "UNKNOWN"));
5371 sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
5372 e->params[2], e->params[3], e->params[4],
5373 e->params[5], e->params[6], e->params[7]);
5374
5375 if (++i == nentries)
5376 i = 0;
5377 } while (i != first);
5378
5379 rc = sbuf_finish(sb);
5380 sbuf_delete(sb);
5381done:
5382 free(buf, M_CXGBE);
5383 return (rc);
5384}
5385
5386static int
5387sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5388{
5389 struct adapter *sc = arg1;
5390 struct sbuf *sb;
5391 int rc;
5392 struct tp_fcoe_stats stats[4];
5393
5394 rc = sysctl_wire_old_buffer(req, 0);
5395 if (rc != 0)
5396 return (rc);
5397
5398 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5399 if (sb == NULL)
5400 return (ENOMEM);
5401
5402 t4_get_fcoe_stats(sc, 0, &stats[0]);
5403 t4_get_fcoe_stats(sc, 1, &stats[1]);
5404 t4_get_fcoe_stats(sc, 2, &stats[2]);
5405 t4_get_fcoe_stats(sc, 3, &stats[3]);
5406
5407 sbuf_printf(sb, " channel 0 channel 1 "
5408 "channel 2 channel 3\n");
5409 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n",
5410 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5411 stats[3].octetsDDP);
5412 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP,
5413 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5414 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5415 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5416 stats[3].framesDrop);
5417
5418 rc = sbuf_finish(sb);
5419 sbuf_delete(sb);
5420
5421 return (rc);
5422}
5423
5424static int
5425sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
5426{
5427 struct adapter *sc = arg1;
5428 struct sbuf *sb;
5429 int rc, i;
5430 unsigned int map, kbps, ipg, mode;
5431 unsigned int pace_tab[NTX_SCHED];
5432
5433 rc = sysctl_wire_old_buffer(req, 0);
5434 if (rc != 0)
5435 return (rc);
5436
5437 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5438 if (sb == NULL)
5439 return (ENOMEM);
5440
5441 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
5442 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
5443 t4_read_pace_tbl(sc, pace_tab);
5444
5445 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
5446 "Class IPG (0.1 ns) Flow IPG (us)");
5447
5448 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
5449 t4_get_tx_sched(sc, i, &kbps, &ipg);
5450 sbuf_printf(sb, "\n %u %-5s %u ", i,
5451 (mode & (1 << i)) ? "flow" : "class", map & 3);
5452 if (kbps)
5453 sbuf_printf(sb, "%9u ", kbps);
5454 else
5455 sbuf_printf(sb, " disabled ");
5456
5457 if (ipg)
5458 sbuf_printf(sb, "%13u ", ipg);
5459 else
5460 sbuf_printf(sb, " disabled ");
5461
5462 if (pace_tab[i])
5463 sbuf_printf(sb, "%10u", pace_tab[i]);
5464 else
5465 sbuf_printf(sb, " disabled");
5466 }
5467
5468 rc = sbuf_finish(sb);
5469 sbuf_delete(sb);
5470
5471 return (rc);
5472}
5473
5474static int
5475sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
5476{
5477 struct adapter *sc = arg1;
5478 struct sbuf *sb;
5479 int rc, i, j;
5480 uint64_t *p0, *p1;
5481 struct lb_port_stats s[2];
5482 static const char *stat_name[] = {
5483 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
5484 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
5485 "Frames128To255:", "Frames256To511:", "Frames512To1023:",
5486 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
5487 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
5488 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
5489 "BG2FramesTrunc:", "BG3FramesTrunc:"
5490 };
5491
5492 rc = sysctl_wire_old_buffer(req, 0);
5493 if (rc != 0)
5494 return (rc);
5495
5496 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5497 if (sb == NULL)
5498 return (ENOMEM);
5499
5500 memset(s, 0, sizeof(s));
5501
5502 for (i = 0; i < 4; i += 2) {
5503 t4_get_lb_stats(sc, i, &s[0]);
5504 t4_get_lb_stats(sc, i + 1, &s[1]);
5505
5506 p0 = &s[0].octets;
5507 p1 = &s[1].octets;
5508 sbuf_printf(sb, "%s Loopback %u"
5509 " Loopback %u", i == 0 ? "" : "\n", i, i + 1);
5510
5511 for (j = 0; j < nitems(stat_name); j++)
5512 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
5513 *p0++, *p1++);
5514 }
5515
5516 rc = sbuf_finish(sb);
5517 sbuf_delete(sb);
5518
5519 return (rc);
5520}
5521
5522static int
5523sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5524{
5525 int rc = 0;
5526 struct port_info *pi = arg1;
5527 struct sbuf *sb;
5528 static const char *linkdnreasons[] = {
5529 "non-specific", "remote fault", "autoneg failed", "reserved3",
5530 "PHY overheated", "unknown", "rx los", "reserved7"
5531 };
5532
5533 rc = sysctl_wire_old_buffer(req, 0);
5534 if (rc != 0)
5535 return (rc);
5536 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5537 if (sb == NULL)
5538 return (ENOMEM);
5539
5540 if (pi->linkdnrc < 0)
5541 sbuf_printf(sb, "n/a");
5542 else if (pi->linkdnrc < nitems(linkdnreasons))
5543 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5544 else
5545 sbuf_printf(sb, "%d", pi->linkdnrc);
5546
5547 rc = sbuf_finish(sb);
5548 sbuf_delete(sb);
5549
5550 return (rc);
5551}
5552
5553struct mem_desc {
5554 unsigned int base;
5555 unsigned int limit;
5556 unsigned int idx;
5557};
5558
5559static int
5560mem_desc_cmp(const void *a, const void *b)
5561{
5562 return ((const struct mem_desc *)a)->base -
5563 ((const struct mem_desc *)b)->base;
5564}
5565
5566static void
5567mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
5568 unsigned int to)
5569{
5570 unsigned int size;
5571
5572 size = to - from + 1;
5573 if (size == 0)
5574 return;
5575
5576 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
5577 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
5578}
5579
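/*
 * Show the adapter's memory map: enumerate the enabled memories (EDC0,
 * EDC1, MC/MC0, MC1), read the base of each hardware region from its
 * registers, sort everything, and display the regions along with any holes
 * in the address space.
 */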
5580static int
5581sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5582{
5583 struct adapter *sc = arg1;
5584 struct sbuf *sb;
5585 int rc, i, n;
5586 uint32_t lo, hi, used, alloc;
5587 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5588 static const char *region[] = {
5589 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5590 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5591 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5592 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5593 "RQUDP region:", "PBL region:", "TXPBL region:",
5594 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5595 "On-chip queues:"
5596 };
5597 struct mem_desc avail[4];
5598 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
5599 struct mem_desc *md = mem;
5600
5601 rc = sysctl_wire_old_buffer(req, 0);
5602 if (rc != 0)
5603 return (rc);
5604
5605 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5606 if (sb == NULL)
5607 return (ENOMEM);
5608
5609 for (i = 0; i < nitems(mem); i++) {
5610 mem[i].limit = 0;
5611 mem[i].idx = i;
5612 }
5613
5614 /* Find and sort the populated memory ranges */
5615 i = 0;
5616 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5617 if (lo & F_EDRAM0_ENABLE) {
5618 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5619 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5620 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5621 avail[i].idx = 0;
5622 i++;
5623 }
5624 if (lo & F_EDRAM1_ENABLE) {
5625 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5626 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5627 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5628 avail[i].idx = 1;
5629 i++;
5630 }
5631 if (lo & F_EXT_MEM_ENABLE) {
5632 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5633 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5634 avail[i].limit = avail[i].base +
5635 (G_EXT_MEM_SIZE(hi) << 20);
5636 avail[i].idx = is_t4(sc) ? 2 : 3; /* Call it MC for T4 */
5637 i++;
5638 }
5639 if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5640 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5641 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5642 avail[i].limit = avail[i].base +
5643 (G_EXT_MEM1_SIZE(hi) << 20);
5644 avail[i].idx = 4;
5645 i++;
5646 }
5647 if (!i) { /* no memory available */
5648 sbuf_delete(sb);
 return (0);
 }
5649 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5650
5651 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5652 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5653 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5654 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5655 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5656 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5657 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5658 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5659 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5660
5661 /* the next few have explicit upper bounds */
5662 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5663 md->limit = md->base - 1 +
5664 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5665 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5666 md++;
5667
5668 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5669 md->limit = md->base - 1 +
5670 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5671 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5672 md++;
5673
5674 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5675 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5676 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5677 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5678 } else {
5679 md->base = 0;
5680 md->idx = nitems(region); /* hide it */
5681 }
5682 md++;
5683
5684#define ulp_region(reg) \
5685 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5686 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5687
5688 ulp_region(RX_ISCSI);
5689 ulp_region(RX_TDDP);
5690 ulp_region(TX_TPT);
5691 ulp_region(RX_STAG);
5692 ulp_region(RX_RQ);
5693 ulp_region(RX_RQUDP);
5694 ulp_region(RX_PBL);
5695 ulp_region(TX_PBL);
5696#undef ulp_region
5697
5698 md->base = 0;
5699 md->idx = nitems(region);
5700 if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5701 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5702 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5703 A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5704 }
5705 md++;
5706
5707 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5708 md->limit = md->base + sc->tids.ntids - 1;
5709 md++;
5710 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5711 md->limit = md->base + sc->tids.ntids - 1;
5712 md++;
5713
5714 md->base = sc->vres.ocq.start;
5715 if (sc->vres.ocq.size)
5716 md->limit = md->base + sc->vres.ocq.size - 1;
5717 else
5718 md->idx = nitems(region); /* hide it */
5719 md++;
5720
5721 /* add any address-space holes, there can be up to 3 */
5722 for (n = 0; n < i - 1; n++)
5723 if (avail[n].limit < avail[n + 1].base)
5724 (md++)->base = avail[n].limit;
5725 if (avail[n].limit)
5726 (md++)->base = avail[n].limit;
5727
5728 n = md - mem;
5729 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5730
5731 for (lo = 0; lo < i; lo++)
5732 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5733 avail[lo].limit - 1);
5734
5735 sbuf_printf(sb, "\n");
5736 for (i = 0; i < n; i++) {
5737 if (mem[i].idx >= nitems(region))
5738 continue; /* skip holes */
5739 if (!mem[i].limit)
5740 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5741 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5742 mem[i].limit);
5743 }
5744
5745 sbuf_printf(sb, "\n");
5746 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5747 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5748 mem_region_show(sb, "uP RAM:", lo, hi);
5749
5750 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5751 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5752 mem_region_show(sb, "uP Extmem2:", lo, hi);
5753
5754 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5755 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5756 G_PMRXMAXPAGE(lo),
5757 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5758 (lo & F_PMRXNUMCHN) ? 2 : 1);
5759
5760 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5761 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5762 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5763 G_PMTXMAXPAGE(lo),
5764 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5765 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5766 sbuf_printf(sb, "%u p-structs\n",
5767 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5768
5769 for (i = 0; i < 4; i++) {
5770 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5771 if (is_t4(sc)) {
5772 used = G_USED(lo);
5773 alloc = G_ALLOC(lo);
5774 } else {
5775 used = G_T5_USED(lo);
5776 alloc = G_T5_ALLOC(lo);
5777 }
5778 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5779 i, used, alloc);
5780 }
5781 for (i = 0; i < 4; i++) {
5782 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5783 if (is_t4(sc)) {
5784 used = G_USED(lo);
5785 alloc = G_ALLOC(lo);
5786 } else {
5787 used = G_T5_USED(lo);
5788 alloc = G_T5_ALLOC(lo);
5789 }
5790 sbuf_printf(sb,
5791 "\nLoopback %d using %u pages out of %u allocated",
5792 i, used, alloc);
5793 }
5794
5795 rc = sbuf_finish(sb);
5796 sbuf_delete(sb);
5797
5798 return (rc);
5799}
5800
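/*
 * Convert an MPS TCAM entry from its (x, y) bit-pair encoding to an
 * Ethernet address and mask: a bit takes part in the match when x or y is
 * set, and the address itself comes from y.
 */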
5801static inline void
5802tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
5803{
5804 *mask = x | y;
5805 y = htobe64(y);
5806 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
5807}
5808
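/*
 * Dump the MPS TCAM.  For entries with replication enabled the replication
 * map has to be fetched from the firmware with an LDST command.
 */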
5809static int
5810sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
5811{
5812 struct adapter *sc = arg1;
5813 struct sbuf *sb;
5814 int rc, i, n;
5815
5816 rc = sysctl_wire_old_buffer(req, 0);
5817 if (rc != 0)
5818 return (rc);
5819
5820 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5821 if (sb == NULL)
5822 return (ENOMEM);
5823
5824 sbuf_printf(sb,
5825 "Idx Ethernet address Mask Vld Ports PF"
5826 " VF Replication P0 P1 P2 P3 ML");
5827 n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
5828 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5829 for (i = 0; i < n; i++) {
5830 uint64_t tcamx, tcamy, mask;
5831 uint32_t cls_lo, cls_hi;
5832 uint8_t addr[ETHER_ADDR_LEN];
5833
5834 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
5835 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
5836 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
5837 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
5838
5839 if (tcamx & tcamy)
5840 continue;
5841
5842 tcamxy2valmask(tcamx, tcamy, addr, &mask);
5843 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
5844 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
5845 addr[3], addr[4], addr[5], (uintmax_t)mask,
5846 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
5847 G_PORTMAP(cls_hi), G_PF(cls_lo),
5848 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
5849
5850 if (cls_lo & F_REPLICATE) {
5851 struct fw_ldst_cmd ldst_cmd;
5852
5853 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
5854 ldst_cmd.op_to_addrspace =
5855 htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
5856 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5857 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
5858 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
5859 ldst_cmd.u.mps.fid_ctl =
5860 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
5861 V_FW_LDST_CMD_CTL(i));
5862
5863 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
5864 "t4mps");
5865 if (rc)
5866 break;
5867 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
5868 sizeof(ldst_cmd), &ldst_cmd);
5869 end_synchronized_op(sc, 0);
5870
5871 if (rc != 0) {
5872 sbuf_printf(sb,
5873 " ------------ error %3u ------------", rc);
5874 rc = 0;
5875 } else {
5876 sbuf_printf(sb, " %08x %08x %08x %08x",
5877 be32toh(ldst_cmd.u.mps.rplc127_96),
5878 be32toh(ldst_cmd.u.mps.rplc95_64),
5879 be32toh(ldst_cmd.u.mps.rplc63_32),
5880 be32toh(ldst_cmd.u.mps.rplc31_0));
5881 }
5882 } else
5883 sbuf_printf(sb, "%36s", "");
5884
5885 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
5886 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
5887 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
5888 }
5889
5890 if (rc)
5891 (void) sbuf_finish(sb);
5892 else
5893 rc = sbuf_finish(sb);
5894 sbuf_delete(sb);
5895
5896 return (rc);
5897}
5898
5899static int
5900sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5901{
5902 struct adapter *sc = arg1;
5903 struct sbuf *sb;
5904 int rc;
5905 uint16_t mtus[NMTUS];
5906
5907 rc = sysctl_wire_old_buffer(req, 0);
5908 if (rc != 0)
5909 return (rc);
5910
5911 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5912 if (sb == NULL)
5913 return (ENOMEM);
5914
5915 t4_read_mtu_tbl(sc, mtus, NULL);
5916
5917 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5918 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5919 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5920 mtus[14], mtus[15]);
5921
5922 rc = sbuf_finish(sb);
5923 sbuf_delete(sb);
5924
5925 return (rc);
5926}
5927
5928static int
5929sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
5930{
5931 struct adapter *sc = arg1;
5932 struct sbuf *sb;
5933 int rc, i;
5934 uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
5935 uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
5936 static const char *pm_stats[] = {
5937 "Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
5938 };
5939
5940 rc = sysctl_wire_old_buffer(req, 0);
5941 if (rc != 0)
5942 return (rc);
5943
5944 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5945 if (sb == NULL)
5946 return (ENOMEM);
5947
5948 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
5949 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
5950
5951 sbuf_printf(sb, " Tx count Tx cycles "
5952 "Rx count Rx cycles");
5953 for (i = 0; i < PM_NSTATS; i++)
5954 sbuf_printf(sb, "\n%-13s %10u %20ju %10u %20ju",
5955 pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
5956
5957 rc = sbuf_finish(sb);
5958 sbuf_delete(sb);
5959
5960 return (rc);
5961}
5962
5963static int
5964sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5965{
5966 struct adapter *sc = arg1;
5967 struct sbuf *sb;
5968 int rc;
5969 struct tp_rdma_stats stats;
5970
5971 rc = sysctl_wire_old_buffer(req, 0);
5972 if (rc != 0)
5973 return (rc);
5974
5975 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5976 if (sb == NULL)
5977 return (ENOMEM);
5978
5979 t4_tp_get_rdma_stats(sc, &stats);
5980 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
5981 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
5982
5983 rc = sbuf_finish(sb);
5984 sbuf_delete(sb);
5985
5986 return (rc);
5987}
5988
5989static int
5990sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
5991{
5992 struct adapter *sc = arg1;
5993 struct sbuf *sb;
5994 int rc;
5995 struct tp_tcp_stats v4, v6;
5996
5997 rc = sysctl_wire_old_buffer(req, 0);
5998 if (rc != 0)
5999 return (rc);
6000
6001 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6002 if (sb == NULL)
6003 return (ENOMEM);
6004
6005 t4_tp_get_tcp_stats(sc, &v4, &v6);
6006	sbuf_printf(sb,
6007	    "                               IP                 IPv6\n");
6008	sbuf_printf(sb, "OutRsts:     %20u %20u\n",
6009	    v4.tcpOutRsts, v6.tcpOutRsts);
6010	sbuf_printf(sb, "InSegs:      %20ju %20ju\n",
6011	    v4.tcpInSegs, v6.tcpInSegs);
6012	sbuf_printf(sb, "OutSegs:     %20ju %20ju\n",
6013	    v4.tcpOutSegs, v6.tcpOutSegs);
6014	sbuf_printf(sb, "RetransSegs: %20ju %20ju",
6015	    v4.tcpRetransSegs, v6.tcpRetransSegs);
6016
6017 rc = sbuf_finish(sb);
6018 sbuf_delete(sb);
6019
6020 return (rc);
6021}
6022
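/*
 * Note (from the register usage below): when the LE hash region is enabled
 * (F_HASHEN in LE_DB_CONFIG), the TID space is split in two, with plain TIDs
 * running up to LE_DB_SERVER_INDEX / 4 and hashed TIDs starting at
 * LE_DB_TID_HASHBASE / 4; the two "TID range" formats report that split.
 */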
6023static int
6024sysctl_tids(SYSCTL_HANDLER_ARGS)
6025{
6026 struct adapter *sc = arg1;
6027 struct sbuf *sb;
6028 int rc;
6029 struct tid_info *t = &sc->tids;
6030
6031 rc = sysctl_wire_old_buffer(req, 0);
6032 if (rc != 0)
6033 return (rc);
6034
6035 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6036 if (sb == NULL)
6037 return (ENOMEM);
6038
6039 if (t->natids) {
6040 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
6041 t->atids_in_use);
6042 }
6043
6044 if (t->ntids) {
6045 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
6046 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
6047
6048 if (b) {
6049 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
6050 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6051 t->ntids - 1);
6052 } else {
6053 sbuf_printf(sb, "TID range: %u-%u",
6054 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
6055 t->ntids - 1);
6056 }
6057 } else
6058 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
6059 sbuf_printf(sb, ", in use: %u\n",
6060 atomic_load_acq_int(&t->tids_in_use));
6061 }
6062
6063 if (t->nstids) {
6064 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
6065 t->stid_base + t->nstids - 1, t->stids_in_use);
6066 }
6067
6068 if (t->nftids) {
6069 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
6070 t->ftid_base + t->nftids - 1);
6071 }
6072
6073 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
6074 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
6075 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
6076
6077 rc = sbuf_finish(sb);
6078 sbuf_delete(sb);
6079
6080 return (rc);
6081}
6082
6083static int
6084sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
6085{
6086 struct adapter *sc = arg1;
6087 struct sbuf *sb;
6088 int rc;
6089 struct tp_err_stats stats;
6090
6091 rc = sysctl_wire_old_buffer(req, 0);
6092 if (rc != 0)
6093 return (rc);
6094
6095 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6096 if (sb == NULL)
6097 return (ENOMEM);
6098
6099 t4_tp_get_err_stats(sc, &stats);
6100
6101	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
6102	    "channel 3\n");
6103	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
6104	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
6105	    stats.macInErrs[3]);
6106	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
6107	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
6108	    stats.hdrInErrs[3]);
6109	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
6110	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
6111	    stats.tcpInErrs[3]);
6112	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
6113	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
6114	    stats.tcp6InErrs[3]);
6115	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
6116	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
6117	    stats.tnlCongDrops[3]);
6118	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
6119	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
6120	    stats.tnlTxDrops[3]);
6121	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
6122	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
6123	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
6124	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
6125	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
6126	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
6127	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
6128	    stats.ofldNoNeigh, stats.ofldCongDefer);
6129
6130 rc = sbuf_finish(sb);
6131 sbuf_delete(sb);
6132
6133 return (rc);
6134}
6135
6136struct field_desc {
6137 const char *name;
6138 u_int start;
6139 u_int width;
6140};
6141
6142static void
6143field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6144{
6145 char buf[32];
6146 int line_size = 0;
6147
6148 while (f->name) {
6149 uint64_t mask = (1ULL << f->width) - 1;
6150 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6151 ((uintmax_t)v >> f->start) & mask);
6152
6153 if (line_size + len >= 79) {
6154 line_size = 8;
6155	sbuf_printf(sb, "\n        ");
6156 }
6157 sbuf_printf(sb, "%s ", buf);
6158 line_size += len + 1;
6159 f++;
6160 }
6161 sbuf_printf(sb, "\n");
6162}
6163
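/*
 * Worked example (illustrative, not from the hardware docs): for the tp_la0
 * entry { "Tid", 32, 10 } and a latch word v = 0x0000012300000000,
 * field_desc_show() computes mask = (1ULL << 10) - 1 = 0x3ff and prints
 * ((uintmax_t)v >> 32) & 0x3ff = 0x123, i.e. "Tid: 291".
 */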
6164	static const struct field_desc tp_la0[] = {
6165 { "RcfOpCodeOut", 60, 4 },
6166 { "State", 56, 4 },
6167 { "WcfState", 52, 4 },
6168 { "RcfOpcSrcOut", 50, 2 },
6169 { "CRxError", 49, 1 },
6170 { "ERxError", 48, 1 },
6171 { "SanityFailed", 47, 1 },
6172 { "SpuriousMsg", 46, 1 },
6173 { "FlushInputMsg", 45, 1 },
6174 { "FlushInputCpl", 44, 1 },
6175 { "RssUpBit", 43, 1 },
6176 { "RssFilterHit", 42, 1 },
6177 { "Tid", 32, 10 },
6178 { "InitTcb", 31, 1 },
6179 { "LineNumber", 24, 7 },
6180 { "Emsg", 23, 1 },
6181 { "EdataOut", 22, 1 },
6182 { "Cmsg", 21, 1 },
6183 { "CdataOut", 20, 1 },
6184 { "EreadPdu", 19, 1 },
6185 { "CreadPdu", 18, 1 },
6186 { "TunnelPkt", 17, 1 },
6187 { "RcfPeerFin", 16, 1 },
6188 { "RcfReasonOut", 12, 4 },
6189 { "TxCchannel", 10, 2 },
6190 { "RcfTxChannel", 8, 2 },
6191 { "RxEchannel", 6, 2 },
6192 { "RcfRxChannel", 5, 1 },
6193 { "RcfDataOutSrdy", 4, 1 },
6194 { "RxDvld", 3, 1 },
6195 { "RxOoDvld", 2, 1 },
6196 { "RxCongestion", 1, 1 },
6197 { "TxCongestion", 0, 1 },
6198 { NULL }
6199};
6200
6201	static const struct field_desc tp_la1[] = {
6202 { "CplCmdIn", 56, 8 },
6203 { "CplCmdOut", 48, 8 },
6204 { "ESynOut", 47, 1 },
6205 { "EAckOut", 46, 1 },
6206 { "EFinOut", 45, 1 },
6207 { "ERstOut", 44, 1 },
6208 { "SynIn", 43, 1 },
6209 { "AckIn", 42, 1 },
6210 { "FinIn", 41, 1 },
6211 { "RstIn", 40, 1 },
6212 { "DataIn", 39, 1 },
6213 { "DataInVld", 38, 1 },
6214 { "PadIn", 37, 1 },
6215 { "RxBufEmpty", 36, 1 },
6216 { "RxDdp", 35, 1 },
6217 { "RxFbCongestion", 34, 1 },
6218 { "TxFbCongestion", 33, 1 },
6219 { "TxPktSumSrdy", 32, 1 },
6220 { "RcfUlpType", 28, 4 },
6221 { "Eread", 27, 1 },
6222 { "Ebypass", 26, 1 },
6223 { "Esave", 25, 1 },
6224 { "Static0", 24, 1 },
6225 { "Cread", 23, 1 },
6226 { "Cbypass", 22, 1 },
6227 { "Csave", 21, 1 },
6228 { "CPktOut", 20, 1 },
6229 { "RxPagePoolFull", 18, 2 },
6230 { "RxLpbkPkt", 17, 1 },
6231 { "TxLpbkPkt", 16, 1 },
6232 { "RxVfValid", 15, 1 },
6233 { "SynLearned", 14, 1 },
6234 { "SetDelEntry", 13, 1 },
6235 { "SetInvEntry", 12, 1 },
6236 { "CpcmdDvld", 11, 1 },
6237 { "CpcmdSave", 10, 1 },
6238 { "RxPstructsFull", 8, 2 },
6239 { "EpcmdDvld", 7, 1 },
6240 { "EpcmdFlush", 6, 1 },
6241 { "EpcmdTrimPrefix", 5, 1 },
6242 { "EpcmdTrimPostfix", 4, 1 },
6243 { "ERssIp4Pkt", 3, 1 },
6244 { "ERssIp6Pkt", 2, 1 },
6245 { "ERssTcpUdpPkt", 1, 1 },
6246 { "ERssFceFipPkt", 0, 1 },
6247 { NULL }
6248};
6249
6250	static const struct field_desc tp_la2[] = {
6251 { "CplCmdIn", 56, 8 },
6252 { "MpsVfVld", 55, 1 },
6253 { "MpsPf", 52, 3 },
6254 { "MpsVf", 44, 8 },
6255 { "SynIn", 43, 1 },
6256 { "AckIn", 42, 1 },
6257 { "FinIn", 41, 1 },
6258 { "RstIn", 40, 1 },
6259 { "DataIn", 39, 1 },
6260 { "DataInVld", 38, 1 },
6261 { "PadIn", 37, 1 },
6262 { "RxBufEmpty", 36, 1 },
6263 { "RxDdp", 35, 1 },
6264 { "RxFbCongestion", 34, 1 },
6265 { "TxFbCongestion", 33, 1 },
6266 { "TxPktSumSrdy", 32, 1 },
6267 { "RcfUlpType", 28, 4 },
6268 { "Eread", 27, 1 },
6269 { "Ebypass", 26, 1 },
6270 { "Esave", 25, 1 },
6271 { "Static0", 24, 1 },
6272 { "Cread", 23, 1 },
6273 { "Cbypass", 22, 1 },
6274 { "Csave", 21, 1 },
6275 { "CPktOut", 20, 1 },
6276 { "RxPagePoolFull", 18, 2 },
6277 { "RxLpbkPkt", 17, 1 },
6278 { "TxLpbkPkt", 16, 1 },
6279 { "RxVfValid", 15, 1 },
6280 { "SynLearned", 14, 1 },
6281 { "SetDelEntry", 13, 1 },
6282 { "SetInvEntry", 12, 1 },
6283 { "CpcmdDvld", 11, 1 },
6284 { "CpcmdSave", 10, 1 },
6285 { "RxPstructsFull", 8, 2 },
6286 { "EpcmdDvld", 7, 1 },
6287 { "EpcmdFlush", 6, 1 },
6288 { "EpcmdTrimPrefix", 5, 1 },
6289 { "EpcmdTrimPostfix", 4, 1 },
6290 { "ERssIp4Pkt", 3, 1 },
6291 { "ERssIp6Pkt", 2, 1 },
6292 { "ERssTcpUdpPkt", 1, 1 },
6293 { "ERssFceFipPkt", 0, 1 },
6294 { NULL }
6295};
6296
6297static void
6298tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
6299{
6300
6301 field_desc_show(sb, *p, tp_la0);
6302}
6303
6304static void
6305tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
6306{
6307
6308 if (idx)
6309 sbuf_printf(sb, "\n");
6310 field_desc_show(sb, p[0], tp_la0);
6311 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6312 field_desc_show(sb, p[1], tp_la0);
6313}
6314
6315static void
6316tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
6317{
6318
6319 if (idx)
6320 sbuf_printf(sb, "\n");
6321 field_desc_show(sb, p[0], tp_la0);
6322 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
6323 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
6324}
6325
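/*
 * The layout of a TP logic analyzer capture depends on G_DBGLAMODE: in modes
 * 2 and 3 each row holds two 64-bit entries (hence inc = 2 below), and mode 3
 * decodes the second word with tp_la2 or tp_la1 depending on bit 17
 * (TunnelPkt) of the first word; every other mode is one tp_la0 entry per row.
 */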
6326static int
6327sysctl_tp_la(SYSCTL_HANDLER_ARGS)
6328{
6329 struct adapter *sc = arg1;
6330 struct sbuf *sb;
6331 uint64_t *buf, *p;
6332 int rc;
6333 u_int i, inc;
6334 void (*show_func)(struct sbuf *, uint64_t *, int);
6335
6336 rc = sysctl_wire_old_buffer(req, 0);
6337 if (rc != 0)
6338 return (rc);
6339
6340 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6341 if (sb == NULL)
6342 return (ENOMEM);
6343
6344 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
6345
6346 t4_tp_read_la(sc, buf, NULL);
6347 p = buf;
6348
6349 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
6350 case 2:
6351 inc = 2;
6352 show_func = tp_la_show2;
6353 break;
6354 case 3:
6355 inc = 2;
6356 show_func = tp_la_show3;
6357 break;
6358 default:
6359 inc = 1;
6360 show_func = tp_la_show;
6361 }
6362
6363 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
6364 (*show_func)(sb, p, i);
6365
6366 rc = sbuf_finish(sb);
6367 sbuf_delete(sb);
6368 free(buf, M_CXGBE);
6369 return (rc);
6370}
6371
6372static int
6373sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6374{
6375 struct adapter *sc = arg1;
6376 struct sbuf *sb;
6377 int rc;
6378 u64 nrate[NCHAN], orate[NCHAN];
6379
6380 rc = sysctl_wire_old_buffer(req, 0);
6381 if (rc != 0)
6382 return (rc);
6383
6384 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6385 if (sb == NULL)
6386 return (ENOMEM);
6387
6388 t4_get_chan_txrate(sc, nrate, orate);
6389	sbuf_printf(sb, "              channel 0  channel 1  channel 2  "
6390	    "channel 3\n");
6391	sbuf_printf(sb, "NIC B/s:     %10ju %10ju %10ju %10ju\n",
6392	    nrate[0], nrate[1], nrate[2], nrate[3]);
6393	sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
6394	    orate[0], orate[1], orate[2], orate[3]);
6395
6396 rc = sbuf_finish(sb);
6397 sbuf_delete(sb);
6398
6399 return (rc);
6400}
6401
6402static int
6403sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
6404{
6405 struct adapter *sc = arg1;
6406 struct sbuf *sb;
6407 uint32_t *buf, *p;
6408 int rc, i;
6409
6410 rc = sysctl_wire_old_buffer(req, 0);
6411 if (rc != 0)
6412 return (rc);
6413
6414 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6415 if (sb == NULL)
6416 return (ENOMEM);
6417
6418 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
6419 M_ZERO | M_WAITOK);
6420
6421 t4_ulprx_read_la(sc, buf);
6422 p = buf;
6423
6424	sbuf_printf(sb, "            Pcmd Type  Message"
6425	    "                             Data");
6426 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
6427 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
6428 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
6429 }
6430
6431 rc = sbuf_finish(sb);
6432 sbuf_delete(sb);
6433 free(buf, M_CXGBE);
6434 return (rc);
6435}
6436
6437static int
6438sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
6439{
6440 struct adapter *sc = arg1;
6441 struct sbuf *sb;
6442 int rc, v;
6443
6444 rc = sysctl_wire_old_buffer(req, 0);
6445 if (rc != 0)
6446 return (rc);
6447
6448 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
6449 if (sb == NULL)
6450 return (ENOMEM);
6451
6452 v = t4_read_reg(sc, A_SGE_STAT_CFG);
6453 if (G_STATSOURCE_T5(v) == 7) {
6454 if (G_STATMODE(v) == 0) {
6455 sbuf_printf(sb, "total %d, incomplete %d",
6456 t4_read_reg(sc, A_SGE_STAT_TOTAL),
6457 t4_read_reg(sc, A_SGE_STAT_MATCH));
6458 } else if (G_STATMODE(v) == 1) {
6459 sbuf_printf(sb, "total %d, data overflow %d",
6460 t4_read_reg(sc, A_SGE_STAT_TOTAL),
6461 t4_read_reg(sc, A_SGE_STAT_MATCH));
6462 }
6463 }
6464 rc = sbuf_finish(sb);
6465 sbuf_delete(sb);
6466
6467 return (rc);
6468}
6469#endif
6470
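/*
 * txq->m, if set, is a frame whose transmission stalled earlier; resume with
 * it before dequeueing anything new from the buf_ring.
 */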
6471static inline void
6472txq_start(struct ifnet *ifp, struct sge_txq *txq)
6473{
6474 struct buf_ring *br;
6475 struct mbuf *m;
6476
6477 TXQ_LOCK_ASSERT_OWNED(txq);
6478
6479 br = txq->br;
6480 m = txq->m ? txq->m : drbr_dequeue(ifp, br);
6481 if (m)
6482 t4_eth_tx(ifp, txq, m);
6483}
6484
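/*
 * Callout handler: while the eq is stalled and tx can't resume yet, keep
 * rescheduling ourselves one tick out; otherwise hand the real work off to
 * the per-channel taskqueue via eq->tx_task.
 */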
6485void
6486t4_tx_callout(void *arg)
6487{
6488 struct sge_eq *eq = arg;
6489 struct adapter *sc;
6490
6491 if (EQ_TRYLOCK(eq) == 0)
6492 goto reschedule;
6493
6494 if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6495 EQ_UNLOCK(eq);
6496reschedule:
6497	if (__predict_true(!(eq->flags & EQ_DOOMED)))
6498 callout_schedule(&eq->tx_callout, 1);
6499 return;
6500 }
6501
6502 EQ_LOCK_ASSERT_OWNED(eq);
6503
6504 if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6505
6506 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6507 struct sge_txq *txq = arg;
6508 struct port_info *pi = txq->ifp->if_softc;
6509
6510 sc = pi->adapter;
6511 } else {
6512 struct sge_wrq *wrq = arg;
6513
6514 sc = wrq->adapter;
6515 }
6516
6517 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6518 }
6519
6520 EQ_UNLOCK(eq);
6521}
6522
6523void
6524t4_tx_task(void *arg, int count)
6525{
6526 struct sge_eq *eq = arg;
6527
6528 EQ_LOCK(eq);
6529 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6530 struct sge_txq *txq = arg;
6531 txq_start(txq->ifp, txq);
6532 } else {
6533 struct sge_wrq *wrq = arg;
6534 t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
6535 }
6536 EQ_UNLOCK(eq);
6537}
6538
6539static uint32_t
6540fconf_to_mode(uint32_t fconf)
6541{
6542 uint32_t mode;
6543
6544 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6545 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6546
6547 if (fconf & F_FRAGMENTATION)
6548 mode |= T4_FILTER_IP_FRAGMENT;
6549
6550 if (fconf & F_MPSHITTYPE)
6551 mode |= T4_FILTER_MPS_HIT_TYPE;
6552
6553 if (fconf & F_MACMATCH)
6554 mode |= T4_FILTER_MAC_IDX;
6555
6556 if (fconf & F_ETHERTYPE)
6557 mode |= T4_FILTER_ETH_TYPE;
6558
6559 if (fconf & F_PROTOCOL)
6560 mode |= T4_FILTER_IP_PROTO;
6561
6562 if (fconf & F_TOS)
6563 mode |= T4_FILTER_IP_TOS;
6564
6565 if (fconf & F_VLAN)
6566 mode |= T4_FILTER_VLAN;
6567
6568 if (fconf & F_VNIC_ID)
6569 mode |= T4_FILTER_VNIC;
6570
6571 if (fconf & F_PORT)
6572 mode |= T4_FILTER_PORT;
6573
6574 if (fconf & F_FCOE)
6575 mode |= T4_FILTER_FCoE;
6576
6577 return (mode);
6578}
6579
6580static uint32_t
6581mode_to_fconf(uint32_t mode)
6582{
6583 uint32_t fconf = 0;
6584
6585 if (mode & T4_FILTER_IP_FRAGMENT)
6586 fconf |= F_FRAGMENTATION;
6587
6588 if (mode & T4_FILTER_MPS_HIT_TYPE)
6589 fconf |= F_MPSHITTYPE;
6590
6591 if (mode & T4_FILTER_MAC_IDX)
6592 fconf |= F_MACMATCH;
6593
6594 if (mode & T4_FILTER_ETH_TYPE)
6595 fconf |= F_ETHERTYPE;
6596
6597 if (mode & T4_FILTER_IP_PROTO)
6598 fconf |= F_PROTOCOL;
6599
6600 if (mode & T4_FILTER_IP_TOS)
6601 fconf |= F_TOS;
6602
6603 if (mode & T4_FILTER_VLAN)
6604 fconf |= F_VLAN;
6605
6606 if (mode & T4_FILTER_VNIC)
6607 fconf |= F_VNIC_ID;
6608
6609 if (mode & T4_FILTER_PORT)
6610 fconf |= F_PORT;
6611
6612 if (mode & T4_FILTER_FCoE)
6613 fconf |= F_FCOE;
6614
6615 return (fconf);
6616}
6617
6618static uint32_t
6619fspec_to_fconf(struct t4_filter_specification *fs)
6620{
6621 uint32_t fconf = 0;
6622
6623 if (fs->val.frag || fs->mask.frag)
6624 fconf |= F_FRAGMENTATION;
6625
6626 if (fs->val.matchtype || fs->mask.matchtype)
6627 fconf |= F_MPSHITTYPE;
6628
6629 if (fs->val.macidx || fs->mask.macidx)
6630 fconf |= F_MACMATCH;
6631
6632 if (fs->val.ethtype || fs->mask.ethtype)
6633 fconf |= F_ETHERTYPE;
6634
6635 if (fs->val.proto || fs->mask.proto)
6636 fconf |= F_PROTOCOL;
6637
6638 if (fs->val.tos || fs->mask.tos)
6639 fconf |= F_TOS;
6640
6641 if (fs->val.vlan_vld || fs->mask.vlan_vld)
6642 fconf |= F_VLAN;
6643
6644 if (fs->val.vnic_vld || fs->mask.vnic_vld)
6645 fconf |= F_VNIC_ID;
6646
6647 if (fs->val.iport || fs->mask.iport)
6648 fconf |= F_PORT;
6649
6650 if (fs->val.fcoe || fs->mask.fcoe)
6651 fconf |= F_FCOE;
6652
6653 return (fconf);
6654}
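
/*
 * Example (illustrative): a specification with fs->val.vlan_vld set maps to
 * F_VLAN here, so set_filter() below rejects it with E2BIG unless F_VLAN is
 * already part of the compressed filter tuple in TP_VLAN_PRI_MAP.
 */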
6655
6656static int
6657get_filter_mode(struct adapter *sc, uint32_t *mode)
6658{
6659 int rc;
6660 uint32_t fconf;
6661
6662 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6663 "t4getfm");
6664 if (rc)
6665 return (rc);
6666
6667 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
6668 A_TP_VLAN_PRI_MAP);
6669
6670 if (sc->params.tp.vlan_pri_map != fconf) {
6671 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6672 device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
6673 fconf);
6674 sc->params.tp.vlan_pri_map = fconf;
6675 }
6676
6677 *mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
6678
6679 end_synchronized_op(sc, LOCK_HELD);
6680 return (0);
6681}
6682
6683static int
6684set_filter_mode(struct adapter *sc, uint32_t mode)
6685{
6686 uint32_t fconf;
6687 int rc;
6688
6689 fconf = mode_to_fconf(mode);
6690
6691 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6692 "t4setfm");
6693 if (rc)
6694 return (rc);
6695
6696 if (sc->tids.ftids_in_use > 0) {
6697 rc = EBUSY;
6698 goto done;
6699 }
6700
6701#ifdef TCP_OFFLOAD
6702 if (sc->offload_map) {
6703 rc = EBUSY;
6704 goto done;
6705 }
6706#endif
6707
6708#ifdef notyet
6709 rc = -t4_set_filter_mode(sc, fconf);
6710 if (rc == 0)
6711 sc->filter_mode = fconf;
6712#else
6713 rc = ENOTSUP;
6714#endif
6715
6716done:
6717 end_synchronized_op(sc, LOCK_HELD);
6718 return (rc);
6719}
6720
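/*
 * The hit count lives in the filter's TCB and is read through memory
 * window 0: a 64-bit big-endian value at byte offset 16 of the TCB on T4,
 * a 32-bit big-endian value at offset 24 on T5.
 */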
6721static inline uint64_t
6722get_filter_hits(struct adapter *sc, uint32_t fid)
6723{
6724 uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
6725 uint64_t hits;
6726
6727 memwin_info(sc, 0, &mw_base, NULL);
6728 off = position_memwin(sc, 0,
6729 tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
6730 if (is_t4(sc)) {
6731 hits = t4_read_reg64(sc, mw_base + off + 16);
6732 hits = be64toh(hits);
6733 } else {
6734 hits = t4_read_reg(sc, mw_base + off + 24);
6735 hits = be32toh(hits);
6736 }
6737
6738 return (hits);
6739}
6740
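/*
 * t->idx is an in/out cursor: return the first valid filter at or after the
 * given index (with its hit count if requested), or set it to 0xffffffff if
 * there is none.
 */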
6741static int
6742get_filter(struct adapter *sc, struct t4_filter *t)
6743{
6744 int i, rc, nfilters = sc->tids.nftids;
6745 struct filter_entry *f;
6746
6747 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6748 "t4getf");
6749 if (rc)
6750 return (rc);
6751
6752 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6753 t->idx >= nfilters) {
6754 t->idx = 0xffffffff;
6755 goto done;
6756 }
6757
6758 f = &sc->tids.ftid_tab[t->idx];
6759 for (i = t->idx; i < nfilters; i++, f++) {
6760 if (f->valid) {
6761 t->idx = i;
6762 t->l2tidx = f->l2t ? f->l2t->idx : 0;
6763 t->smtidx = f->smtidx;
6764 if (f->fs.hitcnts)
6765 t->hits = get_filter_hits(sc, t->idx);
6766 else
6767 t->hits = UINT64_MAX;
6768 t->fs = f->fs;
6769
6770 goto done;
6771 }
6772 }
6773
6774 t->idx = 0xffffffff;
6775done:
6776 end_synchronized_op(sc, LOCK_HELD);
6777 return (0);
6778}
6779
6780static int
6781set_filter(struct adapter *sc, struct t4_filter *t)
6782{
6783 unsigned int nfilters, nports;
6784 struct filter_entry *f;
6785 int i, rc;
6786
6787 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
6788 if (rc)
6789 return (rc);
6790
6791 nfilters = sc->tids.nftids;
6792 nports = sc->params.nports;
6793
6794 if (nfilters == 0) {
6795 rc = ENOTSUP;
6796 goto done;
6797 }
6798
6799 if (!(sc->flags & FULL_INIT_DONE)) {
6800 rc = EAGAIN;
6801 goto done;
6802 }
6803
6804 if (t->idx >= nfilters) {
6805 rc = EINVAL;
6806 goto done;
6807 }
6808
6809 /* Validate against the global filter mode */
6810 if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
6811 sc->params.tp.vlan_pri_map) {
6812 rc = E2BIG;
6813 goto done;
6814 }
6815
6816 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
6817 rc = EINVAL;
6818 goto done;
6819 }
6820
6821 if (t->fs.val.iport >= nports) {
6822 rc = EINVAL;
6823 goto done;
6824 }
6825
6826 /* Can't specify an iq if not steering to it */
6827 if (!t->fs.dirsteer && t->fs.iq) {
6828 rc = EINVAL;
6829 goto done;
6830 }
6831
6832	/* An IPv6 filter occupies four slots; its index must be 4-aligned */
6833 if (t->fs.type == 1 &&
6834 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
6835 rc = EINVAL;
6836 goto done;
6837 }
6838
6839 if (sc->tids.ftid_tab == NULL) {
6840 KASSERT(sc->tids.ftids_in_use == 0,
6841	    ("%s: no memory allocated but ftids_in_use > 0",
6842 __func__));
6843
6844 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
6845 nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
6846 if (sc->tids.ftid_tab == NULL) {
6847 rc = ENOMEM;
6848 goto done;
6849 }
6850 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
6851 }
6852
6853 for (i = 0; i < 4; i++) {
6854 f = &sc->tids.ftid_tab[t->idx + i];
6855
6856 if (f->pending || f->valid) {
6857 rc = EBUSY;
6858 goto done;
6859 }
6860 if (f->locked) {
6861 rc = EPERM;
6862 goto done;
6863 }
6864
6865 if (t->fs.type == 0)
6866 break;
6867 }
6868
6869 f = &sc->tids.ftid_tab[t->idx];
6870 f->fs = t->fs;
6871
6872 rc = set_filter_wr(sc, t->idx);
6873done:
6874 end_synchronized_op(sc, 0);
6875
6876 if (rc == 0) {
6877 mtx_lock(&sc->tids.ftid_lock);
6878 for (;;) {
6879 if (f->pending == 0) {
6880 rc = f->valid ? 0 : EIO;
6881 break;
6882 }
6883
6884 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6885 PCATCH, "t4setfw", 0)) {
6886 rc = EINPROGRESS;
6887 break;
6888 }
6889 }
6890 mtx_unlock(&sc->tids.ftid_lock);
6891 }
6892 return (rc);
6893}
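
/*
 * Illustrative userland usage (a minimal sketch, not code from this file):
 * a TCP drop filter at index 0 could be installed along the lines of
 *
 *	struct t4_filter t = { .idx = 0 };
 *
 *	t.fs.action = FILTER_DROP;
 *	t.fs.val.proto = IPPROTO_TCP;
 *	t.fs.mask.proto = 0xff;
 *	ioctl(fd, CHELSIO_T4_SET_FILTER, &t);
 *
 * with fd open on the adapter's control device; set_filter() then sleeps in
 * "t4setfw" above until t4_filter_rpl() reports the outcome.
 */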
6894
6895static int
6896del_filter(struct adapter *sc, struct t4_filter *t)
6897{
6898 unsigned int nfilters;
6899 struct filter_entry *f;
6900 int rc;
6901
6902 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
6903 if (rc)
6904 return (rc);
6905
6906 nfilters = sc->tids.nftids;
6907
6908 if (nfilters == 0) {
6909 rc = ENOTSUP;
6910 goto done;
6911 }
6912
6913 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
6914 t->idx >= nfilters) {
6915 rc = EINVAL;
6916 goto done;
6917 }
6918
6919 if (!(sc->flags & FULL_INIT_DONE)) {
6920 rc = EAGAIN;
6921 goto done;
6922 }
6923
6924 f = &sc->tids.ftid_tab[t->idx];
6925
6926 if (f->pending) {
6927 rc = EBUSY;
6928 goto done;
6929 }
6930 if (f->locked) {
6931 rc = EPERM;
6932 goto done;
6933 }
6934
6935 if (f->valid) {
6936 t->fs = f->fs; /* extra info for the caller */
6937 rc = del_filter_wr(sc, t->idx);
6938 }
6939
6940done:
6941 end_synchronized_op(sc, 0);
6942
6943 if (rc == 0) {
6944 mtx_lock(&sc->tids.ftid_lock);
6945 for (;;) {
6946 if (f->pending == 0) {
6947 rc = f->valid ? EIO : 0;
6948 break;
6949 }
6950
6951 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
6952 PCATCH, "t4delfw", 0)) {
6953 rc = EINPROGRESS;
6954 break;
6955 }
6956 }
6957 mtx_unlock(&sc->tids.ftid_lock);
6958 }
6959
6960 return (rc);
6961}
6962
6963static void
6964clear_filter(struct filter_entry *f)
6965{
6966 if (f->l2t)
6967 t4_l2t_release(f->l2t);
6968
6969 bzero(f, sizeof (*f));
6970}
6971
6972static int
6973set_filter_wr(struct adapter *sc, int fidx)
6974{
6975 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
6976 struct wrqe *wr;
6977 struct fw_filter_wr *fwr;
6978 unsigned int ftid;
6979
6980 ASSERT_SYNCHRONIZED_OP(sc);
6981
6982 if (f->fs.newdmac || f->fs.newvlan) {
6983 /* This filter needs an L2T entry; allocate one. */
6984 f->l2t = t4_l2t_alloc_switching(sc->l2t);
6985 if (f->l2t == NULL)
6986 return (EAGAIN);
6987 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
6988 f->fs.dmac)) {
6989 t4_l2t_release(f->l2t);
6990 f->l2t = NULL;
6991 return (ENOMEM);
6992 }
6993 }
6994
6995 ftid = sc->tids.ftid_base + fidx;
6996
6997 wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
6998 if (wr == NULL)
6999 return (ENOMEM);
7000
7001 fwr = wrtod(wr);
7002 bzero(fwr, sizeof (*fwr));
7003
7004 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
7005 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
7006 fwr->tid_to_iq =
7007 htobe32(V_FW_FILTER_WR_TID(ftid) |
7008 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
7009 V_FW_FILTER_WR_NOREPLY(0) |
7010 V_FW_FILTER_WR_IQ(f->fs.iq));
7011 fwr->del_filter_to_l2tix =
7012 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
7013 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
7014 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
7015 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
7016 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
7017 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
7018 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
7019 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
7020 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
7021 f->fs.newvlan == VLAN_REWRITE) |
7022 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
7023 f->fs.newvlan == VLAN_REWRITE) |
7024 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
7025 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
7026 V_FW_FILTER_WR_PRIO(f->fs.prio) |
7027 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
7028 fwr->ethtype = htobe16(f->fs.val.ethtype);
7029 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
7030 fwr->frag_to_ovlan_vldm =
7031 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
7032 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
7033 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
7034 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
7035 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
7036 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
7037 fwr->smac_sel = 0;
7038 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
7039 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
7040 fwr->maci_to_matchtypem =
7041 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
7042 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
7043 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
7044 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
7045 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
7046 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
7047 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
7048 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
7049 fwr->ptcl = f->fs.val.proto;
7050 fwr->ptclm = f->fs.mask.proto;
7051 fwr->ttyp = f->fs.val.tos;
7052 fwr->ttypm = f->fs.mask.tos;
7053 fwr->ivlan = htobe16(f->fs.val.vlan);
7054 fwr->ivlanm = htobe16(f->fs.mask.vlan);
7055 fwr->ovlan = htobe16(f->fs.val.vnic);
7056 fwr->ovlanm = htobe16(f->fs.mask.vnic);
7057 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
7058 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
7059 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
7060 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
7061 fwr->lp = htobe16(f->fs.val.dport);
7062 fwr->lpm = htobe16(f->fs.mask.dport);
7063 fwr->fp = htobe16(f->fs.val.sport);
7064 fwr->fpm = htobe16(f->fs.mask.sport);
7065 if (f->fs.newsmac)
7066 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));
7067
7068 f->pending = 1;
7069 sc->tids.ftids_in_use++;
7070
7071 t4_wrq_tx(sc, wr);
7072 return (0);
7073}
7074
7075static int
7076del_filter_wr(struct adapter *sc, int fidx)
7077{
7078 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7079 struct wrqe *wr;
7080 struct fw_filter_wr *fwr;
7081 unsigned int ftid;
7082
7083 ftid = sc->tids.ftid_base + fidx;
7084
7085 wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7086 if (wr == NULL)
7087 return (ENOMEM);
7088 fwr = wrtod(wr);
7089 bzero(fwr, sizeof (*fwr));
7090
7091 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7092
7093 f->pending = 1;
7094 t4_wrq_tx(sc, wr);
7095 return (0);
7096}
7097
7098int
7099t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
7100{
7101 struct adapter *sc = iq->adapter;
7102 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
7103 unsigned int idx = GET_TID(rpl);
7104
7105 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
7106 rss->opcode));
7107
7108 if (idx >= sc->tids.ftid_base &&
7109 (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
7110 unsigned int rc = G_COOKIE(rpl->cookie);
7111 struct filter_entry *f = &sc->tids.ftid_tab[idx];
7112
7113 mtx_lock(&sc->tids.ftid_lock);
7114 if (rc == FW_FILTER_WR_FLT_ADDED) {
7115 KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
7116 __func__, idx));
7117 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
7118 f->pending = 0; /* asynchronous setup completed */
7119 f->valid = 1;
7120 } else {
7121 if (rc != FW_FILTER_WR_FLT_DELETED) {
7122 /* Add or delete failed, display an error */
7123 log(LOG_ERR,
7124 "filter %u setup failed with error %u\n",
7125 idx, rc);
7126 }
7127
7128 clear_filter(f);
7129 sc->tids.ftids_in_use--;
7130 }
7131 wakeup(&sc->tids.ftid_tab);
7132 mtx_unlock(&sc->tids.ftid_lock);
7133 }
7134
7135 return (0);
7136}
7137
7138static int
7139get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7140{
7141 int rc;
7142
7143 if (cntxt->cid > M_CTXTQID)
7144 return (EINVAL);
7145
7146 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7147 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7148 return (EINVAL);
7149
7150 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7151 if (rc)
7152 return (rc);
7153
7154 if (sc->flags & FW_OK) {
7155 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7156 &cntxt->data[0]);
7157 if (rc == 0)
7158 goto done;
7159 }
7160
7161 /*
7162 * Read via firmware failed or wasn't even attempted. Read directly via
7163 * the backdoor.
7164 */
7165 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7166done:
7167 end_synchronized_op(sc, 0);
7168 return (rc);
7169}
7170
7171static int
7172load_fw(struct adapter *sc, struct t4_data *fw)
7173{
7174 int rc;
7175 uint8_t *fw_data;
7176
7177 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
7178 if (rc)
7179 return (rc);
7180
7181 if (sc->flags & FULL_INIT_DONE) {
7182 rc = EBUSY;
7183 goto done;
7184 }
7185
7186 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
7187 if (fw_data == NULL) {
7188 rc = ENOMEM;
7189 goto done;
7190 }
7191
7192 rc = copyin(fw->data, fw_data, fw->len);
7193 if (rc == 0)
7194 rc = -t4_load_fw(sc, fw_data, fw->len);
7195
7196 free(fw_data, M_CXGBE);
7197done:
7198 end_synchronized_op(sc, 0);
7199 return (rc);
7200}
7201
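/*
 * Card memory is read through a PCIe memory window: position_memwin() points
 * window 'win' at the target address and the loop copies out up to one
 * aperture (mw_aperture bytes) per pass before repositioning.
 */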
7202static int
7203read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
7204{
7205 uint32_t addr, off, remaining, i, n;
7206 uint32_t *buf, *b;
7207 uint32_t mw_base, mw_aperture;
7208 int rc;
7209 uint8_t *dst;
7210
7211 rc = validate_mem_range(sc, mr->addr, mr->len);
7212 if (rc != 0)
7213 return (rc);
7214
7215 memwin_info(sc, win, &mw_base, &mw_aperture);
7216 buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
7217 addr = mr->addr;
7218 remaining = mr->len;
7219 dst = (void *)mr->data;
7220
7221 while (remaining) {
7222 off = position_memwin(sc, win, addr);
7223
7224 /* number of bytes that we'll copy in the inner loop */
7225 n = min(remaining, mw_aperture - off);
7226 for (i = 0; i < n; i += 4)
7227 *b++ = t4_read_reg(sc, mw_base + off + i);
7228
7229 rc = copyout(buf, dst, n);
7230 if (rc != 0)
7231 break;
7232
7233 b = buf;
7234 dst += n;
7235 remaining -= n;
7236 addr += n;
7237 }
7238
7239 free(buf, M_CXGBE);
7240 return (rc);
7241}
7242
7243static int
7244read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7245{
7246 int rc;
7247
7248 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7249 return (EINVAL);
7250
7251 if (i2cd->len > 1) {
7252 /* XXX: need fw support for longer reads in one go */
7253 return (ENOTSUP);
7254 }
7255
7256 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7257 if (rc)
7258 return (rc);
7259 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7260 i2cd->offset, &i2cd->data[0]);
7261 end_synchronized_op(sc, 0);
7262
7263 return (rc);
7264}
7265
7266int
7267t4_os_find_pci_capability(struct adapter *sc, int cap)
7268{
7269 int i;
7270
7271 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7272}
7273
7274int
7275t4_os_pci_save_state(struct adapter *sc)
7276{
7277 device_t dev;
7278 struct pci_devinfo *dinfo;
7279
7280 dev = sc->dev;
7281 dinfo = device_get_ivars(dev);
7282
7283 pci_cfg_save(dev, dinfo, 0);
7284 return (0);
7285}
7286
7287int
7288t4_os_pci_restore_state(struct adapter *sc)
7289{
7290 device_t dev;
7291 struct pci_devinfo *dinfo;
7292
7293 dev = sc->dev;
7294 dinfo = device_get_ivars(dev);
7295
7296 pci_cfg_restore(dev, dinfo);
7297 return (0);
7298}
7299
7300void
7301t4_os_portmod_changed(const struct adapter *sc, int idx)
7302{
7303 struct port_info *pi = sc->port[idx];
7304 static const char *mod_str[] = {
7305 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7306 };
7307
7308 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7309 if_printf(pi->ifp, "transceiver unplugged.\n");
7310 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7311 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7312 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7313 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7314 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7315 if_printf(pi->ifp, "%s transceiver inserted.\n",
7316 mod_str[pi->mod_type]);
7317 } else {
7318 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7319 pi->mod_type);
7320 }
7321}
7322
7323void
7324t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7325{
7326 struct port_info *pi = sc->port[idx];
7327 struct ifnet *ifp = pi->ifp;
7328
7329 if (link_stat) {
7330 pi->linkdnrc = -1;
7331 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7332 if_link_state_change(ifp, LINK_STATE_UP);
7333 } else {
7334 if (reason >= 0)
7335 pi->linkdnrc = reason;
7336 if_link_state_change(ifp, LINK_STATE_DOWN);
7337 }
7338}
7339
7340void
7341t4_iterate(void (*func)(struct adapter *, void *), void *arg)
7342{
7343 struct adapter *sc;
7344
7345 mtx_lock(&t4_list_lock);
7346 SLIST_FOREACH(sc, &t4_list, link) {
7347 /*
7348 * func should not make any assumptions about what state sc is
7349 * in - the only guarantee is that sc->sc_lock is a valid lock.
7350 */
7351 func(sc, arg);
7352 }
7353 mtx_unlock(&t4_list_lock);
7354}
7355
7356static int
7357t4_open(struct cdev *dev, int flags, int type, struct thread *td)
7358{
7359 return (0);
7360}
7361
7362static int
7363t4_close(struct cdev *dev, int flags, int type, struct thread *td)
7364{
7365 return (0);
7366}
7367
7368static int
7369t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
7370 struct thread *td)
7371{
7372 int rc;
7373 struct adapter *sc = dev->si_drv1;
7374
7375 rc = priv_check(td, PRIV_DRIVER);
7376 if (rc != 0)
7377 return (rc);
7378
7379 switch (cmd) {
7380 case CHELSIO_T4_GETREG: {
7381 struct t4_reg *edata = (struct t4_reg *)data;
7382
7383 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7384 return (EFAULT);
7385
7386 if (edata->size == 4)
7387 edata->val = t4_read_reg(sc, edata->addr);
7388 else if (edata->size == 8)
7389 edata->val = t4_read_reg64(sc, edata->addr);
7390 else
7391 return (EINVAL);
7392
7393 break;
7394 }
7395 case CHELSIO_T4_SETREG: {
7396 struct t4_reg *edata = (struct t4_reg *)data;
7397
7398 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
7399 return (EFAULT);
7400
7401 if (edata->size == 4) {
7402 if (edata->val & 0xffffffff00000000)
7403 return (EINVAL);
7404 t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
7405 } else if (edata->size == 8)
7406 t4_write_reg64(sc, edata->addr, edata->val);
7407 else
7408 return (EINVAL);
7409 break;
7410 }
7411 case CHELSIO_T4_REGDUMP: {
7412 struct t4_regdump *regs = (struct t4_regdump *)data;
7413 int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
7414 uint8_t *buf;
7415
7416 if (regs->len < reglen) {
7417 regs->len = reglen; /* hint to the caller */
7418 return (ENOBUFS);
7419 }
7420
7421 regs->len = reglen;
7422 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
7423 t4_get_regs(sc, regs, buf);
7424 rc = copyout(buf, regs->data, reglen);
7425 free(buf, M_CXGBE);
7426 break;
7427 }
7428 case CHELSIO_T4_GET_FILTER_MODE:
7429 rc = get_filter_mode(sc, (uint32_t *)data);
7430 break;
7431 case CHELSIO_T4_SET_FILTER_MODE:
7432 rc = set_filter_mode(sc, *(uint32_t *)data);
7433 break;
7434 case CHELSIO_T4_GET_FILTER:
7435 rc = get_filter(sc, (struct t4_filter *)data);
7436 break;
7437 case CHELSIO_T4_SET_FILTER:
7438 rc = set_filter(sc, (struct t4_filter *)data);
7439 break;
7440 case CHELSIO_T4_DEL_FILTER:
7441 rc = del_filter(sc, (struct t4_filter *)data);
7442 break;
7443 case CHELSIO_T4_GET_SGE_CONTEXT:
7444 rc = get_sge_context(sc, (struct t4_sge_context *)data);
7445 break;
7446 case CHELSIO_T4_LOAD_FW:
7447 rc = load_fw(sc, (struct t4_data *)data);
7448 break;
7449 case CHELSIO_T4_GET_MEM:
7450 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
7451 break;
7452 case CHELSIO_T4_GET_I2C:
7453 rc = read_i2c(sc, (struct t4_i2c_data *)data);
7454 break;
7455 case CHELSIO_T4_CLEAR_STATS: {
7456 int i;
7457 u_int port_id = *(uint32_t *)data;
7458 struct port_info *pi;
7459
7460 if (port_id >= sc->params.nports)
7461 return (EINVAL);
7462
7463 /* MAC stats */
7464 t4_clr_port_stats(sc, port_id);
7465
7466 pi = sc->port[port_id];
7467 if (pi->flags & PORT_INIT_DONE) {
7468 struct sge_rxq *rxq;
7469 struct sge_txq *txq;
7470 struct sge_wrq *wrq;
7471
7472 for_each_rxq(pi, i, rxq) {
7473#if defined(INET) || defined(INET6)
7474 rxq->lro.lro_queued = 0;
7475 rxq->lro.lro_flushed = 0;
7476#endif
7477 rxq->rxcsum = 0;
7478 rxq->vlan_extraction = 0;
7479 }
7480
7481 for_each_txq(pi, i, txq) {
7482 txq->txcsum = 0;
7483 txq->tso_wrs = 0;
7484 txq->vlan_insertion = 0;
7485 txq->imm_wrs = 0;
7486 txq->sgl_wrs = 0;
7487 txq->txpkt_wrs = 0;
7488 txq->txpkts_wrs = 0;
7489 txq->txpkts_pkts = 0;
7490 txq->br->br_drops = 0;
7491 txq->no_dmamap = 0;
7492 txq->no_desc = 0;
7493 }
7494
7495#ifdef TCP_OFFLOAD
7496 /* nothing to clear for each ofld_rxq */
7497
7498 for_each_ofld_txq(pi, i, wrq) {
7499 wrq->tx_wrs = 0;
7500 wrq->no_desc = 0;
7501 }
7502#endif
7503 wrq = &sc->sge.ctrlq[pi->port_id];
7504 wrq->tx_wrs = 0;
7505 wrq->no_desc = 0;
7506 }
7507 break;
7508 }
7509 case CHELSIO_T4_GET_TRACER:
7510 rc = t4_get_tracer(sc, (struct t4_tracer *)data);
7511 break;
7512 case CHELSIO_T4_SET_TRACER:
7513 rc = t4_set_tracer(sc, (struct t4_tracer *)data);
7514 break;
7515 default:
7516 rc = EINVAL;
7517 }
7518
7519 return (rc);
7520}
7521
7522#ifdef TCP_OFFLOAD
7523static int
7524toe_capability(struct port_info *pi, int enable)
7525{
7526 int rc;
7527 struct adapter *sc = pi->adapter;
7528
7529 ASSERT_SYNCHRONIZED_OP(sc);
7530
7531 if (!is_offload(sc))
7532 return (ENODEV);
7533
7534 if (enable) {
7535 if (!(sc->flags & FULL_INIT_DONE)) {
7536 rc = cxgbe_init_synchronized(pi);
7537 if (rc)
7538 return (rc);
7539 }
7540
7541 if (isset(&sc->offload_map, pi->port_id))
7542 return (0);
7543
7544 if (!(sc->flags & TOM_INIT_DONE)) {
7545 rc = t4_activate_uld(sc, ULD_TOM);
7546 if (rc == EAGAIN) {
7547 log(LOG_WARNING,
7548 "You must kldload t4_tom.ko before trying "
7549 "to enable TOE on a cxgbe interface.\n");
7550 }
7551 if (rc != 0)
7552 return (rc);
7553 KASSERT(sc->tom_softc != NULL,
7554 ("%s: TOM activated but softc NULL", __func__));
7555 KASSERT(sc->flags & TOM_INIT_DONE,
7556 ("%s: TOM activated but flag not set", __func__));
7557 }
7558
7559 setbit(&sc->offload_map, pi->port_id);
7560 } else {
7561 if (!isset(&sc->offload_map, pi->port_id))
7562 return (0);
7563
7564 KASSERT(sc->flags & TOM_INIT_DONE,
7565 ("%s: TOM never initialized?", __func__));
7566 clrbit(&sc->offload_map, pi->port_id);
7567 }
7568
7569 return (0);
7570}
7571
7572/*
7573 * Add an upper layer driver to the global list.
7574 */
7575int
7576t4_register_uld(struct uld_info *ui)
7577{
7578 int rc = 0;
7579 struct uld_info *u;
7580
7581 mtx_lock(&t4_uld_list_lock);
7582 SLIST_FOREACH(u, &t4_uld_list, link) {
7583 if (u->uld_id == ui->uld_id) {
7584 rc = EEXIST;
7585 goto done;
7586 }
7587 }
7588
7589 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7590 ui->refcount = 0;
7591done:
7592 mtx_unlock(&t4_uld_list_lock);
7593 return (rc);
7594}
7595
7596int
7597t4_unregister_uld(struct uld_info *ui)
7598{
7599 int rc = EINVAL;
7600 struct uld_info *u;
7601
7602 mtx_lock(&t4_uld_list_lock);
7603
7604 SLIST_FOREACH(u, &t4_uld_list, link) {
7605 if (u == ui) {
7606 if (ui->refcount > 0) {
7607 rc = EBUSY;
7608 goto done;
7609 }
7610
7611 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7612 rc = 0;
7613 goto done;
7614 }
7615 }
7616done:
7617 mtx_unlock(&t4_uld_list_lock);
7618 return (rc);
7619}
7620
7621int
7622t4_activate_uld(struct adapter *sc, int id)
7623{
7624 int rc = EAGAIN;
7625 struct uld_info *ui;
7626
7627 ASSERT_SYNCHRONIZED_OP(sc);
7628
7629 mtx_lock(&t4_uld_list_lock);
7630
7631 SLIST_FOREACH(ui, &t4_uld_list, link) {
7632 if (ui->uld_id == id) {
7633 rc = ui->activate(sc);
7634 if (rc == 0)
7635 ui->refcount++;
7636 goto done;
7637 }
7638 }
7639done:
7640 mtx_unlock(&t4_uld_list_lock);
7641
7642 return (rc);
7643}
7644
7645int
7646t4_deactivate_uld(struct adapter *sc, int id)
7647{
7648 int rc = EINVAL;
7649 struct uld_info *ui;
7650
7651 ASSERT_SYNCHRONIZED_OP(sc);
7652
7653 mtx_lock(&t4_uld_list_lock);
7654
7655 SLIST_FOREACH(ui, &t4_uld_list, link) {
7656 if (ui->uld_id == id) {
7657 rc = ui->deactivate(sc);
7658 if (rc == 0)
7659 ui->refcount--;
7660 goto done;
7661 }
7662 }
7663done:
7664 mtx_unlock(&t4_uld_list_lock);
7665
7666 return (rc);
7667}
7668#endif
7669
7670/*
7671 * Come up with reasonable defaults for some of the tunables, provided they're
7672 * not set by the user (in which case we'll use the values as is).
7673 */
7674static void
7675tweak_tunables(void)
7676{
7677 int nc = mp_ncpus; /* our snapshot of the number of CPUs */
7678
7679 if (t4_ntxq10g < 1)
7680 t4_ntxq10g = min(nc, NTXQ_10G);
7681
7682 if (t4_ntxq1g < 1)
7683 t4_ntxq1g = min(nc, NTXQ_1G);
7684
7685 if (t4_nrxq10g < 1)
7686 t4_nrxq10g = min(nc, NRXQ_10G);
7687
7688 if (t4_nrxq1g < 1)
7689 t4_nrxq1g = min(nc, NRXQ_1G);
7690
7691#ifdef TCP_OFFLOAD
7692 if (t4_nofldtxq10g < 1)
7693 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
7694
7695 if (t4_nofldtxq1g < 1)
7696 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
7697
7698 if (t4_nofldrxq10g < 1)
7699 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
7700
7701 if (t4_nofldrxq1g < 1)
7702 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
7703
7704 if (t4_toecaps_allowed == -1)
7705 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
7706#else
7707 if (t4_toecaps_allowed == -1)
7708 t4_toecaps_allowed = 0;
7709#endif
7710
7711 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
7712 t4_tmr_idx_10g = TMR_IDX_10G;
7713
7714 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
7715 t4_pktc_idx_10g = PKTC_IDX_10G;
7716
7717 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
7718 t4_tmr_idx_1g = TMR_IDX_1G;
7719
7720 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
7721 t4_pktc_idx_1g = PKTC_IDX_1G;
7722
7723 if (t4_qsize_txq < 128)
7724 t4_qsize_txq = 128;
7725
7726 if (t4_qsize_rxq < 128)
7727 t4_qsize_rxq = 128;
7728 while (t4_qsize_rxq & 7)
7729 t4_qsize_rxq++;
7730
7731 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
7732}
7733
7734static int
7735mod_event(module_t mod, int cmd, void *arg)
7736{
7737 int rc = 0;
7738 static int loaded = 0;
7739
7740 switch (cmd) {
7741 case MOD_LOAD:
7742 if (atomic_fetchadd_int(&loaded, 1))
7743 break;
7744 t4_sge_modload();
7745 mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
7746 SLIST_INIT(&t4_list);
7747#ifdef TCP_OFFLOAD
7748 mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
7749 SLIST_INIT(&t4_uld_list);
7750#endif
7751 t4_tracer_modload();
7752 tweak_tunables();
7753 break;
7754
7755 case MOD_UNLOAD:
7756 if (atomic_fetchadd_int(&loaded, -1) > 1)
7757 break;
7758 t4_tracer_modunload();
7759#ifdef TCP_OFFLOAD
7760 mtx_lock(&t4_uld_list_lock);
7761 if (!SLIST_EMPTY(&t4_uld_list)) {
7762 rc = EBUSY;
7763 mtx_unlock(&t4_uld_list_lock);
7764 break;
7765 }
7766 mtx_unlock(&t4_uld_list_lock);
7767 mtx_destroy(&t4_uld_list_lock);
7768#endif
7769 mtx_lock(&t4_list_lock);
7770 if (!SLIST_EMPTY(&t4_list)) {
7771 rc = EBUSY;
7772 mtx_unlock(&t4_list_lock);
7773 break;
7774 }
7775 mtx_unlock(&t4_list_lock);
7776 mtx_destroy(&t4_list_lock);
7777 break;
7778 }
7779
7780 return (rc);
7781}
7782
7783static devclass_t t4_devclass, t5_devclass;
7784static devclass_t cxgbe_devclass, cxl_devclass;
7785
7786DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
7787MODULE_VERSION(t4nex, 1);
7788MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
7789
7790DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
7791MODULE_VERSION(t5nex, 1);
7792MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
7793
7794DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
7795MODULE_VERSION(cxgbe, 1);
7796
7797DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
7798MODULE_VERSION(cxl, 1);