1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_main.c 255005 2013-08-28 20:45:45Z np $");
30
31#include "opt_inet.h"
32#include "opt_inet6.h"
33
34#include <sys/param.h>
35#include <sys/conf.h>
36#include <sys/priv.h>
37#include <sys/kernel.h>
38#include <sys/bus.h>
39#include <sys/module.h>
40#include <sys/malloc.h>
41#include <sys/queue.h>
42#include <sys/taskqueue.h>
43#include <sys/pciio.h>
44#include <dev/pci/pcireg.h>
45#include <dev/pci/pcivar.h>
46#include <dev/pci/pci_private.h>
47#include <sys/firmware.h>
48#include <sys/sbuf.h>
49#include <sys/smp.h>
50#include <sys/socket.h>
51#include <sys/sockio.h>
52#include <sys/sysctl.h>
53#include <net/ethernet.h>
54#include <net/if.h>
55#include <net/if_types.h>
56#include <net/if_dl.h>
57#include <net/if_vlan_var.h>
58#if defined(__i386__) || defined(__amd64__)
59#include <vm/vm.h>
60#include <vm/pmap.h>
61#endif
62
63#include "common/common.h"
64#include "common/t4_msg.h"
65#include "common/t4_regs.h"
66#include "common/t4_regs_values.h"
67#include "t4_ioctl.h"
68#include "t4_l2t.h"
69
/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
/* newbus method table for the T4 nexus device. */
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe, t4_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)	/* softc for t4nex is the whole adapter */
};
86
87
88/* T4 port (cxgbe) interface */
89static int cxgbe_probe(device_t);
90static int cxgbe_attach(device_t);
91static int cxgbe_detach(device_t);
92static device_method_t cxgbe_methods[] = {
93 DEVMETHOD(device_probe, cxgbe_probe),
94 DEVMETHOD(device_attach, cxgbe_attach),
95 DEVMETHOD(device_detach, cxgbe_detach),
96 { 0, 0 }
97};
98static driver_t cxgbe_driver = {
99 "cxgbe",
100 cxgbe_methods,
101 sizeof(struct port_info)
102};
103
static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

/* Character device (/dev/t4nexN) used by cxgbetool for register/memory i/o. */
static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};
116
/* T5 bus driver interface */
static int t5_probe(device_t);
/*
 * T5 shares attach/detach with T4; only probe differs (different device ids).
 */
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe, t5_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};
131
132
/* T5 port (cxl) interface; reuses the cxgbe port methods. */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* Same entry points as t4_cdevsw; only the device name differs. */
static struct cdevsw t5_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t5nex",
};
148
/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct mtx t4_list_lock;		/* protects t4_list */
static SLIST_HEAD(, adapter) t4_list;	/* all attached adapters */
#ifdef TCP_OFFLOAD
static struct mtx t4_uld_list_lock;	/* protects t4_uld_list */
static SLIST_HEAD(, uld_info) t4_uld_list;
#endif
169
/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.  tmr_idx selects the interrupt
 * holdoff timer, pktc_idx the packet-count threshold (-1 disables it).
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF "default"
#define FLASH_CF "flash"
#define UWIRE_CF "uwire"
#define FPGA_CF "fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

/* -1 => tweak_tunables() decides based on TOM availability. */
static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_iscsicaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

/* T5 only: use write-combined mappings of BAR2 doorbells when enabled. */
static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
293
/*
 * Result of interrupt/queue sizing done by cfg_itype_and_nqueues() and
 * consumed by t4_attach() when it carves up the SGE.
 */
struct intrs_and_queues {
	int intr_type;		/* INTx, MSI, or MSI-X */
	int nirq;		/* Number of vectors */
	int intr_flags;		/* INTR_* flags to OR into sc->flags */
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};
309
/* Software state for one hardware filter (TCAM entry). */
struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;	/* user-supplied match spec */
};
319
/* Bits for update_mac_settings() telling it which MAC parameters changed. */
enum {
	XGMAC_MTU = (1 << 0),
	XGMAC_PROMISC = (1 << 1),
	XGMAC_ALLMULTI = (1 << 2),
	XGMAC_VLANEX = (1 << 3),
	XGMAC_UCADDR = (1 << 4),
	XGMAC_MCADDRS = (1 << 5),

	XGMAC_ALL = 0xffff	/* update everything */
};
330
/* Forward declarations: attach-time setup helpers. */
static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
static uint32_t position_memwin(struct adapter *, int, uint32_t);
static int cfg_itype_and_nqueues(struct adapter *, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
/* Port bring-up/tear-down and runtime helpers. */
static void build_medialist(struct port_info *);
static int update_mac_settings(struct port_info *, int);
static int cxgbe_init_synchronized(struct port_info *);
static int cxgbe_uninit_synchronized(struct port_info *);
static int setup_intr_handlers(struct adapter *);
static int adapter_full_init(struct adapter *);
static int adapter_full_uninit(struct adapter *);
static int port_full_init(struct port_info *);
static int port_full_uninit(struct port_info *);
static void quiesce_eq(struct adapter *, struct sge_eq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
    unsigned int);
static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
/* Default (unregistered) message handlers. */
static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
/* sysctl plumbing. */
static int t4_sysctls(struct adapter *);
static int cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
#endif
/* Transmit kick and filter management. */
static inline void txq_start(struct ifnet *, struct sge_txq *);
static uint32_t fconf_to_mode(uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t fspec_to_fconf(struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
/* ioctl back-ends. */
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif
static int mod_event(module_t, int, void *);
431
/* PCI device id -> human-readable description, used by the probe routines. */
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif

/* No easy way to include t4_msg.h before adapter.h so we check this way */
CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
488
489static int
490t4_probe(device_t dev)
491{
492 int i;
493 uint16_t v = pci_get_vendor(dev);
494 uint16_t d = pci_get_device(dev);
495 uint8_t f = pci_get_function(dev);
496
497 if (v != PCI_VENDOR_ID_CHELSIO)
498 return (ENXIO);
499
500 /* Attach only to PF0 of the FPGA */
501 if (d == 0xa000 && f != 0)
502 return (ENXIO);
503
504 for (i = 0; i < nitems(t4_pciids); i++) {
505 if (d == t4_pciids[i].device) {
506 device_set_desc(dev, t4_pciids[i].desc);
507 return (BUS_PROBE_DEFAULT);
508 }
509 }
510
511 return (ENXIO);
512}
513
514static int
515t5_probe(device_t dev)
516{
517 int i;
518 uint16_t v = pci_get_vendor(dev);
519 uint16_t d = pci_get_device(dev);
520 uint8_t f = pci_get_function(dev);
521
522 if (v != PCI_VENDOR_ID_CHELSIO)
523 return (ENXIO);
524
525 /* Attach only to PF0 of the FPGA */
526 if (d == 0xb000 && f != 0)
527 return (ENXIO);
528
529 for (i = 0; i < nitems(t5_pciids); i++) {
530 if (d == t5_pciids[i].device) {
531 device_set_desc(dev, t5_pciids[i].desc);
532 return (BUS_PROBE_DEFAULT);
533 }
534 }
535
536 return (ENXIO);
537}
538
539static int
540t4_attach(device_t dev)
541{
542 struct adapter *sc;
543 int rc = 0, i, n10g, n1g, rqidx, tqidx;
544 struct intrs_and_queues iaq;
545 struct sge *s;
546#ifdef TCP_OFFLOAD
547 int ofld_rqidx, ofld_tqidx;
548#endif
549
550 sc = device_get_softc(dev);
551 sc->dev = dev;
552
553 pci_enable_busmaster(dev);
554 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
555 uint32_t v;
556
557 pci_set_max_read_req(dev, 4096);
558 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
559 v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
560 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
561 }
562
563 sc->traceq = -1;
564 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
565 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
566 device_get_nameunit(dev));
567
568 snprintf(sc->lockname, sizeof(sc->lockname), "%s",
569 device_get_nameunit(dev));
570 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
571 mtx_lock(&t4_list_lock);
572 SLIST_INSERT_HEAD(&t4_list, sc, link);
573 mtx_unlock(&t4_list_lock);
574
575 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
576 TAILQ_INIT(&sc->sfl);
577 callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
578
579 rc = map_bars_0_and_4(sc);
580 if (rc != 0)
581 goto done; /* error message displayed already */
582
583 /*
584 * This is the real PF# to which we're attaching. Works from within PCI
585 * passthrough environments too, where pci_get_function() could return a
586 * different PF# depending on the passthrough configuration. We need to
587 * use the real PF# in all our communication with the firmware.
588 */
589 sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
590 sc->mbox = sc->pf;
591
592 memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
593 sc->an_handler = an_not_handled;
594 for (i = 0; i < nitems(sc->cpl_handler); i++)
595 sc->cpl_handler[i] = cpl_not_handled;
596 for (i = 0; i < nitems(sc->fw_msg_handler); i++)
597 sc->fw_msg_handler[i] = fw_msg_not_handled;
598 t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
599 t4_register_cpl_handler(sc, CPL_TRACE_PKT, t4_trace_pkt);
600 t4_register_cpl_handler(sc, CPL_TRACE_PKT_T5, t5_trace_pkt);
601 t4_init_sge_cpl_handlers(sc);
602
603
603 /* Prepare the adapter for operation */
604 rc = -t4_prep_adapter(sc);
605 if (rc != 0) {
606 device_printf(dev, "failed to prepare adapter: %d.\n", rc);
607 goto done;
608 }
609
610 /*
611 * Do this really early, with the memory windows set up even before the
612 * character device. The userland tool's register i/o and mem read
613 * will work even in "recovery mode".
614 */
615 setup_memwin(sc);
616 sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
617 device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
618 device_get_nameunit(dev));
619 if (sc->cdev == NULL)
620 device_printf(dev, "failed to create nexus char device.\n");
621 else
622 sc->cdev->si_drv1 = sc;
623
624 /* Go no further if recovery mode has been requested. */
625 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
626 device_printf(dev, "recovery mode.\n");
627 goto done;
628 }
629
630 /* Prepare the firmware for operation */
631 rc = prep_firmware(sc);
632 if (rc != 0)
633 goto done; /* error message displayed already */
634
635 rc = get_params__post_init(sc);
636 if (rc != 0)
637 goto done; /* error message displayed already */
638
639 rc = set_params__post_init(sc);
640 if (rc != 0)
641 goto done; /* error message displayed already */
642
643 rc = map_bar_2(sc);
644 if (rc != 0)
645 goto done; /* error message displayed already */
646
647 rc = t4_create_dma_tag(sc);
648 if (rc != 0)
649 goto done; /* error message displayed already */
650
651 /*
652 * First pass over all the ports - allocate VIs and initialize some
653 * basic parameters like mac address, port type, etc. We also figure
654 * out whether a port is 10G or 1G and use that information when
655 * calculating how many interrupts to attempt to allocate.
656 */
657 n10g = n1g = 0;
658 for_each_port(sc, i) {
659 struct port_info *pi;
660
661 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
662 sc->port[i] = pi;
663
664 /* These must be set before t4_port_init */
665 pi->adapter = sc;
666 pi->port_id = i;
667
668 /* Allocate the vi and initialize parameters like mac addr */
669 rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
670 if (rc != 0) {
671 device_printf(dev, "unable to initialize port %d: %d\n",
672 i, rc);
673 free(pi, M_CXGBE);
674 sc->port[i] = NULL;
675 goto done;
676 }
677
678 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
679 device_get_nameunit(dev), i);
680 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
681 sc->chan_map[pi->tx_chan] = i;
682
683 if (is_10G_port(pi) || is_40G_port(pi)) {
684 n10g++;
685 pi->tmr_idx = t4_tmr_idx_10g;
686 pi->pktc_idx = t4_pktc_idx_10g;
687 } else {
688 n1g++;
689 pi->tmr_idx = t4_tmr_idx_1g;
690 pi->pktc_idx = t4_pktc_idx_1g;
691 }
692
693 pi->xact_addr_filt = -1;
694 pi->linkdnrc = -1;
695
696 pi->qsize_rxq = t4_qsize_rxq;
697 pi->qsize_txq = t4_qsize_txq;
698
699 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
700 if (pi->dev == NULL) {
701 device_printf(dev,
702 "failed to add device for port %d.\n", i);
703 rc = ENXIO;
704 goto done;
705 }
706 device_set_softc(pi->dev, pi);
707 }
708
709 /*
710 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
711 */
712 rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
713 if (rc != 0)
714 goto done; /* error message displayed already */
715
716 sc->intr_type = iaq.intr_type;
717 sc->intr_count = iaq.nirq;
718 sc->flags |= iaq.intr_flags;
719
720 s = &sc->sge;
721 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
722 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
723 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */
724 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
725 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
726
727#ifdef TCP_OFFLOAD
728 if (is_offload(sc)) {
729
730 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
731 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
732 s->neq += s->nofldtxq + s->nofldrxq;
733 s->niq += s->nofldrxq;
734
735 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
736 M_CXGBE, M_ZERO | M_WAITOK);
737 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
738 M_CXGBE, M_ZERO | M_WAITOK);
739 }
740#endif
741
742 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
743 M_ZERO | M_WAITOK);
744 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
745 M_ZERO | M_WAITOK);
746 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
747 M_ZERO | M_WAITOK);
748 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
749 M_ZERO | M_WAITOK);
750 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
751 M_ZERO | M_WAITOK);
752
753 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
754 M_ZERO | M_WAITOK);
755
756 t4_init_l2t(sc, M_WAITOK);
757
758 /*
759 * Second pass over the ports. This time we know the number of rx and
760 * tx queues that each port should get.
761 */
762 rqidx = tqidx = 0;
763#ifdef TCP_OFFLOAD
764 ofld_rqidx = ofld_tqidx = 0;
765#endif
766 for_each_port(sc, i) {
767 struct port_info *pi = sc->port[i];
768
769 if (pi == NULL)
770 continue;
771
772 pi->first_rxq = rqidx;
773 pi->first_txq = tqidx;
774 if (is_10G_port(pi) || is_40G_port(pi)) {
775 pi->nrxq = iaq.nrxq10g;
776 pi->ntxq = iaq.ntxq10g;
777 } else {
778 pi->nrxq = iaq.nrxq1g;
779 pi->ntxq = iaq.ntxq1g;
780 }
781
782 rqidx += pi->nrxq;
783 tqidx += pi->ntxq;
784
785#ifdef TCP_OFFLOAD
786 if (is_offload(sc)) {
787 pi->first_ofld_rxq = ofld_rqidx;
788 pi->first_ofld_txq = ofld_tqidx;
789 if (is_10G_port(pi) || is_40G_port(pi)) {
790 pi->nofldrxq = iaq.nofldrxq10g;
791 pi->nofldtxq = iaq.nofldtxq10g;
792 } else {
793 pi->nofldrxq = iaq.nofldrxq1g;
794 pi->nofldtxq = iaq.nofldtxq1g;
795 }
796 ofld_rqidx += pi->nofldrxq;
797 ofld_tqidx += pi->nofldtxq;
798 }
799#endif
800 }
801
802 rc = setup_intr_handlers(sc);
803 if (rc != 0) {
804 device_printf(dev,
805 "failed to setup interrupt handlers: %d\n", rc);
806 goto done;
807 }
808
809 rc = bus_generic_attach(dev);
810 if (rc != 0) {
811 device_printf(dev,
812 "failed to attach all child ports: %d\n", rc);
813 goto done;
814 }
815
816 device_printf(dev,
817 "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
818 sc->params.pci.width, sc->params.nports, sc->intr_count,
819 sc->intr_type == INTR_MSIX ? "MSI-X" :
820 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
821 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
822
823 t4_set_desc(sc);
824
825done:
826 if (rc != 0 && sc->cdev) {
827 /* cdev was created and so cxgbetool works; recover that way. */
828 device_printf(dev,
829 "error during attach, adapter is now in recovery mode.\n");
830 rc = 0;
831 }
832
833 if (rc != 0)
834 t4_detach(dev);
835 else
836 t4_sysctls(sc);
837
838 return (rc);
839}
840
/*
 * Idempotent
 *
 * Tears down everything t4_attach() set up, in reverse dependency order:
 * interrupts off, char device, child ports, IRQs, VIs, queues, firmware
 * session, PCI resources, locks.  Safe to call on a partially-attached
 * adapter (each step checks whether its resource exists); the final bzero
 * makes a repeat call a no-op.
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	/* Unlink from the global adapter list before destroying the lock. */
	if (mtx_initialized(&sc->sc_lock)) {
		mtx_lock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);

	/* Wipe the softc so a second detach sees nothing to free. */
	bzero(sc, sizeof(*sc));

	return (0);
}
937
938
939static int
940cxgbe_probe(device_t dev)
941{
942 char buf[128];
943 struct port_info *pi = device_get_softc(dev);
944
945 snprintf(buf, sizeof(buf), "port %d", pi->port_id);
946 device_set_desc_copy(dev, buf);
947
948 return (BUS_PROBE_DEFAULT);
949}
950
/* ifnet capabilities advertised by every port; all are enabled by default. */
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
#define T4_CAP_ENABLE (T4_CAP)
955
/*
 * Attach one port: allocate and configure its ifnet, set up media, register
 * the vlan-config event handler, and attach to the ethernet layer.
 */
static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct ifnet *ifp;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	pi->ifp = ifp;
	ifp->if_softc = pi;

	callout_init(&pi->tick, CALLOUT_MPSAFE);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter))
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/* Initialize ifmedia for this port */
	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(pi);

	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, pi->hw_addr);

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		device_printf(dev,
		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
	} else
#endif
		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);

	cxgbe_sysctls(pi);

	return (0);
}
1013
/*
 * Detach routine for a port.  Marks the port doomed, claims the adapter's
 * BUSY flag (excluding concurrent synchronized operations), then tears down
 * the tracer queue, vlan handler, tick callout, queues, media, and ifnet.
 */
static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/* Tell if_ioctl and if_init that the port is going away */
	ADAPTER_LOCK(sc);
	SET_DOOMED(pi);
	wakeup(&sc->flags);
	/* Wait out any in-progress synchronized op, then claim the adapter. */
	while (IS_BUSY(sc))
		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
	SET_BUSY(sc);
#ifdef INVARIANTS
	sc->last_op = "t4detach";
	sc->last_op_thr = curthread;
#endif
	ADAPTER_UNLOCK(sc);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	if (pi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);

	/* Stop the tick callout under the port lock; drain outside it. */
	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	callout_stop(&pi->tick);
	PORT_UNLOCK(pi);
	callout_drain(&pi->tick);

	/* Let detach proceed even if these fail. */
	cxgbe_uninit_synchronized(pi);
	port_full_uninit(pi);

	ifmedia_removeall(&pi->media);
	ether_ifdetach(pi->ifp);
	if_free(pi->ifp);

	/* Release the adapter and wake up anyone waiting on it. */
	ADAPTER_LOCK(sc);
	CLR_BUSY(sc);
	wakeup(&sc->flags);
	ADAPTER_UNLOCK(sc);

	return (0);
}
1063
1064static void
1065cxgbe_init(void *arg)
1066{
1067 struct port_info *pi = arg;
1068 struct adapter *sc = pi->adapter;
1069
1070 if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
1071 return;
1072 cxgbe_init_synchronized(pi);
1073 end_synchronized_op(sc, 0);
1074}
1075
/*
 * ioctl handler for the port's ifnet.  Any operation that touches hardware
 * or driver state is wrapped in begin/end_synchronized_op to serialize with
 * other ops and with detach.  Returns 0 or an errno.
 */
static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags;
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (pi->flags & PORT_INIT_DONE) {
			/* Recompute free-list buffer sizes for the new MTU. */
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Already running: push PROMISC/ALLMULTI
				 * changes, if any, to the MAC. */
				flags = pi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					rc = update_mac_settings(pi,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else
				rc = cxgbe_init_synchronized(pi);
			pi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = cxgbe_uninit_synchronized(pi);
		end_synchronized_op(sc, 0);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(pi, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		/* mask = the capability bits the caller wants toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			/* TSO4 requires tx checksumming; drop it if needed. */
			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			/* Same dependency for TSO6 on IPv6 tx csum. */
			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			/* Propagate the LRO setting to every NIC rx queue. */
			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(pi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(pi, enable);
			if (rc != 0)
				goto fail;

			/*
			 * NOTE(review): this XORs the entire mask, not just
			 * IFCAP_TOE; any other bit in mask that is also
			 * toggled by a handler below would be flipped twice.
			 * Confirm this is intended.
			 */
			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(pi, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
		break;

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}
1249
/*
 * if_transmit handler.  Picks a tx queue (by flowid when present), and either
 * transmits directly while holding the queue lock, or enqueues to the queue's
 * buf_ring if the lock is contended.  Return value reflects the fate of the
 * mbuf passed in (0 = accepted/queued).
 */
static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
	struct buf_ring *br;
	int rc;

	M_ASSERTPKTHDR(m);

	/* Link down: drop immediately. */
	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	/* Spread flows across this port's tx queues. */
	if (m->m_flags & M_FLOWID)
		txq += (m->m_pkthdr.flowid % pi->ntxq);
	br = txq->br;

	if (TXQ_TRYLOCK(txq) == 0) {
		struct sge_eq *eq = &txq->eq;

		/*
		 * It is possible that t4_eth_tx finishes up and releases the
		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
		 * need to make sure that this mbuf doesn't just sit there in
		 * the drbr.
		 */

		rc = drbr_enqueue(ifp, br, m);
		/* Schedule a tx callout (unless one is pending or the eq is
		 * being destroyed) so the enqueued mbuf is picked up. */
		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
		    !(eq->flags & EQ_DOOMED))
			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
		return (rc);
	}

	/*
	 * txq->m is the mbuf that is held up due to a temporary shortage of
	 * resources and it should be put on the wire first.  Then what's in
	 * drbr and finally the mbuf that was just passed in to us.
	 *
	 * Return code should indicate the fate of the mbuf that was passed in
	 * this time.
	 */

	TXQ_LOCK_ASSERT_OWNED(txq);
	if (drbr_needs_enqueue(ifp, br) || txq->m) {

		/* Queued for transmission. */

		rc = drbr_enqueue(ifp, br, m);
		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
		(void) t4_eth_tx(ifp, txq, m);
		TXQ_UNLOCK(txq);
		return (rc);
	}

	/* Direct transmission. */
	rc = t4_eth_tx(ifp, txq, m);
	if (rc != 0 && txq->m)
		rc = 0;	/* held, will be transmitted soon (hopefully) */

	TXQ_UNLOCK(txq);
	return (rc);
}
1316
1317static void
1318cxgbe_qflush(struct ifnet *ifp)
1319{
1320 struct port_info *pi = ifp->if_softc;
1321 struct sge_txq *txq;
1322 int i;
1323 struct mbuf *m;
1324
1325 /* queues do not exist if !PORT_INIT_DONE. */
1326 if (pi->flags & PORT_INIT_DONE) {
1327 for_each_txq(pi, i, txq) {
1328 TXQ_LOCK(txq);
1329 m_freem(txq->m);
1330 txq->m = NULL;
1331 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
1332 m_freem(m);
1333 TXQ_UNLOCK(txq);
1334 }
1335 }
1336 if_qflush(ifp);
1337}
1338
1339static int
1340cxgbe_media_change(struct ifnet *ifp)
1341{
1342 struct port_info *pi = ifp->if_softc;
1343
1344 device_printf(pi->dev, "%s unimplemented.\n", __func__);
1345
1346 return (EOPNOTSUPP);
1347}
1348
1349static void
1350cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1351{
1352 struct port_info *pi = ifp->if_softc;
1353 struct ifmedia_entry *cur = pi->media.ifm_cur;
1354 int speed = pi->link_cfg.speed;
1355 int data = (pi->port_type << 8) | pi->mod_type;
1356
1357 if (cur->ifm_data != data) {
1358 build_medialist(pi);
1359 cur = pi->media.ifm_cur;
1360 }
1361
1362 ifmr->ifm_status = IFM_AVALID;
1363 if (!pi->link_cfg.link_ok)
1364 return;
1365
1366 ifmr->ifm_status |= IFM_ACTIVE;
1367
1368 /* active and current will differ iff current media is autoselect. */
1369 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
1370 return;
1371
1372 ifmr->ifm_active = IFM_ETHER | IFM_FDX;
1373 if (speed == SPEED_10000)
1374 ifmr->ifm_active |= IFM_10G_T;
1375 else if (speed == SPEED_1000)
1376 ifmr->ifm_active |= IFM_1000_T;
1377 else if (speed == SPEED_100)
1378 ifmr->ifm_active |= IFM_100_TX;
1379 else if (speed == SPEED_10)
1380 ifmr->ifm_active |= IFM_10_T;
1381 else
1382 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
1383 speed));
1384}
1385
/*
 * Handle a fatal adapter error: stop the SGE (clear GLOBALENABLE), mask all
 * interrupts, and log an emergency message.  The adapter is left stopped.
 */
void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}
1394
/*
 * Map BAR0 (register space; also records bus tag/handle and enables the
 * kernel doorbell) and BAR4 (MSI-X).  Resources allocated here are not
 * released on failure -- presumably the caller's teardown path frees them;
 * verify against t4_attach/t4_detach.
 */
static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->regs_rid, RF_ACTIVE);
	if (sc->regs_res == NULL) {
		device_printf(sc->dev, "cannot map registers.\n");
		return (ENXIO);
	}
	sc->bt = rman_get_bustag(sc->regs_res);
	sc->bh = rman_get_bushandle(sc->regs_res);
	sc->mmio_len = rman_get_size(sc->regs_res);
	/* Register-space (kernel) doorbells are always available. */
	setbit(&sc->doorbells, DOORBELL_KDB);

	sc->msix_rid = PCIR_BAR(4);
	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->msix_rid, RF_ACTIVE);
	if (sc->msix_res == NULL) {
		device_printf(sc->dev, "cannot map MSI-X BAR.\n");
		return (ENXIO);
	}

	return (0);
}
1420
/*
 * Map BAR2, the userspace doorbell BAR.  On T5 this also attempts to enable
 * write combining on the mapping and records which doorbell mechanisms are
 * usable in sc->doorbells.
 */
static int
map_bar_2(struct adapter *sc)
{

	/*
	 * T4: only iWARP driver uses the userspace doorbells.  There is no need
	 * to map it if RDMA is disabled.
	 */
	if (is_t4(sc) && sc->rdmacaps == 0)
		return (0);

	sc->udbs_rid = PCIR_BAR(2);
	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &sc->udbs_rid, RF_ACTIVE);
	if (sc->udbs_res == NULL) {
		device_printf(sc->dev, "cannot map doorbell BAR.\n");
		return (ENXIO);
	}
	sc->udbs_base = rman_get_virtual(sc->udbs_res);

	if (is_t5(sc)) {
		setbit(&sc->doorbells, DOORBELL_UDB);
#if defined(__i386__) || defined(__amd64__)
		if (t5_write_combine) {
			int rc;

			/*
			 * Enable write combining on BAR2.  This is the
			 * userspace doorbell BAR and is split into 128B
			 * (UDBS_SEG_SIZE) doorbell regions, each associated
			 * with an egress queue.  The first 64B has the doorbell
			 * and the second 64B can be used to submit a tx work
			 * request with an implicit doorbell.
			 */

			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
			if (rc == 0) {
				/* WC variants supersede the plain UDB. */
				clrbit(&sc->doorbells, DOORBELL_UDB);
				setbit(&sc->doorbells, DOORBELL_WCWR);
				setbit(&sc->doorbells, DOORBELL_UDBWC);
			} else {
				/* Non-fatal: fall back to plain UDB. */
				device_printf(sc->dev,
				    "couldn't enable write combining: %d\n",
				    rc);
			}

			t4_write_reg(sc, A_SGE_STAT_CFG,
			    V_STATSOURCE_T5(7) | V_STATMODE(0));
		}
#endif
	}

	return (0);
}
1476
/* Memory window layouts (base address, aperture size) per chip generation. */
static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};
1488
/*
 * Program the chip's PCIe memory-access windows from the per-chip memwin
 * table.  On T4 the window base must be the real BAR0 bus address; on T5 it
 * is an offset within the BAR.
 */
static void
setup_memwin(struct adapter *sc)
{
	const struct memwin *mw;
	int i, n;
	uint32_t bar0;

	if (is_t4(sc)) {
		/*
		 * Read low 32b of bar0 indirectly via the hardware backdoor
		 * mechanism.  Works from within PCI passthrough environments
		 * too, where rman_get_start() can return a different value.  We
		 * need to program the T4 memory window decoders with the actual
		 * addresses that will be coming across the PCIe link.
		 */
		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;

		mw = &t4_memwin[0];
		n = nitems(t4_memwin);
	} else {
		/* T5 uses the relative offset inside the PCIe BAR */
		bar0 = 0;

		mw = &t5_memwin[0];
		n = nitems(t5_memwin);
	}

	/* Program base and log2(aperture) for each window. */
	for (i = 0; i < n; i++, mw++) {
		t4_write_reg(sc,
		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
		    (mw->base + bar0) | V_BIR(0) |
		    V_WINDOW(ilog2(mw->aperture) - 10));
	}

	/* flush */
	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1527
1528/*
1529 * Verify that the memory range specified by the addr/len pair is valid and lies
1530 * entirely within a single region (EDCx or MCx).
1531 */
/*
 * Validate that [addr, addr + len) is 4B-aligned and falls entirely inside
 * one enabled memory region (EDC0/EDC1/MC/MC1).  Returns 0 on success,
 * EINVAL for bad alignment, EFAULT if no region contains the range.
 *
 * NOTE(review): addr + len is computed in 32 bits and could wrap for a range
 * ending at exactly 4GB -- confirm callers bound len appropriately.
 */
static int
validate_mem_range(struct adapter *sc, uint32_t addr, int len)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (addr & 3 || len & 3 || len == 0)
		return (EINVAL);

	/* Enabled memories */
	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if (em & F_EDRAM0_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;	/* BAR units are MB */
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EDRAM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	if (em & F_EXT_MEM_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}
	/* MC1 exists on T5+ only. */
	if (!is_t4(sc) && em & F_EXT_MEM1_ENABLE) {
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		if (mlen > 0 && addr >= maddr && addr < maddr + mlen &&
		    addr + len <= maddr + mlen)
			return (0);
	}

	return (EFAULT);
}
1578
1579/*
1580 * Verify that the memory range specified by the memtype/offset/len pair is
1581 * valid and lies entirely within the memtype specified. The global address of
1582 * the start of the range is returned in addr.
1583 */
/*
 * Validate that [off, off + len) is 4B-aligned and lies entirely within the
 * memory region identified by mtype.  On success stores the region-global
 * address of off in *addr and returns 0; otherwise EINVAL (bad alignment,
 * unknown/disabled mtype) or EFAULT (range outside the region).
 */
static int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (mtype) {
	case MEM_EDC0:
		if (!(em & F_EDRAM0_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
		maddr = G_EDRAM0_BASE(addr_len) << 20;	/* BAR units are MB */
		mlen = G_EDRAM0_SIZE(addr_len) << 20;
		break;
	case MEM_EDC1:
		if (!(em & F_EDRAM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
		maddr = G_EDRAM1_BASE(addr_len) << 20;
		mlen = G_EDRAM1_SIZE(addr_len) << 20;
		break;
	case MEM_MC:
		if (!(em & F_EXT_MEM_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
		maddr = G_EXT_MEM_BASE(addr_len) << 20;
		mlen = G_EXT_MEM_SIZE(addr_len) << 20;
		break;
	case MEM_MC1:
		/* MC1 exists on T5+ only. */
		if (is_t4(sc) || !(em & F_EXT_MEM1_ENABLE))
			return (EINVAL);
		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
		mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
		break;
	default:
		return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;	/* global address */
		return (0);
	}

	return (EFAULT);
}
1635
1636static void
1637memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1638{
1639 const struct memwin *mw;
1640
1641 if (is_t4(sc)) {
1642 KASSERT(win >= 0 && win < nitems(t4_memwin),
1643 ("%s: incorrect memwin# (%d)", __func__, win));
1644 mw = &t4_memwin[win];
1645 } else {
1646 KASSERT(win >= 0 && win < nitems(t5_memwin),
1647 ("%s: incorrect memwin# (%d)", __func__, win));
1648 mw = &t5_memwin[win];
1649 }
1650
1651 if (base != NULL)
1652 *base = mw->base;
1653 if (aperture != NULL)
1654 *aperture = mw->aperture;
1655}
1656
1657/*
1658 * Positions the memory window such that it can be used to access the specified
1659 * address in the chip's address space. The return value is the offset of addr
1660 * from the start of the window.
1661 */
static uint32_t
position_memwin(struct adapter *sc, int n, uint32_t addr)
{
	uint32_t start, pf;
	uint32_t reg;

	KASSERT(n >= 0 && n <= 3,
	    ("%s: invalid window %d.", __func__, n));
	KASSERT((addr & 3) == 0,
	    ("%s: addr (0x%x) is not at a 4B boundary.", __func__, addr));

	/* Window start alignment and PFNUM field differ by chip generation. */
	if (is_t4(sc)) {
		pf = 0;
		start = addr & ~0xf;	/* start must be 16B aligned */
	} else {
		pf = V_PFNUM(sc->pf);
		start = addr & ~0x7f;	/* start must be 128B aligned */
	}
	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);

	t4_write_reg(sc, reg, start | pf);
	t4_read_reg(sc, reg);	/* read back, same flush idiom as setup_memwin */

	return (addr - start);
}
1687
/*
 * Decide the interrupt type (MSI-X, MSI, or INTx, subject to t4_intr_types)
 * and the number of interrupt vectors and rx queues for n10g 10G ports and
 * n1g 1G ports, then actually allocate the vectors.  Tries progressively
 * less demanding layouts until one fits what the kernel will grant.  Results
 * are returned in *iaq; returns 0 or ENXIO if nothing usable was found.
 */
static int
cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq)
{
	int rc, itype, navail, nrxq10g, nrxq1g, n;
	int nofldrxq10g = 0, nofldrxq1g = 0;

	bzero(iaq, sizeof(*iaq));

	/* Start from the tunable-configured queue counts. */
	iaq->ntxq10g = t4_ntxq10g;
	iaq->ntxq1g = t4_ntxq1g;
	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		iaq->nofldtxq10g = t4_nofldtxq10g;
		iaq->nofldtxq1g = t4_nofldtxq1g;
		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
	}
#endif

	/* Try MSI-X first, then MSI, then INTx (most to least capable). */
	for (itype = INTR_MSIX; itype; itype >>= 1) {

		if ((itype & t4_intr_types) == 0)
			continue;	/* not allowed */

		if (itype == INTR_MSIX)
			navail = pci_msix_count(sc->dev);
		else if (itype == INTR_MSI)
			navail = pci_msi_count(sc->dev);
		else
			navail = 1;
restart:
		if (navail == 0)
			continue;

		iaq->intr_type = itype;
		iaq->intr_flags = 0;

		/*
		 * Best option: an interrupt vector for errors, one for the
		 * firmware event queue, and one each for each rxq (NIC as well
		 * as offload).
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
		/* MSI requires a power-of-2 vector count. */
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
			iaq->intr_flags |= INTR_DIRECT;
			goto allocate;
		}

		/*
		 * Second best option: an interrupt vector for errors, one for
		 * the firmware event queue, and one each for either NIC or
		 * offload rxq's.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
		if (iaq->nirq <= navail &&
		    (itype != INTR_MSI || powerof2(iaq->nirq)))
			goto allocate;

		/*
		 * Next best option: an interrupt vector for errors, one for the
		 * firmware event queue, and at least one per port.  At this
		 * point we know we'll have to downsize nrxq or nofldrxq to fit
		 * what's available to us.
		 */
		iaq->nirq = T4_EXTRA_INTR;
		iaq->nirq += n10g + n1g;
		if (iaq->nirq <= navail) {
			int leftover = navail - iaq->nirq;

			if (n10g > 0) {
				int target = max(nrxq10g, nofldrxq10g);

				/* Grow per-port queue count while vectors
				 * remain for every 10G port. */
				n = 1;
				while (n < target && leftover >= n10g) {
					leftover -= n10g;
					iaq->nirq += n10g;
					n++;
				}
				iaq->nrxq10g = min(n, nrxq10g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq10g = min(n, nofldrxq10g);
#endif
			}

			if (n1g > 0) {
				int target = max(nrxq1g, nofldrxq1g);

				n = 1;
				while (n < target && leftover >= n1g) {
					leftover -= n1g;
					iaq->nirq += n1g;
					n++;
				}
				iaq->nrxq1g = min(n, nrxq1g);
#ifdef TCP_OFFLOAD
				if (is_offload(sc))
					iaq->nofldrxq1g = min(n, nofldrxq1g);
#endif
			}

			if (itype != INTR_MSI || powerof2(iaq->nirq))
				goto allocate;
		}

		/*
		 * Least desirable option: one interrupt vector for everything.
		 */
		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
#ifdef TCP_OFFLOAD
		if (is_offload(sc))
			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
#endif

allocate:
		navail = iaq->nirq;
		rc = 0;
		if (itype == INTR_MSIX)
			rc = pci_alloc_msix(sc->dev, &navail);
		else if (itype == INTR_MSI)
			rc = pci_alloc_msi(sc->dev, &navail);

		if (rc == 0) {
			if (navail == iaq->nirq)
				return (0);

			/*
			 * Didn't get the number requested.  Use whatever number
			 * the kernel is willing to allocate (it's in navail).
			 */
			device_printf(sc->dev, "fewer vectors than requested, "
			    "type=%d, req=%d, rcvd=%d; will downshift req.\n",
			    itype, iaq->nirq, navail);
			pci_release_msi(sc->dev);
			goto restart;
		}

		device_printf(sc->dev,
		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
		    itype, rc, iaq->nirq, navail);
	}

	device_printf(sc->dev,
	    "failed to find a usable interrupt type.  "
	    "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));

	return (ENXIO);
}
1845
/* Pack the compiled-against firmware version into a 32-bit word. */
#define FW_VERSION(chip) ( \
    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
/* Interface version for a given uld (NIC, VNIC, OFLD, ...) of a chip's fw. */
#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)

/*
 * Per-chip firmware information: the KLD/module names to load and the
 * firmware header the driver was compiled against (used for compatibility
 * checks in fw_compatible/prep_firmware).
 */
struct fw_info {
	uint8_t chip;
	char *kld_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
} fw_info[] = {
	{
		.chip = CHELSIO_T4,
		.kld_name = "t4fw_cfg",
		.fw_mod_name = "t4fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = htobe32_const(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ofld = FW_INTFVER(T4, OFLD),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.kld_name = "t5fw_cfg",
		.fw_mod_name = "t5fw",
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = htobe32_const(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ofld = FW_INTFVER(T5, OFLD),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
1893
1894static struct fw_info *
1895find_fw_info(int chip)
1896{
1897 int i;
1898
1899 for (i = 0; i < nitems(fw_info); i++) {
1900 if (fw_info[i].chip == chip)
1901 return (&fw_info[i]);
1902 }
1903 return (NULL);
1904}
1905
1906/*
1907 * Is the given firmware API compatible with the one the driver was compiled
1908 * with?
1909 */
1910static int
1911fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
1912{
1913
1914 /* short circuit if it's the exact same firmware version */
1915 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
1916 return (1);
1917
1918 /*
1919 * XXX: Is this too conservative? Perhaps I should limit this to the
1920 * features that are supported in the driver.
1921 */
1922#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
1923 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
1924 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
1925 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
1926 return (1);
1927#undef SAME_INTF
1928
1929 return (0);
1930}
1931
1932/*
1933 * The firmware in the KLD is usable, but should it be installed? This routine
1934 * explains itself in detail if it indicates the KLD firmware should be
1935 * installed.
1936 */
1937static int
1938should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
1939{
1940 const char *reason;
1941
1942 if (!card_fw_usable) {
1943 reason = "incompatible or unusable";
1944 goto install;
1945 }
1946
1947 if (k > c) {
1948 reason = "older than the version bundled with this driver";
1949 goto install;
1950 }
1951
1952 if (t4_fw_install == 2 && k != c) {
1953 reason = "different than the version bundled with this driver";
1954 goto install;
1955 }
1956
1957 return (0);
1958
1959install:
1960 if (t4_fw_install == 0) {
1961 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1962 "but the driver is prohibited from installing a different "
1963 "firmware on the card.\n",
1964 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1965 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
1966
1967 return (0);
1968 }
1969
1970 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
1971 "installing firmware %u.%u.%u.%u on card.\n",
1972 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
1973 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
1974 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
1975 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
1976
1977 return (1);
1978}
1979/*
1980 * Establish contact with the firmware and determine if we are the master driver
1981 * or not, and whether we are responsible for chip initialization.
1982 */
static int
prep_firmware(struct adapter *sc)
{
	const struct firmware *fw = NULL, *default_cfg;
	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
	enum dev_state state;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;		/* fw on the card */
	const struct fw_hdr *kld_fw;	/* fw in the KLD */
	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
					   against */

	/* Contact firmware. */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
	if (rc < 0 || state == DEV_STATE_ERR) {
		rc = -rc;
		device_printf(sc->dev,
		    "failed to connect to the firmware: %d, %d.\n", rc, state);
		return (rc);
	}
	/* On success t4_fw_hello returns the mbox of the master PF. */
	pf = rc;
	if (pf == sc->mbox)
		sc->flags |= MASTER_PF;
	else if (state == DEV_STATE_UNINIT) {
		/*
		 * We didn't get to be the master so we definitely won't be
		 * configuring the chip.  It's a bug if someone else hasn't
		 * configured it already.
		 */
		device_printf(sc->dev, "couldn't be master(%d), "
		    "device not already initialized either(%d).\n", rc, state);
		return (EDOOFUS);
	}

	/* This is the firmware whose headers the driver was compiled against */
	fw_info = find_fw_info(chip_id(sc));
	if (fw_info == NULL) {
		device_printf(sc->dev,
		    "unable to look up firmware information for chip %d.\n",
		    chip_id(sc));
		return (EINVAL);
	}
	drv_fw = &fw_info->fw_hdr;

	/*
	 * The firmware KLD contains many modules.  The KLD name is also the
	 * name of the module that contains the default config file.
	 */
	default_cfg = firmware_get(fw_info->kld_name);

	/* Read the header of the firmware on the card */
	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_read_flash(sc, FLASH_FW_START,
	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
	if (rc == 0)
		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
	else {
		device_printf(sc->dev,
		    "Unable to read card's firmware header: %d\n", rc);
		card_fw_usable = 0;
	}

	/* This is the firmware in the KLD */
	fw = firmware_get(fw_info->fw_mod_name);
	if (fw != NULL) {
		kld_fw = (const void *)fw->data;
		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
	} else {
		kld_fw = NULL;
		kld_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
		/*
		 * Common case: the firmware on the card is an exact match and
		 * the KLD is an exact match too, or the KLD is
		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
		 * here -- use cxgbetool loadfw if you want to reinstall the
		 * same firmware as the one on the card.
		 */
	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
	    be32toh(card_fw->fw_ver))) {

		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
		if (rc != 0) {
			device_printf(sc->dev,
			    "failed to install firmware: %d\n", rc);
			goto done;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, kld_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		need_fw_reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = ntohl(drv_fw->fw_ver);
		c = ntohl(card_fw->fw_ver);
		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;

		device_printf(sc->dev, "Cannot find a usable firmware: "
		    "fw_install %d, chip state %d, "
		    "driver compiled with %d.%d.%d.%d, "
		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
		    t4_fw_install, state,
		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
		rc = EINVAL;
		goto done;
	}

	/* We're using whatever's on the card and it's known to be good. */
	sc->params.fw_vers = ntohl(card_fw->fw_ver);
	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
	t4_get_tp_version(sc, &sc->params.tp_vers);

	/* Reset device */
	if (need_fw_reset &&
	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
		if (rc != ETIMEDOUT && rc != EIO)
			t4_fw_bye(sc, sc->mbox);
		goto done;
	}
	sc->flags |= FW_OK;

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/* Partition adapter resources as specified in the config file. */
	if (state == DEV_STATE_UNINIT) {

		KASSERT(sc->flags & MASTER_PF,
		    ("%s: trying to change chip settings when not master.",
		    __func__));

		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
		if (rc != 0)
			goto done;	/* error message displayed already */

		t4_tweak_chip_settings(sc);

		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			device_printf(sc->dev, "fw init failed: %d.\n", rc);
			goto done;
		}
	} else {
		/* Chip already configured by the master PF. */
		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
		sc->cfcsum = 0;
	}

done:
	free(card_fw, M_CXGBE);
	if (fw != NULL)
		firmware_put(fw, FIRMWARE_UNLOAD);
	if (default_cfg != NULL)
		firmware_put(default_cfg, FIRMWARE_UNLOAD);

	return (rc);
}
2159
/*
 * Build the 32-bit "param" selector word used with t4_query_params /
 * t4_set_params: FW_PARAM_DEV for device-wide firmware parameters,
 * FW_PARAM_PFVF for per-PF/VF parameters.  #undef'd again once the
 * get/set_params functions below are done with them.
 */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
2166
2167/*
2168 * Partition chip resources for use between various PFs, VFs, etc.
2169 */
2170static int
2171partition_resources(struct adapter *sc, const struct firmware *default_cfg,
2172 const char *name_prefix)
2173{
2174 const struct firmware *cfg = NULL;
2175 int rc = 0;
2176 struct fw_caps_config_cmd caps;
2177 uint32_t mtype, moff, finicsum, cfcsum;
2178
2179 /*
2180 * Figure out what configuration file to use. Pick the default config
2181 * file for the card if the user hasn't specified one explicitly.
2182 */
2183 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
2184 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
2185 /* Card specific overrides go here. */
2186 if (pci_get_device(sc->dev) == 0x440a)
2187 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
2188 if (is_fpga(sc))
2189 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
2190 }
2191
2192 /*
2193 * We need to load another module if the profile is anything except
2194 * "default" or "flash".
2195 */
2196 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2197 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2198 char s[32];
2199
2200 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2201 cfg = firmware_get(s);
2202 if (cfg == NULL) {
2203 if (default_cfg != NULL) {
2204 device_printf(sc->dev,
2205 "unable to load module \"%s\" for "
2206 "configuration profile \"%s\", will use "
2207 "the default config file instead.\n",
2208 s, sc->cfg_file);
2209 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2210 "%s", DEFAULT_CF);
2211 } else {
2212 device_printf(sc->dev,
2213 "unable to load module \"%s\" for "
2214 "configuration profile \"%s\", will use "
2215 "the config file on the card's flash "
2216 "instead.\n", s, sc->cfg_file);
2217 snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2218 "%s", FLASH_CF);
2219 }
2220 }
2221 }
2222
2223 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2224 default_cfg == NULL) {
2225 device_printf(sc->dev,
2226 "default config file not available, will use the config "
2227 "file on the card's flash instead.\n");
2228 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2229 }
2230
2231 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2232 u_int cflen, i, n;
2233 const uint32_t *cfdata;
2234 uint32_t param, val, addr, off, mw_base, mw_aperture;
2235
2236 KASSERT(cfg != NULL || default_cfg != NULL,
2237 ("%s: no config to upload", __func__));
2238
2239 /*
2240 * Ask the firmware where it wants us to upload the config file.
2241 */
2242 param = FW_PARAM_DEV(CF);
2243 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2244 if (rc != 0) {
2245 /* No support for config file? Shouldn't happen. */
2246 device_printf(sc->dev,
2247 "failed to query config file location: %d.\n", rc);
2248 goto done;
2249 }
2250 mtype = G_FW_PARAMS_PARAM_Y(val);
2251 moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2252
2253 /*
2254 * XXX: sheer laziness. We deliberately added 4 bytes of
2255 * useless stuffing/comments at the end of the config file so
2256 * it's ok to simply throw away the last remaining bytes when
2257 * the config file is not an exact multiple of 4. This also
2258 * helps with the validate_mt_off_len check.
2259 */
2260 if (cfg != NULL) {
2261 cflen = cfg->datasize & ~3;
2262 cfdata = cfg->data;
2263 } else {
2264 cflen = default_cfg->datasize & ~3;
2265 cfdata = default_cfg->data;
2266 }
2267
2268 if (cflen > FLASH_CFG_MAX_SIZE) {
2269 device_printf(sc->dev,
2270 "config file too long (%d, max allowed is %d). "
2271 "Will try to use the config on the card, if any.\n",
2272 cflen, FLASH_CFG_MAX_SIZE);
2273 goto use_config_on_flash;
2274 }
2275
2276 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2277 if (rc != 0) {
2278 device_printf(sc->dev,
2279 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
2280 "Will try to use the config on the card, if any.\n",
2281 __func__, mtype, moff, cflen, rc);
2282 goto use_config_on_flash;
2283 }
2284
2285 memwin_info(sc, 2, &mw_base, &mw_aperture);
2286 while (cflen) {
2287 off = position_memwin(sc, 2, addr);
2288 n = min(cflen, mw_aperture - off);
2289 for (i = 0; i < n; i += 4)
2290 t4_write_reg(sc, mw_base + off + i, *cfdata++);
2291 cflen -= n;
2292 addr += n;
2293 }
2294 } else {
2295use_config_on_flash:
2296 mtype = FW_MEMTYPE_CF_FLASH;
2297 moff = t4_flash_cfg_addr(sc);
2298 }
2299
2300 bzero(&caps, sizeof(caps));
2301 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2302 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2303 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
2304 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
2305 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
2306 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2307 if (rc != 0) {
2308 device_printf(sc->dev,
2309 "failed to pre-process config file: %d "
2310 "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
2311 goto done;
2312 }
2313
2314 finicsum = be32toh(caps.finicsum);
2315 cfcsum = be32toh(caps.cfcsum);
2316 if (finicsum != cfcsum) {
2317 device_printf(sc->dev,
2318 "WARNING: config file checksum mismatch: %08x %08x\n",
2319 finicsum, cfcsum);
2320 }
2321 sc->cfcsum = cfcsum;
2322
2323#define LIMIT_CAPS(x) do { \
2324 caps.x &= htobe16(t4_##x##_allowed); \
2325 sc->x = htobe16(caps.x); \
2326} while (0)
2327
2328 /*
2329 * Let the firmware know what features will (not) be used so it can tune
2330 * things accordingly.
2331 */
2332 LIMIT_CAPS(linkcaps);
2333 LIMIT_CAPS(niccaps);
2334 LIMIT_CAPS(toecaps);
2335 LIMIT_CAPS(rdmacaps);
2336 LIMIT_CAPS(iscsicaps);
2337 LIMIT_CAPS(fcoecaps);
2338#undef LIMIT_CAPS
2339
2340 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2341 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
2342 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2343 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL);
2344 if (rc != 0) {
2345 device_printf(sc->dev,
2346 "failed to process config file: %d.\n", rc);
2347 }
2348done:
2349 if (cfg != NULL)
2350 firmware_put(cfg, FIRMWARE_UNLOAD);
2351 return (rc);
2352}
2353
2354/*
2355 * Retrieve parameters that are needed (or nice to have) very early.
2356 */
2357static int
2358get_params__pre_init(struct adapter *sc)
2359{
2360 int rc;
2361 uint32_t param[2], val[2];
2362 struct fw_devlog_cmd cmd;
2363 struct devlog_params *dlog = &sc->params.devlog;
2364
2365 param[0] = FW_PARAM_DEV(PORTVEC);
2366 param[1] = FW_PARAM_DEV(CCLK);
2367 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2368 if (rc != 0) {
2369 device_printf(sc->dev,
2370 "failed to query parameters (pre_init): %d.\n", rc);
2371 return (rc);
2372 }
2373
2374 sc->params.portvec = val[0];
2375 sc->params.nports = bitcount32(val[0]);
2376 sc->params.vpd.cclk = val[1];
2377
2378 /* Read device log parameters. */
2379 bzero(&cmd, sizeof(cmd));
2380 cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
2381 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2382 cmd.retval_len16 = htobe32(FW_LEN16(cmd));
2383 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
2384 if (rc != 0) {
2385 device_printf(sc->dev,
2386 "failed to get devlog parameters: %d.\n", rc);
2387 bzero(dlog, sizeof (*dlog));
2388 rc = 0; /* devlog isn't critical for device operation */
2389 } else {
2390 val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
2391 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
2392 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
2393 dlog->size = be32toh(cmd.memsize_devlog);
2394 }
2395
2396 return (rc);
2397}
2398
2399/*
2400 * Retrieve various parameters that are of interest to the driver. The device
2401 * has been initialized by the firmware at this point.
2402 */
2403static int
2404get_params__post_init(struct adapter *sc)
2405{
2406 int rc;
2407 uint32_t param[7], val[7];
2408 struct fw_caps_config_cmd caps;
2409
2410 param[0] = FW_PARAM_PFVF(IQFLINT_START);
2411 param[1] = FW_PARAM_PFVF(EQ_START);
2412 param[2] = FW_PARAM_PFVF(FILTER_START);
2413 param[3] = FW_PARAM_PFVF(FILTER_END);
2414 param[4] = FW_PARAM_PFVF(L2T_START);
2415 param[5] = FW_PARAM_PFVF(L2T_END);
2416 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2417 if (rc != 0) {
2418 device_printf(sc->dev,
2419 "failed to query parameters (post_init): %d.\n", rc);
2420 return (rc);
2421 }
2422
2423 sc->sge.iq_start = val[0];
2424 sc->sge.eq_start = val[1];
2425 sc->tids.ftid_base = val[2];
2426 sc->tids.nftids = val[3] - val[2] + 1;
2427 sc->vres.l2t.start = val[4];
2428 sc->vres.l2t.size = val[5] - val[4] + 1;
2429 KASSERT(sc->vres.l2t.size <= L2T_SIZE,
2430 ("%s: L2 table size (%u) larger than expected (%u)",
2431 __func__, sc->vres.l2t.size, L2T_SIZE));
2432
2433 /* get capabilites */
2434 bzero(&caps, sizeof(caps));
2435 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2436 F_FW_CMD_REQUEST | F_FW_CMD_READ);
2437 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
2438 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
2439 if (rc != 0) {
2440 device_printf(sc->dev,
2441 "failed to get card capabilities: %d.\n", rc);
2442 return (rc);
2443 }
2444
2445 if (caps.toecaps) {
2446 /* query offload-related parameters */
2447 param[0] = FW_PARAM_DEV(NTID);
2448 param[1] = FW_PARAM_PFVF(SERVER_START);
2449 param[2] = FW_PARAM_PFVF(SERVER_END);
2450 param[3] = FW_PARAM_PFVF(TDDP_START);
2451 param[4] = FW_PARAM_PFVF(TDDP_END);
2452 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
2453 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2454 if (rc != 0) {
2455 device_printf(sc->dev,
2456 "failed to query TOE parameters: %d.\n", rc);
2457 return (rc);
2458 }
2459 sc->tids.ntids = val[0];
2460 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
2461 sc->tids.stid_base = val[1];
2462 sc->tids.nstids = val[2] - val[1] + 1;
2463 sc->vres.ddp.start = val[3];
2464 sc->vres.ddp.size = val[4] - val[3] + 1;
2465 sc->params.ofldq_wr_cred = val[5];
2466 sc->params.offload = 1;
2467 }
2468 if (caps.rdmacaps) {
2469 param[0] = FW_PARAM_PFVF(STAG_START);
2470 param[1] = FW_PARAM_PFVF(STAG_END);
2471 param[2] = FW_PARAM_PFVF(RQ_START);
2472 param[3] = FW_PARAM_PFVF(RQ_END);
2473 param[4] = FW_PARAM_PFVF(PBL_START);
2474 param[5] = FW_PARAM_PFVF(PBL_END);
2475 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2476 if (rc != 0) {
2477 device_printf(sc->dev,
2478 "failed to query RDMA parameters(1): %d.\n", rc);
2479 return (rc);
2480 }
2481 sc->vres.stag.start = val[0];
2482 sc->vres.stag.size = val[1] - val[0] + 1;
2483 sc->vres.rq.start = val[2];
2484 sc->vres.rq.size = val[3] - val[2] + 1;
2485 sc->vres.pbl.start = val[4];
2486 sc->vres.pbl.size = val[5] - val[4] + 1;
2487
2488 param[0] = FW_PARAM_PFVF(SQRQ_START);
2489 param[1] = FW_PARAM_PFVF(SQRQ_END);
2490 param[2] = FW_PARAM_PFVF(CQ_START);
2491 param[3] = FW_PARAM_PFVF(CQ_END);
2492 param[4] = FW_PARAM_PFVF(OCQ_START);
2493 param[5] = FW_PARAM_PFVF(OCQ_END);
2494 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
2495 if (rc != 0) {
2496 device_printf(sc->dev,
2497 "failed to query RDMA parameters(2): %d.\n", rc);
2498 return (rc);
2499 }
2500 sc->vres.qp.start = val[0];
2501 sc->vres.qp.size = val[1] - val[0] + 1;
2502 sc->vres.cq.start = val[2];
2503 sc->vres.cq.size = val[3] - val[2] + 1;
2504 sc->vres.ocq.start = val[4];
2505 sc->vres.ocq.size = val[5] - val[4] + 1;
2506 }
2507 if (caps.iscsicaps) {
2508 param[0] = FW_PARAM_PFVF(ISCSI_START);
2509 param[1] = FW_PARAM_PFVF(ISCSI_END);
2510 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
2511 if (rc != 0) {
2512 device_printf(sc->dev,
2513 "failed to query iSCSI parameters: %d.\n", rc);
2514 return (rc);
2515 }
2516 sc->vres.iscsi.start = val[0];
2517 sc->vres.iscsi.size = val[1] - val[0] + 1;
2518 }
2519
2520 /*
2521 * We've got the params we wanted to query via the firmware. Now grab
2522 * some others directly from the chip.
2523 */
2524 rc = t4_read_chip_settings(sc);
2525
2526 return (rc);
2527}
2528
2529static int
2530set_params__post_init(struct adapter *sc)
2531{
2532 uint32_t param, val;
2533
2534 /* ask for encapsulated CPLs */
2535 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
2536 val = 1;
2537 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2538
2539 return (0);
2540}
2541
2542#undef FW_PARAM_PFVF
2543#undef FW_PARAM_DEV
2544
2545static void
2546t4_set_desc(struct adapter *sc)
2547{
2548 char buf[128];
2549 struct adapter_params *p = &sc->params;
2550
2551 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, "
2552 "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ? "R" : "",
2553 chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec);
2554
2555 device_set_desc_copy(sc->dev, buf);
2556}
2557
/*
 * (Re)build the ifmedia list for a port based on its port type and the
 * transceiver/module currently plugged in.  The "data" word stored with each
 * media entry encodes the (port_type, mod_type) pair that produced it.
 * Called with the port unlocked; takes the port lock itself.
 */
static void
build_medialist(struct port_info *pi)
{
	struct ifmedia *media = &pi->media;
	int data, m;

	PORT_LOCK(pi);

	/* Start from an empty media list every time. */
	ifmedia_removeall(media);

	m = IFM_ETHER | IFM_FDX;
	data = (pi->port_type << 8) | pi->mod_type;

	switch(pi->port_type) {
	case FW_PORT_TYPE_BT_XFI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		break;

	case FW_PORT_TYPE_BT_XAUI:
		ifmedia_add(media, m | IFM_10G_T, data, NULL);
		/* fall through */

	case FW_PORT_TYPE_BT_SGMII:
		/* BT ports also support 1G/100M and autoselect. */
		ifmedia_add(media, m | IFM_1000_T, data, NULL);
		ifmedia_add(media, m | IFM_100_TX, data, NULL);
		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
		break;

	case FW_PORT_TYPE_CX4:
		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);
		break;

	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
		/* 10G optical/DAC: media depends on the inserted module. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
			ifmedia_set(media, m | IFM_10G_LR);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
			ifmedia_set(media, m | IFM_10G_SR);
			break;

		case FW_PORT_MOD_TYPE_LRM:
			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
			ifmedia_set(media, m | IFM_10G_LRM);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
			ifmedia_set(media, m | IFM_10G_TWINAX);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			/* Empty cage: advertise "none", clear full-duplex. */
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		case FW_PORT_MOD_TYPE_NA:
		case FW_PORT_MOD_TYPE_ER:
		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	case FW_PORT_TYPE_QSFP:
		/* 40G QSFP: media depends on the inserted module. */
		switch (pi->mod_type) {

		case FW_PORT_MOD_TYPE_LR:
			ifmedia_add(media, m | IFM_40G_LR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_LR4);
			break;

		case FW_PORT_MOD_TYPE_SR:
			ifmedia_add(media, m | IFM_40G_SR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_SR4);
			break;

		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
			ifmedia_add(media, m | IFM_40G_CR4, data, NULL);
			ifmedia_set(media, m | IFM_40G_CR4);
			break;

		case FW_PORT_MOD_TYPE_NONE:
			m &= ~IFM_FDX;
			ifmedia_add(media, m | IFM_NONE, data, NULL);
			ifmedia_set(media, m | IFM_NONE);
			break;

		default:
			device_printf(pi->dev,
			    "unknown port_type (%d), mod_type (%d)\n",
			    pi->port_type, pi->mod_type);
			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
			ifmedia_set(media, m | IFM_UNKNOWN);
			break;
		}
		break;

	default:
		device_printf(pi->dev,
		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
		    pi->mod_type);
		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
		ifmedia_set(media, m | IFM_UNKNOWN);
		break;
	}

	PORT_UNLOCK(pi);
}
2682
2683#define FW_MAC_EXACT_CHUNK 7
2684
2685/*
2686 * Program the port's XGMAC based on parameters in ifnet. The caller also
2687 * indicates which parameters should be programmed (the rest are left alone).
2688 */
2689static int
2690update_mac_settings(struct port_info *pi, int flags)
2691{
2692 int rc;
2693 struct ifnet *ifp = pi->ifp;
2694 struct adapter *sc = pi->adapter;
2695 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
2696
2697 ASSERT_SYNCHRONIZED_OP(sc);
2698 KASSERT(flags, ("%s: not told what to update.", __func__));
2699
2700 if (flags & XGMAC_MTU)
2701 mtu = ifp->if_mtu;
2702
2703 if (flags & XGMAC_PROMISC)
2704 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0;
2705
2706 if (flags & XGMAC_ALLMULTI)
2707 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0;
2708
2709 if (flags & XGMAC_VLANEX)
2710 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
2711
2712 rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
2713 vlanex, false);
2714 if (rc) {
2715 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
2716 return (rc);
2717 }
2718
2719 if (flags & XGMAC_UCADDR) {
2720 uint8_t ucaddr[ETHER_ADDR_LEN];
2721
2722 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
2723 rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
2724 ucaddr, true, true);
2725 if (rc < 0) {
2726 rc = -rc;
2727 if_printf(ifp, "change_mac failed: %d\n", rc);
2728 return (rc);
2729 } else {
2730 pi->xact_addr_filt = rc;
2731 rc = 0;
2732 }
2733 }
2734
2735 if (flags & XGMAC_MCADDRS) {
2736 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK];
2737 int del = 1;
2738 uint64_t hash = 0;
2739 struct ifmultiaddr *ifma;
2740 int i = 0, j;
2741
2742 if_maddr_rlock(ifp);
2743 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2744 if (ifma->ifma_addr->sa_family != AF_LINK)
2745 continue;
2746 mcaddr[i++] =
2747 LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
2748
2749 if (i == FW_MAC_EXACT_CHUNK) {
2750 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2751 del, i, mcaddr, NULL, &hash, 0);
2752 if (rc < 0) {
2753 rc = -rc;
2754 for (j = 0; j < i; j++) {
2755 if_printf(ifp,
2756 "failed to add mc address"
2757 " %02x:%02x:%02x:"
2758 "%02x:%02x:%02x rc=%d\n",
2759 mcaddr[j][0], mcaddr[j][1],
2760 mcaddr[j][2], mcaddr[j][3],
2761 mcaddr[j][4], mcaddr[j][5],
2762 rc);
2763 }
2764 goto mcfail;
2765 }
2766 del = 0;
2767 i = 0;
2768 }
2769 }
2770 if (i > 0) {
2771 rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
2772 del, i, mcaddr, NULL, &hash, 0);
2773 if (rc < 0) {
2774 rc = -rc;
2775 for (j = 0; j < i; j++) {
2776 if_printf(ifp,
2777 "failed to add mc address"
2778 " %02x:%02x:%02x:"
2779 "%02x:%02x:%02x rc=%d\n",
2780 mcaddr[j][0], mcaddr[j][1],
2781 mcaddr[j][2], mcaddr[j][3],
2782 mcaddr[j][4], mcaddr[j][5],
2783 rc);
2784 }
2785 goto mcfail;
2786 }
2787 }
2788
2789 rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
2790 if (rc != 0)
2791 if_printf(ifp, "failed to set mc address hash: %d", rc);
2792mcfail:
2793 if_maddr_runlock(ifp);
2794 }
2795
2796 return (rc);
2797}
2798
2799int
2800begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
2801 char *wmesg)
2802{
2803 int rc, pri;
2804
2805#ifdef WITNESS
2806 /* the caller thinks it's ok to sleep, but is it really? */
2807 if (flags & SLEEP_OK)
2808 pause("t4slptst", 1);
2809#endif
2810
2811 if (INTR_OK)
2812 pri = PCATCH;
2813 else
2814 pri = 0;
2815
2816 ADAPTER_LOCK(sc);
2817 for (;;) {
2818
2819 if (pi && IS_DOOMED(pi)) {
2820 rc = ENXIO;
2821 goto done;
2822 }
2823
2824 if (!IS_BUSY(sc)) {
2825 rc = 0;
2826 break;
2827 }
2828
2829 if (!(flags & SLEEP_OK)) {
2830 rc = EBUSY;
2831 goto done;
2832 }
2833
2834 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
2835 rc = EINTR;
2836 goto done;
2837 }
2838 }
2839
2840 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
2841 SET_BUSY(sc);
2842#ifdef INVARIANTS
2843 sc->last_op = wmesg;
2844 sc->last_op_thr = curthread;
2845#endif
2846
2847done:
2848 if (!(flags & HOLD_LOCK) || rc)
2849 ADAPTER_UNLOCK(sc);
2850
2851 return (rc);
2852}
2853
/*
 * Release the busy marker taken by begin_synchronized_op and wake up any
 * threads sleeping there.  LOCK_HELD means the caller still holds the
 * adapter lock (it used HOLD_LOCK); the lock is dropped on return either
 * way.
 */
void
end_synchronized_op(struct adapter *sc, int flags)
{

	if (flags & LOCK_HELD)
		ADAPTER_LOCK_ASSERT_OWNED(sc);
	else
		ADAPTER_LOCK(sc);

	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
	CLR_BUSY(sc);
	wakeup(&sc->flags);	/* sleepers wait on &sc->flags */
	ADAPTER_UNLOCK(sc);
}
2868
/*
 * Bring a port's interface up: full adapter/port init if not yet done, MAC
 * programming, link start, and VI enable.  Must be called from within a
 * synchronized op.  On any failure the port is torn back down via
 * cxgbe_uninit_synchronized.  Returns 0 or a positive errno.
 */
static int
cxgbe_init_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	int rc = 0;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (isset(&sc->open_device_map, pi->port_id)) {
		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("mismatch between open_device_map and if_drv_flags"));
		return (0);	/* already running */
	}

	/* One-time adapter-wide and per-port initialization. */
	if (!(sc->flags & FULL_INIT_DONE) &&
	    ((rc = adapter_full_init(sc)) != 0))
		return (rc);	/* error message displayed already */

	if (!(pi->flags & PORT_INIT_DONE) &&
	    ((rc = port_full_init(pi)) != 0))
		return (rc);	/* error message displayed already */

	rc = update_mac_settings(pi, XGMAC_ALL);
	if (rc)
		goto done;	/* error message displayed already */

	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
	if (rc != 0) {
		if_printf(ifp, "start_link failed: %d\n", rc);
		goto done;
	}

	/* Enable the virtual interface for rx and tx. */
	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
	if (rc != 0) {
		if_printf(ifp, "enable_vi failed: %d\n", rc);
		goto done;
	}

	/*
	 * The first iq of the first port to come up is used for tracing.
	 */
	if (sc->traceq < 0) {
		sc->traceq = sc->sge.rxq[pi->first_rxq].iq.abs_id;
		t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
		    V_QUEUENUMBER(sc->traceq));
		pi->flags |= HAS_TRACEQ;
	}

	/* all ok */
	setbit(&sc->open_device_map, pi->port_id);
	PORT_LOCK(pi);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	PORT_UNLOCK(pi);

	/* Start the 1Hz housekeeping tick for this port. */
	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
done:
	if (rc != 0)
		cxgbe_uninit_synchronized(pi);

	return (rc);
}
2932
2933/*
2934 * Idempotent.
2935 */
2936static int
2937cxgbe_uninit_synchronized(struct port_info *pi)
2938{
2939 struct adapter *sc = pi->adapter;
2940 struct ifnet *ifp = pi->ifp;
2941 int rc;
2942
2943 ASSERT_SYNCHRONIZED_OP(sc);
2944
2945 /*
2946 * Disable the VI so that all its data in either direction is discarded
2947 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz
2948 * tick) intact as the TP can deliver negative advice or data that it's
2949 * holding in its RAM (for an offloaded connection) even after the VI is
2950 * disabled.
2951 */
2952 rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
2953 if (rc) {
2954 if_printf(ifp, "disable_vi failed: %d\n", rc);
2955 return (rc);
2956 }
2957
2958 clrbit(&sc->open_device_map, pi->port_id);
2959 PORT_LOCK(pi);
2960 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2961 PORT_UNLOCK(pi);
2962
2963 pi->link_cfg.link_ok = 0;
2964 pi->link_cfg.speed = 0;
2965 pi->linkdnrc = -1;
2966 t4_os_link_changed(sc, pi->port_id, 0, -1);
2967
2968 return (0);
2969}
2970
2971/*
2972 * It is ok for this function to fail midway and return right away. t4_detach
2973 * will walk the entire sc->irq list and clean up whatever is valid.
2974 */
2975static int
2976setup_intr_handlers(struct adapter *sc)
2977{
2978 int rc, rid, p, q;
2979 char s[8];
2980 struct irq *irq;
2981 struct port_info *pi;
2982 struct sge_rxq *rxq;
2983#ifdef TCP_OFFLOAD
2984 struct sge_ofld_rxq *ofld_rxq;
2985#endif
2986
2987 /*
2988 * Setup interrupts.
2989 */
2990 irq = &sc->irq[0];
2991 rid = sc->intr_type == INTR_INTX ? 0 : 1;
2992 if (sc->intr_count == 1) {
2993 KASSERT(!(sc->flags & INTR_DIRECT),
2994 ("%s: single interrupt && INTR_DIRECT?", __func__));
2995
2996 rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
2997 if (rc != 0)
2998 return (rc);
2999 } else {
3000 /* Multiple interrupts. */
3001 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
3002 ("%s: too few intr.", __func__));
3003
3004 /* The first one is always error intr */
3005 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
3006 if (rc != 0)
3007 return (rc);
3008 irq++;
3009 rid++;
3010
3011 /* The second one is always the firmware event queue */
3012 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
3013 "evt");
3014 if (rc != 0)
3015 return (rc);
3016 irq++;
3017 rid++;
3018
3019 /*
3020 * Note that if INTR_DIRECT is not set then either the NIC rx
3021 * queues or (exclusive or) the TOE rx queueus will be taking
3022 * direct interrupts.
3023 *
3024 * There is no need to check for is_offload(sc) as nofldrxq
3025 * will be 0 if offload is disabled.
3026 */
3027 for_each_port(sc, p) {
3028 pi = sc->port[p];
3029
3030#ifdef TCP_OFFLOAD
3031 /*
3032 * Skip over the NIC queues if they aren't taking direct
3033 * interrupts.
3034 */
3035 if (!(sc->flags & INTR_DIRECT) &&
3036 pi->nofldrxq > pi->nrxq)
3037 goto ofld_queues;
3038#endif
3039 rxq = &sc->sge.rxq[pi->first_rxq];
3040 for (q = 0; q < pi->nrxq; q++, rxq++) {
3041 snprintf(s, sizeof(s), "%d.%d", p, q);
3042 rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
3043 s);
3044 if (rc != 0)
3045 return (rc);
3046 irq++;
3047 rid++;
3048 }
3049
3050#ifdef TCP_OFFLOAD
3051 /*
3052 * Skip over the offload queues if they aren't taking
3053 * direct interrupts.
3054 */
3055 if (!(sc->flags & INTR_DIRECT))
3056 continue;
3057ofld_queues:
3058 ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
3059 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
3060 snprintf(s, sizeof(s), "%d,%d", p, q);
3061 rc = t4_alloc_irq(sc, irq, rid, t4_intr,
3062 ofld_rxq, s);
3063 if (rc != 0)
3064 return (rc);
3065 irq++;
3066 rid++;
3067 }
3068#endif
3069 }
3070 }
3071
3072 return (0);
3073}
3074
/*
 * One-time adapter-wide initialization: set up the adapter's own queues and
 * per-channel task queues, then enable interrupts.  Cleans up after itself
 * on failure.  Asserts that the adapter lock is not held.  Returns 0 or a
 * positive errno.
 */
static int
adapter_full_init(struct adapter *sc)
{
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
	    ("%s: FULL_INIT_DONE already", __func__));

	/*
	 * queues that belong to the adapter (not any particular port).
	 */
	rc = t4_setup_adapter_queues(sc);
	if (rc != 0)
		goto done;

	/* One single-threaded taskqueue per entry in sc->tq. */
	for (i = 0; i < nitems(sc->tq); i++) {
		sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT,
		    taskqueue_thread_enqueue, &sc->tq[i]);
		if (sc->tq[i] == NULL) {
			device_printf(sc->dev,
			    "failed to allocate task queue %d\n", i);
			rc = ENOMEM;
			goto done;
		}
		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
		    device_get_nameunit(sc->dev), i);
	}

	t4_intr_enable(sc);
	sc->flags |= FULL_INIT_DONE;
done:
	if (rc != 0)
		adapter_full_uninit(sc);	/* idempotent cleanup */

	return (rc);
}
3112
3113static int
3114adapter_full_uninit(struct adapter *sc)
3115{
3116 int i;
3117
3118 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
3119
3120 t4_teardown_adapter_queues(sc);
3121
3122 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) {
3123 taskqueue_free(sc->tq[i]);
3124 sc->tq[i] = NULL;
3125 }
3126
3127 sc->flags &= ~FULL_INIT_DONE;
3128
3129 return (0);
3130}
3131
/*
 * One-time per-port initialization: allocate the port's queues and program
 * its RSS indirection to spread traffic across its rx queues.  Cleans up
 * after itself on failure.  Must be called from within a synchronized op.
 * Returns 0 or a positive errno.
 */
static int
port_full_init(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;
	uint16_t *rss;
	struct sge_rxq *rxq;
	int rc, i;

	ASSERT_SYNCHRONIZED_OP(sc);
	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
	    ("%s: PORT_INIT_DONE already", __func__));

	sysctl_ctx_init(&pi->ctx);
	pi->flags |= PORT_SYSCTL_CTX;	/* remember ctx needs freeing */

	/*
	 * Allocate tx/rx/fl queues for this port.
	 */
	rc = t4_setup_port_queues(pi);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * Setup RSS for this port.  The table entries are the absolute ids
	 * of the port's rx ingress queues.
	 */
	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
	    M_ZERO | M_WAITOK);
	for_each_rxq(pi, i, rxq) {
		rss[i] = rxq->iq.abs_id;
	}
	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
	    pi->rss_size, rss, pi->nrxq);
	free(rss, M_CXGBE);
	if (rc != 0) {
		if_printf(ifp, "rss_config failed: %d\n", rc);
		goto done;
	}

	pi->flags |= PORT_INIT_DONE;
done:
	if (rc != 0)
		port_full_uninit(pi);	/* idempotent cleanup */

	return (rc);
}
3178
3179/*
3180 * Idempotent.
3181 */
3182static int
3183port_full_uninit(struct port_info *pi)
3184{
3185 struct adapter *sc = pi->adapter;
3186 int i;
3187 struct sge_rxq *rxq;
3188 struct sge_txq *txq;
3189#ifdef TCP_OFFLOAD
3190 struct sge_ofld_rxq *ofld_rxq;
3191 struct sge_wrq *ofld_txq;
3192#endif
3193
3194 if (pi->flags & PORT_INIT_DONE) {
3195
3196 /* Need to quiesce queues. XXX: ctrl queues? */
3197
3198 for_each_txq(pi, i, txq) {
3199 quiesce_eq(sc, &txq->eq);
3200 }
3201
3202#ifdef TCP_OFFLOAD
3203 for_each_ofld_txq(pi, i, ofld_txq) {
3204 quiesce_eq(sc, &ofld_txq->eq);
3205 }
3206#endif
3207
3208 for_each_rxq(pi, i, rxq) {
3209 quiesce_iq(sc, &rxq->iq);
3210 quiesce_fl(sc, &rxq->fl);
3211 }
3212
3213#ifdef TCP_OFFLOAD
3214 for_each_ofld_rxq(pi, i, ofld_rxq) {
3215 quiesce_iq(sc, &ofld_rxq->iq);
3216 quiesce_fl(sc, &ofld_rxq->fl);
3217 }
3218#endif
3219 }
3220
3221 t4_teardown_port_queues(pi);
3222 pi->flags &= ~PORT_INIT_DONE;
3223
3224 return (0);
3225}
3226
/*
 * Quiesce an egress queue prior to teardown: doom it, wait out any pending
 * credit flush, and drain its tx callout and task.
 */
static void
quiesce_eq(struct adapter *sc, struct sge_eq *eq)
{
	EQ_LOCK(eq);
	eq->flags |= EQ_DOOMED;

	/*
	 * Wait for the response to a credit flush if one's
	 * pending.
	 */
	while (eq->flags & EQ_CRFLUSHED)
		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
	EQ_UNLOCK(eq);

	callout_drain(&eq->tx_callout);	/* XXX: iffy */
	pause("callout", 10);		/* Still iffy */

	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
}
3246
3247static void
3248quiesce_iq(struct adapter *sc, struct sge_iq *iq)
3249{
3250 (void) sc; /* unused */
3251
3252 /* Synchronize with the interrupt handler */
3253 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED))
3254 pause("iqfree", 1);
3255}
3256
/*
 * Quiesce a free list: mark it doomed while holding the adapter's
 * starving-freelist lock (sfl_lock is taken before FL_LOCK — keep that
 * order), then drain the starvation callout.  The KASSERT checks that the
 * callout cleared the FL_STARVING flag on its way out.
 */
static void
quiesce_fl(struct adapter *sc, struct sge_fl *fl)
{
	mtx_lock(&sc->sfl_lock);
	FL_LOCK(fl);
	fl->flags |= FL_DOOMED;
	FL_UNLOCK(fl);
	mtx_unlock(&sc->sfl_lock);

	callout_drain(&sc->sfl_callout);
	KASSERT((fl->flags & FL_STARVING) == 0,
	    ("%s: still starving", __func__));
}
3270
3271static int
3272t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
3273 driver_intr_t *handler, void *arg, char *name)
3274{
3275 int rc;
3276
3277 irq->rid = rid;
3278 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid,
3279 RF_SHAREABLE | RF_ACTIVE);
3280 if (irq->res == NULL) {
3281 device_printf(sc->dev,
3282 "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
3283 return (ENOMEM);
3284 }
3285
3286 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET,
3287 NULL, handler, arg, &irq->tag);
3288 if (rc != 0) {
3289 device_printf(sc->dev,
3290 "failed to setup interrupt for rid %d, name %s: %d\n",
3291 rid, name, rc);
3292 } else if (name)
3293 bus_describe_intr(sc->dev, irq->res, irq->tag, name);
3294
3295 return (rc);
3296}
3297
3298static int
3299t4_free_irq(struct adapter *sc, struct irq *irq)
3300{
3301 if (irq->tag)
3302 bus_teardown_intr(sc->dev, irq->res, irq->tag);
3303 if (irq->res)
3304 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res);
3305
3306 bzero(irq, sizeof(*irq));
3307
3308 return (0);
3309}
3310
/*
 * Copy the chip registers in [start, end] (inclusive, 32 bits at a time)
 * into the dump buffer at offset 'start'.  Assumes buf is large enough and
 * suitably aligned for 32-bit stores at that offset — caller's
 * responsibility (TODO confirm against the ioctl path that sizes buf).
 */
static void
reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
    unsigned int end)
{
	unsigned int reg = start;
	uint32_t *dst = (uint32_t *)(buf + start);

	while (reg <= end) {
		*dst = t4_read_reg(sc, reg);
		dst++;
		reg += sizeof(uint32_t);
	}
}
3320
/*
 * Fill 'buf' with a dump of the chip's registers and record the chip
 * identity in regs->version.  The set of readable register ranges differs
 * between T4 and T5 chips, so one of the two tables below is selected at
 * run time.  Each table is a flat array of {first, last} register-address
 * pairs, both ends inclusive; reg_block_dump reads each range into buf at
 * the matching offset.
 */
static void
t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
{
	int i, n;
	const unsigned int *reg_ranges;
	/* {start, end} register-address pairs dumpable on T4 chips. */
	static const unsigned int t4_reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};
	/* {start, end} register-address pairs dumpable on T5 chips. */
	static const unsigned int t5_reg_ranges[] = {
		0x1008, 0x1148,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1280, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x3028,
		0x3060, 0x30d8,
		0x30e0, 0x30fc,
		0x3140, 0x357c,
		0x35a8, 0x35cc,
		0x35ec, 0x35ec,
		0x3600, 0x5624,
		0x56cc, 0x575c,
		0x580c, 0x5814,
		0x5890, 0x58bc,
		0x5940, 0x59dc,
		0x59fc, 0x5a18,
		0x5a60, 0x5a9c,
		0x5b94, 0x5bfc,
		0x6000, 0x6040,
		0x6058, 0x614c,
		0x7700, 0x7798,
		0x77c0, 0x78fc,
		0x7b00, 0x7c54,
		0x7d00, 0x7efc,
		0x8dc0, 0x8de0,
		0x8df8, 0x8e84,
		0x8ea0, 0x8f84,
		0x8fc0, 0x90f8,
		0x9400, 0x9470,
		0x9600, 0x96f4,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0xa020,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0x11088,
		0x1109c, 0x1117c,
		0x11190, 0x11204,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x19290,
		0x193f8, 0x19474,
		0x19490, 0x194cc,
		0x194f0, 0x194f8,
		0x19c00, 0x19c60,
		0x19c94, 0x19e10,
		0x19e50, 0x19f34,
		0x19f40, 0x19f50,
		0x19f90, 0x19fe4,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e008, 0x1e00c,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e290,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e408, 0x1e40c,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e690,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e808, 0x1e80c,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea90,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec08, 0x1ec0c,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee90,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f008, 0x1f00c,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f290,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f408, 0x1f40c,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f690,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f808, 0x1f80c,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa90,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc08, 0x1fc0c,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe90,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x30000, 0x30030,
		0x30100, 0x30144,
		0x30190, 0x301d0,
		0x30200, 0x30318,
		0x30400, 0x3052c,
		0x30540, 0x3061c,
		0x30800, 0x30834,
		0x308c0, 0x30908,
		0x30910, 0x309ac,
		0x30a00, 0x30a2c,
		0x30a44, 0x30a50,
		0x30a74, 0x30c24,
		0x30d00, 0x30d00,
		0x30d08, 0x30d14,
		0x30d1c, 0x30d20,
		0x30d3c, 0x30d50,
		0x31200, 0x3120c,
		0x31220, 0x31220,
		0x31240, 0x31240,
		0x31600, 0x3160c,
		0x31a00, 0x31a1c,
		0x31e00, 0x31e20,
		0x31e38, 0x31e3c,
		0x31e80, 0x31e80,
		0x31e88, 0x31ea8,
		0x31eb0, 0x31eb4,
		0x31ec8, 0x31ed4,
		0x31fb8, 0x32004,
		0x32200, 0x32200,
		0x32208, 0x32240,
		0x32248, 0x32280,
		0x32288, 0x322c0,
		0x322c8, 0x322fc,
		0x32600, 0x32630,
		0x32a00, 0x32abc,
		0x32b00, 0x32b70,
		0x33000, 0x33048,
		0x33060, 0x3309c,
		0x330f0, 0x33148,
		0x33160, 0x3319c,
		0x331f0, 0x332e4,
		0x332f8, 0x333e4,
		0x333f8, 0x33448,
		0x33460, 0x3349c,
		0x334f0, 0x33548,
		0x33560, 0x3359c,
		0x335f0, 0x336e4,
		0x336f8, 0x337e4,
		0x337f8, 0x337fc,
		0x33814, 0x33814,
		0x3382c, 0x3382c,
		0x33880, 0x3388c,
		0x338e8, 0x338ec,
		0x33900, 0x33948,
		0x33960, 0x3399c,
		0x339f0, 0x33ae4,
		0x33af8, 0x33b10,
		0x33b28, 0x33b28,
		0x33b3c, 0x33b50,
		0x33bf0, 0x33c10,
		0x33c28, 0x33c28,
		0x33c3c, 0x33c50,
		0x33cf0, 0x33cfc,
		0x34000, 0x34030,
		0x34100, 0x34144,
		0x34190, 0x341d0,
		0x34200, 0x34318,
		0x34400, 0x3452c,
		0x34540, 0x3461c,
		0x34800, 0x34834,
		0x348c0, 0x34908,
		0x34910, 0x349ac,
		0x34a00, 0x34a2c,
		0x34a44, 0x34a50,
		0x34a74, 0x34c24,
		0x34d00, 0x34d00,
		0x34d08, 0x34d14,
		0x34d1c, 0x34d20,
		0x34d3c, 0x34d50,
		0x35200, 0x3520c,
		0x35220, 0x35220,
		0x35240, 0x35240,
		0x35600, 0x3560c,
		0x35a00, 0x35a1c,
		0x35e00, 0x35e20,
		0x35e38, 0x35e3c,
		0x35e80, 0x35e80,
		0x35e88, 0x35ea8,
		0x35eb0, 0x35eb4,
		0x35ec8, 0x35ed4,
		0x35fb8, 0x36004,
		0x36200, 0x36200,
		0x36208, 0x36240,
		0x36248, 0x36280,
		0x36288, 0x362c0,
		0x362c8, 0x362fc,
		0x36600, 0x36630,
		0x36a00, 0x36abc,
		0x36b00, 0x36b70,
		0x37000, 0x37048,
		0x37060, 0x3709c,
		0x370f0, 0x37148,
		0x37160, 0x3719c,
		0x371f0, 0x372e4,
		0x372f8, 0x373e4,
		0x373f8, 0x37448,
		0x37460, 0x3749c,
		0x374f0, 0x37548,
		0x37560, 0x3759c,
		0x375f0, 0x376e4,
		0x376f8, 0x377e4,
		0x377f8, 0x377fc,
		0x37814, 0x37814,
		0x3782c, 0x3782c,
		0x37880, 0x3788c,
		0x378e8, 0x378ec,
		0x37900, 0x37948,
		0x37960, 0x3799c,
		0x379f0, 0x37ae4,
		0x37af8, 0x37b10,
		0x37b28, 0x37b28,
		0x37b3c, 0x37b50,
		0x37bf0, 0x37c10,
		0x37c28, 0x37c28,
		0x37c3c, 0x37c50,
		0x37cf0, 0x37cfc,
		0x38000, 0x38030,
		0x38100, 0x38144,
		0x38190, 0x381d0,
		0x38200, 0x38318,
		0x38400, 0x3852c,
		0x38540, 0x3861c,
		0x38800, 0x38834,
		0x388c0, 0x38908,
		0x38910, 0x389ac,
		0x38a00, 0x38a2c,
		0x38a44, 0x38a50,
		0x38a74, 0x38c24,
		0x38d00, 0x38d00,
		0x38d08, 0x38d14,
		0x38d1c, 0x38d20,
		0x38d3c, 0x38d50,
		0x39200, 0x3920c,
		0x39220, 0x39220,
		0x39240, 0x39240,
		0x39600, 0x3960c,
		0x39a00, 0x39a1c,
		0x39e00, 0x39e20,
		0x39e38, 0x39e3c,
		0x39e80, 0x39e80,
		0x39e88, 0x39ea8,
		0x39eb0, 0x39eb4,
		0x39ec8, 0x39ed4,
		0x39fb8, 0x3a004,
		0x3a200, 0x3a200,
		0x3a208, 0x3a240,
		0x3a248, 0x3a280,
		0x3a288, 0x3a2c0,
		0x3a2c8, 0x3a2fc,
		0x3a600, 0x3a630,
		0x3aa00, 0x3aabc,
		0x3ab00, 0x3ab70,
		0x3b000, 0x3b048,
		0x3b060, 0x3b09c,
		0x3b0f0, 0x3b148,
		0x3b160, 0x3b19c,
		0x3b1f0, 0x3b2e4,
		0x3b2f8, 0x3b3e4,
		0x3b3f8, 0x3b448,
		0x3b460, 0x3b49c,
		0x3b4f0, 0x3b548,
		0x3b560, 0x3b59c,
		0x3b5f0, 0x3b6e4,
		0x3b6f8, 0x3b7e4,
		0x3b7f8, 0x3b7fc,
		0x3b814, 0x3b814,
		0x3b82c, 0x3b82c,
		0x3b880, 0x3b88c,
		0x3b8e8, 0x3b8ec,
		0x3b900, 0x3b948,
		0x3b960, 0x3b99c,
		0x3b9f0, 0x3bae4,
		0x3baf8, 0x3bb10,
		0x3bb28, 0x3bb28,
		0x3bb3c, 0x3bb50,
		0x3bbf0, 0x3bc10,
		0x3bc28, 0x3bc28,
		0x3bc3c, 0x3bc50,
		0x3bcf0, 0x3bcfc,
		0x3c000, 0x3c030,
		0x3c100, 0x3c144,
		0x3c190, 0x3c1d0,
		0x3c200, 0x3c318,
		0x3c400, 0x3c52c,
		0x3c540, 0x3c61c,
		0x3c800, 0x3c834,
		0x3c8c0, 0x3c908,
		0x3c910, 0x3c9ac,
		0x3ca00, 0x3ca2c,
		0x3ca44, 0x3ca50,
		0x3ca74, 0x3cc24,
		0x3cd00, 0x3cd00,
		0x3cd08, 0x3cd14,
		0x3cd1c, 0x3cd20,
		0x3cd3c, 0x3cd50,
		0x3d200, 0x3d20c,
		0x3d220, 0x3d220,
		0x3d240, 0x3d240,
		0x3d600, 0x3d60c,
		0x3da00, 0x3da1c,
		0x3de00, 0x3de20,
		0x3de38, 0x3de3c,
		0x3de80, 0x3de80,
		0x3de88, 0x3dea8,
		0x3deb0, 0x3deb4,
		0x3dec8, 0x3ded4,
		0x3dfb8, 0x3e004,
		0x3e200, 0x3e200,
		0x3e208, 0x3e240,
		0x3e248, 0x3e280,
		0x3e288, 0x3e2c0,
		0x3e2c8, 0x3e2fc,
		0x3e600, 0x3e630,
		0x3ea00, 0x3eabc,
		0x3eb00, 0x3eb70,
		0x3f000, 0x3f048,
		0x3f060, 0x3f09c,
		0x3f0f0, 0x3f148,
		0x3f160, 0x3f19c,
		0x3f1f0, 0x3f2e4,
		0x3f2f8, 0x3f3e4,
		0x3f3f8, 0x3f448,
		0x3f460, 0x3f49c,
		0x3f4f0, 0x3f548,
		0x3f560, 0x3f59c,
		0x3f5f0, 0x3f6e4,
		0x3f6f8, 0x3f7e4,
		0x3f7f8, 0x3f7fc,
		0x3f814, 0x3f814,
		0x3f82c, 0x3f82c,
		0x3f880, 0x3f88c,
		0x3f8e8, 0x3f8ec,
		0x3f900, 0x3f948,
		0x3f960, 0x3f99c,
		0x3f9f0, 0x3fae4,
		0x3faf8, 0x3fb10,
		0x3fb28, 0x3fb28,
		0x3fb3c, 0x3fb50,
		0x3fbf0, 0x3fc10,
		0x3fc28, 0x3fc28,
		0x3fc3c, 0x3fc50,
		0x3fcf0, 0x3fcfc,
		0x40000, 0x4000c,
		0x40040, 0x40068,
		0x4007c, 0x40144,
		0x40180, 0x4018c,
		0x40200, 0x40298,
		0x402ac, 0x4033c,
		0x403f8, 0x403fc,
		0x41304, 0x413c4,
		0x41400, 0x4141c,
		0x41480, 0x414d0,
		0x44000, 0x44078,
		0x440c0, 0x44278,
		0x442c0, 0x44478,
		0x444c0, 0x44678,
		0x446c0, 0x44878,
		0x448c0, 0x449fc,
		0x45000, 0x45068,
		0x45080, 0x45084,
		0x450a0, 0x450b0,
		0x45200, 0x45268,
		0x45280, 0x45284,
		0x452a0, 0x452b0,
		0x460c0, 0x460e4,
		0x47000, 0x4708c,
		0x47200, 0x47250,
		0x47400, 0x47420,
		0x47600, 0x47618,
		0x47800, 0x47814,
		0x48000, 0x4800c,
		0x48040, 0x48068,
		0x4807c, 0x48144,
		0x48180, 0x4818c,
		0x48200, 0x48298,
		0x482ac, 0x4833c,
		0x483f8, 0x483fc,
		0x49304, 0x493c4,
		0x49400, 0x4941c,
		0x49480, 0x494d0,
		0x4c000, 0x4c078,
		0x4c0c0, 0x4c278,
		0x4c2c0, 0x4c478,
		0x4c4c0, 0x4c678,
		0x4c6c0, 0x4c878,
		0x4c8c0, 0x4c9fc,
		0x4d000, 0x4d068,
		0x4d080, 0x4d084,
		0x4d0a0, 0x4d0b0,
		0x4d200, 0x4d268,
		0x4d280, 0x4d284,
		0x4d2a0, 0x4d2b0,
		0x4e0c0, 0x4e0e4,
		0x4f000, 0x4f08c,
		0x4f200, 0x4f250,
		0x4f400, 0x4f420,
		0x4f600, 0x4f618,
		0x4f800, 0x4f814,
		0x50000, 0x500cc,
		0x50400, 0x50400,
		0x50800, 0x508cc,
		0x50c00, 0x50c00,
		0x51000, 0x5101c,
		0x51300, 0x51308,
	};

	/* Select the range table that matches this chip generation. */
	if (is_t4(sc)) {
		reg_ranges = &t4_reg_ranges[0];
		n = nitems(t4_reg_ranges);
	} else {
		reg_ranges = &t5_reg_ranges[0];
		n = nitems(t5_reg_ranges);
	}

	/* Record which chip (id + revision) produced this dump. */
	regs->version = chip_id(sc) | chip_rev(sc) << 10;
	for (i = 0; i < n; i += 2)
		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
}
3996
/*
 * Per-port periodic callout.  Refreshes the hardware MAC statistics and
 * mirrors them into the ifnet counters, then reschedules itself one second
 * later.  Stops rescheduling (the callout chain ends) once the interface is
 * no longer marked running.
 */
static void
cxgbe_tick(void *arg)
{
	struct port_info *pi = arg;
	struct ifnet *ifp = pi->ifp;
	struct sge_txq *txq;
	int i, drops;
	struct port_stats *s = &pi->stats;

	PORT_LOCK(pi);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(pi);
		return;	/* without scheduling another callout */
	}

	/* Pull the latest hardware counters for this port's tx channel. */
	t4_get_port_stats(pi->adapter, pi->tx_chan, s);

	/*
	 * Pause frames are excluded from the packet/byte/mcast totals; the
	 * byte counters treat each pause frame as 64 octets (presumably the
	 * fixed on-wire size of a pause frame — confirm against the MAC docs).
	 */
	ifp->if_opackets = s->tx_frames - s->tx_pause;
	ifp->if_ipackets = s->rx_frames - s->rx_pause;
	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
	    s->rx_trunc3;

	/* Tx drops: hardware drops plus each tx queue's buf_ring drops. */
	drops = s->tx_drop;
	for_each_txq(pi, i, txq)
		drops += txq->br->br_drops;
	ifp->if_snd.ifq_drops = drops;

	ifp->if_oerrors = s->tx_error_frames;
	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
	    s->rx_fcs_err + s->rx_len_err;

	/* Run again in one second. */
	callout_schedule(&pi->tick, hz);
	PORT_UNLOCK(pi);
}
4036
4037static void
4038cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
4039{
4040 struct ifnet *vlan;
4041
4042 if (arg != ifp || ifp->if_type != IFT_ETHER)
4043 return;
4044
4045 vlan = VLAN_DEVAT(ifp, vid);
4046 VLAN_SETCOOKIE(vlan, ifp);
4047}
4048
/*
 * Fallback CPL handler, installed for any opcode without a registered
 * handler (see t4_register_cpl_handler).  Reaching it means the hardware
 * delivered a CPL message nobody claimed: panic under INVARIANTS, otherwise
 * log the opcode and free the mbuf (if any).
 */
static int
cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{

#ifdef INVARIANTS
	panic("%s: opcode 0x%02x on iq %p with payload %p",
	    __func__, rss->opcode, iq, m);
#else
	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
	    __func__, rss->opcode, iq, m);
	m_freem(m);
#endif
	return (EDOOFUS);
}
4063
4064int
4065t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
4066{
4067 uintptr_t *loc, new;
4068
4069 if (opcode >= nitems(sc->cpl_handler))
4070 return (EINVAL);
4071
4072 new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
4073 loc = (uintptr_t *) &sc->cpl_handler[opcode];
4074 atomic_store_rel_ptr(loc, new);
4075
4076 return (0);
4077}
4078
/*
 * Fallback async-notification handler, installed when no real handler is
 * registered (see t4_register_an_handler).  Panics under INVARIANTS,
 * otherwise just logs the event.
 */
static int
an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
{

#ifdef INVARIANTS
	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
#else
	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
	    __func__, iq, ctrl);
#endif
	return (EDOOFUS);
}
4091
4092int
4093t4_register_an_handler(struct adapter *sc, an_handler_t h)
4094{
4095 uintptr_t *loc, new;
4096
4097 new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
4098 loc = (uintptr_t *) &sc->an_handler;
4099 atomic_store_rel_ptr(loc, new);
4100
4101 return (0);
4102}
4103
/*
 * Fallback firmware-message handler, installed when no real handler is
 * registered (see t4_register_fw_msg_handler).  __containerof recovers the
 * enclosing cpl_fw6_msg from the pointer to its data payload so the message
 * type can be reported.  Panics under INVARIANTS, otherwise logs.
 */
static int
fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
{
	const struct cpl_fw6_msg *cpl =
	    __containerof(rpl, struct cpl_fw6_msg, data[0]);

#ifdef INVARIANTS
	panic("%s: fw_msg type %d", __func__, cpl->type);
#else
	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
#endif
	return (EDOOFUS);
}
4117
4118int
4119t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
4120{
4121 uintptr_t *loc, new;
4122
4123 if (type >= nitems(sc->fw_msg_handler))
4124 return (EINVAL);
4125
4126 /*
4127 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
4128 * handler dispatch table. Reject any attempt to install a handler for
4129 * this subtype.
4130 */
4131 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
4132 return (EINVAL);
4133
4134 new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
4135 loc = (uintptr_t *) &sc->fw_msg_handler[type];
4136 atomic_store_rel_ptr(loc, new);
4137
4138 return (0);
4139}
4140
/*
 * Register all of the adapter-wide (dev.t4nex.X) sysctl nodes.  The read-only
 * hardware/firmware identification knobs come first, then (under SBUF_DRAIN)
 * the dev.t4nex.X.misc diagnostic nodes, and finally (under TCP_OFFLOAD, on
 * offload-capable adapters) the tunable dev.t4nex.X.toe parameters.
 * Always returns 0.
 */
static int
t4_sysctls(struct adapter *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children, *c0;
	/*
	 * Bit-number description strings (printf %b style, leading \20 for a
	 * base-16 bit dump) fed to sysctl_bitfield along with the raw
	 * capability words below.
	 */
	static char *caps[] = {
		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL",	/* caps[1] niccaps */
		"\20\1TOE",				/* caps[2] toecaps */
		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
	};
	static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};

	ctx = device_get_sysctl_ctx(sc->dev);

	/*
	 * dev.t4nex.X.
	 */
	oid = device_get_sysctl_tree(sc->dev);
	c0 = children = SYSCTL_CHILDREN(oid);	/* c0 keeps the top level */

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
	    sc->params.nports, "# of ports");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
	    NULL, chip_rev(sc), "chip hardware revision");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");

	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
	    sc->cfcsum, "config file checksum");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
	    sysctl_bitfield, "A", "available doorbells");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
	    sysctl_bitfield, "A", "available link capabilities");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
	    sysctl_bitfield, "A", "available NIC capabilities");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
	    sysctl_bitfield, "A", "available TCP offload capabilities");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
	    sysctl_bitfield, "A", "available RDMA capabilities");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
	    sysctl_bitfield, "A", "available iSCSI capabilities");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
	    sysctl_bitfield, "A", "available FCoE capabilities");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
	    sc->params.vpd.cclk, "core clock frequency (in KHz)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
	    "interrupt holdoff timer values (us)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
	    "interrupt holdoff packet counter values");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
	    NULL, sc->tids.nftids, "number of filters");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
	    CTLFLAG_RD, sc, 0, sysctl_temperature, "A",
	    "chip temperature (in Celsius)");

	t4_sge_sysctls(sc, ctx, children);

#ifdef SBUF_DRAIN
	/*
	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
	 */
	oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc",
	    CTLFLAG_RD | CTLFLAG_SKIP, NULL,
	    "logs and miscellaneous information");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cctrl, "A", "congestion control");

	/* sysctl_cim_ibq_obq uses arg2 to select the queue: 0-5 are IBQs. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5,
	    sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cim_la, "A", "CIM logic analyzer");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");

	/* arg2 values >= CIM_NUM_IBQ select OBQs in the same handler. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");

	if (is_t5(sc)) {
		/* T5 has two extra OBQs. */
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");

		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
	}

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cim_qcfg, "A", "CIM queue configuration");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_cpl_stats, "A", "CPL statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_ddp_stats, "A", "DDP statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_devlog, "A", "firmware's device log");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_fcoe_stats, "A", "FCoE statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_hw_sched, "A", "hardware scheduler ");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_l2t, "A", "hardware L2 table");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_lb_stats, "A", "loopback statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_meminfo, "A", "memory regions");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_mps_tcam, "A", "MPS TCAM entries");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_path_mtus, "A", "path MTUs");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_pm_stats, "A", "PM statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_rdma_stats, "A", "RDMA statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_tcp_stats, "A", "TCP statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_tids, "A", "TID information");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_tp_err_stats, "A", "TP error statistics");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_tp_la, "A", "TP logic analyzer");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_tx_rate, "A", "Tx rate");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");

	if (is_t5(sc)) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
		    sysctl_wcwr_stats, "A", "write combined work requests");
	}
#endif

#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		/*
		 * dev.t4nex.X.toe.
		 */
		oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD,
		    NULL, "TOE parameters");
		children = SYSCTL_CHILDREN(oid);

		sc->tt.sndbuf = 256 * 1024;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW,
		    &sc->tt.sndbuf, 0, "max hardware send buffer size");

		sc->tt.ddp = 0;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW,
		    &sc->tt.ddp, 0, "DDP allowed");

		/* Defaults for these two come from the current chip state. */
		sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5));
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW,
		    &sc->tt.indsz, 0, "DDP max indicate size allowed");

		sc->tt.ddp_thres =
		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
		    &sc->tt.ddp_thres, 0, "DDP threshold");

		sc->tt.rx_coalesce = 1;
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
	}
#endif


	return (0);
}
4433
/*
 * Set up the per-port sysctl tree (dev.cxgbe.X.*): link/PHY nodes, queue
 * counts and first-queue indices, interrupt holdoff and queue-size knobs,
 * and the dev.cxgbe.X.stats.* subtree of MAC statistics read directly from
 * hardware registers.  Always returns 0; individual SYSCTL_ADD_* calls are
 * not error-checked (normal practice for sysctl registration).
 */
static int
cxgbe_sysctls(struct port_info *pi)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(pi->dev);

	/*
	 * dev.cxgbe.X.
	 */
	oid = device_get_sysctl_tree(pi->dev);
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
	    CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
	/* Only the 10GBASE-T PHY exposes temperature and fw version (MDIO). */
	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
		    "PHY temperature (in Celsius)");
		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
		    "PHY firmware version");
	}
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
	    &pi->nrxq, 0, "# of rx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
	    &pi->ntxq, 0, "# of tx queues");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
	    &pi->first_rxq, 0, "index of first rx queue");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
	    &pi->first_txq, 0, "index of first tx queue");

#ifdef TCP_OFFLOAD
	if (is_offload(pi->adapter)) {
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
		    &pi->nofldrxq, 0,
		    "# of rx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
		    &pi->nofldtxq, 0,
		    "# of tx queues for offloaded TCP connections");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
		    "index of first TOE rx queue");
		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
		    "index of first TOE tx queue");
	}
#endif

	/* Writable knobs; validated/applied by their handler functions. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
	    "holdoff timer index");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
	    "holdoff packet counter index");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
	    "rx queue size");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
	    "tx queue size");

	/*
	 * dev.cxgbe.X.stats.
	 */
	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "port statistics");
	children = SYSCTL_CHILDREN(oid);

/*
 * Each stat is backed by a 64-bit MPS register read on demand by
 * sysctl_handle_t4_reg64 (arg2 is the register offset).
 */
#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
	    sysctl_handle_t4_reg64, "QU", desc)

	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_64",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max",
	    "# of tx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L));

	/* Note: rx stats are also indexed by the port's tx channel. */
	SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err",
	    "# of frames received with bad FCS",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_len_err",
	    "# of frames received with length error",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_64",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max",
	    "# of rx frames in this range",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L));
	SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received",
	    PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L));

#undef SYSCTL_ADD_T4_REG64

/* Software-maintained counters kept in pi->stats. */
#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
	    &pi->stats.name, desc)

	/* We get these from port_stats and they may be stale by up to 1s */
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0,
	    "# drops due to buffer-group 0 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1,
	    "# drops due to buffer-group 1 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2,
	    "# drops due to buffer-group 2 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3,
	    "# drops due to buffer-group 3 overflows");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc0,
	    "# of buffer-group 0 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc1,
	    "# of buffer-group 1 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc2,
	    "# of buffer-group 2 truncated packets");
	SYSCTL_ADD_T4_PORTSTAT(rx_trunc3,
	    "# of buffer-group 3 truncated packets");

#undef SYSCTL_ADD_T4_PORTSTAT

	return (0);
}
4657
4658static int
4659sysctl_int_array(SYSCTL_HANDLER_ARGS)
4660{
4661 int rc, *i;
4662 struct sbuf sb;
4663
4664 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4665 for (i = arg1; arg2; arg2 -= sizeof(int), i++)
4666 sbuf_printf(&sb, "%d ", *i);
4667 sbuf_trim(&sb);
4668 sbuf_finish(&sb);
4669 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4670 sbuf_delete(&sb);
4671 return (rc);
4672}
4673
4674static int
4675sysctl_bitfield(SYSCTL_HANDLER_ARGS)
4676{
4677 int rc;
4678 struct sbuf *sb;
4679
4680 rc = sysctl_wire_old_buffer(req, 0);
4681 if (rc != 0)
4682 return(rc);
4683
4684 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4685 if (sb == NULL)
4686 return (ENOMEM);
4687
4688 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1);
4689 rc = sbuf_finish(sb);
4690 sbuf_delete(sb);
4691
4692 return (rc);
4693}
4694
/*
 * Read a quantity from the port's 10GBASE-T PHY over MDIO.  arg2 selects
 * what to read: 0 = temperature, 1 = firmware version.  Serialized with
 * other adapter operations via begin/end_synchronized_op.
 */
static int
sysctl_btphy(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	int op = arg2;
	struct adapter *sc = pi->adapter;
	u_int v;
	int rc;

	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4btt");
	if (rc)
		return (rc);
	/* XXX: magic numbers */
	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
	    &v);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);
	if (op == 0)
		v /= 256;	/* temperature appears to be in the high byte
				 * (registered as Celsius) — confirm w/ PHY doc */

	rc = sysctl_handle_int(oidp, &v, 0, req);
	return (rc);
}
4719
/*
 * Set/get the interrupt holdoff timer index for all of the port's rx
 * queues (including TOE rx queues when offload is compiled in).  Unlike
 * the other knobs, a new value takes effect immediately: it is written
 * into every queue's iq.intr_params while the adapter lock is held.
 */
static int
sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
{
	struct port_info *pi = arg1;
	struct adapter *sc = pi->adapter;
	int idx, rc, i;
	struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
	uint8_t v;

	idx = pi->tmr_idx;

	rc = sysctl_handle_int(oidp, &idx, 0, req);
	if (rc != 0 || req->newptr == NULL)
		return (rc);	/* plain read, or copyin error */

	if (idx < 0 || idx >= SGE_NTIMERS)
		return (EINVAL);

	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4tmr");
	if (rc)
		return (rc);

	/* Keep the packet-count enable bit in sync with pktc_idx. */
	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
	for_each_rxq(pi, i, rxq) {
#ifdef atomic_store_rel_8
		/* single atomic byte store so an interrupt never sees a
		 * half-updated value */
		atomic_store_rel_8(&rxq->iq.intr_params, v);
#else
		rxq->iq.intr_params = v;
#endif
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(pi, i, ofld_rxq) {
#ifdef atomic_store_rel_8
		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
#else
		ofld_rxq->iq.intr_params = v;
#endif
	}
#endif
	pi->tmr_idx = idx;

	end_synchronized_op(sc, LOCK_HELD);
	return (0);
}
4768
4769static int
4770sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
4771{
4772 struct port_info *pi = arg1;
4773 struct adapter *sc = pi->adapter;
4774 int idx, rc;
4775
4776 idx = pi->pktc_idx;
4777
4778 rc = sysctl_handle_int(oidp, &idx, 0, req);
4779 if (rc != 0 || req->newptr == NULL)
4780 return (rc);
4781
4782 if (idx < -1 || idx >= SGE_NCOUNTERS)
4783 return (EINVAL);
4784
4785 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4786 "t4pktc");
4787 if (rc)
4788 return (rc);
4789
4790 if (pi->flags & PORT_INIT_DONE)
4791 rc = EBUSY; /* cannot be changed once the queues are created */
4792 else
4793 pi->pktc_idx = idx;
4794
4795 end_synchronized_op(sc, LOCK_HELD);
4796 return (rc);
4797}
4798
4799static int
4800sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
4801{
4802 struct port_info *pi = arg1;
4803 struct adapter *sc = pi->adapter;
4804 int qsize, rc;
4805
4806 qsize = pi->qsize_rxq;
4807
4808 rc = sysctl_handle_int(oidp, &qsize, 0, req);
4809 if (rc != 0 || req->newptr == NULL)
4810 return (rc);
4811
4812 if (qsize < 128 || (qsize & 7))
4813 return (EINVAL);
4814
4815 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4816 "t4rxqs");
4817 if (rc)
4818 return (rc);
4819
4820 if (pi->flags & PORT_INIT_DONE)
4821 rc = EBUSY; /* cannot be changed once the queues are created */
4822 else
4823 pi->qsize_rxq = qsize;
4824
4825 end_synchronized_op(sc, LOCK_HELD);
4826 return (rc);
4827}
4828
4829static int
4830sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
4831{
4832 struct port_info *pi = arg1;
4833 struct adapter *sc = pi->adapter;
4834 int qsize, rc;
4835
4836 qsize = pi->qsize_txq;
4837
4838 rc = sysctl_handle_int(oidp, &qsize, 0, req);
4839 if (rc != 0 || req->newptr == NULL)
4840 return (rc);
4841
4842 /* bufring size must be powerof2 */
4843 if (qsize < 128 || !powerof2(qsize))
4844 return (EINVAL);
4845
4846 rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
4847 "t4txqs");
4848 if (rc)
4849 return (rc);
4850
4851 if (pi->flags & PORT_INIT_DONE)
4852 rc = EBUSY; /* cannot be changed once the queues are created */
4853 else
4854 pi->qsize_txq = qsize;
4855
4856 end_synchronized_op(sc, LOCK_HELD);
4857 return (rc);
4858}
4859
4860static int
4861sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
4862{
4863 struct adapter *sc = arg1;
4864 int reg = arg2;
4865 uint64_t val;
4866
4867 val = t4_read_reg64(sc, reg);
4868
4869 return (sysctl_handle_64(oidp, &val, 0, req));
4870}
4871
/*
 * Query the firmware for the chip temperature sensor reading
 * (FW_PARAM_DEV_DIAG_TMP).  The firmware reports 0 for "unknown";
 * that is surfaced to userland as -1.
 */
static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	int rc, t;
	uint32_t param, val;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
	if (rc)
		return (rc);
	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	end_synchronized_op(sc, 0);
	if (rc)
		return (rc);

	/* unknown is returned as 0 but we display -1 in that case */
	t = val == 0 ? -1 : val;

	rc = sysctl_handle_int(oidp, &t, 0, req);
	return (rc);
}
4896
4897#ifdef SBUF_DRAIN
4898static int
4899sysctl_cctrl(SYSCTL_HANDLER_ARGS)
4900{
4901 struct adapter *sc = arg1;
4902 struct sbuf *sb;
4903 int rc, i;
4904 uint16_t incr[NMTUS][NCCTRL_WIN];
4905 static const char *dec_fac[] = {
4906 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
4907 "0.9375"
4908 };
4909
4910 rc = sysctl_wire_old_buffer(req, 0);
4911 if (rc != 0)
4912 return (rc);
4913
4914 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
4915 if (sb == NULL)
4916 return (ENOMEM);
4917
4918 t4_read_cong_tbl(sc, incr);
4919
4920 for (i = 0; i < NCCTRL_WIN; ++i) {
4921 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
4922 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
4923 incr[5][i], incr[6][i], incr[7][i]);
4924 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
4925 incr[8][i], incr[9][i], incr[10][i], incr[11][i],
4926 incr[12][i], incr[13][i], incr[14][i], incr[15][i],
4927 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
4928 }
4929
4930 rc = sbuf_finish(sb);
4931 sbuf_delete(sb);
4932
4933 return (rc);
4934}
4935
/*
 * Names of the CIM queues, in the same order as the qid argument to
 * sysctl_cim_ibq_obq: all inbound queues first, then the outbound
 * queues (the last two exist only on T5 and later).
 */
static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
};
4941
/*
 * Dump the contents of one CIM inbound or outbound queue.  arg2 is a
 * combined index: 0..CIM_NUM_IBQ-1 selects an ibq, the rest select an
 * obq (T4 and T5 have different obq counts).  Output is one line of 4
 * words per 16 bytes, prefixed by the queue's name from qname[].
 */
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n, qid = arg2;
	uint32_t *buf, *p;
	char *qtype;
	u_int cim_num_obq = is_t4(sc) ? CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
	    ("%s: bad qid %d\n", __func__, qid));

	if (qid < CIM_NUM_IBQ) {
		/* inbound queue */
		qtype = "IBQ";
		n = 4 * CIM_IBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_ibq(sc, qid, buf, n);
	} else {
		/* outbound queue */
		qtype = "OBQ";
		qid -= CIM_NUM_IBQ;
		/* NOTE(review): sized for cim_num_obq queues although only
		 * one obq is read — presumably generous on purpose; confirm
		 * against t4_read_cim_obq's size expectations. */
		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
		rc = t4_read_cim_obq(sc, qid, buf, n);
	}

	if (rc < 0) {
		rc = -rc;
		goto done;
	}
	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}

	/* arg2 (not the adjusted qid) still indexes the full name table. */
	sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
	for (i = 0, p = buf; i < n; i += 16, p += 4)
		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
		    p[2], p[3]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
4997
4998static int
4999sysctl_cim_la(SYSCTL_HANDLER_ARGS)
5000{
5001 struct adapter *sc = arg1;
5002 u_int cfg;
5003 struct sbuf *sb;
5004 uint32_t *buf, *p;
5005 int rc;
5006
5007 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
5008 if (rc != 0)
5009 return (rc);
5010
5011 rc = sysctl_wire_old_buffer(req, 0);
5012 if (rc != 0)
5013 return (rc);
5014
5015 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5016 if (sb == NULL)
5017 return (ENOMEM);
5018
5019 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
5020 M_ZERO | M_WAITOK);
5021
5022 rc = -t4_cim_read_la(sc, buf, NULL);
5023 if (rc != 0)
5024 goto done;
5025
5026 sbuf_printf(sb, "Status Data PC%s",
5027 cfg & F_UPDBGLACAPTPCONLY ? "" :
5028 " LS0Stat LS0Addr LS0Data");
5029
5030 KASSERT((sc->params.cim_la_size & 7) == 0,
5031 ("%s: p will walk off the end of buf", __func__));
5032
5033 for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
5034 if (cfg & F_UPDBGLACAPTPCONLY) {
5035 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff,
5036 p[6], p[7]);
5037 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x",
5038 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8,
5039 p[4] & 0xff, p[5] >> 8);
5040 sbuf_printf(sb, "\n %02x %x%07x %x%07x",
5041 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5042 p[1] & 0xf, p[2] >> 4);
5043 } else {
5044 sbuf_printf(sb,
5045 "\n %02x %x%07x %x%07x %08x %08x "
5046 "%08x%08x%08x%08x",
5047 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4,
5048 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5],
5049 p[6], p[7]);
5050 }
5051 }
5052
5053 rc = sbuf_finish(sb);
5054 sbuf_delete(sb);
5055done:
5056 free(buf, M_CXGBE);
5057 return (rc);
5058}
5059
/*
 * Dump the CIM MA (memory arbiter) logic analyzer.  The hardware
 * returns two tables of CIM_MALA_SIZE 5-word entries back to back;
 * the first is printed raw, the second decoded field by field.
 */
static int
sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	u_int i;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	/* Room for both tables: 2 * CIM_MALA_SIZE entries of 5 words. */
	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
	p = buf;

	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
		    p[1], p[0]);
	}

	/* p now points at the second table; decode each entry's fields. */
	sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD");
	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
		sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u",
		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
		    (p[1] >> 2) | ((p[2] & 3) << 30),
		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
		    p[0] & 1);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
5103
5104static int
5105sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
5106{
5107 struct adapter *sc = arg1;
5108 u_int i;
5109 struct sbuf *sb;
5110 uint32_t *buf, *p;
5111 int rc;
5112
5113 rc = sysctl_wire_old_buffer(req, 0);
5114 if (rc != 0)
5115 return (rc);
5116
5117 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5118 if (sb == NULL)
5119 return (ENOMEM);
5120
5121 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
5122 M_ZERO | M_WAITOK);
5123
5124 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
5125 p = buf;
5126
5127 sbuf_printf(sb, "Cntl ID DataBE Addr Data");
5128 for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5129 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x",
5130 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
5131 p[4], p[3], p[2], p[1], p[0]);
5132 }
5133
5134 sbuf_printf(sb, "\n\nCntl ID Data");
5135 for (i = 0; i < CIM_MALA_SIZE; i++, p += 6) {
5136 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x",
5137 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
5138 }
5139
5140 rc = sbuf_finish(sb);
5141 sbuf_delete(sb);
5142 free(buf, M_CXGBE);
5143 return (rc);
5144}
5145
/*
 * Dump the CIM queue configuration: base, size, threshold and pointer
 * state for every ibq and obq.  T4 and T5 expose the queue pointers at
 * different register addresses and have different obq counts, so the
 * addresses are chosen up front.
 */
static int
sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
	uint16_t thres[CIM_NUM_IBQ];
	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;

	if (is_t4(sc)) {
		cim_num_obq = CIM_NUM_OBQ;
		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_REALADDR;
	} else {
		cim_num_obq = CIM_NUM_OBQ_T5;
		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
	}
	nq = CIM_NUM_IBQ + cim_num_obq;

	/* 4 status words per queue, plus 2 write pointers per obq. */
	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
	if (rc == 0)
		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
	if (rc != 0)
		return (rc);

	t4_read_cimq_cfg(sc, base, size, thres);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");

	/* First the ibq's (which have thresholds)... */
	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);
	/* ...then the obq's, which report a separate write pointer. */
	for ( ; i < nq; i++, p += 4, wr += 2)
		sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i],
		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
		    G_QUEREMFLITS(p[2]) * 16);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5204
5205static int
5206sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
5207{
5208 struct adapter *sc = arg1;
5209 struct sbuf *sb;
5210 int rc;
5211 struct tp_cpl_stats stats;
5212
5213 rc = sysctl_wire_old_buffer(req, 0);
5214 if (rc != 0)
5215 return (rc);
5216
5217 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5218 if (sb == NULL)
5219 return (ENOMEM);
5220
5221 t4_tp_get_cpl_stats(sc, &stats);
5222
5223 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
5224 "channel 3\n");
5225 sbuf_printf(sb, "CPL requests: %10u %10u %10u %10u\n",
5226 stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
5227 sbuf_printf(sb, "CPL responses: %10u %10u %10u %10u",
5228 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
5229
5230 rc = sbuf_finish(sb);
5231 sbuf_delete(sb);
5232
5233 return (rc);
5234}
5235
5236static int
5237sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
5238{
5239 struct adapter *sc = arg1;
5240 struct sbuf *sb;
5241 int rc;
5242 struct tp_usm_stats stats;
5243
5244 rc = sysctl_wire_old_buffer(req, 0);
5245 if (rc != 0)
5246 return(rc);
5247
5248 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5249 if (sb == NULL)
5250 return (ENOMEM);
5251
5252 t4_get_usm_stats(sc, &stats);
5253
5254 sbuf_printf(sb, "Frames: %u\n", stats.frames);
5255 sbuf_printf(sb, "Octets: %ju\n", stats.octets);
5256 sbuf_printf(sb, "Drops: %u", stats.drops);
5257
5258 rc = sbuf_finish(sb);
5259 sbuf_delete(sb);
5260
5261 return (rc);
5262}
5263
/* Printable names for the firmware devlog severity levels, indexed by
 * FW_DEVLOG_LEVEL_*.  Used by sysctl_devlog. */
const char *devlog_level_strings[] = {
	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
	[FW_DEVLOG_LEVEL_NOTICE]	= "NOTICE",
	[FW_DEVLOG_LEVEL_INFO]		= "INFO",
	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
};

/* Printable names for the firmware devlog facilities, indexed by
 * FW_DEVLOG_FACILITY_*.  Used by sysctl_devlog. */
const char *devlog_facility_strings[] = {
	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
	[FW_DEVLOG_FACILITY_RES]	= "RES",
	[FW_DEVLOG_FACILITY_HW]		= "HW",
	[FW_DEVLOG_FACILITY_FLR]	= "FLR",
	[FW_DEVLOG_FACILITY_DMAQ]	= "DMAQ",
	[FW_DEVLOG_FACILITY_PHY]	= "PHY",
	[FW_DEVLOG_FACILITY_MAC]	= "MAC",
	[FW_DEVLOG_FACILITY_PORT]	= "PORT",
	[FW_DEVLOG_FACILITY_VI]		= "VI",
	[FW_DEVLOG_FACILITY_FILTER]	= "FILTER",
	[FW_DEVLOG_FACILITY_ACL]	= "ACL",
	[FW_DEVLOG_FACILITY_TM]		= "TM",
	[FW_DEVLOG_FACILITY_QFC]	= "QFC",
	[FW_DEVLOG_FACILITY_DCB]	= "DCB",
	[FW_DEVLOG_FACILITY_ETH]	= "ETH",
	[FW_DEVLOG_FACILITY_OFLD]	= "OFLD",
	[FW_DEVLOG_FACILITY_RI]		= "RI",
	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
};
5298
/*
 * Dump the firmware device log.  The log lives in adapter memory as a
 * ring of fw_devlog_e entries; the entry with the lowest timestamp is
 * the oldest, so the dump starts there and wraps around.  Entries are
 * byte-swapped in place after the bulk read.
 */
static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct devlog_params *dparams = &sc->params.devlog;
	struct fw_devlog_e *buf, *e;
	int i, j, rc, nentries, first = 0;
	struct sbuf *sb;
	uint64_t ftstamp = UINT64_MAX;

	if (dparams->start == 0) {
		/*
		 * Firmware didn't report devlog parameters; fall back to
		 * historical defaults.  NOTE(review): these look like T4
		 * values — confirm they are still right for T5 firmware.
		 */
		dparams->memtype = 0;
		dparams->start = 0x84000;
		dparams->size = 32768;
	}

	nentries = dparams->size / sizeof(struct fw_devlog_e);

	/* M_NOWAIT: fail the sysctl rather than sleep under memory pressure */
	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
	if (buf == NULL)
		return (ENOMEM);

	rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
	    (void *)buf);
	if (rc != 0)
		goto done;

	/* Byte-swap every entry and locate the oldest one. */
	for (i = 0; i < nentries; i++) {
		e = &buf[i];

		if (e->timestamp == 0)
			break;	/* end */

		e->timestamp = be64toh(e->timestamp);
		e->seqno = be32toh(e->seqno);
		for (j = 0; j < 8; j++)
			e->params[j] = be32toh(e->params[j]);

		if (e->timestamp < ftstamp) {
			ftstamp = e->timestamp;
			first = i;
		}
	}

	if (buf[first].timestamp == 0)
		goto done;	/* nothing in the log */

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		goto done;

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL) {
		rc = ENOMEM;
		goto done;
	}
	sbuf_printf(sb, "%10s %15s %8s %8s %s\n",
	    "Seq#", "Tstamp", "Level", "Facility", "Message");

	/* Walk the ring from the oldest entry, wrapping at nentries. */
	i = first;
	do {
		e = &buf[i];
		if (e->timestamp == 0)
			break;	/* end */

		sbuf_printf(sb, "%10d %15ju %8s %8s ",
		    e->seqno, e->timestamp,
		    (e->level < nitems(devlog_level_strings) ?
			devlog_level_strings[e->level] : "UNKNOWN"),
		    (e->facility < nitems(devlog_facility_strings) ?
			devlog_facility_strings[e->facility] : "UNKNOWN"));
		/* e->fmt is a firmware-supplied format string; all 8
		 * params are always passed. */
		sbuf_printf(sb, e->fmt, e->params[0], e->params[1],
		    e->params[2], e->params[3], e->params[4],
		    e->params[5], e->params[6], e->params[7]);

		if (++i == nentries)
			i = 0;
	} while (i != first);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
done:
	free(buf, M_CXGBE);
	return (rc);
}
5384
5385static int
5386sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
5387{
5388 struct adapter *sc = arg1;
5389 struct sbuf *sb;
5390 int rc;
5391 struct tp_fcoe_stats stats[4];
5392
5393 rc = sysctl_wire_old_buffer(req, 0);
5394 if (rc != 0)
5395 return (rc);
5396
5397 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5398 if (sb == NULL)
5399 return (ENOMEM);
5400
5401 t4_get_fcoe_stats(sc, 0, &stats[0]);
5402 t4_get_fcoe_stats(sc, 1, &stats[1]);
5403 t4_get_fcoe_stats(sc, 2, &stats[2]);
5404 t4_get_fcoe_stats(sc, 3, &stats[3]);
5405
5406 sbuf_printf(sb, " channel 0 channel 1 "
5407 "channel 2 channel 3\n");
5408 sbuf_printf(sb, "octetsDDP: %16ju %16ju %16ju %16ju\n",
5409 stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
5410 stats[3].octetsDDP);
5411 sbuf_printf(sb, "framesDDP: %16u %16u %16u %16u\n", stats[0].framesDDP,
5412 stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
5413 sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
5414 stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
5415 stats[3].framesDrop);
5416
5417 rc = sbuf_finish(sb);
5418 sbuf_delete(sb);
5419
5420 return (rc);
5421}
5422
/*
 * Sysctl handler: display the hardware tx scheduler configuration.  For each
 * of the NTX_SCHED schedulers it shows whether the scheduler runs in "flow"
 * or "class" timer mode, its channel binding, its rate (Kbps), the class and
 * flow inter-packet gaps, and the pace table entry; a value of 0 from the
 * hardware is reported as "disabled".
 */
static int
sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	unsigned int map, kbps, ipg, mode;
	unsigned int pace_tab[NTX_SCHED];

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
	mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
	t4_read_pace_tbl(sc, pace_tab);

	sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
	    "Class IPG (0.1 ns) Flow IPG (us)");

	/* map holds 2 channel bits per scheduler, consumed low to high. */
	for (i = 0; i < NTX_SCHED; ++i, map >>= 2) {
		t4_get_tx_sched(sc, i, &kbps, &ipg);
		sbuf_printf(sb, "\n %u %-5s %u ", i,
		    (mode & (1 << i)) ? "flow" : "class", map & 3);
		if (kbps)
			sbuf_printf(sb, "%9u ", kbps);
		else
			sbuf_printf(sb, " disabled ");

		if (ipg)
			sbuf_printf(sb, "%13u ", ipg);
		else
			sbuf_printf(sb, " disabled ");

		if (pace_tab[i])
			sbuf_printf(sb, "%10u", pace_tab[i]);
		else
			sbuf_printf(sb, " disabled");
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5472
/*
 * Sysctl handler: display the MPS loopback port statistics for all four
 * loopback channels, two channels per column pair.
 *
 * NOTE: the inner loop walks p0/p1 from &s[n].octets across successive
 * counters; this assumes struct lb_port_stats lays out its counters as
 * consecutive uint64_t fields in exactly the order of stat_name[].
 */
static int
sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, j;
	uint64_t *p0, *p1;
	struct lb_port_stats s[2];
	static const char *stat_name[] = {
		"OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:",
		"UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:",
		"Frames128To255:", "Frames256To511:", "Frames512To1023:",
		"Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:",
		"BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:",
		"BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:",
		"BG2FramesTrunc:", "BG3FramesTrunc:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	memset(s, 0, sizeof(s));

	/* Two loopback channels per iteration: (0,1) then (2,3). */
	for (i = 0; i < 4; i += 2) {
		t4_get_lb_stats(sc, i, &s[0]);
		t4_get_lb_stats(sc, i + 1, &s[1]);

		p0 = &s[0].octets;
		p1 = &s[1].octets;
		sbuf_printf(sb, "%s Loopback %u"
		    " Loopback %u", i == 0 ? "" : "\n", i, i + 1);

		for (j = 0; j < nitems(stat_name); j++)
			sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j],
			    *p0++, *p1++);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5520
5521static int
5522sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
5523{
5524 int rc = 0;
5525 struct port_info *pi = arg1;
5526 struct sbuf *sb;
5527 static const char *linkdnreasons[] = {
5528 "non-specific", "remote fault", "autoneg failed", "reserved3",
5529 "PHY overheated", "unknown", "rx los", "reserved7"
5530 };
5531
5532 rc = sysctl_wire_old_buffer(req, 0);
5533 if (rc != 0)
5534 return(rc);
5535 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
5536 if (sb == NULL)
5537 return (ENOMEM);
5538
5539 if (pi->linkdnrc < 0)
5540 sbuf_printf(sb, "n/a");
5541 else if (pi->linkdnrc < nitems(linkdnreasons))
5542 sbuf_printf(sb, "%s", linkdnreasons[pi->linkdnrc]);
5543 else
5544 sbuf_printf(sb, "%d", pi->linkdnrc);
5545
5546 rc = sbuf_finish(sb);
5547 sbuf_delete(sb);
5548
5549 return (rc);
5550}
5551
/*
 * One contiguous region of adapter memory.  'limit' is the inclusive upper
 * bound, or 0 when it is to be derived from the next region's base (see
 * sysctl_meminfo).  'idx' indexes a name table; an index past the end of
 * that table marks the entry as hidden.
 */
struct mem_desc {
	unsigned int base;
	unsigned int limit;
	unsigned int idx;
};
5557
5558static int
5559mem_desc_cmp(const void *a, const void *b)
5560{
5561 return ((const struct mem_desc *)a)->base -
5562 ((const struct mem_desc *)b)->base;
5563}
5564
/*
 * Append one "name base-limit [size]" line for a memory region.  A region
 * whose size works out to 0 (to == from - 1) is suppressed entirely.
 */
static void
mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
    unsigned int to)
{
	unsigned int size = to - from + 1;

	if (size == 0)
		return;

	/* XXX: need humanize_number(3) in libkern for a more readable 'size' */
	sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
}
5578
5579static int
5580sysctl_meminfo(SYSCTL_HANDLER_ARGS)
5581{
5582 struct adapter *sc = arg1;
5583 struct sbuf *sb;
5584 int rc, i, n;
5585 uint32_t lo, hi, used, alloc;
5586 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
5587 static const char *region[] = {
5588 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
5589 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
5590 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
5591 "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
5592 "RQUDP region:", "PBL region:", "TXPBL region:",
5593 "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
5594 "On-chip queues:"
5595 };
5596 struct mem_desc avail[4];
5597 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
5598 struct mem_desc *md = mem;
5599
5600 rc = sysctl_wire_old_buffer(req, 0);
5601 if (rc != 0)
5602 return (rc);
5603
5604 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
5605 if (sb == NULL)
5606 return (ENOMEM);
5607
5608 for (i = 0; i < nitems(mem); i++) {
5609 mem[i].limit = 0;
5610 mem[i].idx = i;
5611 }
5612
5613 /* Find and sort the populated memory ranges */
5614 i = 0;
5615 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
5616 if (lo & F_EDRAM0_ENABLE) {
5617 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
5618 avail[i].base = G_EDRAM0_BASE(hi) << 20;
5619 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
5620 avail[i].idx = 0;
5621 i++;
5622 }
5623 if (lo & F_EDRAM1_ENABLE) {
5624 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
5625 avail[i].base = G_EDRAM1_BASE(hi) << 20;
5626 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
5627 avail[i].idx = 1;
5628 i++;
5629 }
5630 if (lo & F_EXT_MEM_ENABLE) {
5631 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
5632 avail[i].base = G_EXT_MEM_BASE(hi) << 20;
5633 avail[i].limit = avail[i].base +
5634 (G_EXT_MEM_SIZE(hi) << 20);
5635 avail[i].idx = is_t4(sc) ? 2 : 3; /* Call it MC for T4 */
5636 i++;
5637 }
5638 if (!is_t4(sc) && lo & F_EXT_MEM1_ENABLE) {
5639 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
5640 avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
5641 avail[i].limit = avail[i].base +
5642 (G_EXT_MEM1_SIZE(hi) << 20);
5643 avail[i].idx = 4;
5644 i++;
5645 }
5646 if (!i) /* no memory available */
5647 return 0;
5648 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
5649
5650 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
5651 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
5652 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
5653 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
5654 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE);
5655 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE);
5656 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE);
5657 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE);
5658 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE);
5659
5660 /* the next few have explicit upper bounds */
5661 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE);
5662 md->limit = md->base - 1 +
5663 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) *
5664 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE));
5665 md++;
5666
5667 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE);
5668 md->limit = md->base - 1 +
5669 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) *
5670 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE));
5671 md++;
5672
5673 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
5674 hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
5675 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
5676 md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
5677 } else {
5678 md->base = 0;
5679 md->idx = nitems(region); /* hide it */
5680 }
5681 md++;
5682
5683#define ulp_region(reg) \
5684 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
5685 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
5686
5687 ulp_region(RX_ISCSI);
5688 ulp_region(RX_TDDP);
5689 ulp_region(TX_TPT);
5690 ulp_region(RX_STAG);
5691 ulp_region(RX_RQ);
5692 ulp_region(RX_RQUDP);
5693 ulp_region(RX_PBL);
5694 ulp_region(TX_PBL);
5695#undef ulp_region
5696
5697 md->base = 0;
5698 md->idx = nitems(region);
5699 if (!is_t4(sc) && t4_read_reg(sc, A_SGE_CONTROL2) & F_VFIFO_ENABLE) {
5700 md->base = G_BASEADDR(t4_read_reg(sc, A_SGE_DBVFIFO_BADDR));
5701 md->limit = md->base + (G_DBVFIFO_SIZE((t4_read_reg(sc,
5702 A_SGE_DBVFIFO_SIZE))) << 2) - 1;
5703 }
5704 md++;
5705
5706 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
5707 md->limit = md->base + sc->tids.ntids - 1;
5708 md++;
5709 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
5710 md->limit = md->base + sc->tids.ntids - 1;
5711 md++;
5712
5713 md->base = sc->vres.ocq.start;
5714 if (sc->vres.ocq.size)
5715 md->limit = md->base + sc->vres.ocq.size - 1;
5716 else
5717 md->idx = nitems(region); /* hide it */
5718 md++;
5719
5720 /* add any address-space holes, there can be up to 3 */
5721 for (n = 0; n < i - 1; n++)
5722 if (avail[n].limit < avail[n + 1].base)
5723 (md++)->base = avail[n].limit;
5724 if (avail[n].limit)
5725 (md++)->base = avail[n].limit;
5726
5727 n = md - mem;
5728 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp);
5729
5730 for (lo = 0; lo < i; lo++)
5731 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base,
5732 avail[lo].limit - 1);
5733
5734 sbuf_printf(sb, "\n");
5735 for (i = 0; i < n; i++) {
5736 if (mem[i].idx >= nitems(region))
5737 continue; /* skip holes */
5738 if (!mem[i].limit)
5739 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
5740 mem_region_show(sb, region[mem[i].idx], mem[i].base,
5741 mem[i].limit);
5742 }
5743
5744 sbuf_printf(sb, "\n");
5745 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
5746 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
5747 mem_region_show(sb, "uP RAM:", lo, hi);
5748
5749 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
5750 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
5751 mem_region_show(sb, "uP Extmem2:", lo, hi);
5752
5753 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
5754 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n",
5755 G_PMRXMAXPAGE(lo),
5756 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
5757 (lo & F_PMRXNUMCHN) ? 2 : 1);
5758
5759 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
5760 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
5761 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n",
5762 G_PMTXMAXPAGE(lo),
5763 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
5764 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
5765 sbuf_printf(sb, "%u p-structs\n",
5766 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
5767
5768 for (i = 0; i < 4; i++) {
5769 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
5770 if (is_t4(sc)) {
5771 used = G_USED(lo);
5772 alloc = G_ALLOC(lo);
5773 } else {
5774 used = G_T5_USED(lo);
5775 alloc = G_T5_ALLOC(lo);
5776 }
5777 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
5778 i, used, alloc);
5779 }
5780 for (i = 0; i < 4; i++) {
5781 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
5782 if (is_t4(sc)) {
5783 used = G_USED(lo);
5784 alloc = G_ALLOC(lo);
5785 } else {
5786 used = G_T5_USED(lo);
5787 alloc = G_T5_ALLOC(lo);
5788 }
5789 sbuf_printf(sb,
5790 "\nLoopback %d using %u pages out of %u allocated",
5791 i, used, alloc);
5792 }
5793
5794 rc = sbuf_finish(sb);
5795 sbuf_delete(sb);
5796
5797 return (rc);
5798}
5799
/*
 * Convert a TCAM (x, y) pair into an Ethernet address and a mask.  The mask
 * is the union of the x and y bits.  The address bytes are taken from the
 * low 48 bits of y: y is converted to big-endian so its 6 low-order bytes
 * sit at offset 2 of the 8-byte buffer, then copied out in network order.
 */
static inline void
tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
{
	*mask = x | y;
	y = htobe64(y);
	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
}
5807
/*
 * Sysctl handler: dump the MPS TCAM (Ethernet address filters).  For each
 * populated entry it shows the address/mask from the TCAM X/Y registers and
 * the lookup/priority data from classifier SRAM.  Entries marked for
 * replication additionally require an FW_LDST_CMD mailbox exchange with the
 * firmware to fetch the 128-bit replication vector, which is why the loop
 * may need to grab the synchronized-op lock.
 */
static int
sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i, n;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	sbuf_printf(sb,
	    "Idx Ethernet address Mask Vld Ports PF"
	    " VF Replication P0 P1 P2 P3 ML");
	/* T4 and T5 have different TCAM depths. */
	n = is_t4(sc) ? NUM_MPS_CLS_SRAM_L_INSTANCES :
	    NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
	for (i = 0; i < n; i++) {
		uint64_t tcamx, tcamy, mask;
		uint32_t cls_lo, cls_hi;
		uint8_t addr[ETHER_ADDR_LEN];

		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));

		/* x & y sharing a set bit means the entry is unused. */
		if (tcamx & tcamy)
			continue;

		tcamxy2valmask(tcamx, tcamy, addr, &mask);
		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
		    " %c %#x%4u%4d", i, addr[0], addr[1], addr[2],
		    addr[3], addr[4], addr[5], (uintmax_t)mask,
		    (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
		    G_PORTMAP(cls_hi), G_PF(cls_lo),
		    (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);

		if (cls_lo & F_REPLICATE) {
			struct fw_ldst_cmd ldst_cmd;

			/* Ask the firmware for the replication vector. */
			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
			ldst_cmd.op_to_addrspace =
			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
			    F_FW_CMD_REQUEST | F_FW_CMD_READ |
			    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
			ldst_cmd.u.mps.fid_ctl =
			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
			    V_FW_LDST_CMD_CTL(i));

			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4mps");
			if (rc)
				break;
			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
			    sizeof(ldst_cmd), &ldst_cmd);
			end_synchronized_op(sc, 0);

			if (rc != 0) {
				/* Report the error inline and keep going. */
				sbuf_printf(sb,
				    " ------------ error %3u ------------", rc);
				rc = 0;
			} else {
				sbuf_printf(sb, " %08x %08x %08x %08x",
				    be32toh(ldst_cmd.u.mps.rplc127_96),
				    be32toh(ldst_cmd.u.mps.rplc95_64),
				    be32toh(ldst_cmd.u.mps.rplc63_32),
				    be32toh(ldst_cmd.u.mps.rplc31_0));
			}
		} else
			sbuf_printf(sb, "%36s", "");

		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
	}

	/* rc != 0 here means begin_synchronized_op failed mid-dump. */
	if (rc)
		(void) sbuf_finish(sb);
	else
		rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5897
5898static int
5899sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
5900{
5901 struct adapter *sc = arg1;
5902 struct sbuf *sb;
5903 int rc;
5904 uint16_t mtus[NMTUS];
5905
5906 rc = sysctl_wire_old_buffer(req, 0);
5907 if (rc != 0)
5908 return (rc);
5909
5910 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5911 if (sb == NULL)
5912 return (ENOMEM);
5913
5914 t4_read_mtu_tbl(sc, mtus, NULL);
5915
5916 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
5917 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6],
5918 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13],
5919 mtus[14], mtus[15]);
5920
5921 rc = sbuf_finish(sb);
5922 sbuf_delete(sb);
5923
5924 return (rc);
5925}
5926
/*
 * Sysctl handler: display the PM (payload memory) tx and rx statistics:
 * per-category operation counts and the cycles spent in each, side by side.
 */
static int
sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, i;
	uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
	uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
	static const char *pm_stats[] = {
		"Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
	};

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);

	sbuf_printf(sb, " Tx count Tx cycles "
	    "Rx count Rx cycles");
	for (i = 0; i < PM_NSTATS; i++)
		sbuf_printf(sb, "\n%-13s %10u %20ju %10u %20ju",
		    pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
5961
5962static int
5963sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
5964{
5965 struct adapter *sc = arg1;
5966 struct sbuf *sb;
5967 int rc;
5968 struct tp_rdma_stats stats;
5969
5970 rc = sysctl_wire_old_buffer(req, 0);
5971 if (rc != 0)
5972 return (rc);
5973
5974 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
5975 if (sb == NULL)
5976 return (ENOMEM);
5977
5978 t4_tp_get_rdma_stats(sc, &stats);
5979 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
5980 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
5981
5982 rc = sbuf_finish(sb);
5983 sbuf_delete(sb);
5984
5985 return (rc);
5986}
5987
5988static int
5989sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
5990{
5991 struct adapter *sc = arg1;
5992 struct sbuf *sb;
5993 int rc;
5994 struct tp_tcp_stats v4, v6;
5995
5996 rc = sysctl_wire_old_buffer(req, 0);
5997 if (rc != 0)
5998 return (rc);
5999
6000 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6001 if (sb == NULL)
6002 return (ENOMEM);
6003
6004 t4_tp_get_tcp_stats(sc, &v4, &v6);
6005 sbuf_printf(sb,
6006 " IP IPv6\n");
6007 sbuf_printf(sb, "OutRsts: %20u %20u\n",
6008 v4.tcpOutRsts, v6.tcpOutRsts);
6009 sbuf_printf(sb, "InSegs: %20ju %20ju\n",
6010 v4.tcpInSegs, v6.tcpInSegs);
6011 sbuf_printf(sb, "OutSegs: %20ju %20ju\n",
6012 v4.tcpOutSegs, v6.tcpOutSegs);
6013 sbuf_printf(sb, "RetransSegs: %20ju %20ju",
6014 v4.tcpRetransSegs, v6.tcpRetransSegs);
6015
6016 rc = sbuf_finish(sb);
6017 sbuf_delete(sb);
6018
6019 return (rc);
6020}
6021
/*
 * Sysctl handler: summarize TID (connection/filter identifier) usage: the
 * ATID, TID, STID and FTID ranges and how many of each are in use, plus the
 * hardware's own active IPv4/IPv6 TID counters.  When the LE hash filter is
 * enabled the TID space is split around the hash region, so the range is
 * printed in two pieces.
 */
static int
sysctl_tids(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tid_info *t = &sc->tids;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	if (t->natids) {
		sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1,
		    t->atids_in_use);
	}

	if (t->ntids) {
		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
			/* TID space is split around the LE hash region. */
			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;

			if (b) {
				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			} else {
				sbuf_printf(sb, "TID range: %u-%u",
				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
				    t->ntids - 1);
			}
		} else
			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
		sbuf_printf(sb, ", in use: %u\n",
		    atomic_load_acq_int(&t->tids_in_use));
	}

	if (t->nstids) {
		sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base,
		    t->stid_base + t->nstids - 1, t->stids_in_use);
	}

	if (t->nftids) {
		sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base,
		    t->ftid_base + t->nftids - 1);
	}

	/* Counts maintained by the hardware itself. */
	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6081
/*
 * Sysctl handler: display the TP error statistics, one row per error class
 * with a column per channel, followed by the two global offload counters.
 */
static int
sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc;
	struct tp_err_stats stats;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
	if (sb == NULL)
		return (ENOMEM);

	t4_tp_get_err_stats(sc, &stats);

	sbuf_printf(sb, " channel 0 channel 1 channel 2 "
	    "channel 3\n");
	sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n",
	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
	    stats.macInErrs[3]);
	sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n",
	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
	    stats.hdrInErrs[3]);
	sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n",
	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
	    stats.tcpInErrs[3]);
	sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n",
	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
	    stats.tcp6InErrs[3]);
	sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n",
	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
	    stats.tnlCongDrops[3]);
	sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n",
	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
	    stats.tnlTxDrops[3]);
	sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n",
	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
	sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n",
	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
	/* These two are adapter-wide, not per channel. */
	sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u",
	    stats.ofldNoNeigh, stats.ofldCongDefer);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6134
/*
 * Describes one bit field within a 64-bit logic-analyzer word: bits
 * [start, start + width) are extracted and printed as "name: value" by
 * field_desc_show().
 */
struct field_desc {
	const char *name;
	u_int start;
	u_int width;
};
6140
6141static void
6142field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
6143{
6144 char buf[32];
6145 int line_size = 0;
6146
6147 while (f->name) {
6148 uint64_t mask = (1ULL << f->width) - 1;
6149 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
6150 ((uintmax_t)v >> f->start) & mask);
6151
6152 if (line_size + len >= 79) {
6153 line_size = 8;
6154 sbuf_printf(sb, "\n ");
6155 }
6156 sbuf_printf(sb, "%s ", buf);
6157 line_size += len + 1;
6158 f++;
6159 }
6160 sbuf_printf(sb, "\n");
6161}
6162
6163static struct field_desc tp_la0[] = {
6164 { "RcfOpCodeOut", 60, 4 },
6165 { "State", 56, 4 },
6166 { "WcfState", 52, 4 },
6167 { "RcfOpcSrcOut", 50, 2 },
6168 { "CRxError", 49, 1 },
6169 { "ERxError", 48, 1 },
6170 { "SanityFailed", 47, 1 },
6171 { "SpuriousMsg", 46, 1 },
6172 { "FlushInputMsg", 45, 1 },
6173 { "FlushInputCpl", 44, 1 },
6174 { "RssUpBit", 43, 1 },
6175 { "RssFilterHit", 42, 1 },
6176 { "Tid", 32, 10 },
6177 { "InitTcb", 31, 1 },
6178 { "LineNumber", 24, 7 },
6179 { "Emsg", 23, 1 },
6180 { "EdataOut", 22, 1 },
6181 { "Cmsg", 21, 1 },
6182 { "CdataOut", 20, 1 },
6183 { "EreadPdu", 19, 1 },
6184 { "CreadPdu", 18, 1 },
6185 { "TunnelPkt", 17, 1 },
6186 { "RcfPeerFin", 16, 1 },
6187 { "RcfReasonOut", 12, 4 },
6188 { "TxCchannel", 10, 2 },
6189 { "RcfTxChannel", 8, 2 },
6190 { "RxEchannel", 6, 2 },
6191 { "RcfRxChannel", 5, 1 },
6192 { "RcfDataOutSrdy", 4, 1 },
6193 { "RxDvld", 3, 1 },
6194 { "RxOoDvld", 2, 1 },
6195 { "RxCongestion", 1, 1 },
6196 { "TxCongestion", 0, 1 },
6197 { NULL }
6198};
6199
6200static struct field_desc tp_la1[] = {
6201 { "CplCmdIn", 56, 8 },
6202 { "CplCmdOut", 48, 8 },
6203 { "ESynOut", 47, 1 },
6204 { "EAckOut", 46, 1 },
6205 { "EFinOut", 45, 1 },
6206 { "ERstOut", 44, 1 },
6207 { "SynIn", 43, 1 },
6208 { "AckIn", 42, 1 },
6209 { "FinIn", 41, 1 },
6210 { "RstIn", 40, 1 },
6211 { "DataIn", 39, 1 },
6212 { "DataInVld", 38, 1 },
6213 { "PadIn", 37, 1 },
6214 { "RxBufEmpty", 36, 1 },
6215 { "RxDdp", 35, 1 },
6216 { "RxFbCongestion", 34, 1 },
6217 { "TxFbCongestion", 33, 1 },
6218 { "TxPktSumSrdy", 32, 1 },
6219 { "RcfUlpType", 28, 4 },
6220 { "Eread", 27, 1 },
6221 { "Ebypass", 26, 1 },
6222 { "Esave", 25, 1 },
6223 { "Static0", 24, 1 },
6224 { "Cread", 23, 1 },
6225 { "Cbypass", 22, 1 },
6226 { "Csave", 21, 1 },
6227 { "CPktOut", 20, 1 },
6228 { "RxPagePoolFull", 18, 2 },
6229 { "RxLpbkPkt", 17, 1 },
6230 { "TxLpbkPkt", 16, 1 },
6231 { "RxVfValid", 15, 1 },
6232 { "SynLearned", 14, 1 },
6233 { "SetDelEntry", 13, 1 },
6234 { "SetInvEntry", 12, 1 },
6235 { "CpcmdDvld", 11, 1 },
6236 { "CpcmdSave", 10, 1 },
6237 { "RxPstructsFull", 8, 2 },
6238 { "EpcmdDvld", 7, 1 },
6239 { "EpcmdFlush", 6, 1 },
6240 { "EpcmdTrimPrefix", 5, 1 },
6241 { "EpcmdTrimPostfix", 4, 1 },
6242 { "ERssIp4Pkt", 3, 1 },
6243 { "ERssIp6Pkt", 2, 1 },
6244 { "ERssTcpUdpPkt", 1, 1 },
6245 { "ERssFceFipPkt", 0, 1 },
6246 { NULL }
6247};
6248
6249static struct field_desc tp_la2[] = {
6250 { "CplCmdIn", 56, 8 },
6251 { "MpsVfVld", 55, 1 },
6252 { "MpsPf", 52, 3 },
6253 { "MpsVf", 44, 8 },
6254 { "SynIn", 43, 1 },
6255 { "AckIn", 42, 1 },
6256 { "FinIn", 41, 1 },
6257 { "RstIn", 40, 1 },
6258 { "DataIn", 39, 1 },
6259 { "DataInVld", 38, 1 },
6260 { "PadIn", 37, 1 },
6261 { "RxBufEmpty", 36, 1 },
6262 { "RxDdp", 35, 1 },
6263 { "RxFbCongestion", 34, 1 },
6264 { "TxFbCongestion", 33, 1 },
6265 { "TxPktSumSrdy", 32, 1 },
6266 { "RcfUlpType", 28, 4 },
6267 { "Eread", 27, 1 },
6268 { "Ebypass", 26, 1 },
6269 { "Esave", 25, 1 },
6270 { "Static0", 24, 1 },
6271 { "Cread", 23, 1 },
6272 { "Cbypass", 22, 1 },
6273 { "Csave", 21, 1 },
6274 { "CPktOut", 20, 1 },
6275 { "RxPagePoolFull", 18, 2 },
6276 { "RxLpbkPkt", 17, 1 },
6277 { "TxLpbkPkt", 16, 1 },
6278 { "RxVfValid", 15, 1 },
6279 { "SynLearned", 14, 1 },
6280 { "SetDelEntry", 13, 1 },
6281 { "SetInvEntry", 12, 1 },
6282 { "CpcmdDvld", 11, 1 },
6283 { "CpcmdSave", 10, 1 },
6284 { "RxPstructsFull", 8, 2 },
6285 { "EpcmdDvld", 7, 1 },
6286 { "EpcmdFlush", 6, 1 },
6287 { "EpcmdTrimPrefix", 5, 1 },
6288 { "EpcmdTrimPostfix", 4, 1 },
6289 { "ERssIp4Pkt", 3, 1 },
6290 { "ERssIp6Pkt", 2, 1 },
6291 { "ERssTcpUdpPkt", 1, 1 },
6292 { "ERssFceFipPkt", 0, 1 },
6293 { NULL }
6294};
6295
/*
 * Show one logic-analyzer entry for DBGLAMODE values other than 2 and 3
 * (one 64-bit word per entry, decoded with the tp_la0 layout).  'idx' is
 * unused here; it exists to match the show-function signature used by
 * sysctl_tp_la().
 */
static void
tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
{

	field_desc_show(sb, *p, tp_la0);
}
6302
/*
 * Show one logic-analyzer entry for DBGLAMODE 2: two 64-bit words, both
 * decoded with the tp_la0 layout.  The second word of the very last entry
 * is skipped when it is all-ones (i.e. not a captured word).
 */
static void
tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
{

	if (idx)
		sbuf_printf(sb, "\n");
	field_desc_show(sb, p[0], tp_la0);
	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
		field_desc_show(sb, p[1], tp_la0);
}
6313
/*
 * Show one logic-analyzer entry for DBGLAMODE 3: like tp_la_show2, except
 * the layout of the second word depends on bit 17 (the tunnel-packet bit)
 * of the first word — tp_la2 when set, tp_la1 otherwise.
 */
static void
tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
{

	if (idx)
		sbuf_printf(sb, "\n");
	field_desc_show(sb, p[0], tp_la0);
	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
}
6324
/*
 * Sysctl handler: dump the TP logic analyzer.  The capture mode (DBGLAMODE)
 * determines whether entries are one or two 64-bit words and which show
 * function decodes them.
 */
static int
sysctl_tp_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	uint64_t *buf, *p;
	int rc;
	u_int i, inc;
	void (*show_func)(struct sbuf *, uint64_t *, int);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);

	t4_tp_read_la(sc, buf, NULL);
	p = buf;

	/* Pick the entry size and decoder based on the capture mode. */
	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
	case 2:
		inc = 2;
		show_func = tp_la_show2;
		break;
	case 3:
		inc = 2;
		show_func = tp_la_show3;
		break;
	default:
		inc = 1;
		show_func = tp_la_show;
	}

	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
		(*show_func)(sb, p, i);

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
6370
6371static int
6372sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
6373{
6374 struct adapter *sc = arg1;
6375 struct sbuf *sb;
6376 int rc;
6377 u64 nrate[NCHAN], orate[NCHAN];
6378
6379 rc = sysctl_wire_old_buffer(req, 0);
6380 if (rc != 0)
6381 return (rc);
6382
6383 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
6384 if (sb == NULL)
6385 return (ENOMEM);
6386
6387 t4_get_chan_txrate(sc, nrate, orate);
6388 sbuf_printf(sb, " channel 0 channel 1 channel 2 "
6389 "channel 3\n");
6390 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n",
6391 nrate[0], nrate[1], nrate[2], nrate[3]);
6392 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju",
6393 orate[0], orate[1], orate[2], orate[3]);
6394
6395 rc = sbuf_finish(sb);
6396 sbuf_delete(sb);
6397
6398 return (rc);
6399}
6400
/*
 * Sysctl handler: dump the ULP RX logic analyzer.  Each entry is eight
 * 32-bit words; they are printed in the word order the hardware defines
 * (pcmd from words 1:0, type from 2, message from 3, data from 7..4).
 */
static int
sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	uint32_t *buf, *p;
	int rc, i;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_ulprx_read_la(sc, buf);
	p = buf;

	sbuf_printf(sb, " Pcmd Type Message"
	    " Data");
	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
		sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x",
		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);
	free(buf, M_CXGBE);
	return (rc);
}
6435
/*
 * Sysctl handler: report SGE write-request statistics.  Output is produced
 * only when the SGE statistic source is 7; STATMODE selects whether the
 * second counter means "incomplete" (mode 0) or "data overflow" (mode 1).
 * Any other configuration yields an empty result.
 */
static int
sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct sbuf *sb;
	int rc, v;

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	v = t4_read_reg(sc, A_SGE_STAT_CFG);
	if (G_STATSOURCE_T5(v) == 7) {
		if (G_STATMODE(v) == 0) {
			sbuf_printf(sb, "total %d, incomplete %d",
			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
			    t4_read_reg(sc, A_SGE_STAT_MATCH));
		} else if (G_STATMODE(v) == 1) {
			sbuf_printf(sb, "total %d, data overflow %d",
			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
			    t4_read_reg(sc, A_SGE_STAT_MATCH));
		}
	}
	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}
6468#endif
6469
/*
 * Kick off transmission on a tx queue: hand t4_eth_tx() the held-over mbuf
 * (txq->m) if there is one, otherwise the next mbuf from the queue's
 * buf_ring.  The caller must hold the txq lock.
 */
static inline void
txq_start(struct ifnet *ifp, struct sge_txq *txq)
{
	struct buf_ring *br;
	struct mbuf *m;

	TXQ_LOCK_ASSERT_OWNED(txq);

	br = txq->br;
	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
	if (m)
		t4_eth_tx(ifp, txq, m);
}
6483
6484void
6485t4_tx_callout(void *arg)
6486{
6487 struct sge_eq *eq = arg;
6488 struct adapter *sc;
6489
6490 if (EQ_TRYLOCK(eq) == 0)
6491 goto reschedule;
6492
6493 if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
6494 EQ_UNLOCK(eq);
6495reschedule:
6496 if (__predict_true(!(eq->flags && EQ_DOOMED)))
6497 callout_schedule(&eq->tx_callout, 1);
6498 return;
6499 }
6500
6501 EQ_LOCK_ASSERT_OWNED(eq);
6502
6503 if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
6504
6505 if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
6506 struct sge_txq *txq = arg;
6507 struct port_info *pi = txq->ifp->if_softc;
6508
6509 sc = pi->adapter;
6510 } else {
6511 struct sge_wrq *wrq = arg;
6512
6513 sc = wrq->adapter;
6514 }
6515
6516 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
6517 }
6518
6519 EQ_UNLOCK(eq);
6520}
6521
/*
 * Taskqueue handler that resumes transmission on an egress queue: Ethernet
 * queues go through txq_start(), work-request queues through
 * t4_wrq_tx_locked().  'count' is the taskqueue pending count (unused).
 */
void
t4_tx_task(void *arg, int count)
{
	struct sge_eq *eq = arg;

	EQ_LOCK(eq);
	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
		struct sge_txq *txq = arg;
		txq_start(txq->ifp, txq);
	} else {
		struct sge_wrq *wrq = arg;
		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
	}
	EQ_UNLOCK(eq);
}
6537
6538static uint32_t
6539fconf_to_mode(uint32_t fconf)
6540{
6541 uint32_t mode;
6542
6543 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
6544 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
6545
6546 if (fconf & F_FRAGMENTATION)
6547 mode |= T4_FILTER_IP_FRAGMENT;
6548
6549 if (fconf & F_MPSHITTYPE)
6550 mode |= T4_FILTER_MPS_HIT_TYPE;
6551
6552 if (fconf & F_MACMATCH)
6553 mode |= T4_FILTER_MAC_IDX;
6554
6555 if (fconf & F_ETHERTYPE)
6556 mode |= T4_FILTER_ETH_TYPE;
6557
6558 if (fconf & F_PROTOCOL)
6559 mode |= T4_FILTER_IP_PROTO;
6560
6561 if (fconf & F_TOS)
6562 mode |= T4_FILTER_IP_TOS;
6563
6564 if (fconf & F_VLAN)
6565 mode |= T4_FILTER_VLAN;
6566
6567 if (fconf & F_VNIC_ID)
6568 mode |= T4_FILTER_VNIC;
6569
6570 if (fconf & F_PORT)
6571 mode |= T4_FILTER_PORT;
6572
6573 if (fconf & F_FCOE)
6574 mode |= T4_FILTER_FCoE;
6575
6576 return (mode);
6577}
6578
6579static uint32_t
6580mode_to_fconf(uint32_t mode)
6581{
6582 uint32_t fconf = 0;
6583
6584 if (mode & T4_FILTER_IP_FRAGMENT)
6585 fconf |= F_FRAGMENTATION;
6586
6587 if (mode & T4_FILTER_MPS_HIT_TYPE)
6588 fconf |= F_MPSHITTYPE;
6589
6590 if (mode & T4_FILTER_MAC_IDX)
6591 fconf |= F_MACMATCH;
6592
6593 if (mode & T4_FILTER_ETH_TYPE)
6594 fconf |= F_ETHERTYPE;
6595
6596 if (mode & T4_FILTER_IP_PROTO)
6597 fconf |= F_PROTOCOL;
6598
6599 if (mode & T4_FILTER_IP_TOS)
6600 fconf |= F_TOS;
6601
6602 if (mode & T4_FILTER_VLAN)
6603 fconf |= F_VLAN;
6604
6605 if (mode & T4_FILTER_VNIC)
6606 fconf |= F_VNIC_ID;
6607
6608 if (mode & T4_FILTER_PORT)
6609 fconf |= F_PORT;
6610
6611 if (mode & T4_FILTER_FCoE)
6612 fconf |= F_FCOE;
6613
6614 return (fconf);
6615}
6616
6617static uint32_t
6618fspec_to_fconf(struct t4_filter_specification *fs)
6619{
6620 uint32_t fconf = 0;
6621
6622 if (fs->val.frag || fs->mask.frag)
6623 fconf |= F_FRAGMENTATION;
6624
6625 if (fs->val.matchtype || fs->mask.matchtype)
6626 fconf |= F_MPSHITTYPE;
6627
6628 if (fs->val.macidx || fs->mask.macidx)
6629 fconf |= F_MACMATCH;
6630
6631 if (fs->val.ethtype || fs->mask.ethtype)
6632 fconf |= F_ETHERTYPE;
6633
6634 if (fs->val.proto || fs->mask.proto)
6635 fconf |= F_PROTOCOL;
6636
6637 if (fs->val.tos || fs->mask.tos)
6638 fconf |= F_TOS;
6639
6640 if (fs->val.vlan_vld || fs->mask.vlan_vld)
6641 fconf |= F_VLAN;
6642
6643 if (fs->val.vnic_vld || fs->mask.vnic_vld)
6644 fconf |= F_VNIC_ID;
6645
6646 if (fs->val.iport || fs->mask.iport)
6647 fconf |= F_PORT;
6648
6649 if (fs->val.fcoe || fs->mask.fcoe)
6650 fconf |= F_FCOE;
6651
6652 return (fconf);
6653}
6654
6655static int
6656get_filter_mode(struct adapter *sc, uint32_t *mode)
6657{
6658 int rc;
6659 uint32_t fconf;
6660
6661 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6662 "t4getfm");
6663 if (rc)
6664 return (rc);
6665
6666 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
6667 A_TP_VLAN_PRI_MAP);
6668
6669 if (sc->params.tp.vlan_pri_map != fconf) {
6670 log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
6671 device_get_nameunit(sc->dev), sc->params.tp.vlan_pri_map,
6672 fconf);
6673 sc->params.tp.vlan_pri_map = fconf;
6674 }
6675
6676 *mode = fconf_to_mode(sc->params.tp.vlan_pri_map);
6677
6678 end_synchronized_op(sc, LOCK_HELD);
6679 return (0);
6680}
6681
/*
 * Change the global filter mode.  Refused while any filter is installed
 * or (with TCP_OFFLOAD) while any port has offload enabled, because the
 * mode change would invalidate their match criteria.
 */
static int
set_filter_mode(struct adapter *sc, uint32_t mode)
{
	uint32_t fconf;
	int rc;

	fconf = mode_to_fconf(mode);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4setfm");
	if (rc)
		return (rc);

	/* Can't change the mode while filters are in use. */
	if (sc->tids.ftids_in_use > 0) {
		rc = EBUSY;
		goto done;
	}

#ifdef TCP_OFFLOAD
	/* Nor while TOE is active on any port. */
	if (sc->offload_map) {
		rc = EBUSY;
		goto done;
	}
#endif

#ifdef notyet
	rc = -t4_set_filter_mode(sc, fconf);
	if (rc == 0)
		sc->filter_mode = fconf;
#else
	/* Actually writing the new mode is not implemented yet. */
	rc = ENOTSUP;
#endif

done:
	end_synchronized_op(sc, LOCK_HELD);
	return (rc);
}
6719
/*
 * Read the hit counter for filter 'fid' directly out of its TCB via
 * memory window 0.
 */
static inline uint64_t
get_filter_hits(struct adapter *sc, uint32_t fid)
{
	uint32_t mw_base, off, tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
	uint64_t hits;

	/* Position window 0 over this filter's TCB. */
	memwin_info(sc, 0, &mw_base, NULL);
	off = position_memwin(sc, 0,
	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
	if (is_t4(sc)) {
		/* T4: 64-bit big-endian counter at byte offset 16. */
		hits = t4_read_reg64(sc, mw_base + off + 16);
		hits = be64toh(hits);
	} else {
		/*
		 * T5: 32-bit big-endian counter at byte offset 24.
		 * NOTE(review): offsets follow the chip's TCB layout —
		 * confirm against the T4/T5 TCB definitions.
		 */
		hits = t4_read_reg(sc, mw_base + off + 24);
		hits = be32toh(hits);
	}

	return (hits);
}
6739
6740static int
6741get_filter(struct adapter *sc, struct t4_filter *t)
6742{
6743 int i, rc, nfilters = sc->tids.nftids;
6744 struct filter_entry *f;
6745
6746 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
6747 "t4getf");
6748 if (rc)
6749 return (rc);
6750
6751 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
6752 t->idx >= nfilters) {
6753 t->idx = 0xffffffff;
6754 goto done;
6755 }
6756
6757 f = &sc->tids.ftid_tab[t->idx];
6758 for (i = t->idx; i < nfilters; i++, f++) {
6759 if (f->valid) {
6760 t->idx = i;
6761 t->l2tidx = f->l2t ? f->l2t->idx : 0;
6762 t->smtidx = f->smtidx;
6763 if (f->fs.hitcnts)
6764 t->hits = get_filter_hits(sc, t->idx);
6765 else
6766 t->hits = UINT64_MAX;
6767 t->fs = f->fs;
6768
6769 goto done;
6770 }
6771 }
6772
6773 t->idx = 0xffffffff;
6774done:
6775 end_synchronized_op(sc, LOCK_HELD);
6776 return (0);
6777}
6778
/*
 * Install the filter described by *t.  Validates the request against the
 * global filter mode and adapter limits, sends the filter work request to
 * the firmware, and then sleeps until the reply (t4_filter_rpl) marks the
 * entry valid or failed.
 *
 * Returns 0 on success; EINPROGRESS if interrupted while waiting for the
 * firmware reply (the operation may still complete asynchronously).
 */
static int
set_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters, nports;
	struct filter_entry *f;
	int i, rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;
	nports = sc->params.nports;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	if (t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	/* Validate against the global filter mode */
	if ((sc->params.tp.vlan_pri_map | fspec_to_fconf(&t->fs)) !=
	    sc->params.tp.vlan_pri_map) {
		rc = E2BIG;
		goto done;
	}

	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
		rc = EINVAL;
		goto done;
	}

	if (t->fs.val.iport >= nports) {
		rc = EINVAL;
		goto done;
	}

	/* Can't specify an iq if not steering to it */
	if (!t->fs.dirsteer && t->fs.iq) {
		rc = EINVAL;
		goto done;
	}

	/* IPv6 filter idx must be 4 aligned */
	if (t->fs.type == 1 &&
	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
		rc = EINVAL;
		goto done;
	}

	/* Allocate the filter table (and its lock) on first use. */
	if (sc->tids.ftid_tab == NULL) {
		KASSERT(sc->tids.ftids_in_use == 0,
		    ("%s: no memory allocated but filters_in_use > 0",
		    __func__));

		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
		if (sc->tids.ftid_tab == NULL) {
			rc = ENOMEM;
			goto done;
		}
		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
	}

	/*
	 * An IPv6 filter occupies 4 consecutive slots; all of them must be
	 * free and unlocked.  An IPv4 filter (type 0) needs just one slot.
	 */
	for (i = 0; i < 4; i++) {
		f = &sc->tids.ftid_tab[t->idx + i];

		if (f->pending || f->valid) {
			rc = EBUSY;
			goto done;
		}
		if (f->locked) {
			rc = EPERM;
			goto done;
		}

		if (t->fs.type == 0)
			break;
	}

	f = &sc->tids.ftid_tab[t->idx];
	f->fs = t->fs;

	rc = set_filter_wr(sc, t->idx);
done:
	end_synchronized_op(sc, 0);

	/*
	 * The work request was sent; wait (outside the synchronized op) for
	 * the firmware reply to clear f->pending and set f->valid.
	 */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? 0 : EIO;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4setfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}
	return (rc);
}
6893
/*
 * Delete the filter at t->idx.  Sends the delete work request and then
 * sleeps until the firmware reply (t4_filter_rpl) clears the entry.
 * Returns 0 on success (including when the slot was already invalid) and
 * EINPROGRESS if interrupted while waiting for the reply.
 */
static int
del_filter(struct adapter *sc, struct t4_filter *t)
{
	unsigned int nfilters;
	struct filter_entry *f;
	int rc;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
	if (rc)
		return (rc);

	nfilters = sc->tids.nftids;

	if (nfilters == 0) {
		rc = ENOTSUP;
		goto done;
	}

	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
	    t->idx >= nfilters) {
		rc = EINVAL;
		goto done;
	}

	if (!(sc->flags & FULL_INIT_DONE)) {
		rc = EAGAIN;
		goto done;
	}

	f = &sc->tids.ftid_tab[t->idx];

	/* Another operation on this filter is still in flight. */
	if (f->pending) {
		rc = EBUSY;
		goto done;
	}
	if (f->locked) {
		rc = EPERM;
		goto done;
	}

	if (f->valid) {
		t->fs = f->fs;	/* extra info for the caller */
		rc = del_filter_wr(sc, t->idx);
	}

done:
	end_synchronized_op(sc, 0);

	/* Wait for the firmware reply; success means the entry went away. */
	if (rc == 0) {
		mtx_lock(&sc->tids.ftid_lock);
		for (;;) {
			if (f->pending == 0) {
				rc = f->valid ? EIO : 0;
				break;
			}

			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
			    PCATCH, "t4delfw", 0)) {
				rc = EINPROGRESS;
				break;
			}
		}
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (rc);
}
6961
6962static void
6963clear_filter(struct filter_entry *f)
6964{
6965 if (f->l2t)
6966 t4_l2t_release(f->l2t);
6967
6968 bzero(f, sizeof (*f));
6969}
6970
/*
 * Build and transmit the FW_FILTER_WR work request that installs filter
 * 'fidx'.  Marks the entry pending; the firmware's reply (t4_filter_rpl)
 * completes the operation asynchronously.
 *
 * Returns 0 if the request was queued, EAGAIN/ENOMEM on resource
 * shortage (L2T entry or work request buffer).
 */
static int
set_filter_wr(struct adapter *sc, int fidx)
{
	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
	struct wrqe *wr;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (f->fs.newdmac || f->fs.newvlan) {
		/* This filter needs an L2T entry; allocate one. */
		f->l2t = t4_l2t_alloc_switching(sc->l2t);
		if (f->l2t == NULL)
			return (EAGAIN);
		if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport,
		    f->fs.dmac)) {
			t4_l2t_release(f->l2t);
			f->l2t = NULL;
			return (ENOMEM);
		}
	}

	/* Filter tids sit at ftid_base within the overall tid space. */
	ftid = sc->tids.ftid_base + fidx;

	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
	if (wr == NULL)
		return (ENOMEM);

	fwr = wrtod(wr);
	bzero(fwr, sizeof (*fwr));

	/* Fill in the work request exactly as the firmware expects it. */
	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
	fwr->tid_to_iq =
	    htobe32(V_FW_FILTER_WR_TID(ftid) |
		V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		V_FW_FILTER_WR_NOREPLY(0) |
		V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
	    htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
		    f->fs.newvlan == VLAN_REWRITE) |
		V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		V_FW_FILTER_WR_PRIO(f->fs.prio) |
		V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htobe16(f->fs.val.ethtype);
	fwr->ethtypem = htobe16(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
	fwr->smac_sel = 0;
	/* Replies come back on the firmware event queue. */
	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
	fwr->maci_to_matchtypem =
	    htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htobe16(f->fs.val.vlan);
	fwr->ivlanm = htobe16(f->fs.mask.vlan);
	fwr->ovlan = htobe16(f->fs.val.vnic);
	fwr->ovlanm = htobe16(f->fs.mask.vnic);
	bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
	bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
	bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
	bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
	fwr->lp = htobe16(f->fs.val.dport);
	fwr->lpm = htobe16(f->fs.mask.dport);
	fwr->fp = htobe16(f->fs.val.sport);
	fwr->fpm = htobe16(f->fs.mask.sport);
	if (f->fs.newsmac)
		bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma));

	/* Count the filter as in use as soon as the request is queued. */
	f->pending = 1;
	sc->tids.ftids_in_use++;

	t4_wrq_tx(sc, wr);
	return (0);
}
7073
7074static int
7075del_filter_wr(struct adapter *sc, int fidx)
7076{
7077 struct filter_entry *f = &sc->tids.ftid_tab[fidx];
7078 struct wrqe *wr;
7079 struct fw_filter_wr *fwr;
7080 unsigned int ftid;
7081
7082 ftid = sc->tids.ftid_base + fidx;
7083
7084 wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
7085 if (wr == NULL)
7086 return (ENOMEM);
7087 fwr = wrtod(wr);
7088 bzero(fwr, sizeof (*fwr));
7089
7090 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
7091
7092 f->pending = 1;
7093 t4_wrq_tx(sc, wr);
7094 return (0);
7095}
7096
/*
 * Handler for the firmware's reply to a filter work request.  Completes
 * the pending set_filter/del_filter operation for the affected entry and
 * wakes up any thread sleeping on it.
 */
int
t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
	unsigned int idx = GET_TID(rpl);

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	/* Only act on tids that fall within the filter region. */
	if (idx >= sc->tids.ftid_base &&
	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
		unsigned int rc = G_COOKIE(rpl->cookie);
		struct filter_entry *f = &sc->tids.ftid_tab[idx];

		mtx_lock(&sc->tids.ftid_lock);
		if (rc == FW_FILTER_WR_FLT_ADDED) {
			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
			    __func__, idx));
			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;	/* asynchronous setup completed */
			f->valid = 1;
		} else {
			if (rc != FW_FILTER_WR_FLT_DELETED) {
				/* Add or delete failed, display an error */
				log(LOG_ERR,
				    "filter %u setup failed with error %u\n",
				    idx, rc);
			}

			/* Deleted or failed: either way the slot is free. */
			clear_filter(f);
			sc->tids.ftids_in_use--;
		}
		/* Wake any thread in set_filter/del_filter waiting on this. */
		wakeup(&sc->tids.ftid_tab);
		mtx_unlock(&sc->tids.ftid_lock);
	}

	return (0);
}
7136
7137static int
7138get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
7139{
7140 int rc;
7141
7142 if (cntxt->cid > M_CTXTQID)
7143 return (EINVAL);
7144
7145 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
7146 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
7147 return (EINVAL);
7148
7149 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
7150 if (rc)
7151 return (rc);
7152
7153 if (sc->flags & FW_OK) {
7154 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
7155 &cntxt->data[0]);
7156 if (rc == 0)
7157 goto done;
7158 }
7159
7160 /*
7161 * Read via firmware failed or wasn't even attempted. Read directly via
7162 * the backdoor.
7163 */
7164 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
7165done:
7166 end_synchronized_op(sc, 0);
7167 return (rc);
7168}
7169
/*
 * Copy a firmware image in from userspace and flash it to the card.
 * Refused once the adapter is fully initialized.
 */
static int
load_fw(struct adapter *sc, struct t4_data *fw)
{
	int rc;
	uint8_t *fw_data;

	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw");
	if (rc)
		return (rc);

	if (sc->flags & FULL_INIT_DONE) {
		rc = EBUSY;
		goto done;
	}

	/*
	 * NOTE(review): fw->len comes from userspace and is not bounded
	 * here before being handed to malloc — confirm an upper limit is
	 * enforced elsewhere.  Also, malloc(M_WAITOK) does not return NULL
	 * on FreeBSD, so the check below appears to be dead code.
	 */
	fw_data = malloc(fw->len, M_CXGBE, M_WAITOK);
	if (fw_data == NULL) {
		rc = ENOMEM;
		goto done;
	}

	rc = copyin(fw->data, fw_data, fw->len);
	if (rc == 0)
		rc = -t4_load_fw(sc, fw_data, fw->len);

	free(fw_data, M_CXGBE);
done:
	end_synchronized_op(sc, 0);
	return (rc);
}
7200
/*
 * Copy a range of adapter memory out to userspace through PCIe memory
 * window 'win'.  The range is validated first; reads are done 4 bytes at
 * a time into a bounce buffer no larger than the window aperture.
 */
static int
read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
{
	uint32_t addr, off, remaining, i, n;
	uint32_t *buf, *b;
	uint32_t mw_base, mw_aperture;
	int rc;
	uint8_t *dst;

	rc = validate_mem_range(sc, mr->addr, mr->len);
	if (rc != 0)
		return (rc);

	memwin_info(sc, win, &mw_base, &mw_aperture);
	buf = b = malloc(min(mr->len, mw_aperture), M_CXGBE, M_WAITOK);
	addr = mr->addr;
	remaining = mr->len;
	dst = (void *)mr->data;

	/* Walk the range one window position at a time. */
	while (remaining) {
		off = position_memwin(sc, win, addr);

		/* number of bytes that we'll copy in the inner loop */
		n = min(remaining, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			*b++ = t4_read_reg(sc, mw_base + off + i);

		rc = copyout(buf, dst, n);
		if (rc != 0)
			break;

		/* Reset the bounce buffer and advance to the next chunk. */
		b = buf;
		dst += n;
		remaining -= n;
		addr += n;
	}

	free(buf, M_CXGBE);
	return (rc);
}
7241
7242static int
7243read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
7244{
7245 int rc;
7246
7247 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
7248 return (EINVAL);
7249
7250 if (i2cd->len > 1) {
7251 /* XXX: need fw support for longer reads in one go */
7252 return (ENOTSUP);
7253 }
7254
7255 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
7256 if (rc)
7257 return (rc);
7258 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
7259 i2cd->offset, &i2cd->data[0]);
7260 end_synchronized_op(sc, 0);
7261
7262 return (rc);
7263}
7264
7265int
7266t4_os_find_pci_capability(struct adapter *sc, int cap)
7267{
7268 int i;
7269
7270 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
7271}
7272
7273int
7274t4_os_pci_save_state(struct adapter *sc)
7275{
7276 device_t dev;
7277 struct pci_devinfo *dinfo;
7278
7279 dev = sc->dev;
7280 dinfo = device_get_ivars(dev);
7281
7282 pci_cfg_save(dev, dinfo, 0);
7283 return (0);
7284}
7285
7286int
7287t4_os_pci_restore_state(struct adapter *sc)
7288{
7289 device_t dev;
7290 struct pci_devinfo *dinfo;
7291
7292 dev = sc->dev;
7293 dinfo = device_get_ivars(dev);
7294
7295 pci_cfg_restore(dev, dinfo);
7296 return (0);
7297}
7298
7299void
7300t4_os_portmod_changed(const struct adapter *sc, int idx)
7301{
7302 struct port_info *pi = sc->port[idx];
7303 static const char *mod_str[] = {
7304 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
7305 };
7306
7307 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
7308 if_printf(pi->ifp, "transceiver unplugged.\n");
7309 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
7310 if_printf(pi->ifp, "unknown transceiver inserted.\n");
7311 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
7312 if_printf(pi->ifp, "unsupported transceiver inserted.\n");
7313 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
7314 if_printf(pi->ifp, "%s transceiver inserted.\n",
7315 mod_str[pi->mod_type]);
7316 } else {
7317 if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
7318 pi->mod_type);
7319 }
7320}
7321
7322void
7323t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason)
7324{
7325 struct port_info *pi = sc->port[idx];
7326 struct ifnet *ifp = pi->ifp;
7327
7328 if (link_stat) {
7329 pi->linkdnrc = -1;
7330 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
7331 if_link_state_change(ifp, LINK_STATE_UP);
7332 } else {
7333 if (reason >= 0)
7334 pi->linkdnrc = reason;
7335 if_link_state_change(ifp, LINK_STATE_DOWN);
7336 }
7337}
7338
/*
 * Invoke 'func' on every adapter attached by this driver, with the
 * global adapter list locked for the duration.
 */
void
t4_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	mtx_lock(&t4_list_lock);
	SLIST_FOREACH(sc, &t4_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	mtx_unlock(&t4_list_lock);
}
7354
/* cdev open handler; no per-open state is kept. */
static int
t4_open(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}
7360
/* cdev close handler; nothing to tear down. */
static int
t4_close(struct cdev *dev, int flags, int type, struct thread *td)
{
	return (0);
}
7366
/*
 * ioctl entry point for the adapter's control device.  All commands
 * require PRIV_DRIVER.  Commands are dispatched to the helpers above;
 * the small ones (register access, stat clearing) are handled inline.
 */
static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	int rc;
	struct adapter *sc = dev->si_drv1;

	rc = priv_check(td, PRIV_DRIVER);
	if (rc != 0)
		return (rc);

	switch (cmd) {
	case CHELSIO_T4_GETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		/* Register offset must be 32-bit aligned and in range. */
		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4)
			edata->val = t4_read_reg(sc, edata->addr);
		else if (edata->size == 8)
			edata->val = t4_read_reg64(sc, edata->addr);
		else
			return (EINVAL);

		break;
	}
	case CHELSIO_T4_SETREG: {
		struct t4_reg *edata = (struct t4_reg *)data;

		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
			return (EFAULT);

		if (edata->size == 4) {
			/* A 32-bit write must not carry high bits. */
			if (edata->val & 0xffffffff00000000)
				return (EINVAL);
			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
		} else if (edata->size == 8)
			t4_write_reg64(sc, edata->addr, edata->val);
		else
			return (EINVAL);
		break;
	}
	case CHELSIO_T4_REGDUMP: {
		struct t4_regdump *regs = (struct t4_regdump *)data;
		int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
		uint8_t *buf;

		if (regs->len < reglen) {
			regs->len = reglen; /* hint to the caller */
			return (ENOBUFS);
		}

		regs->len = reglen;
		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
		t4_get_regs(sc, regs, buf);
		rc = copyout(buf, regs->data, reglen);
		free(buf, M_CXGBE);
		break;
	}
	case CHELSIO_T4_GET_FILTER_MODE:
		rc = get_filter_mode(sc, (uint32_t *)data);
		break;
	case CHELSIO_T4_SET_FILTER_MODE:
		rc = set_filter_mode(sc, *(uint32_t *)data);
		break;
	case CHELSIO_T4_GET_FILTER:
		rc = get_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_SET_FILTER:
		rc = set_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_DEL_FILTER:
		rc = del_filter(sc, (struct t4_filter *)data);
		break;
	case CHELSIO_T4_GET_SGE_CONTEXT:
		rc = get_sge_context(sc, (struct t4_sge_context *)data);
		break;
	case CHELSIO_T4_LOAD_FW:
		rc = load_fw(sc, (struct t4_data *)data);
		break;
	case CHELSIO_T4_GET_MEM:
		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
		break;
	case CHELSIO_T4_GET_I2C:
		rc = read_i2c(sc, (struct t4_i2c_data *)data);
		break;
	case CHELSIO_T4_CLEAR_STATS: {
		int i;
		u_int port_id = *(uint32_t *)data;
		struct port_info *pi;

		if (port_id >= sc->params.nports)
			return (EINVAL);

		/* MAC stats */
		t4_clr_port_stats(sc, port_id);

		/* Also clear the per-queue software counters, if any. */
		pi = sc->port[port_id];
		if (pi->flags & PORT_INIT_DONE) {
			struct sge_rxq *rxq;
			struct sge_txq *txq;
			struct sge_wrq *wrq;

			for_each_rxq(pi, i, rxq) {
#if defined(INET) || defined(INET6)
				rxq->lro.lro_queued = 0;
				rxq->lro.lro_flushed = 0;
#endif
				rxq->rxcsum = 0;
				rxq->vlan_extraction = 0;
			}

			for_each_txq(pi, i, txq) {
				txq->txcsum = 0;
				txq->tso_wrs = 0;
				txq->vlan_insertion = 0;
				txq->imm_wrs = 0;
				txq->sgl_wrs = 0;
				txq->txpkt_wrs = 0;
				txq->txpkts_wrs = 0;
				txq->txpkts_pkts = 0;
				txq->br->br_drops = 0;
				txq->no_dmamap = 0;
				txq->no_desc = 0;
			}

#ifdef TCP_OFFLOAD
			/* nothing to clear for each ofld_rxq */

			for_each_ofld_txq(pi, i, wrq) {
				wrq->tx_wrs = 0;
				wrq->no_desc = 0;
			}
#endif
			wrq = &sc->sge.ctrlq[pi->port_id];
			wrq->tx_wrs = 0;
			wrq->no_desc = 0;
		}
		break;
	}
	case CHELSIO_T4_GET_TRACER:
		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
		break;
	case CHELSIO_T4_SET_TRACER:
		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
		break;
	default:
		rc = EINVAL;
	}

	return (rc);
}
7520
7521#ifdef TCP_OFFLOAD
7522static int
7523toe_capability(struct port_info *pi, int enable)
7524{
7525 int rc;
7526 struct adapter *sc = pi->adapter;
7527
7528 ASSERT_SYNCHRONIZED_OP(sc);
7529
7530 if (!is_offload(sc))
7531 return (ENODEV);
7532
7533 if (enable) {
7534 if (!(sc->flags & FULL_INIT_DONE)) {
7535 rc = cxgbe_init_synchronized(pi);
7536 if (rc)
7537 return (rc);
7538 }
7539
7540 if (isset(&sc->offload_map, pi->port_id))
7541 return (0);
7542
7543 if (!(sc->flags & TOM_INIT_DONE)) {
7544 rc = t4_activate_uld(sc, ULD_TOM);
7545 if (rc == EAGAIN) {
7546 log(LOG_WARNING,
7547 "You must kldload t4_tom.ko before trying "
7548 "to enable TOE on a cxgbe interface.\n");
7549 }
7550 if (rc != 0)
7551 return (rc);
7552 KASSERT(sc->tom_softc != NULL,
7553 ("%s: TOM activated but softc NULL", __func__));
7554 KASSERT(sc->flags & TOM_INIT_DONE,
7555 ("%s: TOM activated but flag not set", __func__));
7556 }
7557
7558 setbit(&sc->offload_map, pi->port_id);
7559 } else {
7560 if (!isset(&sc->offload_map, pi->port_id))
7561 return (0);
7562
7563 KASSERT(sc->flags & TOM_INIT_DONE,
7564 ("%s: TOM never initialized?", __func__));
7565 clrbit(&sc->offload_map, pi->port_id);
7566 }
7567
7568 return (0);
7569}
7570
7571/*
7572 * Add an upper layer driver to the global list.
7573 */
7574int
7575t4_register_uld(struct uld_info *ui)
7576{
7577 int rc = 0;
7578 struct uld_info *u;
7579
7580 mtx_lock(&t4_uld_list_lock);
7581 SLIST_FOREACH(u, &t4_uld_list, link) {
7582 if (u->uld_id == ui->uld_id) {
7583 rc = EEXIST;
7584 goto done;
7585 }
7586 }
7587
7588 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
7589 ui->refcount = 0;
7590done:
7591 mtx_unlock(&t4_uld_list_lock);
7592 return (rc);
7593}
7594
7595int
7596t4_unregister_uld(struct uld_info *ui)
7597{
7598 int rc = EINVAL;
7599 struct uld_info *u;
7600
7601 mtx_lock(&t4_uld_list_lock);
7602
7603 SLIST_FOREACH(u, &t4_uld_list, link) {
7604 if (u == ui) {
7605 if (ui->refcount > 0) {
7606 rc = EBUSY;
7607 goto done;
7608 }
7609
7610 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
7611 rc = 0;
7612 goto done;
7613 }
7614 }
7615done:
7616 mtx_unlock(&t4_uld_list_lock);
7617 return (rc);
7618}
7619
7620int
7621t4_activate_uld(struct adapter *sc, int id)
7622{
7623 int rc = EAGAIN;
7624 struct uld_info *ui;
7625
7626 ASSERT_SYNCHRONIZED_OP(sc);
7627
7628 mtx_lock(&t4_uld_list_lock);
7629
7630 SLIST_FOREACH(ui, &t4_uld_list, link) {
7631 if (ui->uld_id == id) {
7632 rc = ui->activate(sc);
7633 if (rc == 0)
7634 ui->refcount++;
7635 goto done;
7636 }
7637 }
7638done:
7639 mtx_unlock(&t4_uld_list_lock);
7640
7641 return (rc);
7642}
7643
7644int
7645t4_deactivate_uld(struct adapter *sc, int id)
7646{
7647 int rc = EINVAL;
7648 struct uld_info *ui;
7649
7650 ASSERT_SYNCHRONIZED_OP(sc);
7651
7652 mtx_lock(&t4_uld_list_lock);
7653
7654 SLIST_FOREACH(ui, &t4_uld_list, link) {
7655 if (ui->uld_id == id) {
7656 rc = ui->deactivate(sc);
7657 if (rc == 0)
7658 ui->refcount--;
7659 goto done;
7660 }
7661 }
7662done:
7663 mtx_unlock(&t4_uld_list_lock);
7664
7665 return (rc);
7666}
7667#endif
7668
7669/*
7670 * Come up with reasonable defaults for some of the tunables, provided they're
7671 * not set by the user (in which case we'll use the values as is).
7672 */
static void
tweak_tunables(void)
{
	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */

	/* Queue counts default to min(ncpus, per-speed maximum). */
	if (t4_ntxq10g < 1)
		t4_ntxq10g = min(nc, NTXQ_10G);

	if (t4_ntxq1g < 1)
		t4_ntxq1g = min(nc, NTXQ_1G);

	if (t4_nrxq10g < 1)
		t4_nrxq10g = min(nc, NRXQ_10G);

	if (t4_nrxq1g < 1)
		t4_nrxq1g = min(nc, NRXQ_1G);

#ifdef TCP_OFFLOAD
	/* Same treatment for the offload queues. */
	if (t4_nofldtxq10g < 1)
		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);

	if (t4_nofldtxq1g < 1)
		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);

	if (t4_nofldrxq10g < 1)
		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);

	if (t4_nofldrxq1g < 1)
		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);

	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
#else
	/* No TOE support compiled in; don't allow the capability. */
	if (t4_toecaps_allowed == -1)
		t4_toecaps_allowed = 0;
#endif

	/* Clamp interrupt holdoff timer/counter indices to valid ranges. */
	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
		t4_tmr_idx_10g = TMR_IDX_10G;

	if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS)
		t4_pktc_idx_10g = PKTC_IDX_10G;

	if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS)
		t4_tmr_idx_1g = TMR_IDX_1G;

	if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS)
		t4_pktc_idx_1g = PKTC_IDX_1G;

	/* Queue sizes: at least 128 entries; rxq size a multiple of 8. */
	if (t4_qsize_txq < 128)
		t4_qsize_txq = 128;

	if (t4_qsize_rxq < 128)
		t4_qsize_rxq = 128;
	while (t4_qsize_rxq & 7)
		t4_qsize_rxq++;

	/* Discard any unknown interrupt-type bits. */
	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
}
7732
/*
 * Module event handler shared by t4nex and t5nex.  Global state is set
 * up on the first load and torn down on the last unload; unload is
 * refused while any adapter or ULD is still registered.
 */
static int
mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;
	static int loaded = 0;

	switch (cmd) {
	case MOD_LOAD:
		/* Only the first of the two nexus drivers initializes. */
		if (atomic_fetchadd_int(&loaded, 1))
			break;
		t4_sge_modload();
		mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
		SLIST_INIT(&t4_list);
#ifdef TCP_OFFLOAD
		mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
		SLIST_INIT(&t4_uld_list);
#endif
		t4_tracer_modload();
		tweak_tunables();
		break;

	case MOD_UNLOAD:
		/* Only the last unload tears the global state down. */
		if (atomic_fetchadd_int(&loaded, -1) > 1)
			break;
		t4_tracer_modunload();
#ifdef TCP_OFFLOAD
		/* Refuse to unload while ULDs are still registered. */
		mtx_lock(&t4_uld_list_lock);
		if (!SLIST_EMPTY(&t4_uld_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_uld_list_lock);
			break;
		}
		mtx_unlock(&t4_uld_list_lock);
		mtx_destroy(&t4_uld_list_lock);
#endif
		/* Likewise while adapters are still attached. */
		mtx_lock(&t4_list_lock);
		if (!SLIST_EMPTY(&t4_list)) {
			rc = EBUSY;
			mtx_unlock(&t4_list_lock);
			break;
		}
		mtx_unlock(&t4_list_lock);
		mtx_destroy(&t4_list_lock);
		break;
	}

	return (rc);
}
7781
static devclass_t t4_devclass, t5_devclass;
static devclass_t cxgbe_devclass, cxl_devclass;

/* T4 nexus (PCI) driver; depends on the firmware(9) framework. */
DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
MODULE_VERSION(t4nex, 1);
MODULE_DEPEND(t4nex, firmware, 1, 1, 1);

/* T5 nexus (PCI) driver; shares mod_event with t4nex. */
DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
MODULE_VERSION(t5nex, 1);
MODULE_DEPEND(t5nex, firmware, 1, 1, 1);

/* Per-port network interface drivers attached under each nexus. */
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
MODULE_VERSION(cxgbe, 1);

DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
MODULE_VERSION(cxl, 1);