/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/t4_main.c 318850 2017-05-25 01:40:40Z np $");

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
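	/*
	 * Descriptive note (added): this method table is shared by the
	 * cxgbe (T4), cxl (T5), and cc (T6) port drivers declared below.
	 */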
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe,		vcxgbe_probe),
	DEVMETHOD(device_attach,	vcxgbe_attach),
	DEVMETHOD(device_detach,	vcxgbe_detach),
	{ 0, 0 }
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe,		t5_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
	DEVMETHOD(device_probe,		t6_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD(t4_is_main_ready,	t4_ready),
	DEVMETHOD(t4_read_port_device,	t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t6_driver = {
	"t6nex",
	t6_methods,
	sizeof(struct adapter)
};


/* T6 port (cc) interface */
static driver_t cc_driver = {
	"cc",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
	"vcc",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it
 * should provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
 * T5 are under hw.cxl.
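 *
 * Illustrative example (not from the original source): these knobs are
 * normally set from /boot/loader.conf before the module is loaded, e.g.
 *
 *	hw.cxgbe.ntxq10g="8"
 *	hw.cxgbe.config_file="default"
 *
 * The values above are arbitrary; only the tunable names come from the
 * TUNABLE_* declarations below.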
 */

/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
int t4_ntxq10g = -NTXQ_10G;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
int t4_nrxq10g = -NRXQ_10G;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
int t4_ntxq1g = -NTXQ_1G;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
int t4_nrxq1g = -NRXQ_1G;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);

#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -NOFLDTXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -NOFLDRXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -NOFLDTXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -NOFLDRXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 * mark or when signalled to do so, 0 to never emit PAUSE.
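 *
 * Illustrative note (not in the original comment): assuming the bit layout
 * described above, hw.cxgbe.pause_settings=3 requests both rx_pause and
 * tx_pause, matching the compiled-in default of PAUSE_TX | PAUSE_RX below,
 * while 0 disables both.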
357 */ 358static int t4_pause_settings = PAUSE_TX | PAUSE_RX; 359TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings); 360 361/* 362 * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS, 363 * FEC_RESERVED respectively). 364 * -1 to run with the firmware default. 365 * 0 to disable FEC. 366 */ 367static int t4_fec = -1; 368TUNABLE_INT("hw.cxgbe.fec", &t4_fec); 369 370/* 371 * Link autonegotiation. 372 * -1 to run with the firmware default. 373 * 0 to disable. 374 * 1 to enable. 375 */ 376static int t4_autoneg = -1; 377TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg); 378 379/* 380 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed, 381 * encouraged respectively). 382 */ 383static unsigned int t4_fw_install = 1; 384TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install); 385 386/* 387 * ASIC features that will be used. Disable the ones you don't want so that the 388 * chip resources aren't wasted on features that will not be used. 389 */ 390static int t4_nbmcaps_allowed = 0; 391TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed); 392 393static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */ 394TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed); 395 396static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS | 397 FW_CAPS_CONFIG_SWITCH_EGRESS; 398TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed); 399 400static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC; 401TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed); 402 403static int t4_toecaps_allowed = -1; 404TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed); 405 406static int t4_rdmacaps_allowed = -1; 407TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed); 408 409static int t4_cryptocaps_allowed = 0; 410TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed); 411 412static int t4_iscsicaps_allowed = -1; 413TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed); 414 415static int t4_fcoecaps_allowed = 0; 416TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed); 417 418static int t5_write_combine = 0; 419TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine); 420 421static int t4_num_vis = 1; 422TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis); 423 424/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */ 425static int vi_mac_funcs[] = { 426 FW_VI_FUNC_OFLD, 427 FW_VI_FUNC_IWARP, 428 FW_VI_FUNC_OPENISCSI, 429 FW_VI_FUNC_OPENFCOE, 430 FW_VI_FUNC_FOISCSI, 431 FW_VI_FUNC_FOFCOE, 432}; 433 434struct intrs_and_queues { 435 uint16_t intr_type; /* INTx, MSI, or MSI-X */ 436 uint16_t nirq; /* Total # of vectors */ 437 uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */ 438 uint16_t intr_flags_1g; /* Interrupt flags for each 1G port */ 439 uint16_t ntxq10g; /* # of NIC txq's for each 10G port */ 440 uint16_t nrxq10g; /* # of NIC rxq's for each 10G port */ 441 uint16_t ntxq1g; /* # of NIC txq's for each 1G port */ 442 uint16_t nrxq1g; /* # of NIC rxq's for each 1G port */ 443 uint16_t rsrv_noflowq; /* Flag whether to reserve queue 0 */ 444 uint16_t nofldtxq10g; /* # of TOE txq's for each 10G port */ 445 uint16_t nofldrxq10g; /* # of TOE rxq's for each 10G port */ 446 uint16_t nofldtxq1g; /* # of TOE txq's for each 1G port */ 447 uint16_t nofldrxq1g; /* # of TOE rxq's for each 1G port */ 448 449 /* The vcxgbe/vcxl interfaces use these and not the ones above. 
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
    int);
static inline int write_via_memwin(struct adapter *, int, uint32_t,
    const uint32_t *, int);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#endif
#ifdef TCP_OFFLOAD
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */

	/* Custom */
	{0x6480, "Chelsio T6225 80"},
	{0x6481, "Chelsio T62100 81"},
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t6_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};

void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint8_t *buf;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
	}

	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	t4_add_adapter(sc);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * This is the real PF# to which we're attaching.
	 * Works from within PCI passthrough environments too, where
	 * pci_get_function() could return a different PF# depending on the
	 * passthrough configuration.  We need to use the real PF# in all our
	 * communication with the firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done; /* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * Number of VIs to create per-port.  The first VI is the "main"
	 * regular VI for the port.  The rest are additional virtual interfaces
	 * on the same physical port.  Note that the main VI does not have
	 * native netmap support but the extra VIs do.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis >= 1)
		num_vis = t4_num_vis;
	else
		num_vis = 1;
	if (num_vis > nitems(vi_mac_funcs)) {
		num_vis = nitems(vi_mac_funcs);
		device_printf(dev, "Number of VIs limited to %d\n", num_vis);
	}

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;
		struct link_config *lc;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation
		 * until pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		lc = &pi->link_cfg;
		lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
		lc->requested_fc |= t4_pause_settings;
		if (t4_fec != -1) {
			lc->requested_fec = t4_fec &
			    G_FW_PORT_CAP_FEC(lc->supported);
		}
		if (lc->supported & FW_PORT_CAP_ANEG && t4_autoneg != -1) {
			lc->autoneg = t4_autoneg ? AUTONEG_ENABLE :
			    AUTONEG_DISABLE;
		}

		rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
		if (rc != 0) {
			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		if (port_top_speed(pi) >= 10) {
			n10g++;
		} else {
			n1g++;
		}

		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */
	if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
		num_vis = 1;

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	if (num_vis > 1) {
		s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		if (num_vis > 1) {
			s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldrxq_vi;
			s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldtxq_vi;
		}
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	if (num_vis > 1) {
		s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
1083#endif 1084 1085 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE, 1086 M_ZERO | M_WAITOK); 1087 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, 1088 M_ZERO | M_WAITOK); 1089 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE, 1090 M_ZERO | M_WAITOK); 1091 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE, 1092 M_ZERO | M_WAITOK); 1093 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE, 1094 M_ZERO | M_WAITOK); 1095 1096 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, 1097 M_ZERO | M_WAITOK); 1098 1099 t4_init_l2t(sc, M_WAITOK); 1100 t4_init_tx_sched(sc); 1101 1102 /* 1103 * Second pass over the ports. This time we know the number of rx and 1104 * tx queues that each port should get. 1105 */ 1106 rqidx = tqidx = 0; 1107#ifdef TCP_OFFLOAD 1108 ofld_rqidx = ofld_tqidx = 0; 1109#endif 1110#ifdef DEV_NETMAP 1111 nm_rqidx = nm_tqidx = 0; 1112#endif 1113 for_each_port(sc, i) { 1114 struct port_info *pi = sc->port[i]; 1115 struct vi_info *vi; 1116 1117 if (pi == NULL) 1118 continue; 1119 1120 pi->nvi = num_vis; 1121 for_each_vi(pi, j, vi) { 1122 vi->pi = pi; 1123 vi->qsize_rxq = t4_qsize_rxq; 1124 vi->qsize_txq = t4_qsize_txq; 1125 1126 vi->first_rxq = rqidx; 1127 vi->first_txq = tqidx; 1128 if (port_top_speed(pi) >= 10) { 1129 vi->tmr_idx = t4_tmr_idx_10g; 1130 vi->pktc_idx = t4_pktc_idx_10g; 1131 vi->flags |= iaq.intr_flags_10g & INTR_RXQ; 1132 vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi; 1133 vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi; 1134 } else { 1135 vi->tmr_idx = t4_tmr_idx_1g; 1136 vi->pktc_idx = t4_pktc_idx_1g; 1137 vi->flags |= iaq.intr_flags_1g & INTR_RXQ; 1138 vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi; 1139 vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi; 1140 } 1141 rqidx += vi->nrxq; 1142 tqidx += vi->ntxq; 1143 1144 if (j == 0 && vi->ntxq > 1) 1145 vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0; 1146 else 1147 vi->rsrv_noflowq = 0; 1148 1149#ifdef TCP_OFFLOAD 1150 vi->first_ofld_rxq = ofld_rqidx; 1151 vi->first_ofld_txq = ofld_tqidx; 1152 if (port_top_speed(pi) >= 10) { 1153 vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ; 1154 vi->nofldrxq = j == 0 ? iaq.nofldrxq10g : 1155 iaq.nofldrxq_vi; 1156 vi->nofldtxq = j == 0 ? iaq.nofldtxq10g : 1157 iaq.nofldtxq_vi; 1158 } else { 1159 vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ; 1160 vi->nofldrxq = j == 0 ? iaq.nofldrxq1g : 1161 iaq.nofldrxq_vi; 1162 vi->nofldtxq = j == 0 ? iaq.nofldtxq1g : 1163 iaq.nofldtxq_vi; 1164 } 1165 ofld_rqidx += vi->nofldrxq; 1166 ofld_tqidx += vi->nofldtxq; 1167#endif 1168#ifdef DEV_NETMAP 1169 if (j > 0) { 1170 vi->first_nm_rxq = nm_rqidx; 1171 vi->first_nm_txq = nm_tqidx; 1172 vi->nnmrxq = iaq.nnmrxq_vi; 1173 vi->nnmtxq = iaq.nnmtxq_vi; 1174 nm_rqidx += vi->nnmrxq; 1175 nm_tqidx += vi->nnmtxq; 1176 } 1177#endif 1178 } 1179 } 1180 1181 rc = t4_setup_intr_handlers(sc); 1182 if (rc != 0) { 1183 device_printf(dev, 1184 "failed to setup interrupt handlers: %d\n", rc); 1185 goto done; 1186 } 1187 1188 rc = bus_generic_attach(dev); 1189 if (rc != 0) { 1190 device_printf(dev, 1191 "failed to attach all child ports: %d\n", rc); 1192 goto done; 1193 } 1194 1195 device_printf(dev, 1196 "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n", 1197 sc->params.pci.speed, sc->params.pci.width, sc->params.nports, 1198 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : 1199 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), 1200 sc->intr_count > 1 ? 
"s" : "", sc->sge.neq, sc->sge.niq); 1201 1202 t4_set_desc(sc); 1203 1204 notify_siblings(dev, 0); 1205 1206done: 1207 if (rc != 0 && sc->cdev) { 1208 /* cdev was created and so cxgbetool works; recover that way. */ 1209 device_printf(dev, 1210 "error during attach, adapter is now in recovery mode.\n"); 1211 rc = 0; 1212 } 1213 1214 if (rc != 0) 1215 t4_detach_common(dev); 1216 else 1217 t4_sysctls(sc); 1218 1219 return (rc); 1220} 1221 1222static int 1223t4_ready(device_t dev) 1224{ 1225 struct adapter *sc; 1226 1227 sc = device_get_softc(dev); 1228 if (sc->flags & FW_OK) 1229 return (0); 1230 return (ENXIO); 1231} 1232 1233static int 1234t4_read_port_device(device_t dev, int port, device_t *child) 1235{ 1236 struct adapter *sc; 1237 struct port_info *pi; 1238 1239 sc = device_get_softc(dev); 1240 if (port < 0 || port >= MAX_NPORTS) 1241 return (EINVAL); 1242 pi = sc->port[port]; 1243 if (pi == NULL || pi->dev == NULL) 1244 return (ENXIO); 1245 *child = pi->dev; 1246 return (0); 1247} 1248 1249static int 1250notify_siblings(device_t dev, int detaching) 1251{ 1252 device_t sibling; 1253 int error, i; 1254 1255 error = 0; 1256 for (i = 0; i < PCI_FUNCMAX; i++) { 1257 if (i == pci_get_function(dev)) 1258 continue; 1259 sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev), 1260 pci_get_slot(dev), i); 1261 if (sibling == NULL || !device_is_attached(sibling)) 1262 continue; 1263 if (detaching) 1264 error = T4_DETACH_CHILD(sibling); 1265 else 1266 (void)T4_ATTACH_CHILD(sibling); 1267 if (error) 1268 break; 1269 } 1270 return (error); 1271} 1272 1273/* 1274 * Idempotent 1275 */ 1276static int 1277t4_detach(device_t dev) 1278{ 1279 struct adapter *sc; 1280 int rc; 1281 1282 sc = device_get_softc(dev); 1283 1284 rc = notify_siblings(dev, 1); 1285 if (rc) { 1286 device_printf(dev, 1287 "failed to detach sibling devices: %d\n", rc); 1288 return (rc); 1289 } 1290 1291 return (t4_detach_common(dev)); 1292} 1293 1294int 1295t4_detach_common(device_t dev) 1296{ 1297 struct adapter *sc; 1298 struct port_info *pi; 1299 int i, rc; 1300 1301 sc = device_get_softc(dev); 1302 1303 if (sc->flags & FULL_INIT_DONE) { 1304 if (!(sc->flags & IS_VF)) 1305 t4_intr_disable(sc); 1306 } 1307 1308 if (sc->cdev) { 1309 destroy_dev(sc->cdev); 1310 sc->cdev = NULL; 1311 } 1312 1313 if (device_is_attached(dev)) { 1314 rc = bus_generic_detach(dev); 1315 if (rc) { 1316 device_printf(dev, 1317 "failed to detach child devices: %d\n", rc); 1318 return (rc); 1319 } 1320 } 1321 1322 for (i = 0; i < sc->intr_count; i++) 1323 t4_free_irq(sc, &sc->irq[i]); 1324 1325 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) 1326 t4_free_tx_sched(sc); 1327 1328 for (i = 0; i < MAX_NPORTS; i++) { 1329 pi = sc->port[i]; 1330 if (pi) { 1331 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid); 1332 if (pi->dev) 1333 device_delete_child(dev, pi->dev); 1334 1335 mtx_destroy(&pi->pi_lock); 1336 free(pi->vi, M_CXGBE); 1337 free(pi, M_CXGBE); 1338 } 1339 } 1340 1341 if (sc->flags & FULL_INIT_DONE) 1342 adapter_full_uninit(sc); 1343 1344 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) 1345 t4_fw_bye(sc, sc->mbox); 1346 1347 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) 1348 pci_release_msi(dev); 1349 1350 if (sc->regs_res) 1351 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, 1352 sc->regs_res); 1353 1354 if (sc->udbs_res) 1355 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid, 1356 sc->udbs_res); 1357 1358 if (sc->msix_res) 1359 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, 1360 sc->msix_res); 1361 1362 if (sc->l2t) 
1363 t4_free_l2t(sc->l2t); 1364 1365#ifdef TCP_OFFLOAD 1366 free(sc->sge.ofld_rxq, M_CXGBE); 1367 free(sc->sge.ofld_txq, M_CXGBE); 1368#endif 1369#ifdef DEV_NETMAP 1370 free(sc->sge.nm_rxq, M_CXGBE); 1371 free(sc->sge.nm_txq, M_CXGBE); 1372#endif 1373 free(sc->irq, M_CXGBE); 1374 free(sc->sge.rxq, M_CXGBE); 1375 free(sc->sge.txq, M_CXGBE); 1376 free(sc->sge.ctrlq, M_CXGBE); 1377 free(sc->sge.iqmap, M_CXGBE); 1378 free(sc->sge.eqmap, M_CXGBE); 1379 free(sc->tids.ftid_tab, M_CXGBE); 1380 t4_destroy_dma_tag(sc); 1381 if (mtx_initialized(&sc->sc_lock)) { 1382 sx_xlock(&t4_list_lock); 1383 SLIST_REMOVE(&t4_list, sc, adapter, link); 1384 sx_xunlock(&t4_list_lock); 1385 mtx_destroy(&sc->sc_lock); 1386 } 1387 1388 callout_drain(&sc->sfl_callout); 1389 if (mtx_initialized(&sc->tids.ftid_lock)) 1390 mtx_destroy(&sc->tids.ftid_lock); 1391 if (mtx_initialized(&sc->sfl_lock)) 1392 mtx_destroy(&sc->sfl_lock); 1393 if (mtx_initialized(&sc->ifp_lock)) 1394 mtx_destroy(&sc->ifp_lock); 1395 if (mtx_initialized(&sc->reg_lock)) 1396 mtx_destroy(&sc->reg_lock); 1397 1398 for (i = 0; i < NUM_MEMWIN; i++) { 1399 struct memwin *mw = &sc->memwin[i]; 1400 1401 if (rw_initialized(&mw->mw_lock)) 1402 rw_destroy(&mw->mw_lock); 1403 } 1404 1405 bzero(sc, sizeof(*sc)); 1406 1407 return (0); 1408} 1409 1410static int 1411cxgbe_probe(device_t dev) 1412{ 1413 char buf[128]; 1414 struct port_info *pi = device_get_softc(dev); 1415 1416 snprintf(buf, sizeof(buf), "port %d", pi->port_id); 1417 device_set_desc_copy(dev, buf); 1418 1419 return (BUS_PROBE_DEFAULT); 1420} 1421 1422#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ 1423 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ 1424 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS) 1425#define T4_CAP_ENABLE (T4_CAP) 1426 1427static int 1428cxgbe_vi_attach(device_t dev, struct vi_info *vi) 1429{ 1430 struct ifnet *ifp; 1431 struct sbuf *sb; 1432 1433 vi->xact_addr_filt = -1; 1434 callout_init(&vi->tick, 1); 1435 1436 /* Allocate an ifnet and set it up */ 1437 ifp = if_alloc(IFT_ETHER); 1438 if (ifp == NULL) { 1439 device_printf(dev, "Cannot allocate ifnet\n"); 1440 return (ENOMEM); 1441 } 1442 vi->ifp = ifp; 1443 ifp->if_softc = vi; 1444 1445 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1446 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1447 1448 ifp->if_init = cxgbe_init; 1449 ifp->if_ioctl = cxgbe_ioctl; 1450 ifp->if_transmit = cxgbe_transmit; 1451 ifp->if_qflush = cxgbe_qflush; 1452 ifp->if_get_counter = cxgbe_get_counter; 1453 1454 ifp->if_capabilities = T4_CAP; 1455#ifdef TCP_OFFLOAD 1456 if (vi->nofldrxq != 0) 1457 ifp->if_capabilities |= IFCAP_TOE; 1458#endif 1459#ifdef DEV_NETMAP 1460 if (vi->nnmrxq != 0) 1461 ifp->if_capabilities |= IFCAP_NETMAP; 1462#endif 1463 ifp->if_capenable = T4_CAP_ENABLE; 1464 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO | 1465 CSUM_UDP_IPV6 | CSUM_TCP_IPV6; 1466 1467 ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 1468 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS; 1469 ifp->if_hw_tsomaxsegsize = 65536; 1470 1471 /* Initialize ifmedia for this VI */ 1472 ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change, 1473 cxgbe_media_status); 1474 build_medialist(vi->pi, &vi->media); 1475 1476 vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp, 1477 EVENTHANDLER_PRI_ANY); 1478 1479 ether_ifattach(ifp, vi->hw_addr); 1480#ifdef DEV_NETMAP 1481 if (ifp->if_capabilities & IFCAP_NETMAP) 1482 cxgbe_nm_attach(vi); 
1483#endif 1484 sb = sbuf_new_auto(); 1485 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq); 1486#ifdef TCP_OFFLOAD 1487 if (ifp->if_capabilities & IFCAP_TOE) 1488 sbuf_printf(sb, "; %d txq, %d rxq (TOE)", 1489 vi->nofldtxq, vi->nofldrxq); 1490#endif 1491#ifdef DEV_NETMAP 1492 if (ifp->if_capabilities & IFCAP_NETMAP) 1493 sbuf_printf(sb, "; %d txq, %d rxq (netmap)", 1494 vi->nnmtxq, vi->nnmrxq); 1495#endif 1496 sbuf_finish(sb); 1497 device_printf(dev, "%s\n", sbuf_data(sb)); 1498 sbuf_delete(sb); 1499 1500 vi_sysctls(vi); 1501 1502 return (0); 1503} 1504 1505static int 1506cxgbe_attach(device_t dev) 1507{ 1508 struct port_info *pi = device_get_softc(dev); 1509 struct adapter *sc = pi->adapter; 1510 struct vi_info *vi; 1511 int i, rc; 1512 1513 callout_init_mtx(&pi->tick, &pi->pi_lock, 0); 1514 1515 rc = cxgbe_vi_attach(dev, &pi->vi[0]); 1516 if (rc) 1517 return (rc); 1518 1519 for_each_vi(pi, i, vi) { 1520 if (i == 0) 1521 continue; 1522 vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1); 1523 if (vi->dev == NULL) { 1524 device_printf(dev, "failed to add VI %d\n", i); 1525 continue; 1526 } 1527 device_set_softc(vi->dev, vi); 1528 } 1529 1530 cxgbe_sysctls(pi); 1531 1532 bus_generic_attach(dev); 1533 1534 return (0); 1535} 1536 1537static void 1538cxgbe_vi_detach(struct vi_info *vi) 1539{ 1540 struct ifnet *ifp = vi->ifp; 1541 1542 ether_ifdetach(ifp); 1543 1544 if (vi->vlan_c) 1545 EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c); 1546 1547 /* Let detach proceed even if these fail. */ 1548#ifdef DEV_NETMAP 1549 if (ifp->if_capabilities & IFCAP_NETMAP) 1550 cxgbe_nm_detach(vi); 1551#endif 1552 cxgbe_uninit_synchronized(vi); 1553 callout_drain(&vi->tick); 1554 vi_full_uninit(vi); 1555 1556 ifmedia_removeall(&vi->media); 1557 if_free(vi->ifp); 1558 vi->ifp = NULL; 1559} 1560 1561static int 1562cxgbe_detach(device_t dev) 1563{ 1564 struct port_info *pi = device_get_softc(dev); 1565 struct adapter *sc = pi->adapter; 1566 int rc; 1567 1568 /* Detach the extra VIs first. 
*/ 1569 rc = bus_generic_detach(dev); 1570 if (rc) 1571 return (rc); 1572 device_delete_children(dev); 1573 1574 doom_vi(sc, &pi->vi[0]); 1575 1576 if (pi->flags & HAS_TRACEQ) { 1577 sc->traceq = -1; /* cloner should not create ifnet */ 1578 t4_tracer_port_detach(sc); 1579 } 1580 1581 cxgbe_vi_detach(&pi->vi[0]); 1582 callout_drain(&pi->tick); 1583 1584 end_synchronized_op(sc, 0); 1585 1586 return (0); 1587} 1588 1589static void 1590cxgbe_init(void *arg) 1591{ 1592 struct vi_info *vi = arg; 1593 struct adapter *sc = vi->pi->adapter; 1594 1595 if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0) 1596 return; 1597 cxgbe_init_synchronized(vi); 1598 end_synchronized_op(sc, 0); 1599} 1600 1601static int 1602cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data) 1603{ 1604 int rc = 0, mtu, flags, can_sleep; 1605 struct vi_info *vi = ifp->if_softc; 1606 struct adapter *sc = vi->pi->adapter; 1607 struct ifreq *ifr = (struct ifreq *)data; 1608 uint32_t mask; 1609 1610 switch (cmd) { 1611 case SIOCSIFMTU: 1612 mtu = ifr->ifr_mtu; 1613 if (mtu < ETHERMIN || mtu > MAX_MTU) 1614 return (EINVAL); 1615 1616 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu"); 1617 if (rc) 1618 return (rc); 1619 ifp->if_mtu = mtu; 1620 if (vi->flags & VI_INIT_DONE) { 1621 t4_update_fl_bufsize(ifp); 1622 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1623 rc = update_mac_settings(ifp, XGMAC_MTU); 1624 } 1625 end_synchronized_op(sc, 0); 1626 break; 1627 1628 case SIOCSIFFLAGS: 1629 can_sleep = 0; 1630redo_sifflags: 1631 rc = begin_synchronized_op(sc, vi, 1632 can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg"); 1633 if (rc) 1634 return (rc); 1635 1636 if (ifp->if_flags & IFF_UP) { 1637 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1638 flags = vi->if_flags; 1639 if ((ifp->if_flags ^ flags) & 1640 (IFF_PROMISC | IFF_ALLMULTI)) { 1641 if (can_sleep == 1) { 1642 end_synchronized_op(sc, 0); 1643 can_sleep = 0; 1644 goto redo_sifflags; 1645 } 1646 rc = update_mac_settings(ifp, 1647 XGMAC_PROMISC | XGMAC_ALLMULTI); 1648 } 1649 } else { 1650 if (can_sleep == 0) { 1651 end_synchronized_op(sc, LOCK_HELD); 1652 can_sleep = 1; 1653 goto redo_sifflags; 1654 } 1655 rc = cxgbe_init_synchronized(vi); 1656 } 1657 vi->if_flags = ifp->if_flags; 1658 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1659 if (can_sleep == 0) { 1660 end_synchronized_op(sc, LOCK_HELD); 1661 can_sleep = 1; 1662 goto redo_sifflags; 1663 } 1664 rc = cxgbe_uninit_synchronized(vi); 1665 } 1666 end_synchronized_op(sc, can_sleep ? 
0 : LOCK_HELD); 1667 break; 1668 1669 case SIOCADDMULTI: 1670 case SIOCDELMULTI: /* these two are called with a mutex held :-( */ 1671 rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi"); 1672 if (rc) 1673 return (rc); 1674 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1675 rc = update_mac_settings(ifp, XGMAC_MCADDRS); 1676 end_synchronized_op(sc, LOCK_HELD); 1677 break; 1678 1679 case SIOCSIFCAP: 1680 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap"); 1681 if (rc) 1682 return (rc); 1683 1684 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1685 if (mask & IFCAP_TXCSUM) { 1686 ifp->if_capenable ^= IFCAP_TXCSUM; 1687 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 1688 1689 if (IFCAP_TSO4 & ifp->if_capenable && 1690 !(IFCAP_TXCSUM & ifp->if_capenable)) { 1691 ifp->if_capenable &= ~IFCAP_TSO4; 1692 if_printf(ifp, 1693 "tso4 disabled due to -txcsum.\n"); 1694 } 1695 } 1696 if (mask & IFCAP_TXCSUM_IPV6) { 1697 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 1698 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 1699 1700 if (IFCAP_TSO6 & ifp->if_capenable && 1701 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 1702 ifp->if_capenable &= ~IFCAP_TSO6; 1703 if_printf(ifp, 1704 "tso6 disabled due to -txcsum6.\n"); 1705 } 1706 } 1707 if (mask & IFCAP_RXCSUM) 1708 ifp->if_capenable ^= IFCAP_RXCSUM; 1709 if (mask & IFCAP_RXCSUM_IPV6) 1710 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 1711 1712 /* 1713 * Note that we leave CSUM_TSO alone (it is always set). The 1714 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before 1715 * sending a TSO request our way, so it's sufficient to toggle 1716 * IFCAP_TSOx only. 1717 */ 1718 if (mask & IFCAP_TSO4) { 1719 if (!(IFCAP_TSO4 & ifp->if_capenable) && 1720 !(IFCAP_TXCSUM & ifp->if_capenable)) { 1721 if_printf(ifp, "enable txcsum first.\n"); 1722 rc = EAGAIN; 1723 goto fail; 1724 } 1725 ifp->if_capenable ^= IFCAP_TSO4; 1726 } 1727 if (mask & IFCAP_TSO6) { 1728 if (!(IFCAP_TSO6 & ifp->if_capenable) && 1729 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 1730 if_printf(ifp, "enable txcsum6 first.\n"); 1731 rc = EAGAIN; 1732 goto fail; 1733 } 1734 ifp->if_capenable ^= IFCAP_TSO6; 1735 } 1736 if (mask & IFCAP_LRO) { 1737#if defined(INET) || defined(INET6) 1738 int i; 1739 struct sge_rxq *rxq; 1740 1741 ifp->if_capenable ^= IFCAP_LRO; 1742 for_each_rxq(vi, i, rxq) { 1743 if (ifp->if_capenable & IFCAP_LRO) 1744 rxq->iq.flags |= IQ_LRO_ENABLED; 1745 else 1746 rxq->iq.flags &= ~IQ_LRO_ENABLED; 1747 } 1748#endif 1749 } 1750#ifdef TCP_OFFLOAD 1751 if (mask & IFCAP_TOE) { 1752 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE; 1753 1754 rc = toe_capability(vi, enable); 1755 if (rc != 0) 1756 goto fail; 1757 1758 ifp->if_capenable ^= mask; 1759 } 1760#endif 1761 if (mask & IFCAP_VLAN_HWTAGGING) { 1762 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1763 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1764 rc = update_mac_settings(ifp, XGMAC_VLANEX); 1765 } 1766 if (mask & IFCAP_VLAN_MTU) { 1767 ifp->if_capenable ^= IFCAP_VLAN_MTU; 1768 1769 /* Need to find out how to disable auto-mtu-inflation */ 1770 } 1771 if (mask & IFCAP_VLAN_HWTSO) 1772 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1773 if (mask & IFCAP_VLAN_HWCSUM) 1774 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1775 1776#ifdef VLAN_CAPABILITIES 1777 VLAN_CAPABILITIES(ifp); 1778#endif 1779fail: 1780 end_synchronized_op(sc, 0); 1781 break; 1782 1783 case SIOCSIFMEDIA: 1784 case SIOCGIFMEDIA: 1785 case SIOCGIFXMEDIA: 1786 ifmedia_ioctl(ifp, ifr, &vi->media, cmd); 1787 break; 1788 1789 case SIOCGI2C: { 1790 struct ifi2creq i2c; 1791 1792 
rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 1793 if (rc != 0) 1794 break; 1795 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 1796 rc = EPERM; 1797 break; 1798 } 1799 if (i2c.len > sizeof(i2c.data)) { 1800 rc = EINVAL; 1801 break; 1802 } 1803 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c"); 1804 if (rc) 1805 return (rc); 1806 rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr, 1807 i2c.offset, i2c.len, &i2c.data[0]); 1808 end_synchronized_op(sc, 0); 1809 if (rc == 0) 1810 rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 1811 break; 1812 } 1813 1814 default: 1815 rc = ether_ioctl(ifp, cmd, data); 1816 } 1817 1818 return (rc); 1819} 1820 1821static int 1822cxgbe_transmit(struct ifnet *ifp, struct mbuf *m) 1823{ 1824 struct vi_info *vi = ifp->if_softc; 1825 struct port_info *pi = vi->pi; 1826 struct adapter *sc = pi->adapter; 1827 struct sge_txq *txq; 1828 void *items[1]; 1829 int rc; 1830 1831 M_ASSERTPKTHDR(m); 1832 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */ 1833 1834 if (__predict_false(pi->link_cfg.link_ok == 0)) { 1835 m_freem(m); 1836 return (ENETDOWN); 1837 } 1838 1839 rc = parse_pkt(sc, &m); 1840 if (__predict_false(rc != 0)) { 1841 MPASS(m == NULL); /* was freed already */ 1842 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */ 1843 return (rc); 1844 } 1845 1846 /* Select a txq. */ 1847 txq = &sc->sge.txq[vi->first_txq]; 1848 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 1849 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + 1850 vi->rsrv_noflowq); 1851 1852 items[0] = m; 1853 rc = mp_ring_enqueue(txq->r, items, 1, 4096); 1854 if (__predict_false(rc != 0)) 1855 m_freem(m); 1856 1857 return (rc); 1858} 1859 1860static void 1861cxgbe_qflush(struct ifnet *ifp) 1862{ 1863 struct vi_info *vi = ifp->if_softc; 1864 struct sge_txq *txq; 1865 int i; 1866 1867 /* queues do not exist if !VI_INIT_DONE. 
*/ 1868 if (vi->flags & VI_INIT_DONE) { 1869 for_each_txq(vi, i, txq) { 1870 TXQ_LOCK(txq); 1871 txq->eq.flags &= ~EQ_ENABLED; 1872 TXQ_UNLOCK(txq); 1873 while (!mp_ring_is_idle(txq->r)) { 1874 mp_ring_check_drainage(txq->r, 0); 1875 pause("qflush", 1); 1876 } 1877 } 1878 } 1879 if_qflush(ifp); 1880} 1881 1882static uint64_t 1883vi_get_counter(struct ifnet *ifp, ift_counter c) 1884{ 1885 struct vi_info *vi = ifp->if_softc; 1886 struct fw_vi_stats_vf *s = &vi->stats; 1887 1888 vi_refresh_stats(vi->pi->adapter, vi); 1889 1890 switch (c) { 1891 case IFCOUNTER_IPACKETS: 1892 return (s->rx_bcast_frames + s->rx_mcast_frames + 1893 s->rx_ucast_frames); 1894 case IFCOUNTER_IERRORS: 1895 return (s->rx_err_frames); 1896 case IFCOUNTER_OPACKETS: 1897 return (s->tx_bcast_frames + s->tx_mcast_frames + 1898 s->tx_ucast_frames + s->tx_offload_frames); 1899 case IFCOUNTER_OERRORS: 1900 return (s->tx_drop_frames); 1901 case IFCOUNTER_IBYTES: 1902 return (s->rx_bcast_bytes + s->rx_mcast_bytes + 1903 s->rx_ucast_bytes); 1904 case IFCOUNTER_OBYTES: 1905 return (s->tx_bcast_bytes + s->tx_mcast_bytes + 1906 s->tx_ucast_bytes + s->tx_offload_bytes); 1907 case IFCOUNTER_IMCASTS: 1908 return (s->rx_mcast_frames); 1909 case IFCOUNTER_OMCASTS: 1910 return (s->tx_mcast_frames); 1911 case IFCOUNTER_OQDROPS: { 1912 uint64_t drops; 1913 1914 drops = 0; 1915 if (vi->flags & VI_INIT_DONE) { 1916 int i; 1917 struct sge_txq *txq; 1918 1919 for_each_txq(vi, i, txq) 1920 drops += counter_u64_fetch(txq->r->drops); 1921 } 1922 1923 return (drops); 1924 1925 } 1926 1927 default: 1928 return (if_get_counter_default(ifp, c)); 1929 } 1930} 1931 1932uint64_t 1933cxgbe_get_counter(struct ifnet *ifp, ift_counter c) 1934{ 1935 struct vi_info *vi = ifp->if_softc; 1936 struct port_info *pi = vi->pi; 1937 struct adapter *sc = pi->adapter; 1938 struct port_stats *s = &pi->stats; 1939 1940 if (pi->nvi > 1 || sc->flags & IS_VF) 1941 return (vi_get_counter(ifp, c)); 1942 1943 cxgbe_refresh_stats(sc, pi); 1944 1945 switch (c) { 1946 case IFCOUNTER_IPACKETS: 1947 return (s->rx_frames); 1948 1949 case IFCOUNTER_IERRORS: 1950 return (s->rx_jabber + s->rx_runt + s->rx_too_long + 1951 s->rx_fcs_err + s->rx_len_err); 1952 1953 case IFCOUNTER_OPACKETS: 1954 return (s->tx_frames); 1955 1956 case IFCOUNTER_OERRORS: 1957 return (s->tx_error_frames); 1958 1959 case IFCOUNTER_IBYTES: 1960 return (s->rx_octets); 1961 1962 case IFCOUNTER_OBYTES: 1963 return (s->tx_octets); 1964 1965 case IFCOUNTER_IMCASTS: 1966 return (s->rx_mcast_frames); 1967 1968 case IFCOUNTER_OMCASTS: 1969 return (s->tx_mcast_frames); 1970 1971 case IFCOUNTER_IQDROPS: 1972 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + 1973 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 + 1974 s->rx_trunc3 + pi->tnl_cong_drops); 1975 1976 case IFCOUNTER_OQDROPS: { 1977 uint64_t drops; 1978 1979 drops = s->tx_drop; 1980 if (vi->flags & VI_INIT_DONE) { 1981 int i; 1982 struct sge_txq *txq; 1983 1984 for_each_txq(vi, i, txq) 1985 drops += counter_u64_fetch(txq->r->drops); 1986 } 1987 1988 return (drops); 1989 1990 } 1991 1992 default: 1993 return (if_get_counter_default(ifp, c)); 1994 } 1995} 1996 1997static int 1998cxgbe_media_change(struct ifnet *ifp) 1999{ 2000 struct vi_info *vi = ifp->if_softc; 2001 2002 device_printf(vi->dev, "%s unimplemented.\n", __func__); 2003 2004 return (EOPNOTSUPP); 2005} 2006 2007static void 2008cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2009{ 2010 struct vi_info *vi = ifp->if_softc; 2011 struct port_info *pi = vi->pi; 2012 struct 
ifmedia_entry *cur; 2013 int speed = pi->link_cfg.speed; 2014 2015 cur = vi->media.ifm_cur; 2016 2017 ifmr->ifm_status = IFM_AVALID; 2018 if (!pi->link_cfg.link_ok) 2019 return; 2020 2021 ifmr->ifm_status |= IFM_ACTIVE; 2022 2023 /* active and current will differ iff current media is autoselect. */ 2024 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) 2025 return; 2026 2027 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 2028 if (speed == 10000) 2029 ifmr->ifm_active |= IFM_10G_T; 2030 else if (speed == 1000) 2031 ifmr->ifm_active |= IFM_1000_T; 2032 else if (speed == 100) 2033 ifmr->ifm_active |= IFM_100_TX; 2034 else if (speed == 10) 2035 ifmr->ifm_active |= IFM_10_T; 2036 else 2037 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__, 2038 speed)); 2039} 2040 2041static int 2042vcxgbe_probe(device_t dev) 2043{ 2044 char buf[128]; 2045 struct vi_info *vi = device_get_softc(dev); 2046 2047 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id, 2048 vi - vi->pi->vi); 2049 device_set_desc_copy(dev, buf); 2050 2051 return (BUS_PROBE_DEFAULT); 2052} 2053 2054static int 2055vcxgbe_attach(device_t dev) 2056{ 2057 struct vi_info *vi; 2058 struct port_info *pi; 2059 struct adapter *sc; 2060 int func, index, rc; 2061 u32 param, val; 2062 2063 vi = device_get_softc(dev); 2064 pi = vi->pi; 2065 sc = pi->adapter; 2066 2067 index = vi - pi->vi; 2068 KASSERT(index < nitems(vi_mac_funcs), 2069 ("%s: VI %s doesn't have a MAC func", __func__, 2070 device_get_nameunit(dev))); 2071 func = vi_mac_funcs[index]; 2072 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, 2073 vi->hw_addr, &vi->rss_size, func, 0); 2074 if (rc < 0) { 2075 device_printf(dev, "Failed to allocate virtual interface " 2076 "for port %d: %d\n", pi->port_id, -rc); 2077 return (-rc); 2078 } 2079 vi->viid = rc; 2080 if (chip_id(sc) <= CHELSIO_T5) 2081 vi->smt_idx = (rc & 0x7f) << 1; 2082 else 2083 vi->smt_idx = (rc & 0x7f); 2084 2085 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 2086 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | 2087 V_FW_PARAMS_PARAM_YZ(vi->viid); 2088 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 2089 if (rc) 2090 vi->rss_base = 0xffff; 2091 else { 2092 /* MPASS((val >> 16) == rss_size); */ 2093 vi->rss_base = val & 0xffff; 2094 } 2095 2096 rc = cxgbe_vi_attach(dev, vi); 2097 if (rc) { 2098 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2099 return (rc); 2100 } 2101 return (0); 2102} 2103 2104static int 2105vcxgbe_detach(device_t dev) 2106{ 2107 struct vi_info *vi; 2108 struct adapter *sc; 2109 2110 vi = device_get_softc(dev); 2111 sc = vi->pi->adapter; 2112 2113 doom_vi(sc, vi); 2114 2115 cxgbe_vi_detach(vi); 2116 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2117 2118 end_synchronized_op(sc, 0); 2119 2120 return (0); 2121} 2122 2123void 2124t4_fatal_err(struct adapter *sc) 2125{ 2126 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 2127 t4_intr_disable(sc); 2128 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n", 2129 device_get_nameunit(sc->dev)); 2130} 2131 2132void 2133t4_add_adapter(struct adapter *sc) 2134{ 2135 sx_xlock(&t4_list_lock); 2136 SLIST_INSERT_HEAD(&t4_list, sc, link); 2137 sx_xunlock(&t4_list_lock); 2138} 2139 2140int 2141t4_map_bars_0_and_4(struct adapter *sc) 2142{ 2143 sc->regs_rid = PCIR_BAR(0); 2144 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2145 &sc->regs_rid, RF_ACTIVE); 2146 if (sc->regs_res == NULL) { 2147 device_printf(sc->dev, "cannot map registers.\n"); 2148 return (ENXIO); 2149 } 2150 sc->bt =
rman_get_bustag(sc->regs_res); 2151 sc->bh = rman_get_bushandle(sc->regs_res); 2152 sc->mmio_len = rman_get_size(sc->regs_res); 2153 setbit(&sc->doorbells, DOORBELL_KDB); 2154 2155 sc->msix_rid = PCIR_BAR(4); 2156 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2157 &sc->msix_rid, RF_ACTIVE); 2158 if (sc->msix_res == NULL) { 2159 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 2160 return (ENXIO); 2161 } 2162 2163 return (0); 2164} 2165 2166int 2167t4_map_bar_2(struct adapter *sc) 2168{ 2169 2170 /* 2171 * T4: only iWARP driver uses the userspace doorbells. There is no need 2172 * to map it if RDMA is disabled. 2173 */ 2174 if (is_t4(sc) && sc->rdmacaps == 0) 2175 return (0); 2176 2177 sc->udbs_rid = PCIR_BAR(2); 2178 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2179 &sc->udbs_rid, RF_ACTIVE); 2180 if (sc->udbs_res == NULL) { 2181 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 2182 return (ENXIO); 2183 } 2184 sc->udbs_base = rman_get_virtual(sc->udbs_res); 2185 2186 if (chip_id(sc) >= CHELSIO_T5) { 2187 setbit(&sc->doorbells, DOORBELL_UDB); 2188#if defined(__i386__) || defined(__amd64__) 2189 if (t5_write_combine) { 2190 int rc, mode; 2191 2192 /* 2193 * Enable write combining on BAR2. This is the 2194 * userspace doorbell BAR and is split into 128B 2195 * (UDBS_SEG_SIZE) doorbell regions, each associated 2196 * with an egress queue. The first 64B has the doorbell 2197 * and the second 64B can be used to submit a tx work 2198 * request with an implicit doorbell. 2199 */ 2200 2201 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 2202 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 2203 if (rc == 0) { 2204 clrbit(&sc->doorbells, DOORBELL_UDB); 2205 setbit(&sc->doorbells, DOORBELL_WCWR); 2206 setbit(&sc->doorbells, DOORBELL_UDBWC); 2207 } else { 2208 device_printf(sc->dev, 2209 "couldn't enable write combining: %d\n", 2210 rc); 2211 } 2212 2213 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0); 2214 t4_write_reg(sc, A_SGE_STAT_CFG, 2215 V_STATSOURCE_T5(7) | mode); 2216 } 2217#endif 2218 } 2219 2220 return (0); 2221} 2222 2223struct memwin_init { 2224 uint32_t base; 2225 uint32_t aperture; 2226}; 2227 2228static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 2229 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2230 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2231 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 2232}; 2233 2234static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 2235 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2236 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2237 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 2238}; 2239 2240static void 2241setup_memwin(struct adapter *sc) 2242{ 2243 const struct memwin_init *mw_init; 2244 struct memwin *mw; 2245 int i; 2246 uint32_t bar0; 2247 2248 if (is_t4(sc)) { 2249 /* 2250 * Read low 32b of bar0 indirectly via the hardware backdoor 2251 * mechanism. Works from within PCI passthrough environments 2252 * too, where rman_get_start() can return a different value. We 2253 * need to program the T4 memory window decoders with the actual 2254 * addresses that will be coming across the PCIe link. 
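 *
 * As an aside on the loop below: each window's base register is written
 * with (mw_base + bar0) | V_BIR(0) | V_WINDOW(ilog2(mw_aperture) - 10),
 * i.e. the window size field is the log2 of the aperture in 1KB units.
 * For illustration only (the value is not taken from the MEMWIN*
 * constants), a 64KB aperture would be encoded as ilog2(65536) - 10 = 6.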
2255 */ 2256 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 2257 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 2258 2259 mw_init = &t4_memwin[0]; 2260 } else { 2261 /* T5+ use the relative offset inside the PCIe BAR */ 2262 bar0 = 0; 2263 2264 mw_init = &t5_memwin[0]; 2265 } 2266 2267 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 2268 rw_init(&mw->mw_lock, "memory window access"); 2269 mw->mw_base = mw_init->base; 2270 mw->mw_aperture = mw_init->aperture; 2271 mw->mw_curpos = 0; 2272 t4_write_reg(sc, 2273 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2274 (mw->mw_base + bar0) | V_BIR(0) | 2275 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 2276 rw_wlock(&mw->mw_lock); 2277 position_memwin(sc, i, 0); 2278 rw_wunlock(&mw->mw_lock); 2279 } 2280 2281 /* flush */ 2282 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2283} 2284 2285/* 2286 * Positions the memory window at the given address in the card's address space. 2287 * There are some alignment requirements and the actual position may be at an 2288 * address prior to the requested address. mw->mw_curpos always has the actual 2289 * position of the window. 2290 */ 2291static void 2292position_memwin(struct adapter *sc, int idx, uint32_t addr) 2293{ 2294 struct memwin *mw; 2295 uint32_t pf; 2296 uint32_t reg; 2297 2298 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2299 mw = &sc->memwin[idx]; 2300 rw_assert(&mw->mw_lock, RA_WLOCKED); 2301 2302 if (is_t4(sc)) { 2303 pf = 0; 2304 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 2305 } else { 2306 pf = V_PFNUM(sc->pf); 2307 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 2308 } 2309 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 2310 t4_write_reg(sc, reg, mw->mw_curpos | pf); 2311 t4_read_reg(sc, reg); /* flush */ 2312} 2313 2314static int 2315rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2316 int len, int rw) 2317{ 2318 struct memwin *mw; 2319 uint32_t mw_end, v; 2320 2321 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2322 2323 /* Memory can only be accessed in naturally aligned 4 byte units */ 2324 if (addr & 3 || len & 3 || len <= 0) 2325 return (EINVAL); 2326 2327 mw = &sc->memwin[idx]; 2328 while (len > 0) { 2329 rw_rlock(&mw->mw_lock); 2330 mw_end = mw->mw_curpos + mw->mw_aperture; 2331 if (addr >= mw_end || addr < mw->mw_curpos) { 2332 /* Will need to reposition the window */ 2333 if (!rw_try_upgrade(&mw->mw_lock)) { 2334 rw_runlock(&mw->mw_lock); 2335 rw_wlock(&mw->mw_lock); 2336 } 2337 rw_assert(&mw->mw_lock, RA_WLOCKED); 2338 position_memwin(sc, idx, addr); 2339 rw_downgrade(&mw->mw_lock); 2340 mw_end = mw->mw_curpos + mw->mw_aperture; 2341 } 2342 rw_assert(&mw->mw_lock, RA_RLOCKED); 2343 while (addr < mw_end && len > 0) { 2344 if (rw == 0) { 2345 v = t4_read_reg(sc, mw->mw_base + addr - 2346 mw->mw_curpos); 2347 *val++ = le32toh(v); 2348 } else { 2349 v = *val++; 2350 t4_write_reg(sc, mw->mw_base + addr - 2351 mw->mw_curpos, htole32(v)); 2352 } 2353 addr += 4; 2354 len -= 4; 2355 } 2356 rw_runlock(&mw->mw_lock); 2357 } 2358 2359 return (0); 2360} 2361 2362static inline int 2363read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2364 int len) 2365{ 2366 2367 return (rw_via_memwin(sc, idx, addr, val, len, 0)); 2368} 2369 2370static inline int 2371write_via_memwin(struct adapter *sc, int idx, uint32_t addr, 2372 const uint32_t *val, int len) 2373{ 2374 2375 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); 2376} 2377 2378static int 2379t4_range_cmp(const void *a, 
const void *b) 2380{ 2381 return ((const struct t4_range *)a)->start - 2382 ((const struct t4_range *)b)->start; 2383} 2384 2385/* 2386 * Verify that the memory range specified by the addr/len pair is valid within 2387 * the card's address space. 2388 */ 2389static int 2390validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2391{ 2392 struct t4_range mem_ranges[4], *r, *next; 2393 uint32_t em, addr_len; 2394 int i, n, remaining; 2395 2396 /* Memory can only be accessed in naturally aligned 4 byte units */ 2397 if (addr & 3 || len & 3 || len <= 0) 2398 return (EINVAL); 2399 2400 /* Enabled memories */ 2401 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2402 2403 r = &mem_ranges[0]; 2404 n = 0; 2405 bzero(r, sizeof(mem_ranges)); 2406 if (em & F_EDRAM0_ENABLE) { 2407 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2408 r->size = G_EDRAM0_SIZE(addr_len) << 20; 2409 if (r->size > 0) { 2410 r->start = G_EDRAM0_BASE(addr_len) << 20; 2411 if (addr >= r->start && 2412 addr + len <= r->start + r->size) 2413 return (0); 2414 r++; 2415 n++; 2416 } 2417 } 2418 if (em & F_EDRAM1_ENABLE) { 2419 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2420 r->size = G_EDRAM1_SIZE(addr_len) << 20; 2421 if (r->size > 0) { 2422 r->start = G_EDRAM1_BASE(addr_len) << 20; 2423 if (addr >= r->start && 2424 addr + len <= r->start + r->size) 2425 return (0); 2426 r++; 2427 n++; 2428 } 2429 } 2430 if (em & F_EXT_MEM_ENABLE) { 2431 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2432 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 2433 if (r->size > 0) { 2434 r->start = G_EXT_MEM_BASE(addr_len) << 20; 2435 if (addr >= r->start && 2436 addr + len <= r->start + r->size) 2437 return (0); 2438 r++; 2439 n++; 2440 } 2441 } 2442 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 2443 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2444 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 2445 if (r->size > 0) { 2446 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 2447 if (addr >= r->start && 2448 addr + len <= r->start + r->size) 2449 return (0); 2450 r++; 2451 n++; 2452 } 2453 } 2454 MPASS(n <= nitems(mem_ranges)); 2455 2456 if (n > 1) { 2457 /* Sort and merge the ranges. */ 2458 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 2459 2460 /* Start from index 0 and examine the next n - 1 entries. */ 2461 r = &mem_ranges[0]; 2462 for (remaining = n - 1; remaining > 0; remaining--, r++) { 2463 2464 MPASS(r->size > 0); /* r is a valid entry. */ 2465 next = r + 1; 2466 MPASS(next->size > 0); /* and so is the next one. */ 2467 2468 while (r->start + r->size >= next->start) { 2469 /* Merge the next one into the current entry. */ 2470 r->size = max(r->start + r->size, 2471 next->start + next->size) - r->start; 2472 n--; /* One fewer entry in total. */ 2473 if (--remaining == 0) 2474 goto done; /* short circuit */ 2475 next++; 2476 } 2477 if (next != r + 1) { 2478 /* 2479 * Some entries were merged into r and next 2480 * points to the first valid entry that couldn't 2481 * be merged. 2482 */ 2483 MPASS(next->size > 0); /* must be valid */ 2484 memcpy(r + 1, next, remaining * sizeof(*r)); 2485#ifdef INVARIANTS 2486 /* 2487 * This so that the foo->size assertion in the 2488 * next iteration of the loop do the right 2489 * thing for entries that were pulled up and are 2490 * no longer valid. 2491 */ 2492 MPASS(n < nitems(mem_ranges)); 2493 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 2494 sizeof(struct t4_range)); 2495#endif 2496 } 2497 } 2498done: 2499 /* Done merging the ranges. 
*/ 2500 MPASS(n > 0); 2501 r = &mem_ranges[0]; 2502 for (i = 0; i < n; i++, r++) { 2503 if (addr >= r->start && 2504 addr + len <= r->start + r->size) 2505 return (0); 2506 } 2507 } 2508 2509 return (EFAULT); 2510} 2511 2512static int 2513fwmtype_to_hwmtype(int mtype) 2514{ 2515 2516 switch (mtype) { 2517 case FW_MEMTYPE_EDC0: 2518 return (MEM_EDC0); 2519 case FW_MEMTYPE_EDC1: 2520 return (MEM_EDC1); 2521 case FW_MEMTYPE_EXTMEM: 2522 return (MEM_MC0); 2523 case FW_MEMTYPE_EXTMEM1: 2524 return (MEM_MC1); 2525 default: 2526 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2527 } 2528} 2529 2530/* 2531 * Verify that the memory range specified by the memtype/offset/len pair is 2532 * valid and lies entirely within the memtype specified. The global address of 2533 * the start of the range is returned in addr. 2534 */ 2535static int 2536validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2537 uint32_t *addr) 2538{ 2539 uint32_t em, addr_len, maddr; 2540 2541 /* Memory can only be accessed in naturally aligned 4 byte units */ 2542 if (off & 3 || len & 3 || len == 0) 2543 return (EINVAL); 2544 2545 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2546 switch (fwmtype_to_hwmtype(mtype)) { 2547 case MEM_EDC0: 2548 if (!(em & F_EDRAM0_ENABLE)) 2549 return (EINVAL); 2550 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2551 maddr = G_EDRAM0_BASE(addr_len) << 20; 2552 break; 2553 case MEM_EDC1: 2554 if (!(em & F_EDRAM1_ENABLE)) 2555 return (EINVAL); 2556 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2557 maddr = G_EDRAM1_BASE(addr_len) << 20; 2558 break; 2559 case MEM_MC: 2560 if (!(em & F_EXT_MEM_ENABLE)) 2561 return (EINVAL); 2562 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2563 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2564 break; 2565 case MEM_MC1: 2566 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 2567 return (EINVAL); 2568 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2569 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2570 break; 2571 default: 2572 return (EINVAL); 2573 } 2574 2575 *addr = maddr + off; /* global address */ 2576 return (validate_mem_range(sc, *addr, len)); 2577} 2578 2579static int 2580fixup_devlog_params(struct adapter *sc) 2581{ 2582 struct devlog_params *dparams = &sc->params.devlog; 2583 int rc; 2584 2585 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 2586 dparams->size, &dparams->addr); 2587 2588 return (rc); 2589} 2590 2591static int 2592cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis, 2593 struct intrs_and_queues *iaq) 2594{ 2595 int rc, itype, navail, nrxq10g, nrxq1g, n; 2596 int nofldrxq10g = 0, nofldrxq1g = 0; 2597 2598 bzero(iaq, sizeof(*iaq)); 2599 2600 iaq->ntxq10g = t4_ntxq10g; 2601 iaq->ntxq1g = t4_ntxq1g; 2602 iaq->ntxq_vi = t4_ntxq_vi; 2603 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 2604 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 2605 iaq->nrxq_vi = t4_nrxq_vi; 2606 iaq->rsrv_noflowq = t4_rsrv_noflowq; 2607#ifdef TCP_OFFLOAD 2608 if (is_offload(sc)) { 2609 iaq->nofldtxq10g = t4_nofldtxq10g; 2610 iaq->nofldtxq1g = t4_nofldtxq1g; 2611 iaq->nofldtxq_vi = t4_nofldtxq_vi; 2612 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 2613 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 2614 iaq->nofldrxq_vi = t4_nofldrxq_vi; 2615 } 2616#endif 2617#ifdef DEV_NETMAP 2618 iaq->nnmtxq_vi = t4_nnmtxq_vi; 2619 iaq->nnmrxq_vi = t4_nnmrxq_vi; 2620#endif 2621 2622 for (itype = INTR_MSIX; itype; itype >>= 1) { 2623 2624 if ((itype & t4_intr_types) == 0) 2625 continue; /* not allowed */ 2626 2627 if (itype == INTR_MSIX) 2628 navail = 
pci_msix_count(sc->dev); 2629 else if (itype == INTR_MSI) 2630 navail = pci_msi_count(sc->dev); 2631 else 2632 navail = 1; 2633restart: 2634 if (navail == 0) 2635 continue; 2636 2637 iaq->intr_type = itype; 2638 iaq->intr_flags_10g = 0; 2639 iaq->intr_flags_1g = 0; 2640 2641 /* 2642 * Best option: an interrupt vector for errors, one for the 2643 * firmware event queue, and one for every rxq (NIC and TOE) of 2644 * every VI. The VIs that support netmap use the same 2645 * interrupts for the NIC rx queues and the netmap rx queues 2646 * because only one set of queues is active at a time. 2647 */ 2648 iaq->nirq = T4_EXTRA_INTR; 2649 iaq->nirq += n10g * (nrxq10g + nofldrxq10g); 2650 iaq->nirq += n1g * (nrxq1g + nofldrxq1g); 2651 iaq->nirq += (n10g + n1g) * (num_vis - 1) * 2652 max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */ 2653 iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi; 2654 if (iaq->nirq <= navail && 2655 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2656 iaq->intr_flags_10g = INTR_ALL; 2657 iaq->intr_flags_1g = INTR_ALL; 2658 goto allocate; 2659 } 2660 2661 /* Disable the VIs (and netmap) if there aren't enough intrs */ 2662 if (num_vis > 1) { 2663 device_printf(sc->dev, "virtual interfaces disabled " 2664 "because num_vis=%u with current settings " 2665 "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, " 2666 "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, " 2667 "nnmrxq_vi=%u) would need %u interrupts but " 2668 "only %u are available.\n", num_vis, nrxq10g, 2669 nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi, 2670 iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq, 2671 navail); 2672 num_vis = 1; 2673 iaq->ntxq_vi = iaq->nrxq_vi = 0; 2674 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; 2675 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; 2676 goto restart; 2677 } 2678 2679 /* 2680 * Second best option: a vector for errors, one for the firmware 2681 * event queue, and vectors for either all the NIC rx queues or 2682 * all the TOE rx queues. The queues that don't get vectors 2683 * will forward their interrupts to those that do. 2684 */ 2685 iaq->nirq = T4_EXTRA_INTR; 2686 if (nrxq10g >= nofldrxq10g) { 2687 iaq->intr_flags_10g = INTR_RXQ; 2688 iaq->nirq += n10g * nrxq10g; 2689 } else { 2690 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2691 iaq->nirq += n10g * nofldrxq10g; 2692 } 2693 if (nrxq1g >= nofldrxq1g) { 2694 iaq->intr_flags_1g = INTR_RXQ; 2695 iaq->nirq += n1g * nrxq1g; 2696 } else { 2697 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2698 iaq->nirq += n1g * nofldrxq1g; 2699 } 2700 if (iaq->nirq <= navail && 2701 (itype != INTR_MSI || powerof2(iaq->nirq))) 2702 goto allocate; 2703 2704 /* 2705 * Next best option: an interrupt vector for errors, one for the 2706 * firmware event queue, and at least one per main-VI. At this 2707 * point we know we'll have to downsize nrxq and/or nofldrxq to 2708 * fit what's available to us. 2709 */ 2710 iaq->nirq = T4_EXTRA_INTR; 2711 iaq->nirq += n10g + n1g; 2712 if (iaq->nirq <= navail) { 2713 int leftover = navail - iaq->nirq; 2714 2715 if (n10g > 0) { 2716 int target = max(nrxq10g, nofldrxq10g); 2717 2718 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 2719 INTR_RXQ : INTR_OFLD_RXQ; 2720 2721 n = 1; 2722 while (n < target && leftover >= n10g) { 2723 leftover -= n10g; 2724 iaq->nirq += n10g; 2725 n++; 2726 } 2727 iaq->nrxq10g = min(n, nrxq10g); 2728#ifdef TCP_OFFLOAD 2729 iaq->nofldrxq10g = min(n, nofldrxq10g); 2730#endif 2731 } 2732 2733 if (n1g > 0) { 2734 int target = max(nrxq1g, nofldrxq1g); 2735 2736 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 
2737 INTR_RXQ : INTR_OFLD_RXQ; 2738 2739 n = 1; 2740 while (n < target && leftover >= n1g) { 2741 leftover -= n1g; 2742 iaq->nirq += n1g; 2743 n++; 2744 } 2745 iaq->nrxq1g = min(n, nrxq1g); 2746#ifdef TCP_OFFLOAD 2747 iaq->nofldrxq1g = min(n, nofldrxq1g); 2748#endif 2749 } 2750 2751 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2752 goto allocate; 2753 } 2754 2755 /* 2756 * Least desirable option: one interrupt vector for everything. 2757 */ 2758 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2759 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2760#ifdef TCP_OFFLOAD 2761 if (is_offload(sc)) 2762 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2763#endif 2764allocate: 2765 navail = iaq->nirq; 2766 rc = 0; 2767 if (itype == INTR_MSIX) 2768 rc = pci_alloc_msix(sc->dev, &navail); 2769 else if (itype == INTR_MSI) 2770 rc = pci_alloc_msi(sc->dev, &navail); 2771 2772 if (rc == 0) { 2773 if (navail == iaq->nirq) 2774 return (0); 2775 2776 /* 2777 * Didn't get the number requested. Use whatever number 2778 * the kernel is willing to allocate (it's in navail). 2779 */ 2780 device_printf(sc->dev, "fewer vectors than requested, " 2781 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2782 itype, iaq->nirq, navail); 2783 pci_release_msi(sc->dev); 2784 goto restart; 2785 } 2786 2787 device_printf(sc->dev, 2788 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 2789 itype, rc, iaq->nirq, navail); 2790 } 2791 2792 device_printf(sc->dev, 2793 "failed to find a usable interrupt type. " 2794 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 2795 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2796 2797 return (ENXIO); 2798} 2799 2800#define FW_VERSION(chip) ( \ 2801 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2802 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2803 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2804 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2805#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2806 2807struct fw_info { 2808 uint8_t chip; 2809 char *kld_name; 2810 char *fw_mod_name; 2811 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2812} fw_info[] = { 2813 { 2814 .chip = CHELSIO_T4, 2815 .kld_name = "t4fw_cfg", 2816 .fw_mod_name = "t4fw", 2817 .fw_hdr = { 2818 .chip = FW_HDR_CHIP_T4, 2819 .fw_ver = htobe32_const(FW_VERSION(T4)), 2820 .intfver_nic = FW_INTFVER(T4, NIC), 2821 .intfver_vnic = FW_INTFVER(T4, VNIC), 2822 .intfver_ofld = FW_INTFVER(T4, OFLD), 2823 .intfver_ri = FW_INTFVER(T4, RI), 2824 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2825 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2826 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2827 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2828 }, 2829 }, { 2830 .chip = CHELSIO_T5, 2831 .kld_name = "t5fw_cfg", 2832 .fw_mod_name = "t5fw", 2833 .fw_hdr = { 2834 .chip = FW_HDR_CHIP_T5, 2835 .fw_ver = htobe32_const(FW_VERSION(T5)), 2836 .intfver_nic = FW_INTFVER(T5, NIC), 2837 .intfver_vnic = FW_INTFVER(T5, VNIC), 2838 .intfver_ofld = FW_INTFVER(T5, OFLD), 2839 .intfver_ri = FW_INTFVER(T5, RI), 2840 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2841 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2842 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2843 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2844 }, 2845 }, { 2846 .chip = CHELSIO_T6, 2847 .kld_name = "t6fw_cfg", 2848 .fw_mod_name = "t6fw", 2849 .fw_hdr = { 2850 .chip = FW_HDR_CHIP_T6, 2851 .fw_ver = htobe32_const(FW_VERSION(T6)), 2852 .intfver_nic = FW_INTFVER(T6, NIC), 2853 .intfver_vnic = FW_INTFVER(T6, VNIC), 2854 .intfver_ofld = FW_INTFVER(T6, OFLD), 2855 
.intfver_ri = FW_INTFVER(T6, RI), 2856 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), 2857 .intfver_iscsi = FW_INTFVER(T6, ISCSI), 2858 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), 2859 .intfver_fcoe = FW_INTFVER(T6, FCOE), 2860 }, 2861 } 2862}; 2863 2864static struct fw_info * 2865find_fw_info(int chip) 2866{ 2867 int i; 2868 2869 for (i = 0; i < nitems(fw_info); i++) { 2870 if (fw_info[i].chip == chip) 2871 return (&fw_info[i]); 2872 } 2873 return (NULL); 2874} 2875 2876/* 2877 * Is the given firmware API compatible with the one the driver was compiled 2878 * with? 2879 */ 2880static int 2881fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2882{ 2883 2884 /* short circuit if it's the exact same firmware version */ 2885 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2886 return (1); 2887 2888 /* 2889 * XXX: Is this too conservative? Perhaps I should limit this to the 2890 * features that are supported in the driver. 2891 */ 2892#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2893 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2894 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2895 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2896 return (1); 2897#undef SAME_INTF 2898 2899 return (0); 2900} 2901 2902/* 2903 * The firmware in the KLD is usable, but should it be installed? This routine 2904 * explains itself in detail if it indicates the KLD firmware should be 2905 * installed. 2906 */ 2907static int 2908should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2909{ 2910 const char *reason; 2911 2912 if (!card_fw_usable) { 2913 reason = "incompatible or unusable"; 2914 goto install; 2915 } 2916 2917 if (k > c) { 2918 reason = "older than the version bundled with this driver"; 2919 goto install; 2920 } 2921 2922 if (t4_fw_install == 2 && k != c) { 2923 reason = "different than the version bundled with this driver"; 2924 goto install; 2925 } 2926 2927 return (0); 2928 2929install: 2930 if (t4_fw_install == 0) { 2931 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2932 "but the driver is prohibited from installing a different " 2933 "firmware on the card.\n", 2934 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2935 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2936 2937 return (0); 2938 } 2939 2940 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2941 "installing firmware %u.%u.%u.%u on card.\n", 2942 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2943 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2944 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2945 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2946 2947 return (1); 2948} 2949/* 2950 * Establish contact with the firmware and determine if we are the master driver 2951 * or not, and whether we are responsible for chip initialization. 2952 */ 2953static int 2954prep_firmware(struct adapter *sc) 2955{ 2956 const struct firmware *fw = NULL, *default_cfg; 2957 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2958 enum dev_state state; 2959 struct fw_info *fw_info; 2960 struct fw_hdr *card_fw; /* fw on the card */ 2961 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2962 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2963 against */ 2964 2965 /* Contact firmware. 
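 * t4_fw_hello() is, roughly, the master election: its return value is the
 * PF/mailbox the firmware considers the master.  If that matches our own
 * mailbox we set MASTER_PF below and take on chip initialization;
 * otherwise we expect whichever PF won to have done (or to be doing) the
 * initialization, and complain if the chip is still uninitialized.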
*/ 2966 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2967 if (rc < 0 || state == DEV_STATE_ERR) { 2968 rc = -rc; 2969 device_printf(sc->dev, 2970 "failed to connect to the firmware: %d, %d.\n", rc, state); 2971 return (rc); 2972 } 2973 pf = rc; 2974 if (pf == sc->mbox) 2975 sc->flags |= MASTER_PF; 2976 else if (state == DEV_STATE_UNINIT) { 2977 /* 2978 * We didn't get to be the master so we definitely won't be 2979 * configuring the chip. It's a bug if someone else hasn't 2980 * configured it already. 2981 */ 2982 device_printf(sc->dev, "couldn't be master(%d), " 2983 "device not already initialized either(%d).\n", rc, state); 2984 return (EDOOFUS); 2985 } 2986 2987 /* This is the firmware whose headers the driver was compiled against */ 2988 fw_info = find_fw_info(chip_id(sc)); 2989 if (fw_info == NULL) { 2990 device_printf(sc->dev, 2991 "unable to look up firmware information for chip %d.\n", 2992 chip_id(sc)); 2993 return (EINVAL); 2994 } 2995 drv_fw = &fw_info->fw_hdr; 2996 2997 /* 2998 * The firmware KLD contains many modules. The KLD name is also the 2999 * name of the module that contains the default config file. 3000 */ 3001 default_cfg = firmware_get(fw_info->kld_name); 3002 3003 /* Read the header of the firmware on the card */ 3004 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 3005 rc = -t4_read_flash(sc, FLASH_FW_START, 3006 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 3007 if (rc == 0) 3008 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 3009 else { 3010 device_printf(sc->dev, 3011 "Unable to read card's firmware header: %d\n", rc); 3012 card_fw_usable = 0; 3013 } 3014 3015 /* This is the firmware in the KLD */ 3016 fw = firmware_get(fw_info->fw_mod_name); 3017 if (fw != NULL) { 3018 kld_fw = (const void *)fw->data; 3019 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 3020 } else { 3021 kld_fw = NULL; 3022 kld_fw_usable = 0; 3023 } 3024 3025 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 3026 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 3027 /* 3028 * Common case: the firmware on the card is an exact match and 3029 * the KLD is an exact match too, or the KLD is 3030 * absent/incompatible. Note that t4_fw_install = 2 is ignored 3031 * here -- use cxgbetool loadfw if you want to reinstall the 3032 * same firmware as the one on the card. 3033 */ 3034 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 3035 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 3036 be32toh(card_fw->fw_ver))) { 3037 3038 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 3039 if (rc != 0) { 3040 device_printf(sc->dev, 3041 "failed to install firmware: %d\n", rc); 3042 goto done; 3043 } 3044 3045 /* Installed successfully, update the cached header too. */ 3046 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 3047 card_fw_usable = 1; 3048 need_fw_reset = 0; /* already reset as part of load_fw */ 3049 } 3050 3051 if (!card_fw_usable) { 3052 uint32_t d, c, k; 3053 3054 d = ntohl(drv_fw->fw_ver); 3055 c = ntohl(card_fw->fw_ver); 3056 k = kld_fw ? 
ntohl(kld_fw->fw_ver) : 0; 3057 3058 device_printf(sc->dev, "Cannot find a usable firmware: " 3059 "fw_install %d, chip state %d, " 3060 "driver compiled with %d.%d.%d.%d, " 3061 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 3062 t4_fw_install, state, 3063 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 3064 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 3065 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3066 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 3067 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 3068 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 3069 rc = EINVAL; 3070 goto done; 3071 } 3072 3073 /* Reset device */ 3074 if (need_fw_reset && 3075 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 3076 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 3077 if (rc != ETIMEDOUT && rc != EIO) 3078 t4_fw_bye(sc, sc->mbox); 3079 goto done; 3080 } 3081 sc->flags |= FW_OK; 3082 3083 rc = get_params__pre_init(sc); 3084 if (rc != 0) 3085 goto done; /* error message displayed already */ 3086 3087 /* Partition adapter resources as specified in the config file. */ 3088 if (state == DEV_STATE_UNINIT) { 3089 3090 KASSERT(sc->flags & MASTER_PF, 3091 ("%s: trying to change chip settings when not master.", 3092 __func__)); 3093 3094 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 3095 if (rc != 0) 3096 goto done; /* error message displayed already */ 3097 3098 t4_tweak_chip_settings(sc); 3099 3100 /* get basic stuff going */ 3101 rc = -t4_fw_initialize(sc, sc->mbox); 3102 if (rc != 0) { 3103 device_printf(sc->dev, "fw init failed: %d.\n", rc); 3104 goto done; 3105 } 3106 } else { 3107 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 3108 sc->cfcsum = 0; 3109 } 3110 3111done: 3112 free(card_fw, M_CXGBE); 3113 if (fw != NULL) 3114 firmware_put(fw, FIRMWARE_UNLOAD); 3115 if (default_cfg != NULL) 3116 firmware_put(default_cfg, FIRMWARE_UNLOAD); 3117 3118 return (rc); 3119} 3120 3121#define FW_PARAM_DEV(param) \ 3122 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 3123 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 3124#define FW_PARAM_PFVF(param) \ 3125 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 3126 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 3127 3128/* 3129 * Partition chip resources for use between various PFs, VFs, etc. 3130 */ 3131static int 3132partition_resources(struct adapter *sc, const struct firmware *default_cfg, 3133 const char *name_prefix) 3134{ 3135 const struct firmware *cfg = NULL; 3136 int rc = 0; 3137 struct fw_caps_config_cmd caps; 3138 uint32_t mtype, moff, finicsum, cfcsum; 3139 3140 /* 3141 * Figure out what configuration file to use. Pick the default config 3142 * file for the card if the user hasn't specified one explicitly. 3143 */ 3144 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 3145 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 3146 /* Card specific overrides go here. */ 3147 if (pci_get_device(sc->dev) == 0x440a) 3148 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 3149 if (is_fpga(sc)) 3150 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 3151 } 3152 3153 /* 3154 * We need to load another module if the profile is anything except 3155 * "default" or "flash". 
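 * The module name is simply "<kld_name>_<profile>" (see the snprintf
 * below), so, to pick a hypothetical example, a T5 card asked to use a
 * profile named "uwire" would look for a module called "t5fw_cfg_uwire".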
3156 */ 3157 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 && 3158 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3159 char s[32]; 3160 3161 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 3162 cfg = firmware_get(s); 3163 if (cfg == NULL) { 3164 if (default_cfg != NULL) { 3165 device_printf(sc->dev, 3166 "unable to load module \"%s\" for " 3167 "configuration profile \"%s\", will use " 3168 "the default config file instead.\n", 3169 s, sc->cfg_file); 3170 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3171 "%s", DEFAULT_CF); 3172 } else { 3173 device_printf(sc->dev, 3174 "unable to load module \"%s\" for " 3175 "configuration profile \"%s\", will use " 3176 "the config file on the card's flash " 3177 "instead.\n", s, sc->cfg_file); 3178 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3179 "%s", FLASH_CF); 3180 } 3181 } 3182 } 3183 3184 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 3185 default_cfg == NULL) { 3186 device_printf(sc->dev, 3187 "default config file not available, will use the config " 3188 "file on the card's flash instead.\n"); 3189 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 3190 } 3191 3192 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3193 u_int cflen; 3194 const uint32_t *cfdata; 3195 uint32_t param, val, addr; 3196 3197 KASSERT(cfg != NULL || default_cfg != NULL, 3198 ("%s: no config to upload", __func__)); 3199 3200 /* 3201 * Ask the firmware where it wants us to upload the config file. 3202 */ 3203 param = FW_PARAM_DEV(CF); 3204 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3205 if (rc != 0) { 3206 /* No support for config file? Shouldn't happen. */ 3207 device_printf(sc->dev, 3208 "failed to query config file location: %d.\n", rc); 3209 goto done; 3210 } 3211 mtype = G_FW_PARAMS_PARAM_Y(val); 3212 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 3213 3214 /* 3215 * XXX: sheer laziness. We deliberately added 4 bytes of 3216 * useless stuffing/comments at the end of the config file so 3217 * it's ok to simply throw away the last remaining bytes when 3218 * the config file is not an exact multiple of 4. This also 3219 * helps with the validate_mt_off_len check. 3220 */ 3221 if (cfg != NULL) { 3222 cflen = cfg->datasize & ~3; 3223 cfdata = cfg->data; 3224 } else { 3225 cflen = default_cfg->datasize & ~3; 3226 cfdata = default_cfg->data; 3227 } 3228 3229 if (cflen > FLASH_CFG_MAX_SIZE) { 3230 device_printf(sc->dev, 3231 "config file too long (%d, max allowed is %d). " 3232 "Will try to use the config on the card, if any.\n", 3233 cflen, FLASH_CFG_MAX_SIZE); 3234 goto use_config_on_flash; 3235 } 3236 3237 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 3238 if (rc != 0) { 3239 device_printf(sc->dev, 3240 "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 3241 "Will try to use the config on the card, if any.\n", 3242 __func__, mtype, moff, cflen, rc); 3243 goto use_config_on_flash; 3244 } 3245 write_via_memwin(sc, 2, addr, cfdata, cflen); 3246 } else { 3247use_config_on_flash: 3248 mtype = FW_MEMTYPE_FLASH; 3249 moff = t4_flash_cfg_addr(sc); 3250 } 3251 3252 bzero(&caps, sizeof(caps)); 3253 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3254 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3255 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 3256 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 3257 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 3258 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3259 if (rc != 0) { 3260 device_printf(sc->dev, 3261 "failed to pre-process config file: %d " 3262 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 3263 goto done; 3264 } 3265 3266 finicsum = be32toh(caps.finicsum); 3267 cfcsum = be32toh(caps.cfcsum); 3268 if (finicsum != cfcsum) { 3269 device_printf(sc->dev, 3270 "WARNING: config file checksum mismatch: %08x %08x\n", 3271 finicsum, cfcsum); 3272 } 3273 sc->cfcsum = cfcsum; 3274 3275#define LIMIT_CAPS(x) do { \ 3276 caps.x &= htobe16(t4_##x##_allowed); \ 3277} while (0) 3278 3279 /* 3280 * Let the firmware know what features will (not) be used so it can tune 3281 * things accordingly. 3282 */ 3283 LIMIT_CAPS(nbmcaps); 3284 LIMIT_CAPS(linkcaps); 3285 LIMIT_CAPS(switchcaps); 3286 LIMIT_CAPS(niccaps); 3287 LIMIT_CAPS(toecaps); 3288 LIMIT_CAPS(rdmacaps); 3289 LIMIT_CAPS(cryptocaps); 3290 LIMIT_CAPS(iscsicaps); 3291 LIMIT_CAPS(fcoecaps); 3292#undef LIMIT_CAPS 3293 3294 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3295 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 3296 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3297 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 3298 if (rc != 0) { 3299 device_printf(sc->dev, 3300 "failed to process config file: %d.\n", rc); 3301 } 3302done: 3303 if (cfg != NULL) 3304 firmware_put(cfg, FIRMWARE_UNLOAD); 3305 return (rc); 3306} 3307 3308/* 3309 * Retrieve parameters that are needed (or nice to have) very early. 
3310 */ 3311static int 3312get_params__pre_init(struct adapter *sc) 3313{ 3314 int rc; 3315 uint32_t param[2], val[2]; 3316 3317 t4_get_version_info(sc); 3318 3319 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 3320 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 3321 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 3322 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 3323 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 3324 3325 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", 3326 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), 3327 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), 3328 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), 3329 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); 3330 3331 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", 3332 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 3333 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 3334 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 3335 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 3336 3337 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", 3338 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), 3339 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), 3340 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), 3341 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); 3342 3343 param[0] = FW_PARAM_DEV(PORTVEC); 3344 param[1] = FW_PARAM_DEV(CCLK); 3345 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3346 if (rc != 0) { 3347 device_printf(sc->dev, 3348 "failed to query parameters (pre_init): %d.\n", rc); 3349 return (rc); 3350 } 3351 3352 sc->params.portvec = val[0]; 3353 sc->params.nports = bitcount32(val[0]); 3354 sc->params.vpd.cclk = val[1]; 3355 3356 /* Read device log parameters. */ 3357 rc = -t4_init_devlog_params(sc, 1); 3358 if (rc == 0) 3359 fixup_devlog_params(sc); 3360 else { 3361 device_printf(sc->dev, 3362 "failed to get devlog parameters: %d.\n", rc); 3363 rc = 0; /* devlog isn't critical for device operation */ 3364 } 3365 3366 return (rc); 3367} 3368 3369/* 3370 * Retrieve various parameters that are of interest to the driver. The device 3371 * has been initialized by the firmware at this point. 
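 * Queries use the FW_PARAM_PFVF()/FW_PARAM_DEV() helpers defined above;
 * FW_PARAM_PFVF(L2T_START), for instance, expands to
 * V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
 * V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_L2T_START).  Most results come
 * back as inclusive [start, end] pairs, which is why the sizes computed
 * below are end - start + 1 (an L2T range of 0..511, say, yields a size
 * of 512).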
3372 */ 3373static int 3374get_params__post_init(struct adapter *sc) 3375{ 3376 int rc; 3377 uint32_t param[7], val[7]; 3378 struct fw_caps_config_cmd caps; 3379 3380 param[0] = FW_PARAM_PFVF(IQFLINT_START); 3381 param[1] = FW_PARAM_PFVF(EQ_START); 3382 param[2] = FW_PARAM_PFVF(FILTER_START); 3383 param[3] = FW_PARAM_PFVF(FILTER_END); 3384 param[4] = FW_PARAM_PFVF(L2T_START); 3385 param[5] = FW_PARAM_PFVF(L2T_END); 3386 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3387 if (rc != 0) { 3388 device_printf(sc->dev, 3389 "failed to query parameters (post_init): %d.\n", rc); 3390 return (rc); 3391 } 3392 3393 sc->sge.iq_start = val[0]; 3394 sc->sge.eq_start = val[1]; 3395 sc->tids.ftid_base = val[2]; 3396 sc->tids.nftids = val[3] - val[2] + 1; 3397 sc->params.ftid_min = val[2]; 3398 sc->params.ftid_max = val[3]; 3399 sc->vres.l2t.start = val[4]; 3400 sc->vres.l2t.size = val[5] - val[4] + 1; 3401 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 3402 ("%s: L2 table size (%u) larger than expected (%u)", 3403 __func__, sc->vres.l2t.size, L2T_SIZE)); 3404 3405 /* get capabilites */ 3406 bzero(&caps, sizeof(caps)); 3407 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3408 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3409 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3410 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3411 if (rc != 0) { 3412 device_printf(sc->dev, 3413 "failed to get card capabilities: %d.\n", rc); 3414 return (rc); 3415 } 3416 3417#define READ_CAPS(x) do { \ 3418 sc->x = htobe16(caps.x); \ 3419} while (0) 3420 READ_CAPS(nbmcaps); 3421 READ_CAPS(linkcaps); 3422 READ_CAPS(switchcaps); 3423 READ_CAPS(niccaps); 3424 READ_CAPS(toecaps); 3425 READ_CAPS(rdmacaps); 3426 READ_CAPS(cryptocaps); 3427 READ_CAPS(iscsicaps); 3428 READ_CAPS(fcoecaps); 3429 3430 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3431 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3432 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3433 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3434 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3435 if (rc != 0) { 3436 device_printf(sc->dev, 3437 "failed to query NIC parameters: %d.\n", rc); 3438 return (rc); 3439 } 3440 sc->tids.etid_base = val[0]; 3441 sc->params.etid_min = val[0]; 3442 sc->tids.netids = val[1] - val[0] + 1; 3443 sc->params.netids = sc->tids.netids; 3444 sc->params.eo_wr_cred = val[2]; 3445 sc->params.ethoffload = 1; 3446 } 3447 3448 if (sc->toecaps) { 3449 /* query offload-related parameters */ 3450 param[0] = FW_PARAM_DEV(NTID); 3451 param[1] = FW_PARAM_PFVF(SERVER_START); 3452 param[2] = FW_PARAM_PFVF(SERVER_END); 3453 param[3] = FW_PARAM_PFVF(TDDP_START); 3454 param[4] = FW_PARAM_PFVF(TDDP_END); 3455 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3456 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3457 if (rc != 0) { 3458 device_printf(sc->dev, 3459 "failed to query TOE parameters: %d.\n", rc); 3460 return (rc); 3461 } 3462 sc->tids.ntids = val[0]; 3463 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3464 sc->tids.stid_base = val[1]; 3465 sc->tids.nstids = val[2] - val[1] + 1; 3466 sc->vres.ddp.start = val[3]; 3467 sc->vres.ddp.size = val[4] - val[3] + 1; 3468 sc->params.ofldq_wr_cred = val[5]; 3469 sc->params.offload = 1; 3470 } 3471 if (sc->rdmacaps) { 3472 param[0] = FW_PARAM_PFVF(STAG_START); 3473 param[1] = FW_PARAM_PFVF(STAG_END); 3474 param[2] = FW_PARAM_PFVF(RQ_START); 3475 param[3] = FW_PARAM_PFVF(RQ_END); 3476 param[4] = FW_PARAM_PFVF(PBL_START); 3477 param[5] = FW_PARAM_PFVF(PBL_END); 3478 rc = 
-t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3479 if (rc != 0) { 3480 device_printf(sc->dev, 3481 "failed to query RDMA parameters(1): %d.\n", rc); 3482 return (rc); 3483 } 3484 sc->vres.stag.start = val[0]; 3485 sc->vres.stag.size = val[1] - val[0] + 1; 3486 sc->vres.rq.start = val[2]; 3487 sc->vres.rq.size = val[3] - val[2] + 1; 3488 sc->vres.pbl.start = val[4]; 3489 sc->vres.pbl.size = val[5] - val[4] + 1; 3490 3491 param[0] = FW_PARAM_PFVF(SQRQ_START); 3492 param[1] = FW_PARAM_PFVF(SQRQ_END); 3493 param[2] = FW_PARAM_PFVF(CQ_START); 3494 param[3] = FW_PARAM_PFVF(CQ_END); 3495 param[4] = FW_PARAM_PFVF(OCQ_START); 3496 param[5] = FW_PARAM_PFVF(OCQ_END); 3497 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3498 if (rc != 0) { 3499 device_printf(sc->dev, 3500 "failed to query RDMA parameters(2): %d.\n", rc); 3501 return (rc); 3502 } 3503 sc->vres.qp.start = val[0]; 3504 sc->vres.qp.size = val[1] - val[0] + 1; 3505 sc->vres.cq.start = val[2]; 3506 sc->vres.cq.size = val[3] - val[2] + 1; 3507 sc->vres.ocq.start = val[4]; 3508 sc->vres.ocq.size = val[5] - val[4] + 1; 3509 3510 param[0] = FW_PARAM_PFVF(SRQ_START); 3511 param[1] = FW_PARAM_PFVF(SRQ_END); 3512 param[2] = FW_PARAM_DEV(MAXORDIRD_QP); 3513 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER); 3514 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); 3515 if (rc != 0) { 3516 device_printf(sc->dev, 3517 "failed to query RDMA parameters(3): %d.\n", rc); 3518 return (rc); 3519 } 3520 sc->vres.srq.start = val[0]; 3521 sc->vres.srq.size = val[1] - val[0] + 1; 3522 sc->params.max_ordird_qp = val[2]; 3523 sc->params.max_ird_adapter = val[3]; 3524 } 3525 if (sc->iscsicaps) { 3526 param[0] = FW_PARAM_PFVF(ISCSI_START); 3527 param[1] = FW_PARAM_PFVF(ISCSI_END); 3528 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3529 if (rc != 0) { 3530 device_printf(sc->dev, 3531 "failed to query iSCSI parameters: %d.\n", rc); 3532 return (rc); 3533 } 3534 sc->vres.iscsi.start = val[0]; 3535 sc->vres.iscsi.size = val[1] - val[0] + 1; 3536 } 3537 3538 t4_init_sge_params(sc); 3539 3540 /* 3541 * We've got the params we wanted to query via the firmware. Now grab 3542 * some others directly from the chip. 
3543 */ 3544 rc = t4_read_chip_settings(sc); 3545 3546 return (rc); 3547} 3548 3549static int 3550set_params__post_init(struct adapter *sc) 3551{ 3552 uint32_t param, val; 3553 3554 /* ask for encapsulated CPLs */ 3555 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3556 val = 1; 3557 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3558 3559 return (0); 3560} 3561 3562#undef FW_PARAM_PFVF 3563#undef FW_PARAM_DEV 3564 3565static void 3566t4_set_desc(struct adapter *sc) 3567{ 3568 char buf[128]; 3569 struct adapter_params *p = &sc->params; 3570 3571 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id); 3572 3573 device_set_desc_copy(sc->dev, buf); 3574} 3575 3576static void 3577build_medialist(struct port_info *pi, struct ifmedia *media) 3578{ 3579 int m; 3580 3581 PORT_LOCK(pi); 3582 3583 ifmedia_removeall(media); 3584 3585 m = IFM_ETHER | IFM_FDX; 3586 3587 switch(pi->port_type) { 3588 case FW_PORT_TYPE_BT_XFI: 3589 case FW_PORT_TYPE_BT_XAUI: 3590 ifmedia_add(media, m | IFM_10G_T, 0, NULL); 3591 /* fall through */ 3592 3593 case FW_PORT_TYPE_BT_SGMII: 3594 ifmedia_add(media, m | IFM_1000_T, 0, NULL); 3595 ifmedia_add(media, m | IFM_100_TX, 0, NULL); 3596 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 3597 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 3598 break; 3599 3600 case FW_PORT_TYPE_CX4: 3601 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL); 3602 ifmedia_set(media, m | IFM_10G_CX4); 3603 break; 3604 3605 case FW_PORT_TYPE_QSFP_10G: 3606 case FW_PORT_TYPE_SFP: 3607 case FW_PORT_TYPE_FIBER_XFI: 3608 case FW_PORT_TYPE_FIBER_XAUI: 3609 switch (pi->mod_type) { 3610 3611 case FW_PORT_MOD_TYPE_LR: 3612 ifmedia_add(media, m | IFM_10G_LR, 0, NULL); 3613 ifmedia_set(media, m | IFM_10G_LR); 3614 break; 3615 3616 case FW_PORT_MOD_TYPE_SR: 3617 ifmedia_add(media, m | IFM_10G_SR, 0, NULL); 3618 ifmedia_set(media, m | IFM_10G_SR); 3619 break; 3620 3621 case FW_PORT_MOD_TYPE_LRM: 3622 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3623 ifmedia_set(media, m | IFM_10G_LRM); 3624 break; 3625 3626 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3627 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3628 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3629 ifmedia_set(media, m | IFM_10G_TWINAX); 3630 break; 3631 3632 case FW_PORT_MOD_TYPE_NONE: 3633 m &= ~IFM_FDX; 3634 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3635 ifmedia_set(media, m | IFM_NONE); 3636 break; 3637 3638 case FW_PORT_MOD_TYPE_NA: 3639 case FW_PORT_MOD_TYPE_ER: 3640 default: 3641 device_printf(pi->dev, 3642 "unknown port_type (%d), mod_type (%d)\n", 3643 pi->port_type, pi->mod_type); 3644 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3645 ifmedia_set(media, m | IFM_UNKNOWN); 3646 break; 3647 } 3648 break; 3649 3650 case FW_PORT_TYPE_CR_QSFP: 3651 case FW_PORT_TYPE_SFP28: 3652 case FW_PORT_TYPE_KR_SFP28: 3653 switch (pi->mod_type) { 3654 3655 case FW_PORT_MOD_TYPE_SR: 3656 ifmedia_add(media, m | IFM_25G_SR, 0, NULL); 3657 ifmedia_set(media, m | IFM_25G_SR); 3658 break; 3659 3660 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3661 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3662 ifmedia_add(media, m | IFM_25G_CR, 0, NULL); 3663 ifmedia_set(media, m | IFM_25G_CR); 3664 break; 3665 3666 case FW_PORT_MOD_TYPE_NONE: 3667 m &= ~IFM_FDX; 3668 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3669 ifmedia_set(media, m | IFM_NONE); 3670 break; 3671 3672 default: 3673 device_printf(pi->dev, 3674 "unknown port_type (%d), mod_type (%d)\n", 3675 pi->port_type, pi->mod_type); 3676 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3677 ifmedia_set(media, m | IFM_UNKNOWN); 3678 break; 3679 } 3680 break;
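	/*
	 * The remaining port types follow the same pattern: the port type
	 * picks the speed family and the transceiver module type picks the
	 * exact IFM_* subtype, e.g. a QSFP port with an SR module is
	 * reported as IFM_40G_SR4 and a 100G KR4/CR4 port with a twinax
	 * cable as IFM_100G_CR4.
	 */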
3681 3682 case FW_PORT_TYPE_QSFP: 3683 switch (pi->mod_type) { 3684 3685 case FW_PORT_MOD_TYPE_LR: 3686 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3687 ifmedia_set(media, m | IFM_40G_LR4); 3688 break; 3689 3690 case FW_PORT_MOD_TYPE_SR: 3691 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3692 ifmedia_set(media, m | IFM_40G_SR4); 3693 break; 3694 3695 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3696 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3697 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3698 ifmedia_set(media, m | IFM_40G_CR4); 3699 break; 3700 3701 case FW_PORT_MOD_TYPE_NONE: 3702 m &= ~IFM_FDX; 3703 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3704 ifmedia_set(media, m | IFM_NONE); 3705 break; 3706 3707 default: 3708 device_printf(pi->dev, 3709 "unknown port_type (%d), mod_type (%d)\n", 3710 pi->port_type, pi->mod_type); 3711 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3712 ifmedia_set(media, m | IFM_UNKNOWN); 3713 break; 3714 } 3715 break; 3716 3717 case FW_PORT_TYPE_KR4_100G: 3718 case FW_PORT_TYPE_CR4_QSFP: 3719 switch (pi->mod_type) { 3720 3721 case FW_PORT_MOD_TYPE_LR: 3722 ifmedia_add(media, m | IFM_100G_LR4, 0, NULL); 3723 ifmedia_set(media, m | IFM_100G_LR4); 3724 break; 3725 3726 case FW_PORT_MOD_TYPE_SR: 3727 ifmedia_add(media, m | IFM_100G_SR4, 0, NULL); 3728 ifmedia_set(media, m | IFM_100G_SR4); 3729 break; 3730 3731 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3732 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3733 ifmedia_add(media, m | IFM_100G_CR4, 0, NULL); 3734 ifmedia_set(media, m | IFM_100G_CR4); 3735 break; 3736 3737 case FW_PORT_MOD_TYPE_NONE: 3738 m &= ~IFM_FDX; 3739 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3740 ifmedia_set(media, m | IFM_NONE); 3741 break; 3742 3743 default: 3744 device_printf(pi->dev, 3745 "unknown port_type (%d), mod_type (%d)\n", 3746 pi->port_type, pi->mod_type); 3747 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3748 ifmedia_set(media, m | IFM_UNKNOWN); 3749 break; 3750 } 3751 break; 3752 3753 default: 3754 device_printf(pi->dev, 3755 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3756 pi->mod_type); 3757 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3758 ifmedia_set(media, m | IFM_UNKNOWN); 3759 break; 3760 } 3761 3762 PORT_UNLOCK(pi); 3763} 3764 3765#define FW_MAC_EXACT_CHUNK 7 3766 3767/* 3768 * Program the port's XGMAC based on parameters in ifnet. The caller also 3769 * indicates which parameters should be programmed (the rest are left alone). 3770 */ 3771int 3772update_mac_settings(struct ifnet *ifp, int flags) 3773{ 3774 int rc = 0; 3775 struct vi_info *vi = ifp->if_softc; 3776 struct port_info *pi = vi->pi; 3777 struct adapter *sc = pi->adapter; 3778 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3779 3780 ASSERT_SYNCHRONIZED_OP(sc); 3781 KASSERT(flags, ("%s: not told what to update.", __func__)); 3782 3783 if (flags & XGMAC_MTU) 3784 mtu = ifp->if_mtu; 3785 3786 if (flags & XGMAC_PROMISC) 3787 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 3788 3789 if (flags & XGMAC_ALLMULTI) 3790 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 3791 3792 if (flags & XGMAC_VLANEX) 3793 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 
1 : 0; 3794 3795 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 3796 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 3797 allmulti, 1, vlanex, false); 3798 if (rc) { 3799 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 3800 rc); 3801 return (rc); 3802 } 3803 } 3804 3805 if (flags & XGMAC_UCADDR) { 3806 uint8_t ucaddr[ETHER_ADDR_LEN]; 3807 3808 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 3809 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 3810 ucaddr, true, true); 3811 if (rc < 0) { 3812 rc = -rc; 3813 if_printf(ifp, "change_mac failed: %d\n", rc); 3814 return (rc); 3815 } else { 3816 vi->xact_addr_filt = rc; 3817 rc = 0; 3818 } 3819 } 3820 3821 if (flags & XGMAC_MCADDRS) { 3822 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 3823 int del = 1; 3824 uint64_t hash = 0; 3825 struct ifmultiaddr *ifma; 3826 int i = 0, j; 3827 3828 if_maddr_rlock(ifp); 3829 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3830 if (ifma->ifma_addr->sa_family != AF_LINK) 3831 continue; 3832 mcaddr[i] = 3833 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 3834 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 3835 i++; 3836 3837 if (i == FW_MAC_EXACT_CHUNK) { 3838 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 3839 del, i, mcaddr, NULL, &hash, 0); 3840 if (rc < 0) { 3841 rc = -rc; 3842 for (j = 0; j < i; j++) { 3843 if_printf(ifp, 3844 "failed to add mc address" 3845 " %02x:%02x:%02x:" 3846 "%02x:%02x:%02x rc=%d\n", 3847 mcaddr[j][0], mcaddr[j][1], 3848 mcaddr[j][2], mcaddr[j][3], 3849 mcaddr[j][4], mcaddr[j][5], 3850 rc); 3851 } 3852 goto mcfail; 3853 } 3854 del = 0; 3855 i = 0; 3856 } 3857 } 3858 if (i > 0) { 3859 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 3860 mcaddr, NULL, &hash, 0); 3861 if (rc < 0) { 3862 rc = -rc; 3863 for (j = 0; j < i; j++) { 3864 if_printf(ifp, 3865 "failed to add mc address" 3866 " %02x:%02x:%02x:" 3867 "%02x:%02x:%02x rc=%d\n", 3868 mcaddr[j][0], mcaddr[j][1], 3869 mcaddr[j][2], mcaddr[j][3], 3870 mcaddr[j][4], mcaddr[j][5], 3871 rc); 3872 } 3873 goto mcfail; 3874 } 3875 } 3876 3877 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 3878 if (rc != 0) 3879 if_printf(ifp, "failed to set mc address hash: %d", rc); 3880mcfail: 3881 if_maddr_runlock(ifp); 3882 } 3883 3884 return (rc); 3885} 3886 3887/* 3888 * {begin|end}_synchronized_op must be called from the same thread. 3889 */ 3890int 3891begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 3892 char *wmesg) 3893{ 3894 int rc, pri; 3895 3896#ifdef WITNESS 3897 /* the caller thinks it's ok to sleep, but is it really? */ 3898 if (flags & SLEEP_OK) 3899 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3900 "begin_synchronized_op"); 3901#endif 3902 3903 if (INTR_OK) 3904 pri = PCATCH; 3905 else 3906 pri = 0; 3907 3908 ADAPTER_LOCK(sc); 3909 for (;;) { 3910 3911 if (vi && IS_DOOMED(vi)) { 3912 rc = ENXIO; 3913 goto done; 3914 } 3915 3916 if (!IS_BUSY(sc)) { 3917 rc = 0; 3918 break; 3919 } 3920 3921 if (!(flags & SLEEP_OK)) { 3922 rc = EBUSY; 3923 goto done; 3924 } 3925 3926 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 3927 rc = EINTR; 3928 goto done; 3929 } 3930 } 3931 3932 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 3933 SET_BUSY(sc); 3934#ifdef INVARIANTS 3935 sc->last_op = wmesg; 3936 sc->last_op_thr = curthread; 3937 sc->last_op_flags = flags; 3938#endif 3939 3940done: 3941 if (!(flags & HOLD_LOCK) || rc) 3942 ADAPTER_UNLOCK(sc); 3943 3944 return (rc); 3945} 3946 3947/* 3948 * Tell if_ioctl and if_init that the VI is going away. 
This is 3949 * special variant of begin_synchronized_op and must be paired with a 3950 * call to end_synchronized_op. 3951 */ 3952void 3953doom_vi(struct adapter *sc, struct vi_info *vi) 3954{ 3955 3956 ADAPTER_LOCK(sc); 3957 SET_DOOMED(vi); 3958 wakeup(&sc->flags); 3959 while (IS_BUSY(sc)) 3960 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 3961 SET_BUSY(sc); 3962#ifdef INVARIANTS 3963 sc->last_op = "t4detach"; 3964 sc->last_op_thr = curthread; 3965 sc->last_op_flags = 0; 3966#endif 3967 ADAPTER_UNLOCK(sc); 3968} 3969 3970/* 3971 * {begin|end}_synchronized_op must be called from the same thread. 3972 */ 3973void 3974end_synchronized_op(struct adapter *sc, int flags) 3975{ 3976 3977 if (flags & LOCK_HELD) 3978 ADAPTER_LOCK_ASSERT_OWNED(sc); 3979 else 3980 ADAPTER_LOCK(sc); 3981 3982 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 3983 CLR_BUSY(sc); 3984 wakeup(&sc->flags); 3985 ADAPTER_UNLOCK(sc); 3986} 3987 3988static int 3989cxgbe_init_synchronized(struct vi_info *vi) 3990{ 3991 struct port_info *pi = vi->pi; 3992 struct adapter *sc = pi->adapter; 3993 struct ifnet *ifp = vi->ifp; 3994 int rc = 0, i; 3995 struct sge_txq *txq; 3996 3997 ASSERT_SYNCHRONIZED_OP(sc); 3998 3999 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 4000 return (0); /* already running */ 4001 4002 if (!(sc->flags & FULL_INIT_DONE) && 4003 ((rc = adapter_full_init(sc)) != 0)) 4004 return (rc); /* error message displayed already */ 4005 4006 if (!(vi->flags & VI_INIT_DONE) && 4007 ((rc = vi_full_init(vi)) != 0)) 4008 return (rc); /* error message displayed already */ 4009 4010 rc = update_mac_settings(ifp, XGMAC_ALL); 4011 if (rc) 4012 goto done; /* error message displayed already */ 4013 4014 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 4015 if (rc != 0) { 4016 if_printf(ifp, "enable_vi failed: %d\n", rc); 4017 goto done; 4018 } 4019 4020 /* 4021 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 4022 * if this changes. 4023 */ 4024 4025 for_each_txq(vi, i, txq) { 4026 TXQ_LOCK(txq); 4027 txq->eq.flags |= EQ_ENABLED; 4028 TXQ_UNLOCK(txq); 4029 } 4030 4031 /* 4032 * The first iq of the first port to come up is used for tracing. 4033 */ 4034 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 4035 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 4036 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : 4037 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 4038 V_QUEUENUMBER(sc->traceq)); 4039 pi->flags |= HAS_TRACEQ; 4040 } 4041 4042 /* all ok */ 4043 PORT_LOCK(pi); 4044 ifp->if_drv_flags |= IFF_DRV_RUNNING; 4045 pi->up_vis++; 4046 4047 if (pi->nvi > 1 || sc->flags & IS_VF) 4048 callout_reset(&vi->tick, hz, vi_tick, vi); 4049 else 4050 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 4051 PORT_UNLOCK(pi); 4052done: 4053 if (rc != 0) 4054 cxgbe_uninit_synchronized(vi); 4055 4056 return (rc); 4057} 4058 4059/* 4060 * Idempotent. 4061 */ 4062static int 4063cxgbe_uninit_synchronized(struct vi_info *vi) 4064{ 4065 struct port_info *pi = vi->pi; 4066 struct adapter *sc = pi->adapter; 4067 struct ifnet *ifp = vi->ifp; 4068 int rc, i; 4069 struct sge_txq *txq; 4070 4071 ASSERT_SYNCHRONIZED_OP(sc); 4072 4073 if (!(vi->flags & VI_INIT_DONE)) { 4074 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 4075 ("uninited VI is running")); 4076 return (0); 4077 } 4078 4079 /* 4080 * Disable the VI so that all its data in either direction is discarded 4081 * by the MPS. 
Leave everything else (the queues, interrupts, and 1Hz 4082 * tick) intact as the TP can deliver negative advice or data that it's 4083 * holding in its RAM (for an offloaded connection) even after the VI is 4084 * disabled. 4085 */ 4086 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 4087 if (rc) { 4088 if_printf(ifp, "disable_vi failed: %d\n", rc); 4089 return (rc); 4090 } 4091 4092 for_each_txq(vi, i, txq) { 4093 TXQ_LOCK(txq); 4094 txq->eq.flags &= ~EQ_ENABLED; 4095 TXQ_UNLOCK(txq); 4096 } 4097 4098 PORT_LOCK(pi); 4099 if (pi->nvi > 1 || sc->flags & IS_VF) 4100 callout_stop(&vi->tick); 4101 else 4102 callout_stop(&pi->tick); 4103 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 4104 PORT_UNLOCK(pi); 4105 return (0); 4106 } 4107 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 4108 pi->up_vis--; 4109 if (pi->up_vis > 0) { 4110 PORT_UNLOCK(pi); 4111 return (0); 4112 } 4113 PORT_UNLOCK(pi); 4114 4115 pi->link_cfg.link_ok = 0; 4116 pi->link_cfg.speed = 0; 4117 pi->link_cfg.link_down_rc = 255; 4118 t4_os_link_changed(sc, pi->port_id, 0); 4119 4120 return (0); 4121} 4122 4123/* 4124 * It is ok for this function to fail midway and return right away. t4_detach 4125 * will walk the entire sc->irq list and clean up whatever is valid. 4126 */ 4127int 4128t4_setup_intr_handlers(struct adapter *sc) 4129{ 4130 int rc, rid, p, q, v; 4131 char s[8]; 4132 struct irq *irq; 4133 struct port_info *pi; 4134 struct vi_info *vi; 4135 struct sge *sge = &sc->sge; 4136 struct sge_rxq *rxq; 4137#ifdef TCP_OFFLOAD 4138 struct sge_ofld_rxq *ofld_rxq; 4139#endif 4140#ifdef DEV_NETMAP 4141 struct sge_nm_rxq *nm_rxq; 4142#endif 4143#ifdef RSS 4144 int nbuckets = rss_getnumbuckets(); 4145#endif 4146 4147 /* 4148 * Setup interrupts. 4149 */ 4150 irq = &sc->irq[0]; 4151 rid = sc->intr_type == INTR_INTX ? 0 : 1; 4152 if (sc->intr_count == 1) 4153 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 4154 4155 /* Multiple interrupts. 
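 * The vectors are laid out as follows: an error interrupt first (PFs
 * only), then one for the firmware event queue, and then one for each of
 * the VIs' rx queues that takes direct interrupts.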
*/ 4156 if (sc->flags & IS_VF) 4157 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, 4158 ("%s: too few intr.", __func__)); 4159 else 4160 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 4161 ("%s: too few intr.", __func__)); 4162 4163 /* The first one is always error intr on PFs */ 4164 if (!(sc->flags & IS_VF)) { 4165 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 4166 if (rc != 0) 4167 return (rc); 4168 irq++; 4169 rid++; 4170 } 4171 4172 /* The second one is always the firmware event queue (first on VFs) */ 4173 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); 4174 if (rc != 0) 4175 return (rc); 4176 irq++; 4177 rid++; 4178 4179 for_each_port(sc, p) { 4180 pi = sc->port[p]; 4181 for_each_vi(pi, v, vi) { 4182 vi->first_intr = rid - 1; 4183 4184 if (vi->nnmrxq > 0) { 4185 int n = max(vi->nrxq, vi->nnmrxq); 4186 4187 MPASS(vi->flags & INTR_RXQ); 4188 4189 rxq = &sge->rxq[vi->first_rxq]; 4190#ifdef DEV_NETMAP 4191 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; 4192#endif 4193 for (q = 0; q < n; q++) { 4194 snprintf(s, sizeof(s), "%x%c%x", p, 4195 'a' + v, q); 4196 if (q < vi->nrxq) 4197 irq->rxq = rxq++; 4198#ifdef DEV_NETMAP 4199 if (q < vi->nnmrxq) 4200 irq->nm_rxq = nm_rxq++; 4201#endif 4202 rc = t4_alloc_irq(sc, irq, rid, 4203 t4_vi_intr, irq, s); 4204 if (rc != 0) 4205 return (rc); 4206 irq++; 4207 rid++; 4208 vi->nintr++; 4209 } 4210 } else if (vi->flags & INTR_RXQ) { 4211 for_each_rxq(vi, q, rxq) { 4212 snprintf(s, sizeof(s), "%x%c%x", p, 4213 'a' + v, q); 4214 rc = t4_alloc_irq(sc, irq, rid, 4215 t4_intr, rxq, s); 4216 if (rc != 0) 4217 return (rc); 4218#ifdef RSS 4219 bus_bind_intr(sc->dev, irq->res, 4220 rss_getcpu(q % nbuckets)); 4221#endif 4222 irq++; 4223 rid++; 4224 vi->nintr++; 4225 } 4226 } 4227#ifdef TCP_OFFLOAD 4228 if (vi->flags & INTR_OFLD_RXQ) { 4229 for_each_ofld_rxq(vi, q, ofld_rxq) { 4230 snprintf(s, sizeof(s), "%x%c%x", p, 4231 'A' + v, q); 4232 rc = t4_alloc_irq(sc, irq, rid, 4233 t4_intr, ofld_rxq, s); 4234 if (rc != 0) 4235 return (rc); 4236 irq++; 4237 rid++; 4238 vi->nintr++; 4239 } 4240 } 4241#endif 4242 } 4243 } 4244 MPASS(irq == &sc->irq[sc->intr_count]); 4245 4246 return (0); 4247} 4248 4249int 4250adapter_full_init(struct adapter *sc) 4251{ 4252 int rc, i; 4253#ifdef RSS 4254 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4255 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4256#endif 4257 4258 ASSERT_SYNCHRONIZED_OP(sc); 4259 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4260 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 4261 ("%s: FULL_INIT_DONE already", __func__)); 4262 4263 /* 4264 * queues that belong to the adapter (not any particular port). 
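 * (The firmware event queue is one such adapter-wide queue.)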
4265 */ 4266 rc = t4_setup_adapter_queues(sc); 4267 if (rc != 0) 4268 goto done; 4269 4270 for (i = 0; i < nitems(sc->tq); i++) { 4271 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 4272 taskqueue_thread_enqueue, &sc->tq[i]); 4273 if (sc->tq[i] == NULL) { 4274 device_printf(sc->dev, 4275 "failed to allocate task queue %d\n", i); 4276 rc = ENOMEM; 4277 goto done; 4278 } 4279 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 4280 device_get_nameunit(sc->dev), i); 4281 } 4282#ifdef RSS 4283 MPASS(RSS_KEYSIZE == 40); 4284 rss_getkey((void *)&raw_rss_key[0]); 4285 for (i = 0; i < nitems(rss_key); i++) { 4286 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); 4287 } 4288 t4_write_rss_key(sc, &rss_key[0], -1); 4289#endif 4290 4291 if (!(sc->flags & IS_VF)) 4292 t4_intr_enable(sc); 4293 sc->flags |= FULL_INIT_DONE; 4294done: 4295 if (rc != 0) 4296 adapter_full_uninit(sc); 4297 4298 return (rc); 4299} 4300 4301int 4302adapter_full_uninit(struct adapter *sc) 4303{ 4304 int i; 4305 4306 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4307 4308 t4_teardown_adapter_queues(sc); 4309 4310 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 4311 taskqueue_free(sc->tq[i]); 4312 sc->tq[i] = NULL; 4313 } 4314 4315 sc->flags &= ~FULL_INIT_DONE; 4316 4317 return (0); 4318} 4319 4320#ifdef RSS 4321#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 4322 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 4323 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 4324 RSS_HASHTYPE_RSS_UDP_IPV6) 4325 4326/* Translates kernel hash types to hardware. */ 4327static int 4328hashconfig_to_hashen(int hashconfig) 4329{ 4330 int hashen = 0; 4331 4332 if (hashconfig & RSS_HASHTYPE_RSS_IPV4) 4333 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 4334 if (hashconfig & RSS_HASHTYPE_RSS_IPV6) 4335 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 4336 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { 4337 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4338 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4339 } 4340 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { 4341 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4342 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4343 } 4344 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) 4345 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4346 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) 4347 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4348 4349 return (hashen); 4350} 4351 4352/* Translates hardware hash types to kernel. */ 4353static int 4354hashen_to_hashconfig(int hashen) 4355{ 4356 int hashconfig = 0; 4357 4358 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { 4359 /* 4360 * If UDP hashing was enabled it must have been enabled for 4361 * either IPv4 or IPv6 (inclusive or). Enabling UDP without 4362 * enabling any 4-tuple hash is nonsense configuration. 
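 * (UDPEN on its own enables nothing; it only extends whatever 4-tuple
 * hashing is already enabled to UDP as well.)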
4363 */ 4364 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4365 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); 4366 4367 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4368 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; 4369 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4370 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; 4371 } 4372 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4373 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; 4374 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4375 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; 4376 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 4377 hashconfig |= RSS_HASHTYPE_RSS_IPV4; 4378 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 4379 hashconfig |= RSS_HASHTYPE_RSS_IPV6; 4380 4381 return (hashconfig); 4382} 4383#endif 4384 4385int 4386vi_full_init(struct vi_info *vi) 4387{ 4388 struct adapter *sc = vi->pi->adapter; 4389 struct ifnet *ifp = vi->ifp; 4390 uint16_t *rss; 4391 struct sge_rxq *rxq; 4392 int rc, i, j, hashen; 4393#ifdef RSS 4394 int nbuckets = rss_getnumbuckets(); 4395 int hashconfig = rss_gethashconfig(); 4396 int extra; 4397#endif 4398 4399 ASSERT_SYNCHRONIZED_OP(sc); 4400 KASSERT((vi->flags & VI_INIT_DONE) == 0, 4401 ("%s: VI_INIT_DONE already", __func__)); 4402 4403 sysctl_ctx_init(&vi->ctx); 4404 vi->flags |= VI_SYSCTL_CTX; 4405 4406 /* 4407 * Allocate tx/rx/fl queues for this VI. 4408 */ 4409 rc = t4_setup_vi_queues(vi); 4410 if (rc != 0) 4411 goto done; /* error message displayed already */ 4412 4413 /* 4414 * Setup RSS for this VI. Save a copy of the RSS table for later use. 4415 */ 4416 if (vi->nrxq > vi->rss_size) { 4417 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " 4418 "some queues will never receive traffic.\n", vi->nrxq, 4419 vi->rss_size); 4420 } else if (vi->rss_size % vi->nrxq) { 4421 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); " 4422 "expect uneven traffic distribution.\n", vi->nrxq, 4423 vi->rss_size); 4424 } 4425#ifdef RSS 4426 if (vi->nrxq != nbuckets) { 4427 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);" 4428 "performance will be impacted.\n", vi->nrxq, nbuckets); 4429 } 4430#endif 4431 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 4432 for (i = 0; i < vi->rss_size;) { 4433#ifdef RSS 4434 j = rss_get_indirection_to_bucket(i); 4435 j %= vi->nrxq; 4436 rxq = &sc->sge.rxq[vi->first_rxq + j]; 4437 rss[i++] = rxq->iq.abs_id; 4438#else 4439 for_each_rxq(vi, j, rxq) { 4440 rss[i++] = rxq->iq.abs_id; 4441 if (i == vi->rss_size) 4442 break; 4443 } 4444#endif 4445 } 4446 4447 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 4448 vi->rss_size); 4449 if (rc != 0) { 4450 if_printf(ifp, "rss_config failed: %d\n", rc); 4451 goto done; 4452 } 4453 4454#ifdef RSS 4455 hashen = hashconfig_to_hashen(hashconfig); 4456 4457 /* 4458 * We may have had to enable some hashes even though the global config 4459 * wants them disabled. This is a potential problem that must be 4460 * reported to the user. 4461 */ 4462 extra = hashen_to_hashconfig(hashen) ^ hashconfig; 4463 4464 /* 4465 * If we consider only the supported hash types, then the enabled hashes 4466 * are a superset of the requested hashes. In other words, there cannot 4467 * be any supported hash that was requested but not enabled, but there 4468 * can be hashes that were not requested but had to be enabled. 
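 * For example, a request for UDP/IPv4 hashing alone also sets the IPv4
 * 4-tuple enable, so TCP/IPv4 hashing is reported back as enabled and
 * shows up in "extra" as a hash that was forced on.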
4469 */ 4470 extra &= SUPPORTED_RSS_HASHTYPES; 4471 MPASS((extra & hashconfig) == 0); 4472 4473 if (extra) { 4474 if_printf(ifp, 4475 "global RSS config (0x%x) cannot be accommodated.\n", 4476 hashconfig); 4477 } 4478 if (extra & RSS_HASHTYPE_RSS_IPV4) 4479 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 4480 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 4481 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 4482 if (extra & RSS_HASHTYPE_RSS_IPV6) 4483 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 4484 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 4485 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 4486 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 4487 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 4488 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 4489 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 4490#else 4491 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 4492 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 4493 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4494 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 4495#endif 4496 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0); 4497 if (rc != 0) { 4498 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 4499 goto done; 4500 } 4501 4502 vi->rss = rss; 4503 vi->flags |= VI_INIT_DONE; 4504done: 4505 if (rc != 0) 4506 vi_full_uninit(vi); 4507 4508 return (rc); 4509} 4510 4511/* 4512 * Idempotent. 4513 */ 4514int 4515vi_full_uninit(struct vi_info *vi) 4516{ 4517 struct port_info *pi = vi->pi; 4518 struct adapter *sc = pi->adapter; 4519 int i; 4520 struct sge_rxq *rxq; 4521 struct sge_txq *txq; 4522#ifdef TCP_OFFLOAD 4523 struct sge_ofld_rxq *ofld_rxq; 4524 struct sge_wrq *ofld_txq; 4525#endif 4526 4527 if (vi->flags & VI_INIT_DONE) { 4528 4529 /* Need to quiesce queues. */ 4530 4531 /* XXX: Only for the first VI? */ 4532 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) 4533 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 4534 4535 for_each_txq(vi, i, txq) { 4536 quiesce_txq(sc, txq); 4537 } 4538 4539#ifdef TCP_OFFLOAD 4540 for_each_ofld_txq(vi, i, ofld_txq) { 4541 quiesce_wrq(sc, ofld_txq); 4542 } 4543#endif 4544 4545 for_each_rxq(vi, i, rxq) { 4546 quiesce_iq(sc, &rxq->iq); 4547 quiesce_fl(sc, &rxq->fl); 4548 } 4549 4550#ifdef TCP_OFFLOAD 4551 for_each_ofld_rxq(vi, i, ofld_rxq) { 4552 quiesce_iq(sc, &ofld_rxq->iq); 4553 quiesce_fl(sc, &ofld_rxq->fl); 4554 } 4555#endif 4556 free(vi->rss, M_CXGBE); 4557 free(vi->nm_rss, M_CXGBE); 4558 } 4559 4560 t4_teardown_vi_queues(vi); 4561 vi->flags &= ~VI_INIT_DONE; 4562 4563 return (0); 4564} 4565 4566static void 4567quiesce_txq(struct adapter *sc, struct sge_txq *txq) 4568{ 4569 struct sge_eq *eq = &txq->eq; 4570 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4571 4572 (void) sc; /* unused */ 4573 4574#ifdef INVARIANTS 4575 TXQ_LOCK(txq); 4576 MPASS((eq->flags & EQ_ENABLED) == 0); 4577 TXQ_UNLOCK(txq); 4578#endif 4579 4580 /* Wait for the mp_ring to empty. */ 4581 while (!mp_ring_is_idle(txq->r)) { 4582 mp_ring_check_drainage(txq->r, 0); 4583 pause("rquiesce", 1); 4584 } 4585 4586 /* Then wait for the hardware to finish. */ 4587 while (spg->cidx != htobe16(eq->pidx)) 4588 pause("equiesce", 1); 4589 4590 /* Finally, wait for the driver to reclaim all descriptors. 
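 * (eq->cidx catching up with eq->pidx means every descriptor has been
 * reclaimed.)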
*/ 4591 while (eq->cidx != eq->pidx) 4592 pause("dquiesce", 1); 4593} 4594 4595static void 4596quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4597{ 4598 4599 /* XXXTX */ 4600} 4601 4602static void 4603quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4604{ 4605 (void) sc; /* unused */ 4606 4607 /* Synchronize with the interrupt handler */ 4608 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4609 pause("iqfree", 1); 4610} 4611 4612static void 4613quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4614{ 4615 mtx_lock(&sc->sfl_lock); 4616 FL_LOCK(fl); 4617 fl->flags |= FL_DOOMED; 4618 FL_UNLOCK(fl); 4619 callout_stop(&sc->sfl_callout); 4620 mtx_unlock(&sc->sfl_lock); 4621 4622 KASSERT((fl->flags & FL_STARVING) == 0, 4623 ("%s: still starving", __func__)); 4624} 4625 4626static int 4627t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4628 driver_intr_t *handler, void *arg, char *name) 4629{ 4630 int rc; 4631 4632 irq->rid = rid; 4633 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4634 RF_SHAREABLE | RF_ACTIVE); 4635 if (irq->res == NULL) { 4636 device_printf(sc->dev, 4637 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4638 return (ENOMEM); 4639 } 4640 4641 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4642 NULL, handler, arg, &irq->tag); 4643 if (rc != 0) { 4644 device_printf(sc->dev, 4645 "failed to setup interrupt for rid %d, name %s: %d\n", 4646 rid, name, rc); 4647 } else if (name) 4648 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); 4649 4650 return (rc); 4651} 4652 4653static int 4654t4_free_irq(struct adapter *sc, struct irq *irq) 4655{ 4656 if (irq->tag) 4657 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4658 if (irq->res) 4659 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4660 4661 bzero(irq, sizeof(*irq)); 4662 4663 return (0); 4664} 4665 4666static void 4667get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4668{ 4669 4670 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4671 t4_get_regs(sc, buf, regs->len); 4672} 4673 4674#define A_PL_INDIR_CMD 0x1f8 4675 4676#define S_PL_AUTOINC 31 4677#define M_PL_AUTOINC 0x1U 4678#define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4679#define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4680 4681#define S_PL_VFID 20 4682#define M_PL_VFID 0xffU 4683#define V_PL_VFID(x) ((x) << S_PL_VFID) 4684#define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4685 4686#define S_PL_ADDR 0 4687#define M_PL_ADDR 0xfffffU 4688#define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4689#define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4690 4691#define A_PL_INDIR_DATA 0x1fc 4692 4693static uint64_t 4694read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4695{ 4696 u32 stats[2]; 4697 4698 mtx_assert(&sc->reg_lock, MA_OWNED); 4699 if (sc->flags & IS_VF) { 4700 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); 4701 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); 4702 } else { 4703 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4704 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4705 V_PL_ADDR(VF_MPS_REG(reg))); 4706 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4707 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4708 } 4709 return (((uint64_t)stats[1]) << 32 | stats[0]); 4710} 4711 4712static void 4713t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4714 struct fw_vi_stats_vf *stats) 4715{ 4716 4717#define GET_STAT(name) \ 4718 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4719 4720 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4721 
stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4722 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4723 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 4724 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4725 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4726 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4727 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4728 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4729 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4730 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4731 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4732 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4733 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4734 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4735 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4736 4737#undef GET_STAT 4738} 4739 4740static void 4741t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 4742{ 4743 int reg; 4744 4745 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4746 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4747 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 4748 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 4749 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 4750 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 4751} 4752 4753static void 4754vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 4755{ 4756 struct timeval tv; 4757 const struct timeval interval = {0, 250000}; /* 250ms */ 4758 4759 if (!(vi->flags & VI_INIT_DONE)) 4760 return; 4761 4762 getmicrotime(&tv); 4763 timevalsub(&tv, &interval); 4764 if (timevalcmp(&tv, &vi->last_refreshed, <)) 4765 return; 4766 4767 mtx_lock(&sc->reg_lock); 4768 t4_get_vi_stats(sc, vi->viid, &vi->stats); 4769 getmicrotime(&vi->last_refreshed); 4770 mtx_unlock(&sc->reg_lock); 4771} 4772 4773static void 4774cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4775{ 4776 int i; 4777 u_int v, tnl_cong_drops; 4778 struct timeval tv; 4779 const struct timeval interval = {0, 250000}; /* 250ms */ 4780 4781 getmicrotime(&tv); 4782 timevalsub(&tv, &interval); 4783 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4784 return; 4785 4786 tnl_cong_drops = 0; 4787 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 4788 for (i = 0; i < sc->chip_params->nchan; i++) { 4789 if (pi->rx_chan_map & (1 << i)) { 4790 mtx_lock(&sc->reg_lock); 4791 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4792 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4793 mtx_unlock(&sc->reg_lock); 4794 tnl_cong_drops += v; 4795 } 4796 } 4797 pi->tnl_cong_drops = tnl_cong_drops; 4798 getmicrotime(&pi->last_refreshed); 4799} 4800 4801static void 4802cxgbe_tick(void *arg) 4803{ 4804 struct port_info *pi = arg; 4805 struct adapter *sc = pi->adapter; 4806 4807 PORT_LOCK_ASSERT_OWNED(pi); 4808 cxgbe_refresh_stats(sc, pi); 4809 4810 callout_schedule(&pi->tick, hz); 4811} 4812 4813void 4814vi_tick(void *arg) 4815{ 4816 struct vi_info *vi = arg; 4817 struct adapter *sc = vi->pi->adapter; 4818 4819 vi_refresh_stats(sc, vi); 4820 4821 callout_schedule(&vi->tick, hz); 4822} 4823 4824static void 4825cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4826{ 4827 struct ifnet *vlan; 4828 4829 if (arg != ifp || ifp->if_type != IFT_ETHER) 4830 return; 4831 4832 vlan = VLAN_DEVAT(ifp, vid); 4833 VLAN_SETCOOKIE(vlan, ifp); 4834} 4835 4836/* 4837 * Should match fw_caps_config_<foo> enums in t4fw_interface.h 4838 */ 4839static char *caps_decoder[] = { 4840 "\20\001IPMI\002NCSI", /* 0: NBM */ 4841 
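	/*
	 * Each entry is a printf(9) %b bit-name string (decoded by
	 * sysctl_bitfield below): the leading \20 selects hex output and
	 * every following \<bit><NAME> pair names one capability bit.
	 */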
"\20\001PPP\002QFC\003DCBX", /* 1: link */ 4842 "\20\001INGRESS\002EGRESS", /* 2: switch */ 4843 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ 4844 "\006HASHFILTER\007ETHOFLD", 4845 "\20\001TOE", /* 4: TOE */ 4846 "\20\001RDDP\002RDMAC", /* 5: RDMA */ 4847 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ 4848 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" 4849 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" 4850 "\007T10DIF" 4851 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", 4852 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */ 4853 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ 4854 "\004PO_INITIATOR\005PO_TARGET", 4855}; 4856 4857void 4858t4_sysctls(struct adapter *sc) 4859{ 4860 struct sysctl_ctx_list *ctx; 4861 struct sysctl_oid *oid; 4862 struct sysctl_oid_list *children, *c0; 4863 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 4864 4865 ctx = device_get_sysctl_ctx(sc->dev); 4866 4867 /* 4868 * dev.t4nex.X. 4869 */ 4870 oid = device_get_sysctl_tree(sc->dev); 4871 c0 = children = SYSCTL_CHILDREN(oid); 4872 4873 sc->sc_do_rxcopy = 1; 4874 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 4875 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 4876 4877 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 4878 sc->params.nports, "# of ports"); 4879 4880 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 4881 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 4882 sysctl_bitfield, "A", "available doorbells"); 4883 4884 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 4885 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 4886 4887 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 4888 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 4889 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 4890 "interrupt holdoff timer values (us)"); 4891 4892 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 4893 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 4894 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 4895 "interrupt holdoff packet counter values"); 4896 4897 t4_sge_sysctls(sc, ctx, children); 4898 4899 sc->lro_timeout = 100; 4900 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 4901 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 4902 4903 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, 4904 &sc->debug_flags, 0, "flags to enable runtime debugging"); 4905 4906 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", 4907 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); 4908 4909 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 4910 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 4911 4912 if (sc->flags & IS_VF) 4913 return; 4914 4915 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 4916 NULL, chip_rev(sc), "chip hardware revision"); 4917 4918 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", 4919 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); 4920 4921 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", 4922 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); 4923 4924 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", 4925 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); 4926 4927 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", 4928 CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); 4929 4930 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, 4931 sc->er_version, 0, "expansion ROM version"); 4932 4933 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, 
"bs_version", CTLFLAG_RD, 4934 sc->bs_version, 0, "bootstrap firmware version"); 4935 4936 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, 4937 NULL, sc->params.scfg_vers, "serial config version"); 4938 4939 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, 4940 NULL, sc->params.vpd_vers, "VPD version"); 4941 4942 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 4943 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 4944 4945 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 4946 sc->cfcsum, "config file checksum"); 4947 4948#define SYSCTL_CAP(name, n, text) \ 4949 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ 4950 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \ 4951 sysctl_bitfield, "A", "available " text " capabilities") 4952 4953 SYSCTL_CAP(nbmcaps, 0, "NBM"); 4954 SYSCTL_CAP(linkcaps, 1, "link"); 4955 SYSCTL_CAP(switchcaps, 2, "switch"); 4956 SYSCTL_CAP(niccaps, 3, "NIC"); 4957 SYSCTL_CAP(toecaps, 4, "TCP offload"); 4958 SYSCTL_CAP(rdmacaps, 5, "RDMA"); 4959 SYSCTL_CAP(iscsicaps, 6, "iSCSI"); 4960 SYSCTL_CAP(cryptocaps, 7, "crypto"); 4961 SYSCTL_CAP(fcoecaps, 8, "FCoE"); 4962#undef SYSCTL_CAP 4963 4964 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 4965 NULL, sc->tids.nftids, "number of filters"); 4966 4967 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 4968 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 4969 "chip temperature (in Celsius)"); 4970 4971#ifdef SBUF_DRAIN 4972 /* 4973 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 4974 */ 4975 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4976 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4977 "logs and miscellaneous information"); 4978 children = SYSCTL_CHILDREN(oid); 4979 4980 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4981 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4982 sysctl_cctrl, "A", "congestion control"); 4983 4984 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4985 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4986 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4987 4988 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4989 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4990 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4991 4992 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4993 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4994 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4995 4996 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4997 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4998 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4999 5000 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 5001 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 5002 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 5003 5004 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 5005 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 5006 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 5007 5008 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 5009 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5010 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_cim_la : sysctl_cim_la_t6, 5011 "A", "CIM logic analyzer"); 5012 5013 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 5014 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5015 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 5016 5017 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 5018 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 5019 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 5020 5021 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 5022 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 5023 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 5024 5025 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 5026 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 5027 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 5028 5029 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 5030 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 5031 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 5032 5033 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 5034 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 5035 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 5036 5037 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 5038 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 5039 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 5040 5041 if (chip_id(sc) > CHELSIO_T4) { 5042 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 5043 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 5044 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 5045 5046 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 5047 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 5048 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 5049 } 5050 5051 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 5052 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5053 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 5054 5055 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 5056 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5057 sysctl_cim_qcfg, "A", "CIM queue configuration"); 5058 5059 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 5060 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5061 sysctl_cpl_stats, "A", "CPL statistics"); 5062 5063 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 5064 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5065 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 5066 5067 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 5068 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5069 sysctl_devlog, "A", "firmware's device log"); 5070 5071 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 5072 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5073 sysctl_fcoe_stats, "A", "FCoE statistics"); 5074 5075 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 5076 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5077 sysctl_hw_sched, "A", "hardware scheduler "); 5078 5079 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 5080 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5081 sysctl_l2t, "A", "hardware L2 table"); 5082 5083 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 5084 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5085 sysctl_lb_stats, "A", "loopback statistics"); 5086 5087 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 5088 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5089 sysctl_meminfo, "A", "memory regions"); 5090 5091 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 5092 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5093 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, 5094 "A", "MPS TCAM entries"); 5095 5096 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 5097 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5098 sysctl_path_mtus, "A", "path MTUs"); 5099 5100 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 5101 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5102 sysctl_pm_stats, "A", "PM statistics"); 5103 5104 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 5105 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5106 sysctl_rdma_stats, "A", "RDMA statistics"); 5107 5108 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 5109 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5110 sysctl_tcp_stats, "A", "TCP statistics"); 5111 5112 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 5113 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5114 sysctl_tids, "A", "TID information"); 5115 5116 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 5117 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5118 sysctl_tp_err_stats, "A", "TP error statistics"); 5119 5120 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", 5121 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", 5122 "TP logic analyzer event capture mask"); 5123 5124 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 5125 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5126 sysctl_tp_la, "A", "TP logic analyzer"); 5127 5128 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 5129 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5130 sysctl_tx_rate, "A", "Tx rate"); 5131 5132 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 5133 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5134 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 5135 5136 if (chip_id(sc) >= CHELSIO_T5) { 5137 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 5138 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 5139 sysctl_wcwr_stats, "A", "write combined work requests"); 5140 } 5141#endif 5142 5143#ifdef TCP_OFFLOAD 5144 if (is_offload(sc)) { 5145 /* 5146 * dev.t4nex.X.toe. 
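 * TOE tunables; this subtree is created only on offload-capable
 * adapters (see the is_offload() check above).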
5147 */ 5148 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 5149 NULL, "TOE parameters"); 5150 children = SYSCTL_CHILDREN(oid); 5151 5152 sc->tt.sndbuf = 256 * 1024; 5153 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 5154 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 5155 5156 sc->tt.ddp = 0; 5157 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 5158 &sc->tt.ddp, 0, "DDP allowed"); 5159 5160 sc->tt.rx_coalesce = 1; 5161 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 5162 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 5163 5164 sc->tt.tx_align = 1; 5165 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 5166 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 5167 5168 sc->tt.tx_zcopy = 0; 5169 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy", 5170 CTLFLAG_RW, &sc->tt.tx_zcopy, 0, 5171 "Enable zero-copy aio_write(2)"); 5172 5173 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", 5174 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A", 5175 "TP timer tick (us)"); 5176 5177 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", 5178 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A", 5179 "TCP timestamp tick (us)"); 5180 5181 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", 5182 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A", 5183 "DACK tick (us)"); 5184 5185 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", 5186 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer, 5187 "IU", "DACK timer (us)"); 5188 5189 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", 5190 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN, 5191 sysctl_tp_timer, "LU", "Retransmit min (us)"); 5192 5193 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", 5194 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX, 5195 sysctl_tp_timer, "LU", "Retransmit max (us)"); 5196 5197 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", 5198 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN, 5199 sysctl_tp_timer, "LU", "Persist timer min (us)"); 5200 5201 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", 5202 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX, 5203 sysctl_tp_timer, "LU", "Persist timer max (us)"); 5204 5205 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", 5206 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE, 5207 sysctl_tp_timer, "LU", "Keepidle idle timer (us)"); 5208 5209 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_intvl", 5210 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL, 5211 sysctl_tp_timer, "LU", "Keepidle interval (us)"); 5212 5213 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", 5214 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT, 5215 sysctl_tp_timer, "LU", "Initial SRTT (us)"); 5216 5217 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", 5218 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER, 5219 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); 5220 } 5221#endif 5222} 5223 5224void 5225vi_sysctls(struct vi_info *vi) 5226{ 5227 struct sysctl_ctx_list *ctx; 5228 struct sysctl_oid *oid; 5229 struct sysctl_oid_list *children; 5230 5231 ctx = device_get_sysctl_ctx(vi->dev); 5232 5233 /* 5234 * dev.v?(cxgbe|cxl).X. 
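 * Per-VI sysctls; these are created for the main VI of each port as
 * well as for any additional VIs, hence the optional "v" in the node
 * name.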
5235 */ 5236 oid = device_get_sysctl_tree(vi->dev); 5237 children = SYSCTL_CHILDREN(oid); 5238 5239 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, 5240 vi->viid, "VI identifer"); 5241 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 5242 &vi->nrxq, 0, "# of rx queues"); 5243 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 5244 &vi->ntxq, 0, "# of tx queues"); 5245 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 5246 &vi->first_rxq, 0, "index of first rx queue"); 5247 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 5248 &vi->first_txq, 0, "index of first tx queue"); 5249 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL, 5250 vi->rss_size, "size of RSS indirection table"); 5251 5252 if (IS_MAIN_VI(vi)) { 5253 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", 5254 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", 5255 "Reserve queue 0 for non-flowid packets"); 5256 } 5257 5258#ifdef TCP_OFFLOAD 5259 if (vi->nofldrxq != 0) { 5260 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 5261 &vi->nofldrxq, 0, 5262 "# of rx queues for offloaded TCP connections"); 5263 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 5264 &vi->nofldtxq, 0, 5265 "# of tx queues for offloaded TCP connections"); 5266 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 5267 CTLFLAG_RD, &vi->first_ofld_rxq, 0, 5268 "index of first TOE rx queue"); 5269 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 5270 CTLFLAG_RD, &vi->first_ofld_txq, 0, 5271 "index of first TOE tx queue"); 5272 } 5273#endif 5274#ifdef DEV_NETMAP 5275 if (vi->nnmrxq != 0) { 5276 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 5277 &vi->nnmrxq, 0, "# of netmap rx queues"); 5278 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 5279 &vi->nnmtxq, 0, "# of netmap tx queues"); 5280 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 5281 CTLFLAG_RD, &vi->first_nm_rxq, 0, 5282 "index of first netmap rx queue"); 5283 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 5284 CTLFLAG_RD, &vi->first_nm_txq, 0, 5285 "index of first netmap tx queue"); 5286 } 5287#endif 5288 5289 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 5290 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 5291 "holdoff timer index"); 5292 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 5293 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 5294 "holdoff packet counter index"); 5295 5296 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 5297 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 5298 "rx queue size"); 5299 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 5300 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 5301 "tx queue size"); 5302} 5303 5304static void 5305cxgbe_sysctls(struct port_info *pi) 5306{ 5307 struct sysctl_ctx_list *ctx; 5308 struct sysctl_oid *oid; 5309 struct sysctl_oid_list *children, *children2; 5310 struct adapter *sc = pi->adapter; 5311 int i; 5312 char name[16]; 5313 5314 ctx = device_get_sysctl_ctx(pi->dev); 5315 5316 /* 5317 * dev.cxgbe.X. 
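 * Per-port sysctls (dev.cxgbe.0.pause_settings, dev.cxl.1.fec, and so
 * on).  The VF driver returns early below and does not create the tc
 * and stats subtrees.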
5318 */ 5319 oid = device_get_sysctl_tree(pi->dev); 5320 children = SYSCTL_CHILDREN(oid); 5321 5322 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 5323 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 5324 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 5325 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 5326 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 5327 "PHY temperature (in Celsius)"); 5328 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 5329 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 5330 "PHY firmware version"); 5331 } 5332 5333 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 5334 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A", 5335 "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 5336 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec", 5337 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A", 5338 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)"); 5339 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg", 5340 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I", 5341 "autonegotiation (-1 = not supported)"); 5342 5343 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 5344 port_top_speed(pi), "max speed (in Gbps)"); 5345 5346 if (sc->flags & IS_VF) 5347 return; 5348 5349 /* 5350 * dev.(cxgbe|cxl).X.tc. 5351 */ 5352 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL, 5353 "Tx scheduler traffic classes (cl_rl)"); 5354 for (i = 0; i < sc->chip_params->nsched_cls; i++) { 5355 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i]; 5356 5357 snprintf(name, sizeof(name), "%d", i); 5358 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, 5359 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, 5360 "traffic class")); 5361 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD, 5362 &tc->flags, 0, "flags"); 5363 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", 5364 CTLFLAG_RD, &tc->refcount, 0, "references to this class"); 5365#ifdef SBUF_DRAIN 5366 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", 5367 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i, 5368 sysctl_tc_params, "A", "traffic class parameters"); 5369#endif 5370 } 5371 5372 /* 5373 * dev.cxgbe.X.stats. 
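 * Port statistics.  The SYSCTL_ADD_T4_REG64 entries below read the MPS
 * counters directly from hardware; the port_stats-based entries near
 * the end may be up to 1s stale.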
5374 */ 5375 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 5376 NULL, "port statistics"); 5377 children = SYSCTL_CHILDREN(oid); 5378 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 5379 &pi->tx_parse_error, 0, 5380 "# of tx packets with invalid length or # of segments"); 5381 5382#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 5383 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 5384 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 5385 sysctl_handle_t4_reg64, "QU", desc) 5386 5387 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 5388 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 5389 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 5390 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 5391 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 5392 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 5393 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 5394 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 5395 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 5396 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 5397 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 5398 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 5399 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 5400 "# of tx frames in this range", 5401 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 5402 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 5403 "# of tx frames in this range", 5404 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 5405 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 5406 "# of tx frames in this range", 5407 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 5408 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 5409 "# of tx frames in this range", 5410 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 5411 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 5412 "# of tx frames in this range", 5413 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 5414 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 5415 "# of tx frames in this range", 5416 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 5417 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 5418 "# of tx frames in this range", 5419 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 5420 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 5421 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 5422 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 5423 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 5424 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 5425 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 5426 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 5427 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 5428 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 5429 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 5430 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 5431 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 5432 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 5433 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 5434 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 5435 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 5436 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames 
transmitted", 5437 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 5438 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 5439 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 5440 5441 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 5442 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 5443 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 5444 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 5445 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 5446 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 5447 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 5448 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 5449 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 5450 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 5451 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 5452 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 5453 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5454 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5455 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5456 "# of frames received with bad FCS", 5457 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5458 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5459 "# of frames received with length error", 5460 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5461 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5462 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5463 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5464 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5465 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5466 "# of rx frames in this range", 5467 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5468 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5469 "# of rx frames in this range", 5470 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5471 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5472 "# of rx frames in this range", 5473 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5474 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5475 "# of rx frames in this range", 5476 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5477 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5478 "# of rx frames in this range", 5479 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5480 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5481 "# of rx frames in this range", 5482 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5483 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5484 "# of rx frames in this range", 5485 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5486 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5487 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5488 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5489 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5490 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5491 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5492 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5493 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5494 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5495 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5496 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5497 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 5498 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 5499 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 5500 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 5501 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 5502 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 5503 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 5504 5505#undef SYSCTL_ADD_T4_REG64 5506 5507#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 5508 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 5509 &pi->stats.name, desc) 5510 5511 /* We get these from port_stats and they may be stale by up to 1s */ 5512 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 5513 "# drops due to buffer-group 0 overflows"); 5514 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 5515 "# drops due to buffer-group 1 overflows"); 5516 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 5517 "# drops due to buffer-group 2 overflows"); 5518 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 5519 "# drops due to buffer-group 3 overflows"); 5520 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 5521 "# of buffer-group 0 truncated packets"); 5522 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 5523 "# of buffer-group 1 truncated packets"); 5524 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 5525 "# of buffer-group 2 truncated packets"); 5526 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 5527 "# of buffer-group 3 truncated packets"); 5528 5529#undef SYSCTL_ADD_T4_PORTSTAT 5530} 5531 5532static int 5533sysctl_int_array(SYSCTL_HANDLER_ARGS) 5534{ 5535 int rc, *i, space = 0; 5536 struct sbuf sb; 5537 5538 sbuf_new_for_sysctl(&sb, NULL, 64, req); 5539 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { 5540 if (space) 5541 sbuf_printf(&sb, " "); 5542 sbuf_printf(&sb, "%d", *i); 5543 space = 1; 5544 } 5545 rc = sbuf_finish(&sb); 5546 sbuf_delete(&sb); 5547 return (rc); 5548} 5549 5550static int 5551sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5552{ 5553 int rc; 5554 struct sbuf *sb; 5555 5556 rc = sysctl_wire_old_buffer(req, 0); 5557 if (rc != 0) 5558 return(rc); 5559 5560 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5561 if (sb == NULL) 5562 return (ENOMEM); 5563 5564 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5565 rc = sbuf_finish(sb); 5566 sbuf_delete(sb); 5567 5568 return (rc); 5569} 5570 5571static int 5572sysctl_btphy(SYSCTL_HANDLER_ARGS) 5573{ 5574 struct port_info *pi = arg1; 5575 int op = arg2; 5576 struct adapter *sc = pi->adapter; 5577 u_int v; 5578 int rc; 5579 5580 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); 5581 if (rc) 5582 return (rc); 5583 /* XXX: magic numbers */ 5584 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 
0x20 : 0xc820, 5585 &v); 5586 end_synchronized_op(sc, 0); 5587 if (rc) 5588 return (rc); 5589 if (op == 0) 5590 v /= 256; 5591 5592 rc = sysctl_handle_int(oidp, &v, 0, req); 5593 return (rc); 5594} 5595 5596static int 5597sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5598{ 5599 struct vi_info *vi = arg1; 5600 int rc, val; 5601 5602 val = vi->rsrv_noflowq; 5603 rc = sysctl_handle_int(oidp, &val, 0, req); 5604 if (rc != 0 || req->newptr == NULL) 5605 return (rc); 5606 5607 if ((val >= 1) && (vi->ntxq > 1)) 5608 vi->rsrv_noflowq = 1; 5609 else 5610 vi->rsrv_noflowq = 0; 5611 5612 return (rc); 5613} 5614 5615static int 5616sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5617{ 5618 struct vi_info *vi = arg1; 5619 struct adapter *sc = vi->pi->adapter; 5620 int idx, rc, i; 5621 struct sge_rxq *rxq; 5622#ifdef TCP_OFFLOAD 5623 struct sge_ofld_rxq *ofld_rxq; 5624#endif 5625 uint8_t v; 5626 5627 idx = vi->tmr_idx; 5628 5629 rc = sysctl_handle_int(oidp, &idx, 0, req); 5630 if (rc != 0 || req->newptr == NULL) 5631 return (rc); 5632 5633 if (idx < 0 || idx >= SGE_NTIMERS) 5634 return (EINVAL); 5635 5636 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5637 "t4tmr"); 5638 if (rc) 5639 return (rc); 5640 5641 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 5642 for_each_rxq(vi, i, rxq) { 5643#ifdef atomic_store_rel_8 5644 atomic_store_rel_8(&rxq->iq.intr_params, v); 5645#else 5646 rxq->iq.intr_params = v; 5647#endif 5648 } 5649#ifdef TCP_OFFLOAD 5650 for_each_ofld_rxq(vi, i, ofld_rxq) { 5651#ifdef atomic_store_rel_8 5652 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5653#else 5654 ofld_rxq->iq.intr_params = v; 5655#endif 5656 } 5657#endif 5658 vi->tmr_idx = idx; 5659 5660 end_synchronized_op(sc, LOCK_HELD); 5661 return (0); 5662} 5663 5664static int 5665sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5666{ 5667 struct vi_info *vi = arg1; 5668 struct adapter *sc = vi->pi->adapter; 5669 int idx, rc; 5670 5671 idx = vi->pktc_idx; 5672 5673 rc = sysctl_handle_int(oidp, &idx, 0, req); 5674 if (rc != 0 || req->newptr == NULL) 5675 return (rc); 5676 5677 if (idx < -1 || idx >= SGE_NCOUNTERS) 5678 return (EINVAL); 5679 5680 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5681 "t4pktc"); 5682 if (rc) 5683 return (rc); 5684 5685 if (vi->flags & VI_INIT_DONE) 5686 rc = EBUSY; /* cannot be changed once the queues are created */ 5687 else 5688 vi->pktc_idx = idx; 5689 5690 end_synchronized_op(sc, LOCK_HELD); 5691 return (rc); 5692} 5693 5694static int 5695sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5696{ 5697 struct vi_info *vi = arg1; 5698 struct adapter *sc = vi->pi->adapter; 5699 int qsize, rc; 5700 5701 qsize = vi->qsize_rxq; 5702 5703 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5704 if (rc != 0 || req->newptr == NULL) 5705 return (rc); 5706 5707 if (qsize < 128 || (qsize & 7)) 5708 return (EINVAL); 5709 5710 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5711 "t4rxqs"); 5712 if (rc) 5713 return (rc); 5714 5715 if (vi->flags & VI_INIT_DONE) 5716 rc = EBUSY; /* cannot be changed once the queues are created */ 5717 else 5718 vi->qsize_rxq = qsize; 5719 5720 end_synchronized_op(sc, LOCK_HELD); 5721 return (rc); 5722} 5723 5724static int 5725sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5726{ 5727 struct vi_info *vi = arg1; 5728 struct adapter *sc = vi->pi->adapter; 5729 int qsize, rc; 5730 5731 qsize = vi->qsize_txq; 5732 5733 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5734 if (rc != 0 || req->newptr == NULL) 5735 return (rc); 5736 5737 if (qsize < 128 || qsize > 
65536) 5738 return (EINVAL); 5739 5740 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5741 "t4txqs"); 5742 if (rc) 5743 return (rc); 5744 5745 if (vi->flags & VI_INIT_DONE) 5746 rc = EBUSY; /* cannot be changed once the queues are created */ 5747 else 5748 vi->qsize_txq = qsize; 5749 5750 end_synchronized_op(sc, LOCK_HELD); 5751 return (rc); 5752} 5753 5754static int 5755sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5756{ 5757 struct port_info *pi = arg1; 5758 struct adapter *sc = pi->adapter; 5759 struct link_config *lc = &pi->link_cfg; 5760 int rc; 5761 5762 if (req->newptr == NULL) { 5763 struct sbuf *sb; 5764 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5765 5766 rc = sysctl_wire_old_buffer(req, 0); 5767 if (rc != 0) 5768 return(rc); 5769 5770 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5771 if (sb == NULL) 5772 return (ENOMEM); 5773 5774 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5775 rc = sbuf_finish(sb); 5776 sbuf_delete(sb); 5777 } else { 5778 char s[2]; 5779 int n; 5780 5781 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5782 s[1] = 0; 5783 5784 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5785 if (rc != 0) 5786 return(rc); 5787 5788 if (s[1] != 0) 5789 return (EINVAL); 5790 if (s[0] < '0' || s[0] > '9') 5791 return (EINVAL); /* not a number */ 5792 n = s[0] - '0'; 5793 if (n & ~(PAUSE_TX | PAUSE_RX)) 5794 return (EINVAL); /* some other bit is set too */ 5795 5796 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5797 "t4PAUSE"); 5798 if (rc) 5799 return (rc); 5800 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5801 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5802 lc->requested_fc |= n; 5803 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5804 } 5805 end_synchronized_op(sc, 0); 5806 } 5807 5808 return (rc); 5809} 5810 5811static int 5812sysctl_fec(SYSCTL_HANDLER_ARGS) 5813{ 5814 struct port_info *pi = arg1; 5815 struct adapter *sc = pi->adapter; 5816 struct link_config *lc = &pi->link_cfg; 5817 int rc; 5818 5819 if (req->newptr == NULL) { 5820 struct sbuf *sb; 5821 static char *bits = "\20\1RS\2BASER_RS\3RESERVED"; 5822 5823 rc = sysctl_wire_old_buffer(req, 0); 5824 if (rc != 0) 5825 return(rc); 5826 5827 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5828 if (sb == NULL) 5829 return (ENOMEM); 5830 5831 sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits); 5832 rc = sbuf_finish(sb); 5833 sbuf_delete(sb); 5834 } else { 5835 char s[2]; 5836 int n; 5837 5838 s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC); 5839 s[1] = 0; 5840 5841 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5842 if (rc != 0) 5843 return(rc); 5844 5845 if (s[1] != 0) 5846 return (EINVAL); 5847 if (s[0] < '0' || s[0] > '9') 5848 return (EINVAL); /* not a number */ 5849 n = s[0] - '0'; 5850 if (n & ~M_FW_PORT_CAP_FEC) 5851 return (EINVAL); /* some other bit is set too */ 5852 5853 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5854 "t4fec"); 5855 if (rc) 5856 return (rc); 5857 if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) { 5858 lc->requested_fec = n & 5859 G_FW_PORT_CAP_FEC(lc->supported); 5860 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5861 } 5862 end_synchronized_op(sc, 0); 5863 } 5864 5865 return (rc); 5866} 5867 5868static int 5869sysctl_autoneg(SYSCTL_HANDLER_ARGS) 5870{ 5871 struct port_info *pi = arg1; 5872 struct adapter *sc = pi->adapter; 5873 struct link_config *lc = &pi->link_cfg; 5874 int rc, val, old; 5875 5876 if (lc->supported & FW_PORT_CAP_ANEG) 5877 val = lc->autoneg == 
AUTONEG_ENABLE ? 1 : 0; 5878 else 5879 val = -1; 5880 rc = sysctl_handle_int(oidp, &val, 0, req); 5881 if (rc != 0 || req->newptr == NULL) 5882 return (rc); 5883 if ((lc->supported & FW_PORT_CAP_ANEG) == 0) 5884 return (ENOTSUP); 5885 5886 val = val ? AUTONEG_ENABLE : AUTONEG_DISABLE; 5887 if (lc->autoneg == val) 5888 return (0); /* no change */ 5889 5890 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5891 "t4aneg"); 5892 if (rc) 5893 return (rc); 5894 old = lc->autoneg; 5895 lc->autoneg = val; 5896 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5897 if (rc != 0) 5898 lc->autoneg = old; 5899 return (rc); 5900} 5901 5902static int 5903sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 5904{ 5905 struct adapter *sc = arg1; 5906 int reg = arg2; 5907 uint64_t val; 5908 5909 val = t4_read_reg64(sc, reg); 5910 5911 return (sysctl_handle_64(oidp, &val, 0, req)); 5912} 5913 5914static int 5915sysctl_temperature(SYSCTL_HANDLER_ARGS) 5916{ 5917 struct adapter *sc = arg1; 5918 int rc, t; 5919 uint32_t param, val; 5920 5921 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 5922 if (rc) 5923 return (rc); 5924 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5925 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 5926 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 5927 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 5928 end_synchronized_op(sc, 0); 5929 if (rc) 5930 return (rc); 5931 5932 /* unknown is returned as 0 but we display -1 in that case */ 5933 t = val == 0 ? -1 : val; 5934 5935 rc = sysctl_handle_int(oidp, &t, 0, req); 5936 return (rc); 5937} 5938 5939#ifdef SBUF_DRAIN 5940static int 5941sysctl_cctrl(SYSCTL_HANDLER_ARGS) 5942{ 5943 struct adapter *sc = arg1; 5944 struct sbuf *sb; 5945 int rc, i; 5946 uint16_t incr[NMTUS][NCCTRL_WIN]; 5947 static const char *dec_fac[] = { 5948 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 5949 "0.9375" 5950 }; 5951 5952 rc = sysctl_wire_old_buffer(req, 0); 5953 if (rc != 0) 5954 return (rc); 5955 5956 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5957 if (sb == NULL) 5958 return (ENOMEM); 5959 5960 t4_read_cong_tbl(sc, incr); 5961 5962 for (i = 0; i < NCCTRL_WIN; ++i) { 5963 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 5964 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 5965 incr[5][i], incr[6][i], incr[7][i]); 5966 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 5967 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 5968 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 5969 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 5970 } 5971 5972 rc = sbuf_finish(sb); 5973 sbuf_delete(sb); 5974 5975 return (rc); 5976} 5977 5978static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 5979 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 5980 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 5981 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 5982}; 5983 5984static int 5985sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 5986{ 5987 struct adapter *sc = arg1; 5988 struct sbuf *sb; 5989 int rc, i, n, qid = arg2; 5990 uint32_t *buf, *p; 5991 char *qtype; 5992 u_int cim_num_obq = sc->chip_params->cim_num_obq; 5993 5994 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 5995 ("%s: bad qid %d\n", __func__, qid)); 5996 5997 if (qid < CIM_NUM_IBQ) { 5998 /* inbound queue */ 5999 qtype = "IBQ"; 6000 n = 4 * CIM_IBQ_SIZE; 6001 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 6002 rc = t4_read_cim_ibq(sc, qid, buf, n); 6003 } else { 6004 
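		/*
		 * A qid at or above CIM_NUM_IBQ selects an outbound queue;
		 * it is rebased to an OBQ index before the read below.
		 */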
/* outbound queue */ 6005 qtype = "OBQ"; 6006 qid -= CIM_NUM_IBQ; 6007 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 6008 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 6009 rc = t4_read_cim_obq(sc, qid, buf, n); 6010 } 6011 6012 if (rc < 0) { 6013 rc = -rc; 6014 goto done; 6015 } 6016 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 6017 6018 rc = sysctl_wire_old_buffer(req, 0); 6019 if (rc != 0) 6020 goto done; 6021 6022 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 6023 if (sb == NULL) { 6024 rc = ENOMEM; 6025 goto done; 6026 } 6027 6028 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); 6029 for (i = 0, p = buf; i < n; i += 16, p += 4) 6030 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 6031 p[2], p[3]); 6032 6033 rc = sbuf_finish(sb); 6034 sbuf_delete(sb); 6035done: 6036 free(buf, M_CXGBE); 6037 return (rc); 6038} 6039 6040static int 6041sysctl_cim_la(SYSCTL_HANDLER_ARGS) 6042{ 6043 struct adapter *sc = arg1; 6044 u_int cfg; 6045 struct sbuf *sb; 6046 uint32_t *buf, *p; 6047 int rc; 6048 6049 MPASS(chip_id(sc) <= CHELSIO_T5); 6050 6051 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 6052 if (rc != 0) 6053 return (rc); 6054 6055 rc = sysctl_wire_old_buffer(req, 0); 6056 if (rc != 0) 6057 return (rc); 6058 6059 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6060 if (sb == NULL) 6061 return (ENOMEM); 6062 6063 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 6064 M_ZERO | M_WAITOK); 6065 6066 rc = -t4_cim_read_la(sc, buf, NULL); 6067 if (rc != 0) 6068 goto done; 6069 6070 sbuf_printf(sb, "Status Data PC%s", 6071 cfg & F_UPDBGLACAPTPCONLY ? "" : 6072 " LS0Stat LS0Addr LS0Data"); 6073 6074 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { 6075 if (cfg & F_UPDBGLACAPTPCONLY) { 6076 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 6077 p[6], p[7]); 6078 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 6079 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 6080 p[4] & 0xff, p[5] >> 8); 6081 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 6082 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 6083 p[1] & 0xf, p[2] >> 4); 6084 } else { 6085 sbuf_printf(sb, 6086 "\n %02x %x%07x %x%07x %08x %08x " 6087 "%08x%08x%08x%08x", 6088 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 6089 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 6090 p[6], p[7]); 6091 } 6092 } 6093 6094 rc = sbuf_finish(sb); 6095 sbuf_delete(sb); 6096done: 6097 free(buf, M_CXGBE); 6098 return (rc); 6099} 6100 6101static int 6102sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS) 6103{ 6104 struct adapter *sc = arg1; 6105 u_int cfg; 6106 struct sbuf *sb; 6107 uint32_t *buf, *p; 6108 int rc; 6109 6110 MPASS(chip_id(sc) > CHELSIO_T5); 6111 6112 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 6113 if (rc != 0) 6114 return (rc); 6115 6116 rc = sysctl_wire_old_buffer(req, 0); 6117 if (rc != 0) 6118 return (rc); 6119 6120 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6121 if (sb == NULL) 6122 return (ENOMEM); 6123 6124 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 6125 M_ZERO | M_WAITOK); 6126 6127 rc = -t4_cim_read_la(sc, buf, NULL); 6128 if (rc != 0) 6129 goto done; 6130 6131 sbuf_printf(sb, "Status Inst Data PC%s", 6132 cfg & F_UPDBGLACAPTPCONLY ? 
"" : 6133 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); 6134 6135 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { 6136 if (cfg & F_UPDBGLACAPTPCONLY) { 6137 sbuf_printf(sb, "\n %02x %08x %08x %08x", 6138 p[3] & 0xff, p[2], p[1], p[0]); 6139 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", 6140 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, 6141 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); 6142 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", 6143 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, 6144 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, 6145 p[6] >> 16); 6146 } else { 6147 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " 6148 "%08x %08x %08x %08x %08x %08x", 6149 (p[9] >> 16) & 0xff, 6150 p[9] & 0xffff, p[8] >> 16, 6151 p[8] & 0xffff, p[7] >> 16, 6152 p[7] & 0xffff, p[6] >> 16, 6153 p[2], p[1], p[0], p[5], p[4], p[3]); 6154 } 6155 } 6156 6157 rc = sbuf_finish(sb); 6158 sbuf_delete(sb); 6159done: 6160 free(buf, M_CXGBE); 6161 return (rc); 6162} 6163 6164static int 6165sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 6166{ 6167 struct adapter *sc = arg1; 6168 u_int i; 6169 struct sbuf *sb; 6170 uint32_t *buf, *p; 6171 int rc; 6172 6173 rc = sysctl_wire_old_buffer(req, 0); 6174 if (rc != 0) 6175 return (rc); 6176 6177 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6178 if (sb == NULL) 6179 return (ENOMEM); 6180 6181 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 6182 M_ZERO | M_WAITOK); 6183 6184 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 6185 p = buf; 6186 6187 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 6188 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 6189 p[1], p[0]); 6190 } 6191 6192 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 6193 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 6194 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", 6195 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 6196 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 6197 (p[1] >> 2) | ((p[2] & 3) << 30), 6198 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 6199 p[0] & 1); 6200 } 6201 6202 rc = sbuf_finish(sb); 6203 sbuf_delete(sb); 6204 free(buf, M_CXGBE); 6205 return (rc); 6206} 6207 6208static int 6209sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 6210{ 6211 struct adapter *sc = arg1; 6212 u_int i; 6213 struct sbuf *sb; 6214 uint32_t *buf, *p; 6215 int rc; 6216 6217 rc = sysctl_wire_old_buffer(req, 0); 6218 if (rc != 0) 6219 return (rc); 6220 6221 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6222 if (sb == NULL) 6223 return (ENOMEM); 6224 6225 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 6226 M_ZERO | M_WAITOK); 6227 6228 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 6229 p = buf; 6230 6231 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 6232 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6233 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 6234 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 6235 p[4], p[3], p[2], p[1], p[0]); 6236 } 6237 6238 sbuf_printf(sb, "\n\nCntl ID Data"); 6239 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6240 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 6241 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 6242 } 6243 6244 rc = sbuf_finish(sb); 6245 sbuf_delete(sb); 6246 free(buf, M_CXGBE); 6247 return (rc); 6248} 6249 6250static int 6251sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 6252{ 6253 struct adapter *sc = arg1; 6254 struct sbuf *sb; 6255 int rc, i; 6256 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6257 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6258 
uint16_t thres[CIM_NUM_IBQ]; 6259 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 6260 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 6261 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 6262 6263 cim_num_obq = sc->chip_params->cim_num_obq; 6264 if (is_t4(sc)) { 6265 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 6266 obq_rdaddr = A_UP_OBQ_0_REALADDR; 6267 } else { 6268 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 6269 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 6270 } 6271 nq = CIM_NUM_IBQ + cim_num_obq; 6272 6273 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 6274 if (rc == 0) 6275 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 6276 if (rc != 0) 6277 return (rc); 6278 6279 t4_read_cimq_cfg(sc, base, size, thres); 6280 6281 rc = sysctl_wire_old_buffer(req, 0); 6282 if (rc != 0) 6283 return (rc); 6284 6285 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 6286 if (sb == NULL) 6287 return (ENOMEM); 6288 6289 sbuf_printf(sb, 6290 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 6291 6292 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 6293 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 6294 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 6295 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6296 G_QUEREMFLITS(p[2]) * 16); 6297 for ( ; i < nq; i++, p += 4, wr += 2) 6298 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 6299 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 6300 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6301 G_QUEREMFLITS(p[2]) * 16); 6302 6303 rc = sbuf_finish(sb); 6304 sbuf_delete(sb); 6305 6306 return (rc); 6307} 6308 6309static int 6310sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 6311{ 6312 struct adapter *sc = arg1; 6313 struct sbuf *sb; 6314 int rc; 6315 struct tp_cpl_stats stats; 6316 6317 rc = sysctl_wire_old_buffer(req, 0); 6318 if (rc != 0) 6319 return (rc); 6320 6321 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6322 if (sb == NULL) 6323 return (ENOMEM); 6324 6325 mtx_lock(&sc->reg_lock); 6326 t4_tp_get_cpl_stats(sc, &stats); 6327 mtx_unlock(&sc->reg_lock); 6328 6329 if (sc->chip_params->nchan > 2) { 6330 sbuf_printf(sb, " channel 0 channel 1" 6331 " channel 2 channel 3"); 6332 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", 6333 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 6334 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", 6335 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 6336 } else { 6337 sbuf_printf(sb, " channel 0 channel 1"); 6338 sbuf_printf(sb, "\nCPL requests: %10u %10u", 6339 stats.req[0], stats.req[1]); 6340 sbuf_printf(sb, "\nCPL responses: %10u %10u", 6341 stats.rsp[0], stats.rsp[1]); 6342 } 6343 6344 rc = sbuf_finish(sb); 6345 sbuf_delete(sb); 6346 6347 return (rc); 6348} 6349 6350static int 6351sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 6352{ 6353 struct adapter *sc = arg1; 6354 struct sbuf *sb; 6355 int rc; 6356 struct tp_usm_stats stats; 6357 6358 rc = sysctl_wire_old_buffer(req, 0); 6359 if (rc != 0) 6360 return(rc); 6361 6362 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6363 if (sb == NULL) 6364 return (ENOMEM); 6365 6366 t4_get_usm_stats(sc, &stats); 6367 6368 sbuf_printf(sb, "Frames: %u\n", stats.frames); 6369 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 6370 sbuf_printf(sb, "Drops: %u", stats.drops); 6371 6372 rc = sbuf_finish(sb); 6373 sbuf_delete(sb); 6374 6375 return (rc); 6376} 6377 6378static const char * const devlog_level_strings[] = { 6379 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 6380 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 6381 [FW_DEVLOG_LEVEL_ERR] = 
"ERR", 6382 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 6383 [FW_DEVLOG_LEVEL_INFO] = "INFO", 6384 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 6385}; 6386 6387static const char * const devlog_facility_strings[] = { 6388 [FW_DEVLOG_FACILITY_CORE] = "CORE", 6389 [FW_DEVLOG_FACILITY_CF] = "CF", 6390 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 6391 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 6392 [FW_DEVLOG_FACILITY_RES] = "RES", 6393 [FW_DEVLOG_FACILITY_HW] = "HW", 6394 [FW_DEVLOG_FACILITY_FLR] = "FLR", 6395 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 6396 [FW_DEVLOG_FACILITY_PHY] = "PHY", 6397 [FW_DEVLOG_FACILITY_MAC] = "MAC", 6398 [FW_DEVLOG_FACILITY_PORT] = "PORT", 6399 [FW_DEVLOG_FACILITY_VI] = "VI", 6400 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 6401 [FW_DEVLOG_FACILITY_ACL] = "ACL", 6402 [FW_DEVLOG_FACILITY_TM] = "TM", 6403 [FW_DEVLOG_FACILITY_QFC] = "QFC", 6404 [FW_DEVLOG_FACILITY_DCB] = "DCB", 6405 [FW_DEVLOG_FACILITY_ETH] = "ETH", 6406 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 6407 [FW_DEVLOG_FACILITY_RI] = "RI", 6408 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 6409 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 6410 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 6411 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", 6412 [FW_DEVLOG_FACILITY_CHNET] = "CHNET", 6413}; 6414 6415static int 6416sysctl_devlog(SYSCTL_HANDLER_ARGS) 6417{ 6418 struct adapter *sc = arg1; 6419 struct devlog_params *dparams = &sc->params.devlog; 6420 struct fw_devlog_e *buf, *e; 6421 int i, j, rc, nentries, first = 0; 6422 struct sbuf *sb; 6423 uint64_t ftstamp = UINT64_MAX; 6424 6425 if (dparams->addr == 0) 6426 return (ENXIO); 6427 6428 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 6429 if (buf == NULL) 6430 return (ENOMEM); 6431 6432 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); 6433 if (rc != 0) 6434 goto done; 6435 6436 nentries = dparams->size / sizeof(struct fw_devlog_e); 6437 for (i = 0; i < nentries; i++) { 6438 e = &buf[i]; 6439 6440 if (e->timestamp == 0) 6441 break; /* end */ 6442 6443 e->timestamp = be64toh(e->timestamp); 6444 e->seqno = be32toh(e->seqno); 6445 for (j = 0; j < 8; j++) 6446 e->params[j] = be32toh(e->params[j]); 6447 6448 if (e->timestamp < ftstamp) { 6449 ftstamp = e->timestamp; 6450 first = i; 6451 } 6452 } 6453 6454 if (buf[first].timestamp == 0) 6455 goto done; /* nothing in the log */ 6456 6457 rc = sysctl_wire_old_buffer(req, 0); 6458 if (rc != 0) 6459 goto done; 6460 6461 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6462 if (sb == NULL) { 6463 rc = ENOMEM; 6464 goto done; 6465 } 6466 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 6467 "Seq#", "Tstamp", "Level", "Facility", "Message"); 6468 6469 i = first; 6470 do { 6471 e = &buf[i]; 6472 if (e->timestamp == 0) 6473 break; /* end */ 6474 6475 sbuf_printf(sb, "%10d %15ju %8s %8s ", 6476 e->seqno, e->timestamp, 6477 (e->level < nitems(devlog_level_strings) ? 6478 devlog_level_strings[e->level] : "UNKNOWN"), 6479 (e->facility < nitems(devlog_facility_strings) ? 
6480 devlog_facility_strings[e->facility] : "UNKNOWN")); 6481 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 6482 e->params[2], e->params[3], e->params[4], 6483 e->params[5], e->params[6], e->params[7]); 6484 6485 if (++i == nentries) 6486 i = 0; 6487 } while (i != first); 6488 6489 rc = sbuf_finish(sb); 6490 sbuf_delete(sb); 6491done: 6492 free(buf, M_CXGBE); 6493 return (rc); 6494} 6495 6496static int 6497sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 6498{ 6499 struct adapter *sc = arg1; 6500 struct sbuf *sb; 6501 int rc; 6502 struct tp_fcoe_stats stats[MAX_NCHAN]; 6503 int i, nchan = sc->chip_params->nchan; 6504 6505 rc = sysctl_wire_old_buffer(req, 0); 6506 if (rc != 0) 6507 return (rc); 6508 6509 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6510 if (sb == NULL) 6511 return (ENOMEM); 6512 6513 for (i = 0; i < nchan; i++) 6514 t4_get_fcoe_stats(sc, i, &stats[i]); 6515 6516 if (nchan > 2) { 6517 sbuf_printf(sb, " channel 0 channel 1" 6518 " channel 2 channel 3"); 6519 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", 6520 stats[0].octets_ddp, stats[1].octets_ddp, 6521 stats[2].octets_ddp, stats[3].octets_ddp); 6522 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", 6523 stats[0].frames_ddp, stats[1].frames_ddp, 6524 stats[2].frames_ddp, stats[3].frames_ddp); 6525 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", 6526 stats[0].frames_drop, stats[1].frames_drop, 6527 stats[2].frames_drop, stats[3].frames_drop); 6528 } else { 6529 sbuf_printf(sb, " channel 0 channel 1"); 6530 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", 6531 stats[0].octets_ddp, stats[1].octets_ddp); 6532 sbuf_printf(sb, "\nframesDDP: %16u %16u", 6533 stats[0].frames_ddp, stats[1].frames_ddp); 6534 sbuf_printf(sb, "\nframesDrop: %16u %16u", 6535 stats[0].frames_drop, stats[1].frames_drop); 6536 } 6537 6538 rc = sbuf_finish(sb); 6539 sbuf_delete(sb); 6540 6541 return (rc); 6542} 6543 6544static int 6545sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 6546{ 6547 struct adapter *sc = arg1; 6548 struct sbuf *sb; 6549 int rc, i; 6550 unsigned int map, kbps, ipg, mode; 6551 unsigned int pace_tab[NTX_SCHED]; 6552 6553 rc = sysctl_wire_old_buffer(req, 0); 6554 if (rc != 0) 6555 return (rc); 6556 6557 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6558 if (sb == NULL) 6559 return (ENOMEM); 6560 6561 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 6562 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 6563 t4_read_pace_tbl(sc, pace_tab); 6564 6565 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 6566 "Class IPG (0.1 ns) Flow IPG (us)"); 6567 6568 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 6569 t4_get_tx_sched(sc, i, &kbps, &ipg); 6570 sbuf_printf(sb, "\n %u %-5s %u ", i, 6571 (mode & (1 << i)) ? 
"flow" : "class", map & 3); 6572 if (kbps) 6573 sbuf_printf(sb, "%9u ", kbps); 6574 else 6575 sbuf_printf(sb, " disabled "); 6576 6577 if (ipg) 6578 sbuf_printf(sb, "%13u ", ipg); 6579 else 6580 sbuf_printf(sb, " disabled "); 6581 6582 if (pace_tab[i]) 6583 sbuf_printf(sb, "%10u", pace_tab[i]); 6584 else 6585 sbuf_printf(sb, " disabled"); 6586 } 6587 6588 rc = sbuf_finish(sb); 6589 sbuf_delete(sb); 6590 6591 return (rc); 6592} 6593 6594static int 6595sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 6596{ 6597 struct adapter *sc = arg1; 6598 struct sbuf *sb; 6599 int rc, i, j; 6600 uint64_t *p0, *p1; 6601 struct lb_port_stats s[2]; 6602 static const char *stat_name[] = { 6603 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 6604 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 6605 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 6606 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 6607 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 6608 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 6609 "BG2FramesTrunc:", "BG3FramesTrunc:" 6610 }; 6611 6612 rc = sysctl_wire_old_buffer(req, 0); 6613 if (rc != 0) 6614 return (rc); 6615 6616 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6617 if (sb == NULL) 6618 return (ENOMEM); 6619 6620 memset(s, 0, sizeof(s)); 6621 6622 for (i = 0; i < sc->chip_params->nchan; i += 2) { 6623 t4_get_lb_stats(sc, i, &s[0]); 6624 t4_get_lb_stats(sc, i + 1, &s[1]); 6625 6626 p0 = &s[0].octets; 6627 p1 = &s[1].octets; 6628 sbuf_printf(sb, "%s Loopback %u" 6629 " Loopback %u", i == 0 ? "" : "\n", i, i + 1); 6630 6631 for (j = 0; j < nitems(stat_name); j++) 6632 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6633 *p0++, *p1++); 6634 } 6635 6636 rc = sbuf_finish(sb); 6637 sbuf_delete(sb); 6638 6639 return (rc); 6640} 6641 6642static int 6643sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6644{ 6645 int rc = 0; 6646 struct port_info *pi = arg1; 6647 struct link_config *lc = &pi->link_cfg; 6648 struct sbuf *sb; 6649 6650 rc = sysctl_wire_old_buffer(req, 0); 6651 if (rc != 0) 6652 return(rc); 6653 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6654 if (sb == NULL) 6655 return (ENOMEM); 6656 6657 if (lc->link_ok || lc->link_down_rc == 255) 6658 sbuf_printf(sb, "n/a"); 6659 else 6660 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); 6661 6662 rc = sbuf_finish(sb); 6663 sbuf_delete(sb); 6664 6665 return (rc); 6666} 6667 6668struct mem_desc { 6669 unsigned int base; 6670 unsigned int limit; 6671 unsigned int idx; 6672}; 6673 6674static int 6675mem_desc_cmp(const void *a, const void *b) 6676{ 6677 return ((const struct mem_desc *)a)->base - 6678 ((const struct mem_desc *)b)->base; 6679} 6680 6681static void 6682mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6683 unsigned int to) 6684{ 6685 unsigned int size; 6686 6687 if (from == to) 6688 return; 6689 6690 size = to - from + 1; 6691 if (size == 0) 6692 return; 6693 6694 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6695 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6696} 6697 6698static int 6699sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6700{ 6701 struct adapter *sc = arg1; 6702 struct sbuf *sb; 6703 int rc, i, n; 6704 uint32_t lo, hi, used, alloc; 6705 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6706 static const char *region[] = { 6707 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6708 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6709 "Tx payload:", "Rx 
payload:", "LE hash:", "iSCSI region:", 6710 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6711 "RQUDP region:", "PBL region:", "TXPBL region:", 6712 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6713 "On-chip queues:" 6714 }; 6715 struct mem_desc avail[4]; 6716 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6717 struct mem_desc *md = mem; 6718 6719 rc = sysctl_wire_old_buffer(req, 0); 6720 if (rc != 0) 6721 return (rc); 6722 6723 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6724 if (sb == NULL) 6725 return (ENOMEM); 6726 6727 for (i = 0; i < nitems(mem); i++) { 6728 mem[i].limit = 0; 6729 mem[i].idx = i; 6730 } 6731 6732 /* Find and sort the populated memory ranges */ 6733 i = 0; 6734 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6735 if (lo & F_EDRAM0_ENABLE) { 6736 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6737 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6738 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6739 avail[i].idx = 0; 6740 i++; 6741 } 6742 if (lo & F_EDRAM1_ENABLE) { 6743 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 6744 avail[i].base = G_EDRAM1_BASE(hi) << 20; 6745 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 6746 avail[i].idx = 1; 6747 i++; 6748 } 6749 if (lo & F_EXT_MEM_ENABLE) { 6750 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 6751 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 6752 avail[i].limit = avail[i].base + 6753 (G_EXT_MEM_SIZE(hi) << 20); 6754 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */ 6755 i++; 6756 } 6757 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { 6758 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 6759 avail[i].base = G_EXT_MEM1_BASE(hi) << 20; 6760 avail[i].limit = avail[i].base + 6761 (G_EXT_MEM1_SIZE(hi) << 20); 6762 avail[i].idx = 4; 6763 i++; 6764 } 6765 if (!i) /* no memory available */ 6766 return 0; 6767 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); 6768 6769 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); 6770 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); 6771 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); 6772 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 6773 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); 6774 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); 6775 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); 6776 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); 6777 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); 6778 6779 /* the next few have explicit upper bounds */ 6780 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); 6781 md->limit = md->base - 1 + 6782 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * 6783 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); 6784 md++; 6785 6786 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); 6787 md->limit = md->base - 1 + 6788 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * 6789 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); 6790 md++; 6791 6792 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6793 if (chip_id(sc) <= CHELSIO_T5) 6794 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 6795 else 6796 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); 6797 md->limit = 0; 6798 } else { 6799 md->base = 0; 6800 md->idx = nitems(region); /* hide it */ 6801 } 6802 md++; 6803 6804#define ulp_region(reg) \ 6805 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ 6806 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) 6807 6808 ulp_region(RX_ISCSI); 6809 ulp_region(RX_TDDP); 6810 ulp_region(TX_TPT); 6811 ulp_region(RX_STAG); 6812 ulp_region(RX_RQ); 6813 ulp_region(RX_RQUDP); 6814 
ulp_region(RX_PBL); 6815 ulp_region(TX_PBL); 6816#undef ulp_region 6817 6818 md->base = 0; 6819 md->idx = nitems(region); 6820 if (!is_t4(sc)) { 6821 uint32_t size = 0; 6822 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); 6823 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); 6824 6825 if (is_t5(sc)) { 6826 if (sge_ctrl & F_VFIFO_ENABLE) 6827 size = G_DBVFIFO_SIZE(fifo_size); 6828 } else 6829 size = G_T6_DBVFIFO_SIZE(fifo_size); 6830 6831 if (size) { 6832 md->base = G_BASEADDR(t4_read_reg(sc, 6833 A_SGE_DBVFIFO_BADDR)); 6834 md->limit = md->base + (size << 2) - 1; 6835 } 6836 } 6837 md++; 6838 6839 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 6840 md->limit = 0; 6841 md++; 6842 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 6843 md->limit = 0; 6844 md++; 6845 6846 md->base = sc->vres.ocq.start; 6847 if (sc->vres.ocq.size) 6848 md->limit = md->base + sc->vres.ocq.size - 1; 6849 else 6850 md->idx = nitems(region); /* hide it */ 6851 md++; 6852 6853 /* add any address-space holes, there can be up to 3 */ 6854 for (n = 0; n < i - 1; n++) 6855 if (avail[n].limit < avail[n + 1].base) 6856 (md++)->base = avail[n].limit; 6857 if (avail[n].limit) 6858 (md++)->base = avail[n].limit; 6859 6860 n = md - mem; 6861 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 6862 6863 for (lo = 0; lo < i; lo++) 6864 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 6865 avail[lo].limit - 1); 6866 6867 sbuf_printf(sb, "\n"); 6868 for (i = 0; i < n; i++) { 6869 if (mem[i].idx >= nitems(region)) 6870 continue; /* skip holes */ 6871 if (!mem[i].limit) 6872 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 6873 mem_region_show(sb, region[mem[i].idx], mem[i].base, 6874 mem[i].limit); 6875 } 6876 6877 sbuf_printf(sb, "\n"); 6878 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 6879 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 6880 mem_region_show(sb, "uP RAM:", lo, hi); 6881 6882 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 6883 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 6884 mem_region_show(sb, "uP Extmem2:", lo, hi); 6885 6886 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 6887 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 6888 G_PMRXMAXPAGE(lo), 6889 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 6890 (lo & F_PMRXNUMCHN) ? 2 : 1); 6891 6892 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 6893 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 6894 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 6895 G_PMTXMAXPAGE(lo), 6896 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 6897 hi >= (1 << 20) ? 
'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 6898 sbuf_printf(sb, "%u p-structs\n", 6899 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 6900 6901 for (i = 0; i < 4; i++) { 6902 if (chip_id(sc) > CHELSIO_T5) 6903 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); 6904 else 6905 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 6906 if (is_t5(sc)) { 6907 used = G_T5_USED(lo); 6908 alloc = G_T5_ALLOC(lo); 6909 } else { 6910 used = G_USED(lo); 6911 alloc = G_ALLOC(lo); 6912 } 6913 /* For T6 these are MAC buffer groups */ 6914 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 6915 i, used, alloc); 6916 } 6917 for (i = 0; i < sc->chip_params->nchan; i++) { 6918 if (chip_id(sc) > CHELSIO_T5) 6919 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); 6920 else 6921 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 6922 if (is_t5(sc)) { 6923 used = G_T5_USED(lo); 6924 alloc = G_T5_ALLOC(lo); 6925 } else { 6926 used = G_USED(lo); 6927 alloc = G_ALLOC(lo); 6928 } 6929 /* For T6 these are MAC buffer groups */ 6930 sbuf_printf(sb, 6931 "\nLoopback %d using %u pages out of %u allocated", 6932 i, used, alloc); 6933 } 6934 6935 rc = sbuf_finish(sb); 6936 sbuf_delete(sb); 6937 6938 return (rc); 6939} 6940 6941static inline void 6942tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 6943{ 6944 *mask = x | y; 6945 y = htobe64(y); 6946 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 6947} 6948 6949static int 6950sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 6951{ 6952 struct adapter *sc = arg1; 6953 struct sbuf *sb; 6954 int rc, i; 6955 6956 MPASS(chip_id(sc) <= CHELSIO_T5); 6957 6958 rc = sysctl_wire_old_buffer(req, 0); 6959 if (rc != 0) 6960 return (rc); 6961 6962 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6963 if (sb == NULL) 6964 return (ENOMEM); 6965 6966 sbuf_printf(sb, 6967 "Idx Ethernet address Mask Vld Ports PF" 6968 " VF Replication P0 P1 P2 P3 ML"); 6969 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6970 uint64_t tcamx, tcamy, mask; 6971 uint32_t cls_lo, cls_hi; 6972 uint8_t addr[ETHER_ADDR_LEN]; 6973 6974 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 6975 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 6976 if (tcamx & tcamy) 6977 continue; 6978 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6979 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6980 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6981 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 6982 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], 6983 addr[3], addr[4], addr[5], (uintmax_t)mask, 6984 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 6985 G_PORTMAP(cls_hi), G_PF(cls_lo), 6986 (cls_lo & F_VF_VALID) ? 
G_VF(cls_lo) : -1); 6987 6988 if (cls_lo & F_REPLICATE) { 6989 struct fw_ldst_cmd ldst_cmd; 6990 6991 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6992 ldst_cmd.op_to_addrspace = 6993 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6994 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6995 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6996 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6997 ldst_cmd.u.mps.rplc.fid_idx = 6998 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6999 V_FW_LDST_CMD_IDX(i)); 7000 7001 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 7002 "t4mps"); 7003 if (rc) 7004 break; 7005 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 7006 sizeof(ldst_cmd), &ldst_cmd); 7007 end_synchronized_op(sc, 0); 7008 7009 if (rc != 0) { 7010 sbuf_printf(sb, "%36d", rc); 7011 rc = 0; 7012 } else { 7013 sbuf_printf(sb, " %08x %08x %08x %08x", 7014 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 7015 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 7016 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 7017 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 7018 } 7019 } else 7020 sbuf_printf(sb, "%36s", ""); 7021 7022 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 7023 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 7024 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 7025 } 7026 7027 if (rc) 7028 (void) sbuf_finish(sb); 7029 else 7030 rc = sbuf_finish(sb); 7031 sbuf_delete(sb); 7032 7033 return (rc); 7034} 7035 7036static int 7037sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) 7038{ 7039 struct adapter *sc = arg1; 7040 struct sbuf *sb; 7041 int rc, i; 7042 7043 MPASS(chip_id(sc) > CHELSIO_T5); 7044 7045 rc = sysctl_wire_old_buffer(req, 0); 7046 if (rc != 0) 7047 return (rc); 7048 7049 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7050 if (sb == NULL) 7051 return (ENOMEM); 7052 7053 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" 7054 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" 7055 " Replication" 7056 " P0 P1 P2 P3 ML\n"); 7057 7058 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 7059 uint8_t dip_hit, vlan_vld, lookup_type, port_num; 7060 uint16_t ivlan; 7061 uint64_t tcamx, tcamy, val, mask; 7062 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; 7063 uint8_t addr[ETHER_ADDR_LEN]; 7064 7065 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); 7066 if (i < 256) 7067 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); 7068 else 7069 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); 7070 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 7071 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 7072 tcamy = G_DMACH(val) << 32; 7073 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 7074 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 7075 lookup_type = G_DATALKPTYPE(data2); 7076 port_num = G_DATAPORTNUM(data2); 7077 if (lookup_type && lookup_type != M_DATALKPTYPE) { 7078 /* Inner header VNI */ 7079 vniy = ((data2 & F_DATAVIDH2) << 23) | 7080 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 7081 dip_hit = data2 & F_DATADIPHIT; 7082 vlan_vld = 0; 7083 } else { 7084 vniy = 0; 7085 dip_hit = 0; 7086 vlan_vld = data2 & F_DATAVIDH2; 7087 ivlan = G_VIDL(val); 7088 } 7089 7090 ctl |= V_CTLXYBITSEL(1); 7091 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 7092 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 7093 tcamx = G_DMACH(val) << 32; 7094 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 7095 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 7096 if (lookup_type && lookup_type != M_DATALKPTYPE) { 7097 /* Inner header VNI mask */ 7098 vnix = ((data2 & F_DATAVIDH2) << 23) | 7099 (G_DATAVIDH1(data2) << 
16) | G_VIDL(val); 7100 } else 7101 vnix = 0; 7102 7103 if (tcamx & tcamy) 7104 continue; 7105 tcamxy2valmask(tcamx, tcamy, addr, &mask); 7106 7107 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 7108 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 7109 7110 if (lookup_type && lookup_type != M_DATALKPTYPE) { 7111 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 7112 "%012jx %06x %06x - - %3c" 7113 " 'I' %4x %3c %#x%4u%4d", i, addr[0], 7114 addr[1], addr[2], addr[3], addr[4], addr[5], 7115 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', 7116 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 7117 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 7118 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 7119 } else { 7120 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 7121 "%012jx - - ", i, addr[0], addr[1], 7122 addr[2], addr[3], addr[4], addr[5], 7123 (uintmax_t)mask); 7124 7125 if (vlan_vld) 7126 sbuf_printf(sb, "%4u Y ", ivlan); 7127 else 7128 sbuf_printf(sb, " - N "); 7129 7130 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", 7131 lookup_type ? 'I' : 'O', port_num, 7132 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 7133 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 7134 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 7135 } 7136 7137 7138 if (cls_lo & F_T6_REPLICATE) { 7139 struct fw_ldst_cmd ldst_cmd; 7140 7141 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 7142 ldst_cmd.op_to_addrspace = 7143 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 7144 F_FW_CMD_REQUEST | F_FW_CMD_READ | 7145 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 7146 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 7147 ldst_cmd.u.mps.rplc.fid_idx = 7148 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 7149 V_FW_LDST_CMD_IDX(i)); 7150 7151 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 7152 "t6mps"); 7153 if (rc) 7154 break; 7155 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 7156 sizeof(ldst_cmd), &ldst_cmd); 7157 end_synchronized_op(sc, 0); 7158 7159 if (rc != 0) { 7160 sbuf_printf(sb, "%72d", rc); 7161 rc = 0; 7162 } else { 7163 sbuf_printf(sb, " %08x %08x %08x %08x" 7164 " %08x %08x %08x %08x", 7165 be32toh(ldst_cmd.u.mps.rplc.rplc255_224), 7166 be32toh(ldst_cmd.u.mps.rplc.rplc223_192), 7167 be32toh(ldst_cmd.u.mps.rplc.rplc191_160), 7168 be32toh(ldst_cmd.u.mps.rplc.rplc159_128), 7169 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 7170 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 7171 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 7172 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 7173 } 7174 } else 7175 sbuf_printf(sb, "%72s", ""); 7176 7177 sbuf_printf(sb, "%4u%3u%3u%3u %#x", 7178 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), 7179 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), 7180 (cls_lo >> S_T6_MULTILISTEN0) & 0xf); 7181 } 7182 7183 if (rc) 7184 (void) sbuf_finish(sb); 7185 else 7186 rc = sbuf_finish(sb); 7187 sbuf_delete(sb); 7188 7189 return (rc); 7190} 7191 7192static int 7193sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 7194{ 7195 struct adapter *sc = arg1; 7196 struct sbuf *sb; 7197 int rc; 7198 uint16_t mtus[NMTUS]; 7199 7200 rc = sysctl_wire_old_buffer(req, 0); 7201 if (rc != 0) 7202 return (rc); 7203 7204 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7205 if (sb == NULL) 7206 return (ENOMEM); 7207 7208 t4_read_mtu_tbl(sc, mtus, NULL); 7209 7210 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 7211 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 7212 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 7213 mtus[14], mtus[15]); 7214 7215 rc = sbuf_finish(sb); 7216 sbuf_delete(sb); 7217 7218 return (rc); 7219} 7220 7221static int 
7222sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 7223{ 7224 struct adapter *sc = arg1; 7225 struct sbuf *sb; 7226 int rc, i; 7227 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; 7228 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; 7229 static const char *tx_stats[MAX_PM_NSTATS] = { 7230 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", 7231 "Tx FIFO wait", NULL, "Tx latency" 7232 }; 7233 static const char *rx_stats[MAX_PM_NSTATS] = { 7234 "Read:", "Write bypass:", "Write mem:", "Flush:", 7235 "Rx FIFO wait", NULL, "Rx latency" 7236 }; 7237 7238 rc = sysctl_wire_old_buffer(req, 0); 7239 if (rc != 0) 7240 return (rc); 7241 7242 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7243 if (sb == NULL) 7244 return (ENOMEM); 7245 7246 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); 7247 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); 7248 7249 sbuf_printf(sb, " Tx pcmds Tx bytes"); 7250 for (i = 0; i < 4; i++) { 7251 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7252 tx_cyc[i]); 7253 } 7254 7255 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 7256 for (i = 0; i < 4; i++) { 7257 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7258 rx_cyc[i]); 7259 } 7260 7261 if (chip_id(sc) > CHELSIO_T5) { 7262 sbuf_printf(sb, 7263 "\n Total wait Total occupancy"); 7264 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7265 tx_cyc[i]); 7266 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7267 rx_cyc[i]); 7268 7269 i += 2; 7270 MPASS(i < nitems(tx_stats)); 7271 7272 sbuf_printf(sb, 7273 "\n Reads Total wait"); 7274 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7275 tx_cyc[i]); 7276 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7277 rx_cyc[i]); 7278 } 7279 7280 rc = sbuf_finish(sb); 7281 sbuf_delete(sb); 7282 7283 return (rc); 7284} 7285 7286static int 7287sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 7288{ 7289 struct adapter *sc = arg1; 7290 struct sbuf *sb; 7291 int rc; 7292 struct tp_rdma_stats stats; 7293 7294 rc = sysctl_wire_old_buffer(req, 0); 7295 if (rc != 0) 7296 return (rc); 7297 7298 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7299 if (sb == NULL) 7300 return (ENOMEM); 7301 7302 mtx_lock(&sc->reg_lock); 7303 t4_tp_get_rdma_stats(sc, &stats); 7304 mtx_unlock(&sc->reg_lock); 7305 7306 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); 7307 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); 7308 7309 rc = sbuf_finish(sb); 7310 sbuf_delete(sb); 7311 7312 return (rc); 7313} 7314 7315static int 7316sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 7317{ 7318 struct adapter *sc = arg1; 7319 struct sbuf *sb; 7320 int rc; 7321 struct tp_tcp_stats v4, v6; 7322 7323 rc = sysctl_wire_old_buffer(req, 0); 7324 if (rc != 0) 7325 return (rc); 7326 7327 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7328 if (sb == NULL) 7329 return (ENOMEM); 7330 7331 mtx_lock(&sc->reg_lock); 7332 t4_tp_get_tcp_stats(sc, &v4, &v6); 7333 mtx_unlock(&sc->reg_lock); 7334 7335 sbuf_printf(sb, 7336 " IP IPv6\n"); 7337 sbuf_printf(sb, "OutRsts: %20u %20u\n", 7338 v4.tcp_out_rsts, v6.tcp_out_rsts); 7339 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 7340 v4.tcp_in_segs, v6.tcp_in_segs); 7341 sbuf_printf(sb, "OutSegs: %20ju %20ju\n", 7342 v4.tcp_out_segs, v6.tcp_out_segs); 7343 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 7344 v4.tcp_retrans_segs, v6.tcp_retrans_segs); 7345 7346 rc = sbuf_finish(sb); 7347 sbuf_delete(sb); 7348 7349 return (rc); 7350} 7351 7352static int 7353sysctl_tids(SYSCTL_HANDLER_ARGS) 7354{ 7355 struct adapter *sc = arg1; 7356 struct sbuf *sb; 7357 
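	/*
	 * TID ranges and in-use counts come from sc->tids; the final HW
	 * usage line reads the LE active-count registers directly.
	 */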
int rc; 7358 struct tid_info *t = &sc->tids; 7359 7360 rc = sysctl_wire_old_buffer(req, 0); 7361 if (rc != 0) 7362 return (rc); 7363 7364 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7365 if (sb == NULL) 7366 return (ENOMEM); 7367 7368 if (t->natids) { 7369 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 7370 t->atids_in_use); 7371 } 7372 7373 if (t->ntids) { 7374 sbuf_printf(sb, "TID range: "); 7375 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 7376 uint32_t b, hb; 7377 7378 if (chip_id(sc) <= CHELSIO_T5) { 7379 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 7380 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; 7381 } else { 7382 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); 7383 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE); 7384 } 7385 7386 if (b) 7387 sbuf_printf(sb, "0-%u, ", b - 1); 7388 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1); 7389 } else 7390 sbuf_printf(sb, "0-%u", t->ntids - 1); 7391 sbuf_printf(sb, ", in use: %u\n", 7392 atomic_load_acq_int(&t->tids_in_use)); 7393 } 7394 7395 if (t->nstids) { 7396 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 7397 t->stid_base + t->nstids - 1, t->stids_in_use); 7398 } 7399 7400 if (t->nftids) { 7401 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 7402 t->ftid_base + t->nftids - 1); 7403 } 7404 7405 if (t->netids) { 7406 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 7407 t->etid_base + t->netids - 1); 7408 } 7409 7410 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 7411 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 7412 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 7413 7414 rc = sbuf_finish(sb); 7415 sbuf_delete(sb); 7416 7417 return (rc); 7418} 7419 7420static int 7421sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 7422{ 7423 struct adapter *sc = arg1; 7424 struct sbuf *sb; 7425 int rc; 7426 struct tp_err_stats stats; 7427 7428 rc = sysctl_wire_old_buffer(req, 0); 7429 if (rc != 0) 7430 return (rc); 7431 7432 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7433 if (sb == NULL) 7434 return (ENOMEM); 7435 7436 mtx_lock(&sc->reg_lock); 7437 t4_tp_get_err_stats(sc, &stats); 7438 mtx_unlock(&sc->reg_lock); 7439 7440 if (sc->chip_params->nchan > 2) { 7441 sbuf_printf(sb, " channel 0 channel 1" 7442 " channel 2 channel 3\n"); 7443 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 7444 stats.mac_in_errs[0], stats.mac_in_errs[1], 7445 stats.mac_in_errs[2], stats.mac_in_errs[3]); 7446 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 7447 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 7448 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 7449 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 7450 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 7451 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 7452 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 7453 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 7454 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 7455 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 7456 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 7457 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 7458 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 7459 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 7460 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 7461 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 7462 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], 7463 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 7464 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 7465 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 7466 stats.ofld_chan_drops[2], 
stats.ofld_chan_drops[3]); 7467 } else { 7468 sbuf_printf(sb, " channel 0 channel 1\n"); 7469 sbuf_printf(sb, "macInErrs: %10u %10u\n", 7470 stats.mac_in_errs[0], stats.mac_in_errs[1]); 7471 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 7472 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 7473 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 7474 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 7475 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 7476 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 7477 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 7478 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 7479 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 7480 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 7481 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 7482 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 7483 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 7484 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 7485 } 7486 7487 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 7488 stats.ofld_no_neigh, stats.ofld_cong_defer); 7489 7490 rc = sbuf_finish(sb); 7491 sbuf_delete(sb); 7492 7493 return (rc); 7494} 7495 7496static int 7497sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) 7498{ 7499 struct adapter *sc = arg1; 7500 struct tp_params *tpp = &sc->params.tp; 7501 u_int mask; 7502 int rc; 7503 7504 mask = tpp->la_mask >> 16; 7505 rc = sysctl_handle_int(oidp, &mask, 0, req); 7506 if (rc != 0 || req->newptr == NULL) 7507 return (rc); 7508 if (mask > 0xffff) 7509 return (EINVAL); 7510 tpp->la_mask = mask << 16; 7511 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); 7512 7513 return (0); 7514} 7515 7516struct field_desc { 7517 const char *name; 7518 u_int start; 7519 u_int width; 7520}; 7521 7522static void 7523field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 7524{ 7525 char buf[32]; 7526 int line_size = 0; 7527 7528 while (f->name) { 7529 uint64_t mask = (1ULL << f->width) - 1; 7530 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 7531 ((uintmax_t)v >> f->start) & mask); 7532 7533 if (line_size + len >= 79) { 7534 line_size = 8; 7535 sbuf_printf(sb, "\n "); 7536 } 7537 sbuf_printf(sb, "%s ", buf); 7538 line_size += len + 1; 7539 f++; 7540 } 7541 sbuf_printf(sb, "\n"); 7542} 7543 7544static const struct field_desc tp_la0[] = { 7545 { "RcfOpCodeOut", 60, 4 }, 7546 { "State", 56, 4 }, 7547 { "WcfState", 52, 4 }, 7548 { "RcfOpcSrcOut", 50, 2 }, 7549 { "CRxError", 49, 1 }, 7550 { "ERxError", 48, 1 }, 7551 { "SanityFailed", 47, 1 }, 7552 { "SpuriousMsg", 46, 1 }, 7553 { "FlushInputMsg", 45, 1 }, 7554 { "FlushInputCpl", 44, 1 }, 7555 { "RssUpBit", 43, 1 }, 7556 { "RssFilterHit", 42, 1 }, 7557 { "Tid", 32, 10 }, 7558 { "InitTcb", 31, 1 }, 7559 { "LineNumber", 24, 7 }, 7560 { "Emsg", 23, 1 }, 7561 { "EdataOut", 22, 1 }, 7562 { "Cmsg", 21, 1 }, 7563 { "CdataOut", 20, 1 }, 7564 { "EreadPdu", 19, 1 }, 7565 { "CreadPdu", 18, 1 }, 7566 { "TunnelPkt", 17, 1 }, 7567 { "RcfPeerFin", 16, 1 }, 7568 { "RcfReasonOut", 12, 4 }, 7569 { "TxCchannel", 10, 2 }, 7570 { "RcfTxChannel", 8, 2 }, 7571 { "RxEchannel", 6, 2 }, 7572 { "RcfRxChannel", 5, 1 }, 7573 { "RcfDataOutSrdy", 4, 1 }, 7574 { "RxDvld", 3, 1 }, 7575 { "RxOoDvld", 2, 1 }, 7576 { "RxCongestion", 1, 1 }, 7577 { "TxCongestion", 0, 1 }, 7578 { NULL } 7579}; 7580 7581static const struct field_desc tp_la1[] = { 7582 { "CplCmdIn", 56, 8 }, 7583 { "CplCmdOut", 48, 8 }, 7584 { "ESynOut", 47, 1 }, 7585 { "EAckOut", 46, 1 }, 7586 { "EFinOut", 45, 1 }, 7587 { "ERstOut", 44, 1 }, 7588 { "SynIn", 43, 1 }, 7589 { "AckIn", 42, 1 }, 7590 { 
"FinIn", 41, 1 }, 7591 { "RstIn", 40, 1 }, 7592 { "DataIn", 39, 1 }, 7593 { "DataInVld", 38, 1 }, 7594 { "PadIn", 37, 1 }, 7595 { "RxBufEmpty", 36, 1 }, 7596 { "RxDdp", 35, 1 }, 7597 { "RxFbCongestion", 34, 1 }, 7598 { "TxFbCongestion", 33, 1 }, 7599 { "TxPktSumSrdy", 32, 1 }, 7600 { "RcfUlpType", 28, 4 }, 7601 { "Eread", 27, 1 }, 7602 { "Ebypass", 26, 1 }, 7603 { "Esave", 25, 1 }, 7604 { "Static0", 24, 1 }, 7605 { "Cread", 23, 1 }, 7606 { "Cbypass", 22, 1 }, 7607 { "Csave", 21, 1 }, 7608 { "CPktOut", 20, 1 }, 7609 { "RxPagePoolFull", 18, 2 }, 7610 { "RxLpbkPkt", 17, 1 }, 7611 { "TxLpbkPkt", 16, 1 }, 7612 { "RxVfValid", 15, 1 }, 7613 { "SynLearned", 14, 1 }, 7614 { "SetDelEntry", 13, 1 }, 7615 { "SetInvEntry", 12, 1 }, 7616 { "CpcmdDvld", 11, 1 }, 7617 { "CpcmdSave", 10, 1 }, 7618 { "RxPstructsFull", 8, 2 }, 7619 { "EpcmdDvld", 7, 1 }, 7620 { "EpcmdFlush", 6, 1 }, 7621 { "EpcmdTrimPrefix", 5, 1 }, 7622 { "EpcmdTrimPostfix", 4, 1 }, 7623 { "ERssIp4Pkt", 3, 1 }, 7624 { "ERssIp6Pkt", 2, 1 }, 7625 { "ERssTcpUdpPkt", 1, 1 }, 7626 { "ERssFceFipPkt", 0, 1 }, 7627 { NULL } 7628}; 7629 7630static const struct field_desc tp_la2[] = { 7631 { "CplCmdIn", 56, 8 }, 7632 { "MpsVfVld", 55, 1 }, 7633 { "MpsPf", 52, 3 }, 7634 { "MpsVf", 44, 8 }, 7635 { "SynIn", 43, 1 }, 7636 { "AckIn", 42, 1 }, 7637 { "FinIn", 41, 1 }, 7638 { "RstIn", 40, 1 }, 7639 { "DataIn", 39, 1 }, 7640 { "DataInVld", 38, 1 }, 7641 { "PadIn", 37, 1 }, 7642 { "RxBufEmpty", 36, 1 }, 7643 { "RxDdp", 35, 1 }, 7644 { "RxFbCongestion", 34, 1 }, 7645 { "TxFbCongestion", 33, 1 }, 7646 { "TxPktSumSrdy", 32, 1 }, 7647 { "RcfUlpType", 28, 4 }, 7648 { "Eread", 27, 1 }, 7649 { "Ebypass", 26, 1 }, 7650 { "Esave", 25, 1 }, 7651 { "Static0", 24, 1 }, 7652 { "Cread", 23, 1 }, 7653 { "Cbypass", 22, 1 }, 7654 { "Csave", 21, 1 }, 7655 { "CPktOut", 20, 1 }, 7656 { "RxPagePoolFull", 18, 2 }, 7657 { "RxLpbkPkt", 17, 1 }, 7658 { "TxLpbkPkt", 16, 1 }, 7659 { "RxVfValid", 15, 1 }, 7660 { "SynLearned", 14, 1 }, 7661 { "SetDelEntry", 13, 1 }, 7662 { "SetInvEntry", 12, 1 }, 7663 { "CpcmdDvld", 11, 1 }, 7664 { "CpcmdSave", 10, 1 }, 7665 { "RxPstructsFull", 8, 2 }, 7666 { "EpcmdDvld", 7, 1 }, 7667 { "EpcmdFlush", 6, 1 }, 7668 { "EpcmdTrimPrefix", 5, 1 }, 7669 { "EpcmdTrimPostfix", 4, 1 }, 7670 { "ERssIp4Pkt", 3, 1 }, 7671 { "ERssIp6Pkt", 2, 1 }, 7672 { "ERssTcpUdpPkt", 1, 1 }, 7673 { "ERssFceFipPkt", 0, 1 }, 7674 { NULL } 7675}; 7676 7677static void 7678tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 7679{ 7680 7681 field_desc_show(sb, *p, tp_la0); 7682} 7683 7684static void 7685tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 7686{ 7687 7688 if (idx) 7689 sbuf_printf(sb, "\n"); 7690 field_desc_show(sb, p[0], tp_la0); 7691 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7692 field_desc_show(sb, p[1], tp_la0); 7693} 7694 7695static void 7696tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 7697{ 7698 7699 if (idx) 7700 sbuf_printf(sb, "\n"); 7701 field_desc_show(sb, p[0], tp_la0); 7702 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7703 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 7704} 7705 7706static int 7707sysctl_tp_la(SYSCTL_HANDLER_ARGS) 7708{ 7709 struct adapter *sc = arg1; 7710 struct sbuf *sb; 7711 uint64_t *buf, *p; 7712 int rc; 7713 u_int i, inc; 7714 void (*show_func)(struct sbuf *, uint64_t *, int); 7715 7716 rc = sysctl_wire_old_buffer(req, 0); 7717 if (rc != 0) 7718 return (rc); 7719 7720 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7721 if (sb == NULL) 7722 return (ENOMEM); 7723 7724 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 7725 7726 t4_tp_read_la(sc, buf, NULL); 7727 p = buf; 7728 7729 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 7730 case 2: 7731 inc = 2; 7732 show_func = tp_la_show2; 7733 break; 7734 case 3: 7735 inc = 2; 7736 show_func = tp_la_show3; 7737 break; 7738 default: 7739 inc = 1; 7740 show_func = tp_la_show; 7741 } 7742 7743 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 7744 (*show_func)(sb, p, i); 7745 7746 rc = sbuf_finish(sb); 7747 sbuf_delete(sb); 7748 free(buf, M_CXGBE); 7749 return (rc); 7750} 7751 7752static int 7753sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 7754{ 7755 struct adapter *sc = arg1; 7756 struct sbuf *sb; 7757 int rc; 7758 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 7759 7760 rc = sysctl_wire_old_buffer(req, 0); 7761 if (rc != 0) 7762 return (rc); 7763 7764 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7765 if (sb == NULL) 7766 return (ENOMEM); 7767 7768 t4_get_chan_txrate(sc, nrate, orate); 7769 7770 if (sc->chip_params->nchan > 2) { 7771 sbuf_printf(sb, " channel 0 channel 1" 7772 " channel 2 channel 3\n"); 7773 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 7774 nrate[0], nrate[1], nrate[2], nrate[3]); 7775 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 7776 orate[0], orate[1], orate[2], orate[3]); 7777 } else { 7778 sbuf_printf(sb, " channel 0 channel 1\n"); 7779 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 7780 nrate[0], nrate[1]); 7781 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 7782 orate[0], orate[1]); 7783 } 7784 7785 rc = sbuf_finish(sb); 7786 sbuf_delete(sb); 7787 7788 return (rc); 7789} 7790 7791static int 7792sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 7793{ 7794 struct adapter *sc = arg1; 7795 struct sbuf *sb; 7796 uint32_t *buf, *p; 7797 int rc, i; 7798 7799 rc = sysctl_wire_old_buffer(req, 0); 7800 if (rc != 0) 7801 return (rc); 7802 7803 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7804 if (sb == NULL) 7805 return (ENOMEM); 7806 7807 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 7808 M_ZERO | M_WAITOK); 7809 7810 t4_ulprx_read_la(sc, buf); 7811 p = buf; 7812 7813 sbuf_printf(sb, " Pcmd Type Message" 7814 " Data"); 7815 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 7816 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 7817 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 7818 } 7819 7820 rc = sbuf_finish(sb); 7821 sbuf_delete(sb); 7822 free(buf, M_CXGBE); 7823 return (rc); 7824} 7825 7826static int 7827sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 7828{ 7829 struct adapter *sc = arg1; 7830 struct sbuf *sb; 7831 int rc, v; 7832 7833 MPASS(chip_id(sc) >= CHELSIO_T5); 7834 7835 rc = sysctl_wire_old_buffer(req, 0); 7836 if (rc != 0) 7837 return (rc); 7838 7839 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7840 if (sb == NULL) 7841 return (ENOMEM); 7842 7843 v = t4_read_reg(sc, A_SGE_STAT_CFG); 7844 if (G_STATSOURCE_T5(v) == 7) { 7845 int mode; 7846 7847 mode = is_t5(sc) ? 
G_STATMODE(v) : G_T6_STATMODE(v); 7848 if (mode == 0) { 7849 sbuf_printf(sb, "total %d, incomplete %d", 7850 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7851 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7852 } else if (mode == 1) { 7853 sbuf_printf(sb, "total %d, data overflow %d", 7854 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7855 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7856 } else { 7857 sbuf_printf(sb, "unknown mode %d", mode); 7858 } 7859 } 7860 rc = sbuf_finish(sb); 7861 sbuf_delete(sb); 7862 7863 return (rc); 7864} 7865 7866static int 7867sysctl_tc_params(SYSCTL_HANDLER_ARGS) 7868{ 7869 struct adapter *sc = arg1; 7870 struct tx_cl_rl_params tc; 7871 struct sbuf *sb; 7872 int i, rc, port_id, mbps, gbps; 7873 7874 rc = sysctl_wire_old_buffer(req, 0); 7875 if (rc != 0) 7876 return (rc); 7877 7878 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7879 if (sb == NULL) 7880 return (ENOMEM); 7881 7882 port_id = arg2 >> 16; 7883 MPASS(port_id < sc->params.nports); 7884 MPASS(sc->port[port_id] != NULL); 7885 i = arg2 & 0xffff; 7886 MPASS(i < sc->chip_params->nsched_cls); 7887 7888 mtx_lock(&sc->tc_lock); 7889 tc = sc->port[port_id]->sched_params->cl_rl[i]; 7890 mtx_unlock(&sc->tc_lock); 7891 7892 if (tc.flags & TX_CLRL_ERROR) { 7893 sbuf_printf(sb, "error"); 7894 goto done; 7895 } 7896 7897 if (tc.ratemode == SCHED_CLASS_RATEMODE_REL) { 7898 /* XXX: top speed or actual link speed? */ 7899 gbps = port_top_speed(sc->port[port_id]); 7900 sbuf_printf(sb, " %u%% of %uGbps", tc.maxrate, gbps); 7901 } else if (tc.ratemode == SCHED_CLASS_RATEMODE_ABS) { 7902 switch (tc.rateunit) { 7903 case SCHED_CLASS_RATEUNIT_BITS: 7904 mbps = tc.maxrate / 1000; 7905 gbps = tc.maxrate / 1000000; 7906 if (tc.maxrate == gbps * 1000000) 7907 sbuf_printf(sb, " %uGbps", gbps); 7908 else if (tc.maxrate == mbps * 1000) 7909 sbuf_printf(sb, " %uMbps", mbps); 7910 else 7911 sbuf_printf(sb, " %uKbps", tc.maxrate); 7912 break; 7913 case SCHED_CLASS_RATEUNIT_PKTS: 7914 sbuf_printf(sb, " %upps", tc.maxrate); 7915 break; 7916 default: 7917 rc = ENXIO; 7918 goto done; 7919 } 7920 } 7921 7922 switch (tc.mode) { 7923 case SCHED_CLASS_MODE_CLASS: 7924 sbuf_printf(sb, " aggregate"); 7925 break; 7926 case SCHED_CLASS_MODE_FLOW: 7927 sbuf_printf(sb, " per-flow"); 7928 break; 7929 default: 7930 rc = ENXIO; 7931 goto done; 7932 } 7933 7934done: 7935 if (rc == 0) 7936 rc = sbuf_finish(sb); 7937 sbuf_delete(sb); 7938 7939 return (rc); 7940} 7941#endif 7942 7943#ifdef TCP_OFFLOAD 7944static void 7945unit_conv(char *buf, size_t len, u_int val, u_int factor) 7946{ 7947 u_int rem = val % factor; 7948 7949 if (rem == 0) 7950 snprintf(buf, len, "%u", val / factor); 7951 else { 7952 while (rem % 10 == 0) 7953 rem /= 10; 7954 snprintf(buf, len, "%u.%u", val / factor, rem); 7955 } 7956} 7957 7958static int 7959sysctl_tp_tick(SYSCTL_HANDLER_ARGS) 7960{ 7961 struct adapter *sc = arg1; 7962 char buf[16]; 7963 u_int res, re; 7964 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7965 7966 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 7967 switch (arg2) { 7968 case 0: 7969 /* timer_tick */ 7970 re = G_TIMERRESOLUTION(res); 7971 break; 7972 case 1: 7973 /* TCP timestamp tick */ 7974 re = G_TIMESTAMPRESOLUTION(res); 7975 break; 7976 case 2: 7977 /* DACK tick */ 7978 re = G_DELAYEDACKRESOLUTION(res); 7979 break; 7980 default: 7981 return (EDOOFUS); 7982 } 7983 7984 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000); 7985 7986 return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); 7987} 7988 7989static int 7990sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) 7991{ 7992 struct adapter 
*sc = arg1; 7993 u_int res, dack_re, v; 7994 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7995 7996 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 7997 dack_re = G_DELAYEDACKRESOLUTION(res); 7998 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER); 7999 8000 return (sysctl_handle_int(oidp, &v, 0, req)); 8001} 8002 8003static int 8004sysctl_tp_timer(SYSCTL_HANDLER_ARGS) 8005{ 8006 struct adapter *sc = arg1; 8007 int reg = arg2; 8008 u_int tre; 8009 u_long tp_tick_us, v; 8010 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 8011 8012 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || 8013 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || 8014 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || 8015 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER); 8016 8017 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); 8018 tp_tick_us = (cclk_ps << tre) / 1000000; 8019 8020 if (reg == A_TP_INIT_SRTT) 8021 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); 8022 else 8023 v = tp_tick_us * t4_read_reg(sc, reg); 8024 8025 return (sysctl_handle_long(oidp, &v, 0, req)); 8026} 8027#endif 8028 8029static uint32_t 8030fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf) 8031{ 8032 uint32_t mode; 8033 8034 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 8035 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 8036 8037 if (fconf & F_FRAGMENTATION) 8038 mode |= T4_FILTER_IP_FRAGMENT; 8039 8040 if (fconf & F_MPSHITTYPE) 8041 mode |= T4_FILTER_MPS_HIT_TYPE; 8042 8043 if (fconf & F_MACMATCH) 8044 mode |= T4_FILTER_MAC_IDX; 8045 8046 if (fconf & F_ETHERTYPE) 8047 mode |= T4_FILTER_ETH_TYPE; 8048 8049 if (fconf & F_PROTOCOL) 8050 mode |= T4_FILTER_IP_PROTO; 8051 8052 if (fconf & F_TOS) 8053 mode |= T4_FILTER_IP_TOS; 8054 8055 if (fconf & F_VLAN) 8056 mode |= T4_FILTER_VLAN; 8057 8058 if (fconf & F_VNIC_ID) { 8059 mode |= T4_FILTER_VNIC; 8060 if (iconf & F_VNIC) 8061 mode |= T4_FILTER_IC_VNIC; 8062 } 8063 8064 if (fconf & F_PORT) 8065 mode |= T4_FILTER_PORT; 8066 8067 if (fconf & F_FCOE) 8068 mode |= T4_FILTER_FCoE; 8069 8070 return (mode); 8071} 8072 8073static uint32_t 8074mode_to_fconf(uint32_t mode) 8075{ 8076 uint32_t fconf = 0; 8077 8078 if (mode & T4_FILTER_IP_FRAGMENT) 8079 fconf |= F_FRAGMENTATION; 8080 8081 if (mode & T4_FILTER_MPS_HIT_TYPE) 8082 fconf |= F_MPSHITTYPE; 8083 8084 if (mode & T4_FILTER_MAC_IDX) 8085 fconf |= F_MACMATCH; 8086 8087 if (mode & T4_FILTER_ETH_TYPE) 8088 fconf |= F_ETHERTYPE; 8089 8090 if (mode & T4_FILTER_IP_PROTO) 8091 fconf |= F_PROTOCOL; 8092 8093 if (mode & T4_FILTER_IP_TOS) 8094 fconf |= F_TOS; 8095 8096 if (mode & T4_FILTER_VLAN) 8097 fconf |= F_VLAN; 8098 8099 if (mode & T4_FILTER_VNIC) 8100 fconf |= F_VNIC_ID; 8101 8102 if (mode & T4_FILTER_PORT) 8103 fconf |= F_PORT; 8104 8105 if (mode & T4_FILTER_FCoE) 8106 fconf |= F_FCOE; 8107 8108 return (fconf); 8109} 8110 8111static uint32_t 8112mode_to_iconf(uint32_t mode) 8113{ 8114 8115 if (mode & T4_FILTER_IC_VNIC) 8116 return (F_VNIC); 8117 return (0); 8118} 8119 8120static int check_fspec_against_fconf_iconf(struct adapter *sc, 8121 struct t4_filter_specification *fs) 8122{ 8123 struct tp_params *tpp = &sc->params.tp; 8124 uint32_t fconf = 0; 8125 8126 if (fs->val.frag || fs->mask.frag) 8127 fconf |= F_FRAGMENTATION; 8128 8129 if (fs->val.matchtype || fs->mask.matchtype) 8130 fconf |= F_MPSHITTYPE; 8131 8132 if (fs->val.macidx || fs->mask.macidx) 8133 fconf |= F_MACMATCH; 8134 8135 if (fs->val.ethtype || fs->mask.ethtype) 8136 fconf |= F_ETHERTYPE; 8137 8138 if 
(fs->val.proto || fs->mask.proto) 8139 fconf |= F_PROTOCOL; 8140 8141 if (fs->val.tos || fs->mask.tos) 8142 fconf |= F_TOS; 8143 8144 if (fs->val.vlan_vld || fs->mask.vlan_vld) 8145 fconf |= F_VLAN; 8146 8147 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 8148 fconf |= F_VNIC_ID; 8149 if (tpp->ingress_config & F_VNIC) 8150 return (EINVAL); 8151 } 8152 8153 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 8154 fconf |= F_VNIC_ID; 8155 if ((tpp->ingress_config & F_VNIC) == 0) 8156 return (EINVAL); 8157 } 8158 8159 if (fs->val.iport || fs->mask.iport) 8160 fconf |= F_PORT; 8161 8162 if (fs->val.fcoe || fs->mask.fcoe) 8163 fconf |= F_FCOE; 8164 8165 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 8166 return (E2BIG); 8167 8168 return (0); 8169} 8170 8171static int 8172get_filter_mode(struct adapter *sc, uint32_t *mode) 8173{ 8174 struct tp_params *tpp = &sc->params.tp; 8175 8176 /* 8177 * We trust the cached values of the relevant TP registers. This means 8178 * things work reliably only if writes to those registers are always via 8179 * t4_set_filter_mode. 8180 */ 8181 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 8182 8183 return (0); 8184} 8185 8186static int 8187set_filter_mode(struct adapter *sc, uint32_t mode) 8188{ 8189 struct tp_params *tpp = &sc->params.tp; 8190 uint32_t fconf, iconf; 8191 int rc; 8192 8193 iconf = mode_to_iconf(mode); 8194 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 8195 /* 8196 * For now we just complain if A_TP_INGRESS_CONFIG is not 8197 * already set to the correct value for the requested filter 8198 * mode. It's not clear if it's safe to write to this register 8199 * on the fly. (And we trust the cached value of the register). 8200 */ 8201 return (EBUSY); 8202 } 8203 8204 fconf = mode_to_fconf(mode); 8205 8206 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8207 "t4setfm"); 8208 if (rc) 8209 return (rc); 8210 8211 if (sc->tids.ftids_in_use > 0) { 8212 rc = EBUSY; 8213 goto done; 8214 } 8215 8216#ifdef TCP_OFFLOAD 8217 if (uld_active(sc, ULD_TOM)) { 8218 rc = EBUSY; 8219 goto done; 8220 } 8221#endif 8222 8223 rc = -t4_set_filter_mode(sc, fconf); 8224done: 8225 end_synchronized_op(sc, LOCK_HELD); 8226 return (rc); 8227} 8228 8229static inline uint64_t 8230get_filter_hits(struct adapter *sc, uint32_t fid) 8231{ 8232 uint32_t tcb_addr; 8233 8234 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 8235 (fid + sc->tids.ftid_base) * TCB_SIZE; 8236 8237 if (is_t4(sc)) { 8238 uint64_t hits; 8239 8240 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); 8241 return (be64toh(hits)); 8242 } else { 8243 uint32_t hits; 8244 8245 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); 8246 return (be32toh(hits)); 8247 } 8248} 8249 8250static int 8251get_filter(struct adapter *sc, struct t4_filter *t) 8252{ 8253 int i, rc, nfilters = sc->tids.nftids; 8254 struct filter_entry *f; 8255 8256 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8257 "t4getf"); 8258 if (rc) 8259 return (rc); 8260 8261 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 8262 t->idx >= nfilters) { 8263 t->idx = 0xffffffff; 8264 goto done; 8265 } 8266 8267 f = &sc->tids.ftid_tab[t->idx]; 8268 for (i = t->idx; i < nfilters; i++, f++) { 8269 if (f->valid) { 8270 t->idx = i; 8271 t->l2tidx = f->l2t ? 
f->l2t->idx : 0; 8272 t->smtidx = f->smtidx; 8273 if (f->fs.hitcnts) 8274 t->hits = get_filter_hits(sc, t->idx); 8275 else 8276 t->hits = UINT64_MAX; 8277 t->fs = f->fs; 8278 8279 goto done; 8280 } 8281 } 8282 8283 t->idx = 0xffffffff; 8284done: 8285 end_synchronized_op(sc, LOCK_HELD); 8286 return (0); 8287} 8288 8289static int 8290set_filter(struct adapter *sc, struct t4_filter *t) 8291{ 8292 unsigned int nfilters, nports; 8293 struct filter_entry *f; 8294 int i, rc; 8295 8296 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 8297 if (rc) 8298 return (rc); 8299 8300 nfilters = sc->tids.nftids; 8301 nports = sc->params.nports; 8302 8303 if (nfilters == 0) { 8304 rc = ENOTSUP; 8305 goto done; 8306 } 8307 8308 if (t->idx >= nfilters) { 8309 rc = EINVAL; 8310 goto done; 8311 } 8312 8313 /* Validate against the global filter mode and ingress config */ 8314 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 8315 if (rc != 0) 8316 goto done; 8317 8318 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 8319 rc = EINVAL; 8320 goto done; 8321 } 8322 8323 if (t->fs.val.iport >= nports) { 8324 rc = EINVAL; 8325 goto done; 8326 } 8327 8328 /* Can't specify an iq if not steering to it */ 8329 if (!t->fs.dirsteer && t->fs.iq) { 8330 rc = EINVAL; 8331 goto done; 8332 } 8333 8334 /* IPv6 filter idx must be 4 aligned */ 8335 if (t->fs.type == 1 && 8336 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 8337 rc = EINVAL; 8338 goto done; 8339 } 8340 8341 if (!(sc->flags & FULL_INIT_DONE) && 8342 ((rc = adapter_full_init(sc)) != 0)) 8343 goto done; 8344 8345 if (sc->tids.ftid_tab == NULL) { 8346 KASSERT(sc->tids.ftids_in_use == 0, 8347 ("%s: no memory allocated but filters_in_use > 0", 8348 __func__)); 8349 8350 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 8351 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 8352 if (sc->tids.ftid_tab == NULL) { 8353 rc = ENOMEM; 8354 goto done; 8355 } 8356 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 8357 } 8358 8359 for (i = 0; i < 4; i++) { 8360 f = &sc->tids.ftid_tab[t->idx + i]; 8361 8362 if (f->pending || f->valid) { 8363 rc = EBUSY; 8364 goto done; 8365 } 8366 if (f->locked) { 8367 rc = EPERM; 8368 goto done; 8369 } 8370 8371 if (t->fs.type == 0) 8372 break; 8373 } 8374 8375 f = &sc->tids.ftid_tab[t->idx]; 8376 f->fs = t->fs; 8377 8378 rc = set_filter_wr(sc, t->idx); 8379done: 8380 end_synchronized_op(sc, 0); 8381 8382 if (rc == 0) { 8383 mtx_lock(&sc->tids.ftid_lock); 8384 for (;;) { 8385 if (f->pending == 0) { 8386 rc = f->valid ? 
0 : EIO; 8387 break; 8388 } 8389 8390 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8391 PCATCH, "t4setfw", 0)) { 8392 rc = EINPROGRESS; 8393 break; 8394 } 8395 } 8396 mtx_unlock(&sc->tids.ftid_lock); 8397 } 8398 return (rc); 8399} 8400 8401static int 8402del_filter(struct adapter *sc, struct t4_filter *t) 8403{ 8404 unsigned int nfilters; 8405 struct filter_entry *f; 8406 int rc; 8407 8408 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 8409 if (rc) 8410 return (rc); 8411 8412 nfilters = sc->tids.nftids; 8413 8414 if (nfilters == 0) { 8415 rc = ENOTSUP; 8416 goto done; 8417 } 8418 8419 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 8420 t->idx >= nfilters) { 8421 rc = EINVAL; 8422 goto done; 8423 } 8424 8425 if (!(sc->flags & FULL_INIT_DONE)) { 8426 rc = EAGAIN; 8427 goto done; 8428 } 8429 8430 f = &sc->tids.ftid_tab[t->idx]; 8431 8432 if (f->pending) { 8433 rc = EBUSY; 8434 goto done; 8435 } 8436 if (f->locked) { 8437 rc = EPERM; 8438 goto done; 8439 } 8440 8441 if (f->valid) { 8442 t->fs = f->fs; /* extra info for the caller */ 8443 rc = del_filter_wr(sc, t->idx); 8444 } 8445 8446done: 8447 end_synchronized_op(sc, 0); 8448 8449 if (rc == 0) { 8450 mtx_lock(&sc->tids.ftid_lock); 8451 for (;;) { 8452 if (f->pending == 0) { 8453 rc = f->valid ? EIO : 0; 8454 break; 8455 } 8456 8457 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8458 PCATCH, "t4delfw", 0)) { 8459 rc = EINPROGRESS; 8460 break; 8461 } 8462 } 8463 mtx_unlock(&sc->tids.ftid_lock); 8464 } 8465 8466 return (rc); 8467} 8468 8469static void 8470clear_filter(struct filter_entry *f) 8471{ 8472 if (f->l2t) 8473 t4_l2t_release(f->l2t); 8474 8475 bzero(f, sizeof (*f)); 8476} 8477 8478static int 8479set_filter_wr(struct adapter *sc, int fidx) 8480{ 8481 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8482 struct fw_filter_wr *fwr; 8483 unsigned int ftid, vnic_vld, vnic_vld_mask; 8484 struct wrq_cookie cookie; 8485 8486 ASSERT_SYNCHRONIZED_OP(sc); 8487 8488 if (f->fs.newdmac || f->fs.newvlan) { 8489 /* This filter needs an L2T entry; allocate one. 
*/ 8490 f->l2t = t4_l2t_alloc_switching(sc->l2t); 8491 if (f->l2t == NULL) 8492 return (EAGAIN); 8493 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 8494 f->fs.dmac)) { 8495 t4_l2t_release(f->l2t); 8496 f->l2t = NULL; 8497 return (ENOMEM); 8498 } 8499 } 8500 8501 /* Already validated against fconf, iconf */ 8502 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0); 8503 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0); 8504 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld) 8505 vnic_vld = 1; 8506 else 8507 vnic_vld = 0; 8508 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld) 8509 vnic_vld_mask = 1; 8510 else 8511 vnic_vld_mask = 0; 8512 8513 ftid = sc->tids.ftid_base + fidx; 8514 8515 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8516 if (fwr == NULL) 8517 return (ENOMEM); 8518 bzero(fwr, sizeof(*fwr)); 8519 8520 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 8521 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 8522 fwr->tid_to_iq = 8523 htobe32(V_FW_FILTER_WR_TID(ftid) | 8524 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 8525 V_FW_FILTER_WR_NOREPLY(0) | 8526 V_FW_FILTER_WR_IQ(f->fs.iq)); 8527 fwr->del_filter_to_l2tix = 8528 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 8529 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 8530 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 8531 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 8532 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 8533 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 8534 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 8535 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 8536 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 8537 f->fs.newvlan == VLAN_REWRITE) | 8538 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 8539 f->fs.newvlan == VLAN_REWRITE) | 8540 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 8541 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 8542 V_FW_FILTER_WR_PRIO(f->fs.prio) | 8543 V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); 8544 fwr->ethtype = htobe16(f->fs.val.ethtype); 8545 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 8546 fwr->frag_to_ovlan_vldm = 8547 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 8548 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 8549 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 8550 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | 8551 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 8552 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); 8553 fwr->smac_sel = 0; 8554 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 8555 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 8556 fwr->maci_to_matchtypem = 8557 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 8558 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 8559 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 8560 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 8561 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 8562 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 8563 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 8564 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 8565 fwr->ptcl = f->fs.val.proto; 8566 fwr->ptclm = f->fs.mask.proto; 8567 fwr->ttyp = f->fs.val.tos; 8568 fwr->ttypm = f->fs.mask.tos; 8569 fwr->ivlan = htobe16(f->fs.val.vlan); 8570 fwr->ivlanm = htobe16(f->fs.mask.vlan); 8571 fwr->ovlan = htobe16(f->fs.val.vnic); 8572 fwr->ovlanm = htobe16(f->fs.mask.vnic); 8573 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 8574 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 8575 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 8576 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 8577 fwr->lp = htobe16(f->fs.val.dport); 8578 fwr->lpm = htobe16(f->fs.mask.dport); 8579 fwr->fp = htobe16(f->fs.val.sport); 8580 fwr->fpm = htobe16(f->fs.mask.sport); 8581 if (f->fs.newsmac) 8582 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 8583 8584 f->pending = 1; 8585 sc->tids.ftids_in_use++; 8586 8587 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8588 return (0); 8589} 8590 8591static int 8592del_filter_wr(struct adapter *sc, int fidx) 8593{ 8594 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8595 struct fw_filter_wr *fwr; 8596 unsigned int ftid; 8597 struct wrq_cookie cookie; 8598 8599 ftid = sc->tids.ftid_base + fidx; 8600 8601 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8602 if (fwr == NULL) 8603 return (ENOMEM); 8604 bzero(fwr, sizeof (*fwr)); 8605 8606 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 8607 8608 f->pending = 1; 8609 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8610 return (0); 8611} 8612 8613int 8614t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8615{ 8616 struct adapter *sc = iq->adapter; 8617 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 8618 unsigned int idx = GET_TID(rpl); 8619 unsigned int rc; 8620 struct filter_entry *f; 8621 8622 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 8623 rss->opcode)); 8624 MPASS(iq == &sc->sge.fwq); 8625 MPASS(is_ftid(sc, idx)); 8626 8627 idx -= sc->tids.ftid_base; 8628 f = &sc->tids.ftid_tab[idx]; 8629 rc = G_COOKIE(rpl->cookie); 8630 8631 mtx_lock(&sc->tids.ftid_lock); 8632 if (rc == FW_FILTER_WR_FLT_ADDED) { 8633 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 8634 __func__, idx)); 8635 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 8636 f->pending = 0; /* asynchronous setup completed */ 8637 f->valid = 1; 8638 } else { 8639 if (rc != FW_FILTER_WR_FLT_DELETED) { 8640 /* Add or delete failed, display an error */ 8641 log(LOG_ERR, 8642 "filter %u setup failed with error %u\n", 8643 idx, rc); 8644 } 8645 8646 clear_filter(f); 
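		/* Undo the ftids_in_use increment done in set_filter_wr(). */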
8647 sc->tids.ftids_in_use--; 8648 } 8649 wakeup(&sc->tids.ftid_tab); 8650 mtx_unlock(&sc->tids.ftid_lock); 8651 8652 return (0); 8653} 8654 8655static int 8656set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8657{ 8658 8659 MPASS(iq->set_tcb_rpl != NULL); 8660 return (iq->set_tcb_rpl(iq, rss, m)); 8661} 8662 8663static int 8664l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8665{ 8666 8667 MPASS(iq->l2t_write_rpl != NULL); 8668 return (iq->l2t_write_rpl(iq, rss, m)); 8669} 8670 8671static int 8672get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 8673{ 8674 int rc; 8675 8676 if (cntxt->cid > M_CTXTQID) 8677 return (EINVAL); 8678 8679 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 8680 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 8681 return (EINVAL); 8682 8683 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 8684 if (rc) 8685 return (rc); 8686 8687 if (sc->flags & FW_OK) { 8688 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 8689 &cntxt->data[0]); 8690 if (rc == 0) 8691 goto done; 8692 } 8693 8694 /* 8695 * Read via firmware failed or wasn't even attempted. Read directly via 8696 * the backdoor. 8697 */ 8698 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 8699done: 8700 end_synchronized_op(sc, 0); 8701 return (rc); 8702} 8703 8704static int 8705load_fw(struct adapter *sc, struct t4_data *fw) 8706{ 8707 int rc; 8708 uint8_t *fw_data; 8709 8710 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 8711 if (rc) 8712 return (rc); 8713 8714 if (sc->flags & FULL_INIT_DONE) { 8715 rc = EBUSY; 8716 goto done; 8717 } 8718 8719 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 8720 if (fw_data == NULL) { 8721 rc = ENOMEM; 8722 goto done; 8723 } 8724 8725 rc = copyin(fw->data, fw_data, fw->len); 8726 if (rc == 0) 8727 rc = -t4_load_fw(sc, fw_data, fw->len); 8728 8729 free(fw_data, M_CXGBE); 8730done: 8731 end_synchronized_op(sc, 0); 8732 return (rc); 8733} 8734 8735static int 8736load_cfg(struct adapter *sc, struct t4_data *cfg) 8737{ 8738 int rc; 8739 uint8_t *cfg_data = NULL; 8740 8741 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); 8742 if (rc) 8743 return (rc); 8744 8745 if (cfg->len == 0) { 8746 /* clear */ 8747 rc = -t4_load_cfg(sc, NULL, 0); 8748 goto done; 8749 } 8750 8751 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); 8752 if (cfg_data == NULL) { 8753 rc = ENOMEM; 8754 goto done; 8755 } 8756 8757 rc = copyin(cfg->data, cfg_data, cfg->len); 8758 if (rc == 0) 8759 rc = -t4_load_cfg(sc, cfg_data, cfg->len); 8760 8761 free(cfg_data, M_CXGBE); 8762done: 8763 end_synchronized_op(sc, 0); 8764 return (rc); 8765} 8766 8767#define MAX_READ_BUF_SIZE (128 * 1024) 8768static int 8769read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 8770{ 8771 uint32_t addr, remaining, n; 8772 uint32_t *buf; 8773 int rc; 8774 uint8_t *dst; 8775 8776 rc = validate_mem_range(sc, mr->addr, mr->len); 8777 if (rc != 0) 8778 return (rc); 8779 8780 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); 8781 addr = mr->addr; 8782 remaining = mr->len; 8783 dst = (void *)mr->data; 8784 8785 while (remaining) { 8786 n = min(remaining, MAX_READ_BUF_SIZE); 8787 read_via_memwin(sc, 2, addr, buf, n); 8788 8789 rc = copyout(buf, dst, n); 8790 if (rc != 0) 8791 break; 8792 8793 dst += n; 8794 remaining -= n; 8795 addr += n; 8796 } 8797 8798 free(buf, M_CXGBE); 8799 return (rc); 8800} 8801#undef MAX_READ_BUF_SIZE 8802 
8803static int 8804read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 8805{ 8806 int rc; 8807 8808 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 8809 return (EINVAL); 8810 8811 if (i2cd->len > sizeof(i2cd->data)) 8812 return (EFBIG); 8813 8814 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 8815 if (rc) 8816 return (rc); 8817 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 8818 i2cd->offset, i2cd->len, &i2cd->data[0]); 8819 end_synchronized_op(sc, 0); 8820 8821 return (rc); 8822} 8823 8824int 8825t4_os_find_pci_capability(struct adapter *sc, int cap) 8826{ 8827 int i; 8828 8829 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0); 8830} 8831 8832int 8833t4_os_pci_save_state(struct adapter *sc) 8834{ 8835 device_t dev; 8836 struct pci_devinfo *dinfo; 8837 8838 dev = sc->dev; 8839 dinfo = device_get_ivars(dev); 8840 8841 pci_cfg_save(dev, dinfo, 0); 8842 return (0); 8843} 8844 8845int 8846t4_os_pci_restore_state(struct adapter *sc) 8847{ 8848 device_t dev; 8849 struct pci_devinfo *dinfo; 8850 8851 dev = sc->dev; 8852 dinfo = device_get_ivars(dev); 8853 8854 pci_cfg_restore(dev, dinfo); 8855 return (0); 8856} 8857 8858void 8859t4_os_portmod_changed(const struct adapter *sc, int idx) 8860{ 8861 struct port_info *pi = sc->port[idx]; 8862 struct vi_info *vi; 8863 struct ifnet *ifp; 8864 int v; 8865 static const char *mod_str[] = { 8866 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 8867 }; 8868 8869 for_each_vi(pi, v, vi) { 8870 build_medialist(pi, &vi->media); 8871 } 8872 8873 ifp = pi->vi[0].ifp; 8874 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 8875 if_printf(ifp, "transceiver unplugged.\n"); 8876 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 8877 if_printf(ifp, "unknown transceiver inserted.\n"); 8878 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 8879 if_printf(ifp, "unsupported transceiver inserted.\n"); 8880 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 8881 if_printf(ifp, "%s transceiver inserted.\n", 8882 mod_str[pi->mod_type]); 8883 } else { 8884 if_printf(ifp, "transceiver (type %d) inserted.\n", 8885 pi->mod_type); 8886 } 8887} 8888 8889void 8890t4_os_link_changed(struct adapter *sc, int idx, int link_stat) 8891{ 8892 struct port_info *pi = sc->port[idx]; 8893 struct vi_info *vi; 8894 struct ifnet *ifp; 8895 int v; 8896 8897 for_each_vi(pi, v, vi) { 8898 ifp = vi->ifp; 8899 if (ifp == NULL) 8900 continue; 8901 8902 if (link_stat) { 8903 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed); 8904 if_link_state_change(ifp, LINK_STATE_UP); 8905 } else { 8906 if_link_state_change(ifp, LINK_STATE_DOWN); 8907 } 8908 } 8909} 8910 8911void 8912t4_iterate(void (*func)(struct adapter *, void *), void *arg) 8913{ 8914 struct adapter *sc; 8915 8916 sx_slock(&t4_list_lock); 8917 SLIST_FOREACH(sc, &t4_list, link) { 8918 /* 8919 * func should not make any assumptions about what state sc is 8920 * in - the only guarantee is that sc->sc_lock is a valid lock. 
8921 */ 8922 func(sc, arg); 8923 } 8924 sx_sunlock(&t4_list_lock); 8925} 8926 8927static int 8928t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 8929 struct thread *td) 8930{ 8931 int rc; 8932 struct adapter *sc = dev->si_drv1; 8933 8934 rc = priv_check(td, PRIV_DRIVER); 8935 if (rc != 0) 8936 return (rc); 8937 8938 switch (cmd) { 8939 case CHELSIO_T4_GETREG: { 8940 struct t4_reg *edata = (struct t4_reg *)data; 8941 8942 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 8943 return (EFAULT); 8944 8945 if (edata->size == 4) 8946 edata->val = t4_read_reg(sc, edata->addr); 8947 else if (edata->size == 8) 8948 edata->val = t4_read_reg64(sc, edata->addr); 8949 else 8950 return (EINVAL); 8951 8952 break; 8953 } 8954 case CHELSIO_T4_SETREG: { 8955 struct t4_reg *edata = (struct t4_reg *)data; 8956 8957 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 8958 return (EFAULT); 8959 8960 if (edata->size == 4) { 8961 if (edata->val & 0xffffffff00000000) 8962 return (EINVAL); 8963 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 8964 } else if (edata->size == 8) 8965 t4_write_reg64(sc, edata->addr, edata->val); 8966 else 8967 return (EINVAL); 8968 break; 8969 } 8970 case CHELSIO_T4_REGDUMP: { 8971 struct t4_regdump *regs = (struct t4_regdump *)data; 8972 int reglen = t4_get_regs_len(sc); 8973 uint8_t *buf; 8974 8975 if (regs->len < reglen) { 8976 regs->len = reglen; /* hint to the caller */ 8977 return (ENOBUFS); 8978 } 8979 8980 regs->len = reglen; 8981 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 8982 get_regs(sc, regs, buf); 8983 rc = copyout(buf, regs->data, reglen); 8984 free(buf, M_CXGBE); 8985 break; 8986 } 8987 case CHELSIO_T4_GET_FILTER_MODE: 8988 rc = get_filter_mode(sc, (uint32_t *)data); 8989 break; 8990 case CHELSIO_T4_SET_FILTER_MODE: 8991 rc = set_filter_mode(sc, *(uint32_t *)data); 8992 break; 8993 case CHELSIO_T4_GET_FILTER: 8994 rc = get_filter(sc, (struct t4_filter *)data); 8995 break; 8996 case CHELSIO_T4_SET_FILTER: 8997 rc = set_filter(sc, (struct t4_filter *)data); 8998 break; 8999 case CHELSIO_T4_DEL_FILTER: 9000 rc = del_filter(sc, (struct t4_filter *)data); 9001 break; 9002 case CHELSIO_T4_GET_SGE_CONTEXT: 9003 rc = get_sge_context(sc, (struct t4_sge_context *)data); 9004 break; 9005 case CHELSIO_T4_LOAD_FW: 9006 rc = load_fw(sc, (struct t4_data *)data); 9007 break; 9008 case CHELSIO_T4_GET_MEM: 9009 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 9010 break; 9011 case CHELSIO_T4_GET_I2C: 9012 rc = read_i2c(sc, (struct t4_i2c_data *)data); 9013 break; 9014 case CHELSIO_T4_CLEAR_STATS: { 9015 int i, v; 9016 u_int port_id = *(uint32_t *)data; 9017 struct port_info *pi; 9018 struct vi_info *vi; 9019 9020 if (port_id >= sc->params.nports) 9021 return (EINVAL); 9022 pi = sc->port[port_id]; 9023 if (pi == NULL) 9024 return (EIO); 9025 9026 /* MAC stats */ 9027 t4_clr_port_stats(sc, pi->tx_chan); 9028 pi->tx_parse_error = 0; 9029 mtx_lock(&sc->reg_lock); 9030 for_each_vi(pi, v, vi) { 9031 if (vi->flags & VI_INIT_DONE) 9032 t4_clr_vi_stats(sc, vi->viid); 9033 } 9034 mtx_unlock(&sc->reg_lock); 9035 9036 /* 9037 * Since this command accepts a port, clear stats for 9038 * all VIs on this port. 
9039 */ 9040 for_each_vi(pi, v, vi) { 9041 if (vi->flags & VI_INIT_DONE) { 9042 struct sge_rxq *rxq; 9043 struct sge_txq *txq; 9044 struct sge_wrq *wrq; 9045 9046 for_each_rxq(vi, i, rxq) { 9047#if defined(INET) || defined(INET6) 9048 rxq->lro.lro_queued = 0; 9049 rxq->lro.lro_flushed = 0; 9050#endif 9051 rxq->rxcsum = 0; 9052 rxq->vlan_extraction = 0; 9053 } 9054 9055 for_each_txq(vi, i, txq) { 9056 txq->txcsum = 0; 9057 txq->tso_wrs = 0; 9058 txq->vlan_insertion = 0; 9059 txq->imm_wrs = 0; 9060 txq->sgl_wrs = 0; 9061 txq->txpkt_wrs = 0; 9062 txq->txpkts0_wrs = 0; 9063 txq->txpkts1_wrs = 0; 9064 txq->txpkts0_pkts = 0; 9065 txq->txpkts1_pkts = 0; 9066 mp_ring_reset_stats(txq->r); 9067 } 9068 9069#ifdef TCP_OFFLOAD 9070 /* nothing to clear for each ofld_rxq */ 9071 9072 for_each_ofld_txq(vi, i, wrq) { 9073 wrq->tx_wrs_direct = 0; 9074 wrq->tx_wrs_copied = 0; 9075 } 9076#endif 9077 9078 if (IS_MAIN_VI(vi)) { 9079 wrq = &sc->sge.ctrlq[pi->port_id]; 9080 wrq->tx_wrs_direct = 0; 9081 wrq->tx_wrs_copied = 0; 9082 } 9083 } 9084 } 9085 break; 9086 } 9087 case CHELSIO_T4_SCHED_CLASS: 9088 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); 9089 break; 9090 case CHELSIO_T4_SCHED_QUEUE: 9091 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); 9092 break; 9093 case CHELSIO_T4_GET_TRACER: 9094 rc = t4_get_tracer(sc, (struct t4_tracer *)data); 9095 break; 9096 case CHELSIO_T4_SET_TRACER: 9097 rc = t4_set_tracer(sc, (struct t4_tracer *)data); 9098 break; 9099 case CHELSIO_T4_LOAD_CFG: 9100 rc = load_cfg(sc, (struct t4_data *)data); 9101 break; 9102 default: 9103 rc = ENOTTY; 9104 } 9105 9106 return (rc); 9107} 9108 9109void 9110t4_db_full(struct adapter *sc) 9111{ 9112 9113 CXGBE_UNIMPLEMENTED(__func__); 9114} 9115 9116void 9117t4_db_dropped(struct adapter *sc) 9118{ 9119 9120 CXGBE_UNIMPLEMENTED(__func__); 9121} 9122 9123#ifdef TCP_OFFLOAD 9124void 9125t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order) 9126{ 9127 9128 t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask); 9129 t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) | 9130 V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) | 9131 V_HPZ3(pgsz_order[3])); 9132} 9133 9134static int 9135toe_capability(struct vi_info *vi, int enable) 9136{ 9137 int rc; 9138 struct port_info *pi = vi->pi; 9139 struct adapter *sc = pi->adapter; 9140 9141 ASSERT_SYNCHRONIZED_OP(sc); 9142 9143 if (!is_offload(sc)) 9144 return (ENODEV); 9145 9146 if (enable) { 9147 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { 9148 /* TOE is already enabled. */ 9149 return (0); 9150 } 9151 9152 /* 9153 * We need the port's queues around so that we're able to send 9154 * and receive CPLs to/from the TOE even if the ifnet for this 9155 * port has never been UP'd administratively. 9156 */ 9157 if (!(vi->flags & VI_INIT_DONE)) { 9158 rc = vi_full_init(vi); 9159 if (rc) 9160 return (rc); 9161 } 9162 if (!(pi->vi[0].flags & VI_INIT_DONE)) { 9163 rc = vi_full_init(&pi->vi[0]); 9164 if (rc) 9165 return (rc); 9166 } 9167 9168 if (isset(&sc->offload_map, pi->port_id)) { 9169 /* TOE is enabled on another VI of this port. 
*/ 9170 pi->uld_vis++; 9171 return (0); 9172 } 9173 9174 if (!uld_active(sc, ULD_TOM)) { 9175 rc = t4_activate_uld(sc, ULD_TOM); 9176 if (rc == EAGAIN) { 9177 log(LOG_WARNING, 9178 "You must kldload t4_tom.ko before trying " 9179 "to enable TOE on a cxgbe interface.\n"); 9180 } 9181 if (rc != 0) 9182 return (rc); 9183 KASSERT(sc->tom_softc != NULL, 9184 ("%s: TOM activated but softc NULL", __func__)); 9185 KASSERT(uld_active(sc, ULD_TOM), 9186 ("%s: TOM activated but flag not set", __func__)); 9187 } 9188 9189 /* Activate iWARP and iSCSI too, if the modules are loaded. */ 9190 if (!uld_active(sc, ULD_IWARP)) 9191 (void) t4_activate_uld(sc, ULD_IWARP); 9192 if (!uld_active(sc, ULD_ISCSI)) 9193 (void) t4_activate_uld(sc, ULD_ISCSI); 9194 9195 pi->uld_vis++; 9196 setbit(&sc->offload_map, pi->port_id); 9197 } else { 9198 pi->uld_vis--; 9199 9200 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0) 9201 return (0); 9202 9203 KASSERT(uld_active(sc, ULD_TOM), 9204 ("%s: TOM never initialized?", __func__)); 9205 clrbit(&sc->offload_map, pi->port_id); 9206 } 9207 9208 return (0); 9209} 9210 9211/* 9212 * Add an upper layer driver to the global list. 9213 */ 9214int 9215t4_register_uld(struct uld_info *ui) 9216{ 9217 int rc = 0; 9218 struct uld_info *u; 9219 9220 sx_xlock(&t4_uld_list_lock); 9221 SLIST_FOREACH(u, &t4_uld_list, link) { 9222 if (u->uld_id == ui->uld_id) { 9223 rc = EEXIST; 9224 goto done; 9225 } 9226 } 9227 9228 SLIST_INSERT_HEAD(&t4_uld_list, ui, link); 9229 ui->refcount = 0; 9230done: 9231 sx_xunlock(&t4_uld_list_lock); 9232 return (rc); 9233} 9234 9235int 9236t4_unregister_uld(struct uld_info *ui) 9237{ 9238 int rc = EINVAL; 9239 struct uld_info *u; 9240 9241 sx_xlock(&t4_uld_list_lock); 9242 9243 SLIST_FOREACH(u, &t4_uld_list, link) { 9244 if (u == ui) { 9245 if (ui->refcount > 0) { 9246 rc = EBUSY; 9247 goto done; 9248 } 9249 9250 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); 9251 rc = 0; 9252 goto done; 9253 } 9254 } 9255done: 9256 sx_xunlock(&t4_uld_list_lock); 9257 return (rc); 9258} 9259 9260int 9261t4_activate_uld(struct adapter *sc, int id) 9262{ 9263 int rc; 9264 struct uld_info *ui; 9265 9266 ASSERT_SYNCHRONIZED_OP(sc); 9267 9268 if (id < 0 || id > ULD_MAX) 9269 return (EINVAL); 9270 rc = EAGAIN; /* kldload the module with this ULD and try again. */ 9271 9272 sx_slock(&t4_uld_list_lock); 9273 9274 SLIST_FOREACH(ui, &t4_uld_list, link) { 9275 if (ui->uld_id == id) { 9276 if (!(sc->flags & FULL_INIT_DONE)) { 9277 rc = adapter_full_init(sc); 9278 if (rc != 0) 9279 break; 9280 } 9281 9282 rc = ui->activate(sc); 9283 if (rc == 0) { 9284 setbit(&sc->active_ulds, id); 9285 ui->refcount++; 9286 } 9287 break; 9288 } 9289 } 9290 9291 sx_sunlock(&t4_uld_list_lock); 9292 9293 return (rc); 9294} 9295 9296int 9297t4_deactivate_uld(struct adapter *sc, int id) 9298{ 9299 int rc; 9300 struct uld_info *ui; 9301 9302 ASSERT_SYNCHRONIZED_OP(sc); 9303 9304 if (id < 0 || id > ULD_MAX) 9305 return (EINVAL); 9306 rc = ENXIO; 9307 9308 sx_slock(&t4_uld_list_lock); 9309 9310 SLIST_FOREACH(ui, &t4_uld_list, link) { 9311 if (ui->uld_id == id) { 9312 rc = ui->deactivate(sc); 9313 if (rc == 0) { 9314 clrbit(&sc->active_ulds, id); 9315 ui->refcount--; 9316 } 9317 break; 9318 } 9319 } 9320 9321 sx_sunlock(&t4_uld_list_lock); 9322 9323 return (rc); 9324} 9325 9326int 9327uld_active(struct adapter *sc, int uld_id) 9328{ 9329 9330 MPASS(uld_id >= 0 && uld_id <= ULD_MAX); 9331 9332 return (isset(&sc->active_ulds, uld_id)); 9333} 9334#endif 9335 9336/* 9337 * t = ptr to tunable.
9338 * nc = number of CPUs. 9339 * c = compiled in default for that tunable. 9340 */ 9341static void 9342calculate_nqueues(int *t, int nc, const int c) 9343{ 9344 int nq; 9345 9346 if (*t > 0) 9347 return; 9348 nq = *t < 0 ? -*t : c; 9349 *t = min(nc, nq); 9350} 9351 9352/* 9353 * Come up with reasonable defaults for some of the tunables, provided they're 9354 * not set by the user (in which case we'll use the values as is). 9355 */ 9356static void 9357tweak_tunables(void) 9358{ 9359 int nc = mp_ncpus; /* our snapshot of the number of CPUs */ 9360 9361 if (t4_ntxq10g < 1) { 9362#ifdef RSS 9363 t4_ntxq10g = rss_getnumbuckets(); 9364#else 9365 calculate_nqueues(&t4_ntxq10g, nc, NTXQ_10G); 9366#endif 9367 } 9368 9369 if (t4_ntxq1g < 1) { 9370#ifdef RSS 9371 /* XXX: way too many for 1GbE? */ 9372 t4_ntxq1g = rss_getnumbuckets(); 9373#else 9374 calculate_nqueues(&t4_ntxq1g, nc, NTXQ_1G); 9375#endif 9376 } 9377 9378 calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI); 9379 9380 if (t4_nrxq10g < 1) { 9381#ifdef RSS 9382 t4_nrxq10g = rss_getnumbuckets(); 9383#else 9384 calculate_nqueues(&t4_nrxq10g, nc, NRXQ_10G); 9385#endif 9386 } 9387 9388 if (t4_nrxq1g < 1) { 9389#ifdef RSS 9390 /* XXX: way too many for 1GbE? */ 9391 t4_nrxq1g = rss_getnumbuckets(); 9392#else 9393 calculate_nqueues(&t4_nrxq1g, nc, NRXQ_1G); 9394#endif 9395 } 9396 9397 calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI); 9398 9399#ifdef TCP_OFFLOAD 9400 calculate_nqueues(&t4_nofldtxq10g, nc, NOFLDTXQ_10G); 9401 calculate_nqueues(&t4_nofldtxq1g, nc, NOFLDTXQ_1G); 9402 calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI); 9403 calculate_nqueues(&t4_nofldrxq10g, nc, NOFLDRXQ_10G); 9404 calculate_nqueues(&t4_nofldrxq1g, nc, NOFLDRXQ_1G); 9405 calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI); 9406 9407 if (t4_toecaps_allowed == -1) 9408 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; 9409 9410 if (t4_rdmacaps_allowed == -1) { 9411 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | 9412 FW_CAPS_CONFIG_RDMA_RDMAC; 9413 } 9414 9415 if (t4_iscsicaps_allowed == -1) { 9416 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | 9417 FW_CAPS_CONFIG_ISCSI_TARGET_PDU | 9418 FW_CAPS_CONFIG_ISCSI_T10DIF; 9419 } 9420#else 9421 if (t4_toecaps_allowed == -1) 9422 t4_toecaps_allowed = 0; 9423 9424 if (t4_rdmacaps_allowed == -1) 9425 t4_rdmacaps_allowed = 0; 9426 9427 if (t4_iscsicaps_allowed == -1) 9428 t4_iscsicaps_allowed = 0; 9429#endif 9430 9431#ifdef DEV_NETMAP 9432 calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI); 9433 calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI); 9434#endif 9435 9436 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS) 9437 t4_tmr_idx_10g = TMR_IDX_10G; 9438 9439 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS) 9440 t4_pktc_idx_10g = PKTC_IDX_10G; 9441 9442 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS) 9443 t4_tmr_idx_1g = TMR_IDX_1G; 9444 9445 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS) 9446 t4_pktc_idx_1g = PKTC_IDX_1G; 9447 9448 if (t4_qsize_txq < 128) 9449 t4_qsize_txq = 128; 9450 9451 if (t4_qsize_rxq < 128) 9452 t4_qsize_rxq = 128; 9453 while (t4_qsize_rxq & 7) 9454 t4_qsize_rxq++; 9455 9456 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; 9457} 9458 9459#ifdef DDB 9460static void 9461t4_dump_tcb(struct adapter *sc, int tid) 9462{ 9463 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos; 9464 9465 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); 9466 save = t4_read_reg(sc, reg); 9467 base = sc->memwin[2].mw_base; 9468 9469 /* Dump TCB for the tid */ 9470 tcb_addr = 
t4_read_reg(sc, A_TP_CMM_TCB_BASE); 9471 tcb_addr += tid * TCB_SIZE; 9472 9473 if (is_t4(sc)) { 9474 pf = 0; 9475 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */ 9476 } else { 9477 pf = V_PFNUM(sc->pf); 9478 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */ 9479 } 9480 t4_write_reg(sc, reg, win_pos | pf); 9481 t4_read_reg(sc, reg); 9482 9483 off = tcb_addr - win_pos; 9484 for (i = 0; i < 4; i++) { 9485 uint32_t buf[8]; 9486 for (j = 0; j < 8; j++, off += 4) 9487 buf[j] = htonl(t4_read_reg(sc, base + off)); 9488 9489 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", 9490 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 9491 buf[7]); 9492 } 9493 9494 t4_write_reg(sc, reg, save); 9495 t4_read_reg(sc, reg); 9496} 9497 9498static void 9499t4_dump_devlog(struct adapter *sc) 9500{ 9501 struct devlog_params *dparams = &sc->params.devlog; 9502 struct fw_devlog_e e; 9503 int i, first, j, m, nentries, rc; 9504 uint64_t ftstamp = UINT64_MAX; 9505 9506 if (dparams->start == 0) { 9507 db_printf("devlog params not valid\n"); 9508 return; 9509 } 9510 9511 nentries = dparams->size / sizeof(struct fw_devlog_e); 9512 m = fwmtype_to_hwmtype(dparams->memtype); 9513 9514 /* Find the first entry. */ 9515 first = -1; 9516 for (i = 0; i < nentries && !db_pager_quit; i++) { 9517 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9518 sizeof(e), (void *)&e); 9519 if (rc != 0) 9520 break; 9521 9522 if (e.timestamp == 0) 9523 break; 9524 9525 e.timestamp = be64toh(e.timestamp); 9526 if (e.timestamp < ftstamp) { 9527 ftstamp = e.timestamp; 9528 first = i; 9529 } 9530 } 9531 9532 if (first == -1) 9533 return; 9534 9535 i = first; 9536 do { 9537 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9538 sizeof(e), (void *)&e); 9539 if (rc != 0) 9540 return; 9541 9542 if (e.timestamp == 0) 9543 return; 9544 9545 e.timestamp = be64toh(e.timestamp); 9546 e.seqno = be32toh(e.seqno); 9547 for (j = 0; j < 8; j++) 9548 e.params[j] = be32toh(e.params[j]); 9549 9550 db_printf("%10d %15ju %8s %8s ", 9551 e.seqno, e.timestamp, 9552 (e.level < nitems(devlog_level_strings) ? 9553 devlog_level_strings[e.level] : "UNKNOWN"), 9554 (e.facility < nitems(devlog_facility_strings) ? 
9555 devlog_facility_strings[e.facility] : "UNKNOWN")); 9556 db_printf(e.fmt, e.params[0], e.params[1], e.params[2], 9557 e.params[3], e.params[4], e.params[5], e.params[6], 9558 e.params[7]); 9559 9560 if (++i == nentries) 9561 i = 0; 9562 } while (i != first && !db_pager_quit); 9563} 9564 9565static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table); 9566_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table); 9567 9568DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL) 9569{ 9570 device_t dev; 9571 int t; 9572 bool valid; 9573 9574 valid = false; 9575 t = db_read_token(); 9576 if (t == tIDENT) { 9577 dev = device_lookup_by_name(db_tok_string); 9578 valid = true; 9579 } 9580 db_skip_to_eol(); 9581 if (!valid) { 9582 db_printf("usage: show t4 devlog <nexus>\n"); 9583 return; 9584 } 9585 9586 if (dev == NULL) { 9587 db_printf("device not found\n"); 9588 return; 9589 } 9590 9591 t4_dump_devlog(device_get_softc(dev)); 9592} 9593 9594DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL) 9595{ 9596 device_t dev; 9597 int radix, tid, t; 9598 bool valid; 9599 9600 valid = false; 9601 radix = db_radix; 9602 db_radix = 10; 9603 t = db_read_token(); 9604 if (t == tIDENT) { 9605 dev = device_lookup_by_name(db_tok_string); 9606 t = db_read_token(); 9607 if (t == tNUMBER) { 9608 tid = db_tok_number; 9609 valid = true; 9610 } 9611 } 9612 db_radix = radix; 9613 db_skip_to_eol(); 9614 if (!valid) { 9615 db_printf("usage: show t4 tcb <nexus> <tid>\n"); 9616 return; 9617 } 9618 9619 if (dev == NULL) { 9620 db_printf("device not found\n"); 9621 return; 9622 } 9623 if (tid < 0) { 9624 db_printf("invalid tid\n"); 9625 return; 9626 } 9627 9628 t4_dump_tcb(device_get_softc(dev), tid); 9629} 9630#endif 9631 9632static struct sx mlu; /* mod load unload */ 9633SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); 9634 9635static int 9636mod_event(module_t mod, int cmd, void *arg) 9637{ 9638 int rc = 0; 9639 static int loaded = 0; 9640 9641 switch (cmd) { 9642 case MOD_LOAD: 9643 sx_xlock(&mlu); 9644 if (loaded++ == 0) { 9645 t4_sge_modload(); 9646 t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl); 9647 t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl); 9648 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); 9649 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); 9650 sx_init(&t4_list_lock, "T4/T5 adapters"); 9651 SLIST_INIT(&t4_list); 9652#ifdef TCP_OFFLOAD 9653 sx_init(&t4_uld_list_lock, "T4/T5 ULDs"); 9654 SLIST_INIT(&t4_uld_list); 9655#endif 9656 t4_tracer_modload(); 9657 tweak_tunables(); 9658 } 9659 sx_xunlock(&mlu); 9660 break; 9661 9662 case MOD_UNLOAD: 9663 sx_xlock(&mlu); 9664 if (--loaded == 0) { 9665 int tries; 9666 9667 sx_slock(&t4_list_lock); 9668 if (!SLIST_EMPTY(&t4_list)) { 9669 rc = EBUSY; 9670 sx_sunlock(&t4_list_lock); 9671 goto done_unload; 9672 } 9673#ifdef TCP_OFFLOAD 9674 sx_slock(&t4_uld_list_lock); 9675 if (!SLIST_EMPTY(&t4_uld_list)) { 9676 rc = EBUSY; 9677 sx_sunlock(&t4_uld_list_lock); 9678 sx_sunlock(&t4_list_lock); 9679 goto done_unload; 9680 } 9681#endif 9682 tries = 0; 9683 while (tries++ < 5 && t4_sge_extfree_refs() != 0) { 9684 uprintf("%ju clusters with custom free routine " 9685 "still in use.\n", t4_sge_extfree_refs()); 9686 pause("t4unload", 2 * hz); 9687 } 9688#ifdef TCP_OFFLOAD 9689 sx_sunlock(&t4_uld_list_lock); 9690#endif 9691 sx_sunlock(&t4_list_lock); 9692 9693 if (t4_sge_extfree_refs() == 0) { 9694 t4_tracer_modunload(); 9695#ifdef TCP_OFFLOAD 9696 sx_destroy(&t4_uld_list_lock); 9697#endif 9698 sx_destroy(&t4_list_lock);
9699 t4_sge_modunload(); 9700 loaded = 0; 9701 } else { 9702 rc = EBUSY; 9703 loaded++; /* undo earlier decrement */ 9704 } 9705 } 9706done_unload: 9707 sx_xunlock(&mlu); 9708 break; 9709 } 9710 9711 return (rc); 9712} 9713 9714static devclass_t t4_devclass, t5_devclass, t6_devclass; 9715static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass; 9716static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass; 9717 9718DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0); 9719MODULE_VERSION(t4nex, 1); 9720MODULE_DEPEND(t4nex, firmware, 1, 1, 1); 9721#ifdef DEV_NETMAP 9722MODULE_DEPEND(t4nex, netmap, 1, 1, 1); 9723#endif /* DEV_NETMAP */ 9724 9725DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0); 9726MODULE_VERSION(t5nex, 1); 9727MODULE_DEPEND(t5nex, firmware, 1, 1, 1); 9728#ifdef DEV_NETMAP 9729MODULE_DEPEND(t5nex, netmap, 1, 1, 1); 9730#endif /* DEV_NETMAP */ 9731 9732DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0); 9733MODULE_VERSION(t6nex, 1); 9734MODULE_DEPEND(t6nex, firmware, 1, 1, 1); 9735#ifdef DEV_NETMAP 9736MODULE_DEPEND(t6nex, netmap, 1, 1, 1); 9737#endif /* DEV_NETMAP */ 9738 9739DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 9740MODULE_VERSION(cxgbe, 1); 9741 9742DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0); 9743MODULE_VERSION(cxl, 1); 9744 9745DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0); 9746MODULE_VERSION(cc, 1); 9747 9748DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0); 9749MODULE_VERSION(vcxgbe, 1); 9750 9751DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0); 9752MODULE_VERSION(vcxl, 1); 9753 9754DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0); 9755MODULE_VERSION(vcc, 1); 9756
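/*
 * Illustrative sketch only (not part of the driver, kept under #if 0 so it
 * stays out of any build): one way a userland tool could exercise the
 * CHELSIO_T4_GETREG case of t4_ioctl() above.  The /dev/t4nex0 node name and
 * the register offset are assumptions; the addr/size/val members of struct
 * t4_reg are taken from the handler above and t4_ioctl.h.  Treat this as a
 * sketch, not as reference cxgbetool code.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include "t4_ioctl.h"	/* CHELSIO_T4_GETREG, struct t4_reg */

int
main(void)
{
	/* Any 4-byte aligned offset below the BAR0 size passes the handler's checks. */
	struct t4_reg r = { .addr = 0x0, .size = 4, .val = 0 };
	int fd = open("/dev/t4nex0", O_RDWR);	/* assumed nexus device node */

	if (fd < 0) {
		perror("open");
		return (1);
	}
	if (ioctl(fd, CHELSIO_T4_GETREG, &r) != 0) {
		perror("CHELSIO_T4_GETREG");
		return (1);
	}
	printf("reg 0x%x = 0x%jx\n", r.addr, (uintmax_t)r.val);
	return (0);
}
#endif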