/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/t4_main.c 306661 2016-10-03 23:15:44Z jhb $");

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe, t4_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
static device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe, cxgbe_probe),
	DEVMETHOD(device_attach, cxgbe_attach),
	DEVMETHOD(device_detach, cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe, vcxgbe_probe),
	DEVMETHOD(device_attach, vcxgbe_attach),
	DEVMETHOD(device_detach, vcxgbe_detach),
	{ 0, 0 }
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;
static d_open_t t4_open;
static d_close_t t4_close;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe, t5_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};


/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static struct cdevsw t5_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_open = t4_open,
	.d_close = t4_close,
	.d_ioctl = t4_ioctl,
	.d_name = "t5nex",
};

/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);
static int cxgbe_media_change(struct ifnet *);
static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables. See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should
 * provide a reasonable default when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to
 * T5 are under hw.cxl.
 */
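/*
 * Illustrative usage, not part of the driver logic: these TUNABLE_* knobs are
 * fetched from the kernel environment, so they are typically set from
 * loader.conf(5) before the module is loaded, e.g. (example values only):
 *
 *   hw.cxgbe.ntxq10g="8"
 *   hw.cxgbe.nrxq10g="4"
 *
 * A value of -1 leaves the final choice to tweak_tunables() at load time.
 */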
/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
static int t4_ntxq10g = -1;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
static int t4_nrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
static int t4_ntxq1g = -1;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
static int t4_nrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#define NTXQ_VI 1
static int t4_ntxq_vi = -1;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);

#define NRXQ_VI 1
static int t4_nrxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -1;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
static int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
static int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
static int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
static int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
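/*
 * For example (illustrative only): with INTx, MSI, and MSI-X on bits 0, 1, and
 * 2, the default INTR_MSIX | INTR_MSI | INTR_INTX corresponds to the bitmask 7
 * (all types allowed); hw.cxgbe.interrupt_types="4" would restrict the driver
 * to MSI-X and "2" to MSI.
 */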
/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 * mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
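/*
 * For example (illustrative only): with rx_pause on bit 0 and tx_pause on
 * bit 1, the default PAUSE_TX | PAUSE_RX corresponds to
 * hw.cxgbe.pause_settings="3"; "1" heeds incoming PAUSE frames but never
 * transmits any, and "0" disables flow control in both directions.
 */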
/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used. Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_tlscaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.tlscaps_allowed", &t4_tlscaps_allowed);

static int t4_iscsicaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);

/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

static int map_bars_0_and_4(struct adapter *);
static int map_bar_2(struct adapter *);
static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
    int);
static inline int write_via_memwin(struct adapter *, int, uint32_t,
    const uint32_t *, int);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static int setup_intr_handlers(struct adapter *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static void t4_sysctls(struct adapter *);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#endif
#ifdef TCP_OFFLOAD
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int set_sched_class(struct adapter *, struct t4_sched_params *);
static int set_sched_queue(struct adapter *, struct t4_sched_queue *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint8_t *buf;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.debug_flags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
	}

	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	rc = map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
	sc->mbox = sc->pf;

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
	    device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
	    device_get_nameunit(dev));
	if (sc->cdev == NULL)
		device_printf(dev, "failed to create nexus char device.\n");
	else
		sc->cdev->si_drv1 = sc;
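	/*
	 * (Illustrative note, assuming the usual devfs naming: the node
	 * created above shows up as /dev/t4nexN or /dev/t5nexN and is what
	 * userland management tools such as cxgbetool open to reach the
	 * t4_ioctl() handler registered in the cdevsw above.)
	 */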
	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * Number of VIs to create per-port.  The first VI is the "main" regular
	 * VI for the port.  The rest are additional virtual interfaces on the
	 * same physical port.  Note that the main VI does not have native
	 * netmap support but the extra VIs do.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis >= 1)
		num_vis = t4_num_vis;
	else
		num_vis = 1;
	if (num_vis > nitems(vi_mac_funcs)) {
		num_vis = nitems(vi_mac_funcs);
		device_printf(dev, "Number of VIs limited to %d\n", num_vis);
	}

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation until
		 * pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.requested_fc |= t4_pause_settings;
		pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX);
		pi->link_cfg.fc |= t4_pause_settings;

		rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
		if (rc != 0) {
			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		pi->tc = malloc(sizeof(struct tx_sched_class) *
		    sc->chip_params->nsched_cls, M_CXGBE, M_ZERO | M_WAITOK);

		if (is_10G_port(pi) || is_40G_port(pi)) {
			n10g++;
		} else {
			n1g++;
		}

		pi->linkdnrc = -1;

		pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}

	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */
	if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
		num_vis = 1;

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	if (num_vis > 1) {
		s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		if (num_vis > 1) {
			s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldrxq_vi;
			s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldtxq_vi;
		}
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	if (num_vis > 1) {
		s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				vi->tmr_idx = t4_tmr_idx_10g;
				vi->pktc_idx = t4_pktc_idx_10g;
				vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
			} else {
				vi->tmr_idx = t4_tmr_idx_1g;
				vi->pktc_idx = t4_pktc_idx_1g;
				vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
			}
			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#ifdef TCP_OFFLOAD
			vi->first_ofld_rxq = ofld_rqidx;
			vi->first_ofld_txq = ofld_tqidx;
			if (is_10G_port(pi) || is_40G_port(pi)) {
				vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
				    iaq.nofldtxq_vi;
			} else {
				vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
				    iaq.nofldtxq_vi;
			}
			ofld_rqidx += vi->nofldrxq;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef DEV_NETMAP
			if (j > 0) {
				vi->first_nm_rxq = nm_rqidx;
				vi->first_nm_txq = nm_tqidx;
				vi->nnmrxq = iaq.nnmrxq_vi;
				vi->nnmtxq = iaq.nnmtxq_vi;
				nm_rqidx += vi->nnmrxq;
				nm_tqidx += vi->nnmtxq;
			}
#endif
		}
	}

	rc = setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

	notify_siblings(dev, 0);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

static int
t4_ready(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);
	if (sc->flags & FW_OK)
		return (0);
	return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	sc = device_get_softc(dev);
	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);
	*child = pi->dev;
	return (0);
}

static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int error, i;

	error = 0;
	for (i = 0; i < PCI_FUNCMAX; i++) {
		if (i == pci_get_function(dev))
			continue;
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), i);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching)
			error = T4_DETACH_CHILD(sibling);
		else
			(void)T4_ATTACH_CHILD(sibling);
		if (error)
			break;
	}
	return (error);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	rc = notify_siblings(dev, 1);
	if (rc) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	if (sc->flags & FULL_INIT_DONE)
		t4_intr_disable(sc);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	rc = bus_generic_detach(dev);
	if (rc) {
		device_printf(dev,
		    "failed to detach child devices: %d\n", rc);
		return (rc);
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi->tc, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if (sc->flags & FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->reg_lock))
		mtx_destroy(&sc->reg_lock);

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;
	ifp->if_get_counter = cxgbe_get_counter;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0)
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	/* Initialize ifmedia for this VI */
	ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(vi->pi, &vi->media);

	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
	if (vi->nnmrxq != 0)
		cxgbe_nm_attach(vi);
#endif
	sb = sbuf_new_auto();
	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
	if (ifp->if_capabilities & IFCAP_TOE)
		sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
		    vi->nofldtxq, vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
		    vi->nnmtxq, vi->nnmrxq);
#endif
	sbuf_finish(sb);
	device_printf(dev, "%s\n", sbuf_data(sb));
	sbuf_delete(sb);

	vi_sysctls(vi);

	return (0);
}

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct vi_info *vi;
	int i, rc;

	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
	if (rc)
		return (rc);

	for_each_vi(pi, i, vi) {
		if (i == 0)
			continue;
		vi->dev = device_add_child(dev, is_t4(pi->adapter) ?
		    "vcxgbe" : "vcxl", -1);
		if (vi->dev == NULL) {
			device_printf(dev, "failed to add VI %d\n", i);
			continue;
		}
		device_set_softc(vi->dev, vi);
	}

	cxgbe_sysctls(pi);

	bus_generic_attach(dev);

	return (0);
}

static void
cxgbe_vi_detach(struct vi_info *vi)
{
	struct ifnet *ifp = vi->ifp;

	ether_ifdetach(ifp);

	if (vi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);

	/* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_detach(vi);
#endif
	cxgbe_uninit_synchronized(vi);
	callout_drain(&vi->tick);
	vi_full_uninit(vi);

	ifmedia_removeall(&vi->media);
	if_free(vi->ifp);
	vi->ifp = NULL;
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Detach the extra VIs first. */
	rc = bus_generic_detach(dev);
	if (rc)
		return (rc);
	device_delete_children(dev);

	doom_vi(sc, &pi->vi[0]);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	cxgbe_vi_detach(&pi->vi[0]);
	callout_drain(&pi->tick);

	end_synchronized_op(sc, 0);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
			return (EINVAL);

		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (vi->flags & VI_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, vi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = vi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(vi);
			}
			vi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(vi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(vi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(vi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(&m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}

	/* Select a txq. */
	txq = &sc->sge.txq[vi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);

	items[0] = m;
	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}
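/*
 * A worked example of the txq selection above, with illustrative values: if
 * vi->ntxq is 4 and vi->rsrv_noflowq is 1, mbufs that carry a flowid are
 * spread over txq[first_txq + 1] .. txq[first_txq + 3] (flowid % 3, offset
 * past the reserved queue), while mbufs without a hash all land on
 * txq[first_txq].
 */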
static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !VI_INIT_DONE. */
	if (vi->flags & VI_INIT_DONE) {
		for_each_txq(vi, i, txq) {
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_ENABLED;
			TXQ_UNLOCK(txq);
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 0);
				pause("qflush", 1);
			}
		}
	}
	if_qflush(ifp);
}

static uint64_t
vi_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct fw_vi_stats_vf *s = &vi->stats;

	vi_refresh_stats(vi->pi->adapter, vi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_bcast_frames + s->rx_mcast_frames +
		    s->rx_ucast_frames);
	case IFCOUNTER_IERRORS:
		return (s->rx_err_frames);
	case IFCOUNTER_OPACKETS:
		return (s->tx_bcast_frames + s->tx_mcast_frames +
		    s->tx_ucast_frames + s->tx_offload_frames);
	case IFCOUNTER_OERRORS:
		return (s->tx_drop_frames);
	case IFCOUNTER_IBYTES:
		return (s->rx_bcast_bytes + s->rx_mcast_bytes +
		    s->rx_ucast_bytes);
	case IFCOUNTER_OBYTES:
		return (s->tx_bcast_bytes + s->tx_mcast_bytes +
		    s->tx_ucast_bytes + s->tx_offload_bytes);
	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);
	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);
	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = 0;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}

uint64_t
cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct port_stats *s = &pi->stats;

	if (pi->nvi > 1)
		return (vi_get_counter(ifp, c));

	cxgbe_refresh_stats(sc, pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (s->rx_frames);

	case IFCOUNTER_IERRORS:
		return (s->rx_jabber + s->rx_runt + s->rx_too_long +
		    s->rx_fcs_err + s->rx_len_err);

	case IFCOUNTER_OPACKETS:
		return (s->tx_frames);

	case IFCOUNTER_OERRORS:
		return (s->tx_error_frames);

	case IFCOUNTER_IBYTES:
		return (s->rx_octets);

	case IFCOUNTER_OBYTES:
		return (s->tx_octets);

	case IFCOUNTER_IMCASTS:
		return (s->rx_mcast_frames);

	case IFCOUNTER_OMCASTS:
		return (s->tx_mcast_frames);

	case IFCOUNTER_IQDROPS:
		return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
		    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
		    s->rx_trunc3 + pi->tnl_cong_drops);

	case IFCOUNTER_OQDROPS: {
		uint64_t drops;

		drops = s->tx_drop;
		if (vi->flags & VI_INIT_DONE) {
			int i;
			struct sge_txq *txq;

			for_each_txq(vi, i, txq)
				drops += counter_u64_fetch(txq->r->drops);
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;

	device_printf(vi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct ifmedia_entry *cur;
	int speed = pi->link_cfg.speed;

	cur = vi->media.ifm_cur;

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == 10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == 1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == 100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == 10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
		    speed));
}

static int
vcxgbe_probe(device_t dev)
{
	char buf[128];
	struct vi_info *vi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
	    vi - vi->pi->vi);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

static int
vcxgbe_attach(device_t dev)
{
	struct vi_info *vi;
	struct port_info *pi;
	struct adapter *sc;
	int func, index, rc;
	u32 param, val;

	vi = device_get_softc(dev);
	pi = vi->pi;
	sc = pi->adapter;

	index = vi - pi->vi;
	KASSERT(index < nitems(vi_mac_funcs),
	    ("%s: VI %s doesn't have a MAC func", __func__,
	    device_get_nameunit(dev)));
	func = vi_mac_funcs[index];
	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
	    vi->hw_addr, &vi->rss_size, func, 0);
	if (rc < 0) {
		device_printf(dev, "Failed to allocate virtual interface "
		    "for port %d: %d\n", pi->port_id, -rc);
		return (-rc);
	}
	vi->viid = rc;

	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(vi->viid);
	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc)
		vi->rss_base = 0xffff;
	else {
		/* MPASS((val >> 16) == rss_size); */
		vi->rss_base = val & 0xffff;
	}

	rc = cxgbe_vi_attach(dev, vi);
	if (rc) {
		t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
		return (rc);
	}
	return (0);
}

static int
vcxgbe_detach(device_t dev)
{
	struct vi_info *vi;
	struct adapter *sc;

	vi = device_get_softc(dev);
	sc = vi->pi->adapter;

	doom_vi(sc, vi);

	cxgbe_vi_detach(vi);
	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);

	end_synchronized_op(sc, 0);

	return (0);
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

static int
map_bars_0_and_4(struct adapter *sc)
{
	sc->regs_rid = PCIR_BAR(0);
bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1982 &sc->regs_rid, RF_ACTIVE); 1983 if (sc->regs_res == NULL) { 1984 device_printf(sc->dev, "cannot map registers.\n"); 1985 return (ENXIO); 1986 } 1987 sc->bt = rman_get_bustag(sc->regs_res); 1988 sc->bh = rman_get_bushandle(sc->regs_res); 1989 sc->mmio_len = rman_get_size(sc->regs_res); 1990 setbit(&sc->doorbells, DOORBELL_KDB); 1991 1992 sc->msix_rid = PCIR_BAR(4); 1993 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1994 &sc->msix_rid, RF_ACTIVE); 1995 if (sc->msix_res == NULL) { 1996 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 1997 return (ENXIO); 1998 } 1999 2000 return (0); 2001} 2002 2003static int 2004map_bar_2(struct adapter *sc) 2005{ 2006 2007 /* 2008 * T4: only iWARP driver uses the userspace doorbells. There is no need 2009 * to map it if RDMA is disabled. 2010 */ 2011 if (is_t4(sc) && sc->rdmacaps == 0) 2012 return (0); 2013 2014 sc->udbs_rid = PCIR_BAR(2); 2015 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2016 &sc->udbs_rid, RF_ACTIVE); 2017 if (sc->udbs_res == NULL) { 2018 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 2019 return (ENXIO); 2020 } 2021 sc->udbs_base = rman_get_virtual(sc->udbs_res); 2022 2023 if (is_t5(sc)) { 2024 setbit(&sc->doorbells, DOORBELL_UDB); 2025#if defined(__i386__) || defined(__amd64__) 2026 if (t5_write_combine) { 2027 int rc; 2028 2029 /* 2030 * Enable write combining on BAR2. This is the 2031 * userspace doorbell BAR and is split into 128B 2032 * (UDBS_SEG_SIZE) doorbell regions, each associated 2033 * with an egress queue. The first 64B has the doorbell 2034 * and the second 64B can be used to submit a tx work 2035 * request with an implicit doorbell. 2036 */ 2037 2038 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 2039 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 2040 if (rc == 0) { 2041 clrbit(&sc->doorbells, DOORBELL_UDB); 2042 setbit(&sc->doorbells, DOORBELL_WCWR); 2043 setbit(&sc->doorbells, DOORBELL_UDBWC); 2044 } else { 2045 device_printf(sc->dev, 2046 "couldn't enable write combining: %d\n", 2047 rc); 2048 } 2049 2050 t4_write_reg(sc, A_SGE_STAT_CFG, 2051 V_STATSOURCE_T5(7) | V_STATMODE(0)); 2052 } 2053#endif 2054 } 2055 2056 return (0); 2057} 2058 2059struct memwin_init { 2060 uint32_t base; 2061 uint32_t aperture; 2062}; 2063 2064static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 2065 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2066 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2067 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 2068}; 2069 2070static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 2071 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2072 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2073 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 2074}; 2075 2076static void 2077setup_memwin(struct adapter *sc) 2078{ 2079 const struct memwin_init *mw_init; 2080 struct memwin *mw; 2081 int i; 2082 uint32_t bar0; 2083 2084 if (is_t4(sc)) { 2085 /* 2086 * Read low 32b of bar0 indirectly via the hardware backdoor 2087 * mechanism. Works from within PCI passthrough environments 2088 * too, where rman_get_start() can return a different value. We 2089 * need to program the T4 memory window decoders with the actual 2090 * addresses that will be coming across the PCIe link. 
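		 *
		 * Each window decoder below is programmed with its base address
		 * (mw_base + bar0) and an encoded aperture size; e.g. a
		 * hypothetical 4KB aperture would be encoded as
		 * V_WINDOW(ilog2(4096) - 10), i.e. V_WINDOW(2).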
2091 */ 2092 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 2093 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 2094 2095 mw_init = &t4_memwin[0]; 2096 } else { 2097 /* T5+ use the relative offset inside the PCIe BAR */ 2098 bar0 = 0; 2099 2100 mw_init = &t5_memwin[0]; 2101 } 2102 2103 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 2104 rw_init(&mw->mw_lock, "memory window access"); 2105 mw->mw_base = mw_init->base; 2106 mw->mw_aperture = mw_init->aperture; 2107 mw->mw_curpos = 0; 2108 t4_write_reg(sc, 2109 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2110 (mw->mw_base + bar0) | V_BIR(0) | 2111 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 2112 rw_wlock(&mw->mw_lock); 2113 position_memwin(sc, i, 0); 2114 rw_wunlock(&mw->mw_lock); 2115 } 2116 2117 /* flush */ 2118 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2119} 2120 2121/* 2122 * Positions the memory window at the given address in the card's address space. 2123 * There are some alignment requirements and the actual position may be at an 2124 * address prior to the requested address. mw->mw_curpos always has the actual 2125 * position of the window. 2126 */ 2127static void 2128position_memwin(struct adapter *sc, int idx, uint32_t addr) 2129{ 2130 struct memwin *mw; 2131 uint32_t pf; 2132 uint32_t reg; 2133 2134 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2135 mw = &sc->memwin[idx]; 2136 rw_assert(&mw->mw_lock, RA_WLOCKED); 2137 2138 if (is_t4(sc)) { 2139 pf = 0; 2140 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 2141 } else { 2142 pf = V_PFNUM(sc->pf); 2143 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 2144 } 2145 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 2146 t4_write_reg(sc, reg, mw->mw_curpos | pf); 2147 t4_read_reg(sc, reg); /* flush */ 2148} 2149 2150static int 2151rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2152 int len, int rw) 2153{ 2154 struct memwin *mw; 2155 uint32_t mw_end, v; 2156 2157 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2158 2159 /* Memory can only be accessed in naturally aligned 4 byte units */ 2160 if (addr & 3 || len & 3 || len <= 0) 2161 return (EINVAL); 2162 2163 mw = &sc->memwin[idx]; 2164 while (len > 0) { 2165 rw_rlock(&mw->mw_lock); 2166 mw_end = mw->mw_curpos + mw->mw_aperture; 2167 if (addr >= mw_end || addr < mw->mw_curpos) { 2168 /* Will need to reposition the window */ 2169 if (!rw_try_upgrade(&mw->mw_lock)) { 2170 rw_runlock(&mw->mw_lock); 2171 rw_wlock(&mw->mw_lock); 2172 } 2173 rw_assert(&mw->mw_lock, RA_WLOCKED); 2174 position_memwin(sc, idx, addr); 2175 rw_downgrade(&mw->mw_lock); 2176 mw_end = mw->mw_curpos + mw->mw_aperture; 2177 } 2178 rw_assert(&mw->mw_lock, RA_RLOCKED); 2179 while (addr < mw_end && len > 0) { 2180 if (rw == 0) { 2181 v = t4_read_reg(sc, mw->mw_base + addr - 2182 mw->mw_curpos); 2183 *val++ = le32toh(v); 2184 } else { 2185 v = *val++; 2186 t4_write_reg(sc, mw->mw_base + addr - 2187 mw->mw_curpos, htole32(v)); 2188 } 2189 addr += 4; 2190 len -= 4; 2191 } 2192 rw_runlock(&mw->mw_lock); 2193 } 2194 2195 return (0); 2196} 2197 2198static inline int 2199read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2200 int len) 2201{ 2202 2203 return (rw_via_memwin(sc, idx, addr, val, len, 0)); 2204} 2205 2206static inline int 2207write_via_memwin(struct adapter *sc, int idx, uint32_t addr, 2208 const uint32_t *val, int len) 2209{ 2210 2211 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); 2212} 2213 2214static int 2215t4_range_cmp(const void *a, 
const void *b) 2216{ 2217 return ((const struct t4_range *)a)->start - 2218 ((const struct t4_range *)b)->start; 2219} 2220 2221/* 2222 * Verify that the memory range specified by the addr/len pair is valid within 2223 * the card's address space. 2224 */ 2225static int 2226validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2227{ 2228 struct t4_range mem_ranges[4], *r, *next; 2229 uint32_t em, addr_len; 2230 int i, n, remaining; 2231 2232 /* Memory can only be accessed in naturally aligned 4 byte units */ 2233 if (addr & 3 || len & 3 || len <= 0) 2234 return (EINVAL); 2235 2236 /* Enabled memories */ 2237 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2238 2239 r = &mem_ranges[0]; 2240 n = 0; 2241 bzero(r, sizeof(mem_ranges)); 2242 if (em & F_EDRAM0_ENABLE) { 2243 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2244 r->size = G_EDRAM0_SIZE(addr_len) << 20; 2245 if (r->size > 0) { 2246 r->start = G_EDRAM0_BASE(addr_len) << 20; 2247 if (addr >= r->start && 2248 addr + len <= r->start + r->size) 2249 return (0); 2250 r++; 2251 n++; 2252 } 2253 } 2254 if (em & F_EDRAM1_ENABLE) { 2255 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2256 r->size = G_EDRAM1_SIZE(addr_len) << 20; 2257 if (r->size > 0) { 2258 r->start = G_EDRAM1_BASE(addr_len) << 20; 2259 if (addr >= r->start && 2260 addr + len <= r->start + r->size) 2261 return (0); 2262 r++; 2263 n++; 2264 } 2265 } 2266 if (em & F_EXT_MEM_ENABLE) { 2267 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2268 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 2269 if (r->size > 0) { 2270 r->start = G_EXT_MEM_BASE(addr_len) << 20; 2271 if (addr >= r->start && 2272 addr + len <= r->start + r->size) 2273 return (0); 2274 r++; 2275 n++; 2276 } 2277 } 2278 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 2279 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2280 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 2281 if (r->size > 0) { 2282 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 2283 if (addr >= r->start && 2284 addr + len <= r->start + r->size) 2285 return (0); 2286 r++; 2287 n++; 2288 } 2289 } 2290 MPASS(n <= nitems(mem_ranges)); 2291 2292 if (n > 1) { 2293 /* Sort and merge the ranges. */ 2294 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 2295 2296 /* Start from index 0 and examine the next n - 1 entries. */ 2297 r = &mem_ranges[0]; 2298 for (remaining = n - 1; remaining > 0; remaining--, r++) { 2299 2300 MPASS(r->size > 0); /* r is a valid entry. */ 2301 next = r + 1; 2302 MPASS(next->size > 0); /* and so is the next one. */ 2303 2304 while (r->start + r->size >= next->start) { 2305 /* Merge the next one into the current entry. */ 2306 r->size = max(r->start + r->size, 2307 next->start + next->size) - r->start; 2308 n--; /* One fewer entry in total. */ 2309 if (--remaining == 0) 2310 goto done; /* short circuit */ 2311 next++; 2312 } 2313 if (next != r + 1) { 2314 /* 2315 * Some entries were merged into r and next 2316 * points to the first valid entry that couldn't 2317 * be merged. 2318 */ 2319 MPASS(next->size > 0); /* must be valid */ 2320 memcpy(r + 1, next, remaining * sizeof(*r)); 2321#ifdef INVARIANTS 2322 /* 2323 * This so that the foo->size assertion in the 2324 * next iteration of the loop do the right 2325 * thing for entries that were pulled up and are 2326 * no longer valid. 2327 */ 2328 MPASS(n < nitems(mem_ranges)); 2329 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 2330 sizeof(struct t4_range)); 2331#endif 2332 } 2333 } 2334done: 2335 /* Done merging the ranges. 
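		 * mem_ranges[0..n-1] now holds disjoint, sorted ranges; the loop
		 * below simply checks whether [addr, addr + len) lies entirely
		 * within one of them.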
*/ 2336 MPASS(n > 0); 2337 r = &mem_ranges[0]; 2338 for (i = 0; i < n; i++, r++) { 2339 if (addr >= r->start && 2340 addr + len <= r->start + r->size) 2341 return (0); 2342 } 2343 } 2344 2345 return (EFAULT); 2346} 2347 2348static int 2349fwmtype_to_hwmtype(int mtype) 2350{ 2351 2352 switch (mtype) { 2353 case FW_MEMTYPE_EDC0: 2354 return (MEM_EDC0); 2355 case FW_MEMTYPE_EDC1: 2356 return (MEM_EDC1); 2357 case FW_MEMTYPE_EXTMEM: 2358 return (MEM_MC0); 2359 case FW_MEMTYPE_EXTMEM1: 2360 return (MEM_MC1); 2361 default: 2362 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2363 } 2364} 2365 2366/* 2367 * Verify that the memory range specified by the memtype/offset/len pair is 2368 * valid and lies entirely within the memtype specified. The global address of 2369 * the start of the range is returned in addr. 2370 */ 2371static int 2372validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2373 uint32_t *addr) 2374{ 2375 uint32_t em, addr_len, maddr; 2376 2377 /* Memory can only be accessed in naturally aligned 4 byte units */ 2378 if (off & 3 || len & 3 || len == 0) 2379 return (EINVAL); 2380 2381 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2382 switch (fwmtype_to_hwmtype(mtype)) { 2383 case MEM_EDC0: 2384 if (!(em & F_EDRAM0_ENABLE)) 2385 return (EINVAL); 2386 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2387 maddr = G_EDRAM0_BASE(addr_len) << 20; 2388 break; 2389 case MEM_EDC1: 2390 if (!(em & F_EDRAM1_ENABLE)) 2391 return (EINVAL); 2392 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2393 maddr = G_EDRAM1_BASE(addr_len) << 20; 2394 break; 2395 case MEM_MC: 2396 if (!(em & F_EXT_MEM_ENABLE)) 2397 return (EINVAL); 2398 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2399 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2400 break; 2401 case MEM_MC1: 2402 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 2403 return (EINVAL); 2404 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2405 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2406 break; 2407 default: 2408 return (EINVAL); 2409 } 2410 2411 *addr = maddr + off; /* global address */ 2412 return (validate_mem_range(sc, *addr, len)); 2413} 2414 2415static int 2416fixup_devlog_params(struct adapter *sc) 2417{ 2418 struct devlog_params *dparams = &sc->params.devlog; 2419 int rc; 2420 2421 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 2422 dparams->size, &dparams->addr); 2423 2424 return (rc); 2425} 2426 2427static int 2428cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis, 2429 struct intrs_and_queues *iaq) 2430{ 2431 int rc, itype, navail, nrxq10g, nrxq1g, n; 2432 int nofldrxq10g = 0, nofldrxq1g = 0; 2433 2434 bzero(iaq, sizeof(*iaq)); 2435 2436 iaq->ntxq10g = t4_ntxq10g; 2437 iaq->ntxq1g = t4_ntxq1g; 2438 iaq->ntxq_vi = t4_ntxq_vi; 2439 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 2440 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 2441 iaq->nrxq_vi = t4_nrxq_vi; 2442 iaq->rsrv_noflowq = t4_rsrv_noflowq; 2443#ifdef TCP_OFFLOAD 2444 if (is_offload(sc)) { 2445 iaq->nofldtxq10g = t4_nofldtxq10g; 2446 iaq->nofldtxq1g = t4_nofldtxq1g; 2447 iaq->nofldtxq_vi = t4_nofldtxq_vi; 2448 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 2449 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 2450 iaq->nofldrxq_vi = t4_nofldrxq_vi; 2451 } 2452#endif 2453#ifdef DEV_NETMAP 2454 iaq->nnmtxq_vi = t4_nnmtxq_vi; 2455 iaq->nnmrxq_vi = t4_nnmrxq_vi; 2456#endif 2457 2458 for (itype = INTR_MSIX; itype; itype >>= 1) { 2459 2460 if ((itype & t4_intr_types) == 0) 2461 continue; /* not allowed */ 2462 2463 if (itype == INTR_MSIX) 2464 navail = 
pci_msix_count(sc->dev); 2465 else if (itype == INTR_MSI) 2466 navail = pci_msi_count(sc->dev); 2467 else 2468 navail = 1; 2469restart: 2470 if (navail == 0) 2471 continue; 2472 2473 iaq->intr_type = itype; 2474 iaq->intr_flags_10g = 0; 2475 iaq->intr_flags_1g = 0; 2476 2477 /* 2478 * Best option: an interrupt vector for errors, one for the 2479 * firmware event queue, and one for every rxq (NIC and TOE) of 2480 * every VI. The VIs that support netmap use the same 2481 * interrupts for the NIC rx queues and the netmap rx queues 2482 * because only one set of queues is active at a time. 2483 */ 2484 iaq->nirq = T4_EXTRA_INTR; 2485 iaq->nirq += n10g * (nrxq10g + nofldrxq10g); 2486 iaq->nirq += n1g * (nrxq1g + nofldrxq1g); 2487 iaq->nirq += (n10g + n1g) * (num_vis - 1) * 2488 max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */ 2489 iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi; 2490 if (iaq->nirq <= navail && 2491 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2492 iaq->intr_flags_10g = INTR_ALL; 2493 iaq->intr_flags_1g = INTR_ALL; 2494 goto allocate; 2495 } 2496 2497 /* Disable the VIs (and netmap) if there aren't enough intrs */ 2498 if (num_vis > 1) { 2499 device_printf(sc->dev, "virtual interfaces disabled " 2500 "because num_vis=%u with current settings " 2501 "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, " 2502 "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, " 2503 "nnmrxq_vi=%u) would need %u interrupts but " 2504 "only %u are available.\n", num_vis, nrxq10g, 2505 nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi, 2506 iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq, 2507 navail); 2508 num_vis = 1; 2509 iaq->ntxq_vi = iaq->nrxq_vi = 0; 2510 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; 2511 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; 2512 goto restart; 2513 } 2514 2515 /* 2516 * Second best option: a vector for errors, one for the firmware 2517 * event queue, and vectors for either all the NIC rx queues or 2518 * all the TOE rx queues. The queues that don't get vectors 2519 * will forward their interrupts to those that do. 2520 */ 2521 iaq->nirq = T4_EXTRA_INTR; 2522 if (nrxq10g >= nofldrxq10g) { 2523 iaq->intr_flags_10g = INTR_RXQ; 2524 iaq->nirq += n10g * nrxq10g; 2525 } else { 2526 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2527 iaq->nirq += n10g * nofldrxq10g; 2528 } 2529 if (nrxq1g >= nofldrxq1g) { 2530 iaq->intr_flags_1g = INTR_RXQ; 2531 iaq->nirq += n1g * nrxq1g; 2532 } else { 2533 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2534 iaq->nirq += n1g * nofldrxq1g; 2535 } 2536 if (iaq->nirq <= navail && 2537 (itype != INTR_MSI || powerof2(iaq->nirq))) 2538 goto allocate; 2539 2540 /* 2541 * Next best option: an interrupt vector for errors, one for the 2542 * firmware event queue, and at least one per main-VI. At this 2543 * point we know we'll have to downsize nrxq and/or nofldrxq to 2544 * fit what's available to us. 2545 */ 2546 iaq->nirq = T4_EXTRA_INTR; 2547 iaq->nirq += n10g + n1g; 2548 if (iaq->nirq <= navail) { 2549 int leftover = navail - iaq->nirq; 2550 2551 if (n10g > 0) { 2552 int target = max(nrxq10g, nofldrxq10g); 2553 2554 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 2555 INTR_RXQ : INTR_OFLD_RXQ; 2556 2557 n = 1; 2558 while (n < target && leftover >= n10g) { 2559 leftover -= n10g; 2560 iaq->nirq += n10g; 2561 n++; 2562 } 2563 iaq->nrxq10g = min(n, nrxq10g); 2564#ifdef TCP_OFFLOAD 2565 iaq->nofldrxq10g = min(n, nofldrxq10g); 2566#endif 2567 } 2568 2569 if (n1g > 0) { 2570 int target = max(nrxq1g, nofldrxq1g); 2571 2572 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 
2573 INTR_RXQ : INTR_OFLD_RXQ; 2574 2575 n = 1; 2576 while (n < target && leftover >= n1g) { 2577 leftover -= n1g; 2578 iaq->nirq += n1g; 2579 n++; 2580 } 2581 iaq->nrxq1g = min(n, nrxq1g); 2582#ifdef TCP_OFFLOAD 2583 iaq->nofldrxq1g = min(n, nofldrxq1g); 2584#endif 2585 } 2586 2587 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2588 goto allocate; 2589 } 2590 2591 /* 2592 * Least desirable option: one interrupt vector for everything. 2593 */ 2594 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2595 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2596#ifdef TCP_OFFLOAD 2597 if (is_offload(sc)) 2598 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2599#endif 2600allocate: 2601 navail = iaq->nirq; 2602 rc = 0; 2603 if (itype == INTR_MSIX) 2604 rc = pci_alloc_msix(sc->dev, &navail); 2605 else if (itype == INTR_MSI) 2606 rc = pci_alloc_msi(sc->dev, &navail); 2607 2608 if (rc == 0) { 2609 if (navail == iaq->nirq) 2610 return (0); 2611 2612 /* 2613 * Didn't get the number requested. Use whatever number 2614 * the kernel is willing to allocate (it's in navail). 2615 */ 2616 device_printf(sc->dev, "fewer vectors than requested, " 2617 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2618 itype, iaq->nirq, navail); 2619 pci_release_msi(sc->dev); 2620 goto restart; 2621 } 2622 2623 device_printf(sc->dev, 2624 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 2625 itype, rc, iaq->nirq, navail); 2626 } 2627 2628 device_printf(sc->dev, 2629 "failed to find a usable interrupt type. " 2630 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 2631 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2632 2633 return (ENXIO); 2634} 2635 2636#define FW_VERSION(chip) ( \ 2637 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2638 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2639 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2640 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2641#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2642 2643struct fw_info { 2644 uint8_t chip; 2645 char *kld_name; 2646 char *fw_mod_name; 2647 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2648} fw_info[] = { 2649 { 2650 .chip = CHELSIO_T4, 2651 .kld_name = "t4fw_cfg", 2652 .fw_mod_name = "t4fw", 2653 .fw_hdr = { 2654 .chip = FW_HDR_CHIP_T4, 2655 .fw_ver = htobe32_const(FW_VERSION(T4)), 2656 .intfver_nic = FW_INTFVER(T4, NIC), 2657 .intfver_vnic = FW_INTFVER(T4, VNIC), 2658 .intfver_ofld = FW_INTFVER(T4, OFLD), 2659 .intfver_ri = FW_INTFVER(T4, RI), 2660 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2661 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2662 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2663 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2664 }, 2665 }, { 2666 .chip = CHELSIO_T5, 2667 .kld_name = "t5fw_cfg", 2668 .fw_mod_name = "t5fw", 2669 .fw_hdr = { 2670 .chip = FW_HDR_CHIP_T5, 2671 .fw_ver = htobe32_const(FW_VERSION(T5)), 2672 .intfver_nic = FW_INTFVER(T5, NIC), 2673 .intfver_vnic = FW_INTFVER(T5, VNIC), 2674 .intfver_ofld = FW_INTFVER(T5, OFLD), 2675 .intfver_ri = FW_INTFVER(T5, RI), 2676 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2677 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2678 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2679 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2680 }, 2681 } 2682}; 2683 2684static struct fw_info * 2685find_fw_info(int chip) 2686{ 2687 int i; 2688 2689 for (i = 0; i < nitems(fw_info); i++) { 2690 if (fw_info[i].chip == chip) 2691 return (&fw_info[i]); 2692 } 2693 return (NULL); 2694} 2695 2696/* 2697 * Is the given firmware API compatible with the one the driver was 
compiled 2698 * with? 2699 */ 2700static int 2701fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2702{ 2703 2704 /* short circuit if it's the exact same firmware version */ 2705 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2706 return (1); 2707 2708 /* 2709 * XXX: Is this too conservative? Perhaps I should limit this to the 2710 * features that are supported in the driver. 2711 */ 2712#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2713 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2714 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2715 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2716 return (1); 2717#undef SAME_INTF 2718 2719 return (0); 2720} 2721 2722/* 2723 * The firmware in the KLD is usable, but should it be installed? This routine 2724 * explains itself in detail if it indicates the KLD firmware should be 2725 * installed. 2726 */ 2727static int 2728should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2729{ 2730 const char *reason; 2731 2732 if (!card_fw_usable) { 2733 reason = "incompatible or unusable"; 2734 goto install; 2735 } 2736 2737 if (k > c) { 2738 reason = "older than the version bundled with this driver"; 2739 goto install; 2740 } 2741 2742 if (t4_fw_install == 2 && k != c) { 2743 reason = "different than the version bundled with this driver"; 2744 goto install; 2745 } 2746 2747 return (0); 2748 2749install: 2750 if (t4_fw_install == 0) { 2751 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2752 "but the driver is prohibited from installing a different " 2753 "firmware on the card.\n", 2754 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2755 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2756 2757 return (0); 2758 } 2759 2760 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2761 "installing firmware %u.%u.%u.%u on card.\n", 2762 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2763 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2764 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2765 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2766 2767 return (1); 2768} 2769/* 2770 * Establish contact with the firmware and determine if we are the master driver 2771 * or not, and whether we are responsible for chip initialization. 2772 */ 2773static int 2774prep_firmware(struct adapter *sc) 2775{ 2776 const struct firmware *fw = NULL, *default_cfg; 2777 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2778 enum dev_state state; 2779 struct fw_info *fw_info; 2780 struct fw_hdr *card_fw; /* fw on the card */ 2781 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2782 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2783 against */ 2784 2785 /* Contact firmware. */ 2786 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2787 if (rc < 0 || state == DEV_STATE_ERR) { 2788 rc = -rc; 2789 device_printf(sc->dev, 2790 "failed to connect to the firmware: %d, %d.\n", rc, state); 2791 return (rc); 2792 } 2793 pf = rc; 2794 if (pf == sc->mbox) 2795 sc->flags |= MASTER_PF; 2796 else if (state == DEV_STATE_UNINIT) { 2797 /* 2798 * We didn't get to be the master so we definitely won't be 2799 * configuring the chip. It's a bug if someone else hasn't 2800 * configured it already. 
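		 * (t4_fw_hello reported some other PF as the master, yet the
		 * device is still in DEV_STATE_UNINIT.)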
2801 */ 2802 device_printf(sc->dev, "couldn't be master(%d), " 2803 "device not already initialized either(%d).\n", rc, state); 2804 return (EDOOFUS); 2805 } 2806 2807 /* This is the firmware whose headers the driver was compiled against */ 2808 fw_info = find_fw_info(chip_id(sc)); 2809 if (fw_info == NULL) { 2810 device_printf(sc->dev, 2811 "unable to look up firmware information for chip %d.\n", 2812 chip_id(sc)); 2813 return (EINVAL); 2814 } 2815 drv_fw = &fw_info->fw_hdr; 2816 2817 /* 2818 * The firmware KLD contains many modules. The KLD name is also the 2819 * name of the module that contains the default config file. 2820 */ 2821 default_cfg = firmware_get(fw_info->kld_name); 2822 2823 /* Read the header of the firmware on the card */ 2824 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 2825 rc = -t4_read_flash(sc, FLASH_FW_START, 2826 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 2827 if (rc == 0) 2828 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 2829 else { 2830 device_printf(sc->dev, 2831 "Unable to read card's firmware header: %d\n", rc); 2832 card_fw_usable = 0; 2833 } 2834 2835 /* This is the firmware in the KLD */ 2836 fw = firmware_get(fw_info->fw_mod_name); 2837 if (fw != NULL) { 2838 kld_fw = (const void *)fw->data; 2839 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 2840 } else { 2841 kld_fw = NULL; 2842 kld_fw_usable = 0; 2843 } 2844 2845 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2846 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 2847 /* 2848 * Common case: the firmware on the card is an exact match and 2849 * the KLD is an exact match too, or the KLD is 2850 * absent/incompatible. Note that t4_fw_install = 2 is ignored 2851 * here -- use cxgbetool loadfw if you want to reinstall the 2852 * same firmware as the one on the card. 2853 */ 2854 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 2855 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 2856 be32toh(card_fw->fw_ver))) { 2857 2858 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 2859 if (rc != 0) { 2860 device_printf(sc->dev, 2861 "failed to install firmware: %d\n", rc); 2862 goto done; 2863 } 2864 2865 /* Installed successfully, update the cached header too. */ 2866 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 2867 card_fw_usable = 1; 2868 need_fw_reset = 0; /* already reset as part of load_fw */ 2869 } 2870 2871 if (!card_fw_usable) { 2872 uint32_t d, c, k; 2873 2874 d = ntohl(drv_fw->fw_ver); 2875 c = ntohl(card_fw->fw_ver); 2876 k = kld_fw ? ntohl(kld_fw->fw_ver) : 0; 2877 2878 device_printf(sc->dev, "Cannot find a usable firmware: " 2879 "fw_install %d, chip state %d, " 2880 "driver compiled with %d.%d.%d.%d, " 2881 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 2882 t4_fw_install, state, 2883 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 2884 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 2885 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2886 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 2887 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2888 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2889 rc = EINVAL; 2890 goto done; 2891 } 2892 2893 /* We're using whatever's on the card and it's known to be good. 
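	 * Record the firmware, TP microcode, and expansion ROM versions in
	 * the softc, both raw and as printable strings.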
*/ 2894 sc->params.fw_vers = ntohl(card_fw->fw_ver); 2895 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 2896 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2897 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2898 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2899 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 2900 2901 t4_get_tp_version(sc, &sc->params.tp_vers); 2902 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", 2903 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 2904 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 2905 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 2906 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 2907 2908 if (t4_get_exprom_version(sc, &sc->params.exprom_vers) != 0) 2909 sc->params.exprom_vers = 0; 2910 else { 2911 snprintf(sc->exprom_version, sizeof(sc->exprom_version), 2912 "%u.%u.%u.%u", 2913 G_FW_HDR_FW_VER_MAJOR(sc->params.exprom_vers), 2914 G_FW_HDR_FW_VER_MINOR(sc->params.exprom_vers), 2915 G_FW_HDR_FW_VER_MICRO(sc->params.exprom_vers), 2916 G_FW_HDR_FW_VER_BUILD(sc->params.exprom_vers)); 2917 } 2918 2919 /* Reset device */ 2920 if (need_fw_reset && 2921 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 2922 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 2923 if (rc != ETIMEDOUT && rc != EIO) 2924 t4_fw_bye(sc, sc->mbox); 2925 goto done; 2926 } 2927 sc->flags |= FW_OK; 2928 2929 rc = get_params__pre_init(sc); 2930 if (rc != 0) 2931 goto done; /* error message displayed already */ 2932 2933 /* Partition adapter resources as specified in the config file. */ 2934 if (state == DEV_STATE_UNINIT) { 2935 2936 KASSERT(sc->flags & MASTER_PF, 2937 ("%s: trying to change chip settings when not master.", 2938 __func__)); 2939 2940 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 2941 if (rc != 0) 2942 goto done; /* error message displayed already */ 2943 2944 t4_tweak_chip_settings(sc); 2945 2946 /* get basic stuff going */ 2947 rc = -t4_fw_initialize(sc, sc->mbox); 2948 if (rc != 0) { 2949 device_printf(sc->dev, "fw init failed: %d.\n", rc); 2950 goto done; 2951 } 2952 } else { 2953 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 2954 sc->cfcsum = 0; 2955 } 2956 2957done: 2958 free(card_fw, M_CXGBE); 2959 if (fw != NULL) 2960 firmware_put(fw, FIRMWARE_UNLOAD); 2961 if (default_cfg != NULL) 2962 firmware_put(default_cfg, FIRMWARE_UNLOAD); 2963 2964 return (rc); 2965} 2966 2967#define FW_PARAM_DEV(param) \ 2968 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 2969 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 2970#define FW_PARAM_PFVF(param) \ 2971 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 2972 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 2973 2974/* 2975 * Partition chip resources for use between various PFs, VFs, etc. 2976 */ 2977static int 2978partition_resources(struct adapter *sc, const struct firmware *default_cfg, 2979 const char *name_prefix) 2980{ 2981 const struct firmware *cfg = NULL; 2982 int rc = 0; 2983 struct fw_caps_config_cmd caps; 2984 uint32_t mtype, moff, finicsum, cfcsum; 2985 2986 /* 2987 * Figure out what configuration file to use. Pick the default config 2988 * file for the card if the user hasn't specified one explicitly. 2989 */ 2990 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 2991 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 2992 /* Card specific overrides go here. 
 */ 2993 if (pci_get_device(sc->dev) == 0x440a) 2994 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 2995 if (is_fpga(sc)) 2996 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 2997 } 2998 2999 /* 3000 * We need to load another module if the profile is anything except 3001 * "default" or "flash". 3002 */ 3003 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) != 0 && 3004 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3005 char s[32]; 3006 3007 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 3008 cfg = firmware_get(s); 3009 if (cfg == NULL) { 3010 if (default_cfg != NULL) { 3011 device_printf(sc->dev, 3012 "unable to load module \"%s\" for " 3013 "configuration profile \"%s\", will use " 3014 "the default config file instead.\n", 3015 s, sc->cfg_file); 3016 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3017 "%s", DEFAULT_CF); 3018 } else { 3019 device_printf(sc->dev, 3020 "unable to load module \"%s\" for " 3021 "configuration profile \"%s\", will use " 3022 "the config file on the card's flash " 3023 "instead.\n", s, sc->cfg_file); 3024 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 3025 "%s", FLASH_CF); 3026 } 3027 } 3028 } 3029 3030 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 3031 default_cfg == NULL) { 3032 device_printf(sc->dev, 3033 "default config file not available, will use the config " 3034 "file on the card's flash instead.\n"); 3035 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 3036 } 3037 3038 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 3039 u_int cflen; 3040 const uint32_t *cfdata; 3041 uint32_t param, val, addr; 3042 3043 KASSERT(cfg != NULL || default_cfg != NULL, 3044 ("%s: no config to upload", __func__)); 3045 3046 /* 3047 * Ask the firmware where it wants us to upload the config file. 3048 */ 3049 param = FW_PARAM_DEV(CF); 3050 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3051 if (rc != 0) { 3052 /* No support for config file? Shouldn't happen. */ 3053 device_printf(sc->dev, 3054 "failed to query config file location: %d.\n", rc); 3055 goto done; 3056 } 3057 mtype = G_FW_PARAMS_PARAM_Y(val); 3058 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 3059 3060 /* 3061 * XXX: sheer laziness. We deliberately added 4 bytes of 3062 * useless stuffing/comments at the end of the config file so 3063 * it's ok to simply throw away the last remaining bytes when 3064 * the config file is not an exact multiple of 4. This also 3065 * helps with the validate_mt_off_len check. 3066 */ 3067 if (cfg != NULL) { 3068 cflen = cfg->datasize & ~3; 3069 cfdata = cfg->data; 3070 } else { 3071 cflen = default_cfg->datasize & ~3; 3072 cfdata = default_cfg->data; 3073 } 3074 3075 if (cflen > FLASH_CFG_MAX_SIZE) { 3076 device_printf(sc->dev, 3077 "config file too long (%d, max allowed is %d). " 3078 "Will try to use the config on the card, if any.\n", 3079 cflen, FLASH_CFG_MAX_SIZE); 3080 goto use_config_on_flash; 3081 } 3082 3083 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 3084 if (rc != 0) { 3085 device_printf(sc->dev, 3086 "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 3087 "Will try to use the config on the card, if any.\n", 3088 __func__, mtype, moff, cflen, rc); 3089 goto use_config_on_flash; 3090 } 3091 write_via_memwin(sc, 2, addr, cfdata, cflen); 3092 } else { 3093use_config_on_flash: 3094 mtype = FW_MEMTYPE_FLASH; 3095 moff = t4_flash_cfg_addr(sc); 3096 } 3097 3098 bzero(&caps, sizeof(caps)); 3099 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3100 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3101 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 3102 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 3103 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 3104 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3105 if (rc != 0) { 3106 device_printf(sc->dev, 3107 "failed to pre-process config file: %d " 3108 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 3109 goto done; 3110 } 3111 3112 finicsum = be32toh(caps.finicsum); 3113 cfcsum = be32toh(caps.cfcsum); 3114 if (finicsum != cfcsum) { 3115 device_printf(sc->dev, 3116 "WARNING: config file checksum mismatch: %08x %08x\n", 3117 finicsum, cfcsum); 3118 } 3119 sc->cfcsum = cfcsum; 3120 3121#define LIMIT_CAPS(x) do { \ 3122 caps.x &= htobe16(t4_##x##_allowed); \ 3123} while (0) 3124 3125 /* 3126 * Let the firmware know what features will (not) be used so it can tune 3127 * things accordingly. 3128 */ 3129 LIMIT_CAPS(nbmcaps); 3130 LIMIT_CAPS(linkcaps); 3131 LIMIT_CAPS(switchcaps); 3132 LIMIT_CAPS(niccaps); 3133 LIMIT_CAPS(toecaps); 3134 LIMIT_CAPS(rdmacaps); 3135 LIMIT_CAPS(tlscaps); 3136 LIMIT_CAPS(iscsicaps); 3137 LIMIT_CAPS(fcoecaps); 3138#undef LIMIT_CAPS 3139 3140 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3141 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 3142 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3143 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 3144 if (rc != 0) { 3145 device_printf(sc->dev, 3146 "failed to process config file: %d.\n", rc); 3147 } 3148done: 3149 if (cfg != NULL) 3150 firmware_put(cfg, FIRMWARE_UNLOAD); 3151 return (rc); 3152} 3153 3154/* 3155 * Retrieve parameters that are needed (or nice to have) very early. 3156 */ 3157static int 3158get_params__pre_init(struct adapter *sc) 3159{ 3160 int rc; 3161 uint32_t param[2], val[2]; 3162 3163 param[0] = FW_PARAM_DEV(PORTVEC); 3164 param[1] = FW_PARAM_DEV(CCLK); 3165 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3166 if (rc != 0) { 3167 device_printf(sc->dev, 3168 "failed to query parameters (pre_init): %d.\n", rc); 3169 return (rc); 3170 } 3171 3172 sc->params.portvec = val[0]; 3173 sc->params.nports = bitcount32(val[0]); 3174 sc->params.vpd.cclk = val[1]; 3175 3176 /* Read device log parameters. */ 3177 rc = -t4_init_devlog_params(sc, 1); 3178 if (rc == 0) 3179 fixup_devlog_params(sc); 3180 else { 3181 device_printf(sc->dev, 3182 "failed to get devlog parameters: %d.\n", rc); 3183 rc = 0; /* devlog isn't critical for device operation */ 3184 } 3185 3186 return (rc); 3187} 3188 3189/* 3190 * Retrieve various parameters that are of interest to the driver. The device 3191 * has been initialized by the firmware at this point. 
3192 */ 3193static int 3194get_params__post_init(struct adapter *sc) 3195{ 3196 int rc; 3197 uint32_t param[7], val[7]; 3198 struct fw_caps_config_cmd caps; 3199 3200 param[0] = FW_PARAM_PFVF(IQFLINT_START); 3201 param[1] = FW_PARAM_PFVF(EQ_START); 3202 param[2] = FW_PARAM_PFVF(FILTER_START); 3203 param[3] = FW_PARAM_PFVF(FILTER_END); 3204 param[4] = FW_PARAM_PFVF(L2T_START); 3205 param[5] = FW_PARAM_PFVF(L2T_END); 3206 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3207 if (rc != 0) { 3208 device_printf(sc->dev, 3209 "failed to query parameters (post_init): %d.\n", rc); 3210 return (rc); 3211 } 3212 3213 sc->sge.iq_start = val[0]; 3214 sc->sge.eq_start = val[1]; 3215 sc->tids.ftid_base = val[2]; 3216 sc->tids.nftids = val[3] - val[2] + 1; 3217 sc->params.ftid_min = val[2]; 3218 sc->params.ftid_max = val[3]; 3219 sc->vres.l2t.start = val[4]; 3220 sc->vres.l2t.size = val[5] - val[4] + 1; 3221 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 3222 ("%s: L2 table size (%u) larger than expected (%u)", 3223 __func__, sc->vres.l2t.size, L2T_SIZE)); 3224 3225 /* get capabilites */ 3226 bzero(&caps, sizeof(caps)); 3227 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3228 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3229 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3230 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3231 if (rc != 0) { 3232 device_printf(sc->dev, 3233 "failed to get card capabilities: %d.\n", rc); 3234 return (rc); 3235 } 3236 3237#define READ_CAPS(x) do { \ 3238 sc->x = htobe16(caps.x); \ 3239} while (0) 3240 READ_CAPS(nbmcaps); 3241 READ_CAPS(linkcaps); 3242 READ_CAPS(switchcaps); 3243 READ_CAPS(niccaps); 3244 READ_CAPS(toecaps); 3245 READ_CAPS(rdmacaps); 3246 READ_CAPS(tlscaps); 3247 READ_CAPS(iscsicaps); 3248 READ_CAPS(fcoecaps); 3249 3250 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3251 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3252 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3253 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3254 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3255 if (rc != 0) { 3256 device_printf(sc->dev, 3257 "failed to query NIC parameters: %d.\n", rc); 3258 return (rc); 3259 } 3260 sc->tids.etid_base = val[0]; 3261 sc->params.etid_min = val[0]; 3262 sc->tids.netids = val[1] - val[0] + 1; 3263 sc->params.netids = sc->tids.netids; 3264 sc->params.eo_wr_cred = val[2]; 3265 sc->params.ethoffload = 1; 3266 } 3267 3268 if (sc->toecaps) { 3269 /* query offload-related parameters */ 3270 param[0] = FW_PARAM_DEV(NTID); 3271 param[1] = FW_PARAM_PFVF(SERVER_START); 3272 param[2] = FW_PARAM_PFVF(SERVER_END); 3273 param[3] = FW_PARAM_PFVF(TDDP_START); 3274 param[4] = FW_PARAM_PFVF(TDDP_END); 3275 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3276 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3277 if (rc != 0) { 3278 device_printf(sc->dev, 3279 "failed to query TOE parameters: %d.\n", rc); 3280 return (rc); 3281 } 3282 sc->tids.ntids = val[0]; 3283 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3284 sc->tids.stid_base = val[1]; 3285 sc->tids.nstids = val[2] - val[1] + 1; 3286 sc->vres.ddp.start = val[3]; 3287 sc->vres.ddp.size = val[4] - val[3] + 1; 3288 sc->params.ofldq_wr_cred = val[5]; 3289 sc->params.offload = 1; 3290 } 3291 if (sc->rdmacaps) { 3292 param[0] = FW_PARAM_PFVF(STAG_START); 3293 param[1] = FW_PARAM_PFVF(STAG_END); 3294 param[2] = FW_PARAM_PFVF(RQ_START); 3295 param[3] = FW_PARAM_PFVF(RQ_END); 3296 param[4] = FW_PARAM_PFVF(PBL_START); 3297 param[5] = FW_PARAM_PFVF(PBL_END); 3298 rc = 
 -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3299 if (rc != 0) { 3300 device_printf(sc->dev, 3301 "failed to query RDMA parameters(1): %d.\n", rc); 3302 return (rc); 3303 } 3304 sc->vres.stag.start = val[0]; 3305 sc->vres.stag.size = val[1] - val[0] + 1; 3306 sc->vres.rq.start = val[2]; 3307 sc->vres.rq.size = val[3] - val[2] + 1; 3308 sc->vres.pbl.start = val[4]; 3309 sc->vres.pbl.size = val[5] - val[4] + 1; 3310 3311 param[0] = FW_PARAM_PFVF(SQRQ_START); 3312 param[1] = FW_PARAM_PFVF(SQRQ_END); 3313 param[2] = FW_PARAM_PFVF(CQ_START); 3314 param[3] = FW_PARAM_PFVF(CQ_END); 3315 param[4] = FW_PARAM_PFVF(OCQ_START); 3316 param[5] = FW_PARAM_PFVF(OCQ_END); 3317 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3318 if (rc != 0) { 3319 device_printf(sc->dev, 3320 "failed to query RDMA parameters(2): %d.\n", rc); 3321 return (rc); 3322 } 3323 sc->vres.qp.start = val[0]; 3324 sc->vres.qp.size = val[1] - val[0] + 1; 3325 sc->vres.cq.start = val[2]; 3326 sc->vres.cq.size = val[3] - val[2] + 1; 3327 sc->vres.ocq.start = val[4]; 3328 sc->vres.ocq.size = val[5] - val[4] + 1; 3329 } 3330 if (sc->iscsicaps) { 3331 param[0] = FW_PARAM_PFVF(ISCSI_START); 3332 param[1] = FW_PARAM_PFVF(ISCSI_END); 3333 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3334 if (rc != 0) { 3335 device_printf(sc->dev, 3336 "failed to query iSCSI parameters: %d.\n", rc); 3337 return (rc); 3338 } 3339 sc->vres.iscsi.start = val[0]; 3340 sc->vres.iscsi.size = val[1] - val[0] + 1; 3341 } 3342 3343 /* 3344 * We've got the params we wanted to query via the firmware. Now grab 3345 * some others directly from the chip. 3346 */ 3347 rc = t4_read_chip_settings(sc); 3348 3349 return (rc); 3350} 3351 3352static int 3353set_params__post_init(struct adapter *sc) 3354{ 3355 uint32_t param, val; 3356 3357 /* ask for encapsulated CPLs */ 3358 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3359 val = 1; 3360 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3361 3362 return (0); 3363} 3364 3365#undef FW_PARAM_PFVF 3366#undef FW_PARAM_DEV 3367 3368static void 3369t4_set_desc(struct adapter *sc) 3370{ 3371 char buf[128]; 3372 struct adapter_params *p = &sc->params; 3373 3374 snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, " 3375 "P/N:%s, E/C:%s", p->vpd.id, is_offload(sc) ?
"R" : "", 3376 chip_rev(sc), p->vpd.sn, p->vpd.pn, p->vpd.ec); 3377 3378 device_set_desc_copy(sc->dev, buf); 3379} 3380 3381static void 3382build_medialist(struct port_info *pi, struct ifmedia *media) 3383{ 3384 int m; 3385 3386 PORT_LOCK(pi); 3387 3388 ifmedia_removeall(media); 3389 3390 m = IFM_ETHER | IFM_FDX; 3391 3392 switch(pi->port_type) { 3393 case FW_PORT_TYPE_BT_XFI: 3394 case FW_PORT_TYPE_BT_XAUI: 3395 ifmedia_add(media, m | IFM_10G_T, 0, NULL); 3396 /* fall through */ 3397 3398 case FW_PORT_TYPE_BT_SGMII: 3399 ifmedia_add(media, m | IFM_1000_T, 0, NULL); 3400 ifmedia_add(media, m | IFM_100_TX, 0, NULL); 3401 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 3402 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 3403 break; 3404 3405 case FW_PORT_TYPE_CX4: 3406 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL); 3407 ifmedia_set(media, m | IFM_10G_CX4); 3408 break; 3409 3410 case FW_PORT_TYPE_QSFP_10G: 3411 case FW_PORT_TYPE_SFP: 3412 case FW_PORT_TYPE_FIBER_XFI: 3413 case FW_PORT_TYPE_FIBER_XAUI: 3414 switch (pi->mod_type) { 3415 3416 case FW_PORT_MOD_TYPE_LR: 3417 ifmedia_add(media, m | IFM_10G_LR, 0, NULL); 3418 ifmedia_set(media, m | IFM_10G_LR); 3419 break; 3420 3421 case FW_PORT_MOD_TYPE_SR: 3422 ifmedia_add(media, m | IFM_10G_SR, 0, NULL); 3423 ifmedia_set(media, m | IFM_10G_SR); 3424 break; 3425 3426 case FW_PORT_MOD_TYPE_LRM: 3427 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3428 ifmedia_set(media, m | IFM_10G_LRM); 3429 break; 3430 3431 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3432 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3433 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3434 ifmedia_set(media, m | IFM_10G_TWINAX); 3435 break; 3436 3437 case FW_PORT_MOD_TYPE_NONE: 3438 m &= ~IFM_FDX; 3439 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3440 ifmedia_set(media, m | IFM_NONE); 3441 break; 3442 3443 case FW_PORT_MOD_TYPE_NA: 3444 case FW_PORT_MOD_TYPE_ER: 3445 default: 3446 device_printf(pi->dev, 3447 "unknown port_type (%d), mod_type (%d)\n", 3448 pi->port_type, pi->mod_type); 3449 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3450 ifmedia_set(media, m | IFM_UNKNOWN); 3451 break; 3452 } 3453 break; 3454 3455 case FW_PORT_TYPE_QSFP: 3456 switch (pi->mod_type) { 3457 3458 case FW_PORT_MOD_TYPE_LR: 3459 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3460 ifmedia_set(media, m | IFM_40G_LR4); 3461 break; 3462 3463 case FW_PORT_MOD_TYPE_SR: 3464 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3465 ifmedia_set(media, m | IFM_40G_SR4); 3466 break; 3467 3468 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3469 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3470 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3471 ifmedia_set(media, m | IFM_40G_CR4); 3472 break; 3473 3474 case FW_PORT_MOD_TYPE_NONE: 3475 m &= ~IFM_FDX; 3476 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3477 ifmedia_set(media, m | IFM_NONE); 3478 break; 3479 3480 default: 3481 device_printf(pi->dev, 3482 "unknown port_type (%d), mod_type (%d)\n", 3483 pi->port_type, pi->mod_type); 3484 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3485 ifmedia_set(media, m | IFM_UNKNOWN); 3486 break; 3487 } 3488 break; 3489 3490 default: 3491 device_printf(pi->dev, 3492 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3493 pi->mod_type); 3494 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3495 ifmedia_set(media, m | IFM_UNKNOWN); 3496 break; 3497 } 3498 3499 PORT_UNLOCK(pi); 3500} 3501 3502#define FW_MAC_EXACT_CHUNK 7 3503 3504/* 3505 * Program the port's XGMAC based on parameters in ifnet. 
The caller also 3506 * indicates which parameters should be programmed (the rest are left alone). 3507 */ 3508int 3509update_mac_settings(struct ifnet *ifp, int flags) 3510{ 3511 int rc = 0; 3512 struct vi_info *vi = ifp->if_softc; 3513 struct port_info *pi = vi->pi; 3514 struct adapter *sc = pi->adapter; 3515 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3516 3517 ASSERT_SYNCHRONIZED_OP(sc); 3518 KASSERT(flags, ("%s: not told what to update.", __func__)); 3519 3520 if (flags & XGMAC_MTU) 3521 mtu = ifp->if_mtu; 3522 3523 if (flags & XGMAC_PROMISC) 3524 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 3525 3526 if (flags & XGMAC_ALLMULTI) 3527 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 3528 3529 if (flags & XGMAC_VLANEX) 3530 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 3531 3532 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 3533 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 3534 allmulti, 1, vlanex, false); 3535 if (rc) { 3536 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 3537 rc); 3538 return (rc); 3539 } 3540 } 3541 3542 if (flags & XGMAC_UCADDR) { 3543 uint8_t ucaddr[ETHER_ADDR_LEN]; 3544 3545 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 3546 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 3547 ucaddr, true, true); 3548 if (rc < 0) { 3549 rc = -rc; 3550 if_printf(ifp, "change_mac failed: %d\n", rc); 3551 return (rc); 3552 } else { 3553 vi->xact_addr_filt = rc; 3554 rc = 0; 3555 } 3556 } 3557 3558 if (flags & XGMAC_MCADDRS) { 3559 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 3560 int del = 1; 3561 uint64_t hash = 0; 3562 struct ifmultiaddr *ifma; 3563 int i = 0, j; 3564 3565 if_maddr_rlock(ifp); 3566 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3567 if (ifma->ifma_addr->sa_family != AF_LINK) 3568 continue; 3569 mcaddr[i] = 3570 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 3571 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 3572 i++; 3573 3574 if (i == FW_MAC_EXACT_CHUNK) { 3575 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 3576 del, i, mcaddr, NULL, &hash, 0); 3577 if (rc < 0) { 3578 rc = -rc; 3579 for (j = 0; j < i; j++) { 3580 if_printf(ifp, 3581 "failed to add mc address" 3582 " %02x:%02x:%02x:" 3583 "%02x:%02x:%02x rc=%d\n", 3584 mcaddr[j][0], mcaddr[j][1], 3585 mcaddr[j][2], mcaddr[j][3], 3586 mcaddr[j][4], mcaddr[j][5], 3587 rc); 3588 } 3589 goto mcfail; 3590 } 3591 del = 0; 3592 i = 0; 3593 } 3594 } 3595 if (i > 0) { 3596 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 3597 mcaddr, NULL, &hash, 0); 3598 if (rc < 0) { 3599 rc = -rc; 3600 for (j = 0; j < i; j++) { 3601 if_printf(ifp, 3602 "failed to add mc address" 3603 " %02x:%02x:%02x:" 3604 "%02x:%02x:%02x rc=%d\n", 3605 mcaddr[j][0], mcaddr[j][1], 3606 mcaddr[j][2], mcaddr[j][3], 3607 mcaddr[j][4], mcaddr[j][5], 3608 rc); 3609 } 3610 goto mcfail; 3611 } 3612 } 3613 3614 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 3615 if (rc != 0) 3616 if_printf(ifp, "failed to set mc address hash: %d", rc); 3617mcfail: 3618 if_maddr_runlock(ifp); 3619 } 3620 3621 return (rc); 3622} 3623 3624/* 3625 * {begin|end}_synchronized_op must be called from the same thread. 3626 */ 3627int 3628begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 3629 char *wmesg) 3630{ 3631 int rc, pri; 3632 3633#ifdef WITNESS 3634 /* the caller thinks it's ok to sleep, but is it really? 
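	 * WITNESS_WARN below flags the case where this thread still holds a
	 * non-sleepable lock, i.e. where sleeping here would in fact be unsafe.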
*/ 3635 if (flags & SLEEP_OK) 3636 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3637 "begin_synchronized_op"); 3638#endif 3639 3640 if (INTR_OK) 3641 pri = PCATCH; 3642 else 3643 pri = 0; 3644 3645 ADAPTER_LOCK(sc); 3646 for (;;) { 3647 3648 if (vi && IS_DOOMED(vi)) { 3649 rc = ENXIO; 3650 goto done; 3651 } 3652 3653 if (!IS_BUSY(sc)) { 3654 rc = 0; 3655 break; 3656 } 3657 3658 if (!(flags & SLEEP_OK)) { 3659 rc = EBUSY; 3660 goto done; 3661 } 3662 3663 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 3664 rc = EINTR; 3665 goto done; 3666 } 3667 } 3668 3669 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 3670 SET_BUSY(sc); 3671#ifdef INVARIANTS 3672 sc->last_op = wmesg; 3673 sc->last_op_thr = curthread; 3674 sc->last_op_flags = flags; 3675#endif 3676 3677done: 3678 if (!(flags & HOLD_LOCK) || rc) 3679 ADAPTER_UNLOCK(sc); 3680 3681 return (rc); 3682} 3683 3684/* 3685 * Tell if_ioctl and if_init that the VI is going away. This is 3686 * special variant of begin_synchronized_op and must be paired with a 3687 * call to end_synchronized_op. 3688 */ 3689void 3690doom_vi(struct adapter *sc, struct vi_info *vi) 3691{ 3692 3693 ADAPTER_LOCK(sc); 3694 SET_DOOMED(vi); 3695 wakeup(&sc->flags); 3696 while (IS_BUSY(sc)) 3697 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 3698 SET_BUSY(sc); 3699#ifdef INVARIANTS 3700 sc->last_op = "t4detach"; 3701 sc->last_op_thr = curthread; 3702 sc->last_op_flags = 0; 3703#endif 3704 ADAPTER_UNLOCK(sc); 3705} 3706 3707/* 3708 * {begin|end}_synchronized_op must be called from the same thread. 3709 */ 3710void 3711end_synchronized_op(struct adapter *sc, int flags) 3712{ 3713 3714 if (flags & LOCK_HELD) 3715 ADAPTER_LOCK_ASSERT_OWNED(sc); 3716 else 3717 ADAPTER_LOCK(sc); 3718 3719 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 3720 CLR_BUSY(sc); 3721 wakeup(&sc->flags); 3722 ADAPTER_UNLOCK(sc); 3723} 3724 3725static int 3726cxgbe_init_synchronized(struct vi_info *vi) 3727{ 3728 struct port_info *pi = vi->pi; 3729 struct adapter *sc = pi->adapter; 3730 struct ifnet *ifp = vi->ifp; 3731 int rc = 0, i; 3732 struct sge_txq *txq; 3733 3734 ASSERT_SYNCHRONIZED_OP(sc); 3735 3736 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3737 return (0); /* already running */ 3738 3739 if (!(sc->flags & FULL_INIT_DONE) && 3740 ((rc = adapter_full_init(sc)) != 0)) 3741 return (rc); /* error message displayed already */ 3742 3743 if (!(vi->flags & VI_INIT_DONE) && 3744 ((rc = vi_full_init(vi)) != 0)) 3745 return (rc); /* error message displayed already */ 3746 3747 rc = update_mac_settings(ifp, XGMAC_ALL); 3748 if (rc) 3749 goto done; /* error message displayed already */ 3750 3751 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 3752 if (rc != 0) { 3753 if_printf(ifp, "enable_vi failed: %d\n", rc); 3754 goto done; 3755 } 3756 3757 /* 3758 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 3759 * if this changes. 3760 */ 3761 3762 for_each_txq(vi, i, txq) { 3763 TXQ_LOCK(txq); 3764 txq->eq.flags |= EQ_ENABLED; 3765 TXQ_UNLOCK(txq); 3766 } 3767 3768 /* 3769 * The first iq of the first port to come up is used for tracing. 3770 */ 3771 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 3772 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 3773 t4_write_reg(sc, is_t4(sc) ? 
A_MPS_TRC_RSS_CONTROL : 3774 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 3775 V_QUEUENUMBER(sc->traceq)); 3776 pi->flags |= HAS_TRACEQ; 3777 } 3778 3779 /* all ok */ 3780 PORT_LOCK(pi); 3781 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3782 pi->up_vis++; 3783 3784 if (pi->nvi > 1) 3785 callout_reset(&vi->tick, hz, vi_tick, vi); 3786 else 3787 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 3788 PORT_UNLOCK(pi); 3789done: 3790 if (rc != 0) 3791 cxgbe_uninit_synchronized(vi); 3792 3793 return (rc); 3794} 3795 3796/* 3797 * Idempotent. 3798 */ 3799static int 3800cxgbe_uninit_synchronized(struct vi_info *vi) 3801{ 3802 struct port_info *pi = vi->pi; 3803 struct adapter *sc = pi->adapter; 3804 struct ifnet *ifp = vi->ifp; 3805 int rc, i; 3806 struct sge_txq *txq; 3807 3808 ASSERT_SYNCHRONIZED_OP(sc); 3809 3810 if (!(vi->flags & VI_INIT_DONE)) { 3811 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 3812 ("uninited VI is running")); 3813 return (0); 3814 } 3815 3816 /* 3817 * Disable the VI so that all its data in either direction is discarded 3818 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 3819 * tick) intact as the TP can deliver negative advice or data that it's 3820 * holding in its RAM (for an offloaded connection) even after the VI is 3821 * disabled. 3822 */ 3823 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 3824 if (rc) { 3825 if_printf(ifp, "disable_vi failed: %d\n", rc); 3826 return (rc); 3827 } 3828 3829 for_each_txq(vi, i, txq) { 3830 TXQ_LOCK(txq); 3831 txq->eq.flags &= ~EQ_ENABLED; 3832 TXQ_UNLOCK(txq); 3833 } 3834 3835 PORT_LOCK(pi); 3836 if (pi->nvi == 1) 3837 callout_stop(&pi->tick); 3838 else 3839 callout_stop(&vi->tick); 3840 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3841 PORT_UNLOCK(pi); 3842 return (0); 3843 } 3844 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3845 pi->up_vis--; 3846 if (pi->up_vis > 0) { 3847 PORT_UNLOCK(pi); 3848 return (0); 3849 } 3850 PORT_UNLOCK(pi); 3851 3852 pi->link_cfg.link_ok = 0; 3853 pi->link_cfg.speed = 0; 3854 pi->linkdnrc = -1; 3855 t4_os_link_changed(sc, pi->port_id, 0, -1); 3856 3857 return (0); 3858} 3859 3860/* 3861 * It is ok for this function to fail midway and return right away. t4_detach 3862 * will walk the entire sc->irq list and clean up whatever is valid. 3863 */ 3864static int 3865setup_intr_handlers(struct adapter *sc) 3866{ 3867 int rc, rid, p, q, v; 3868 char s[8]; 3869 struct irq *irq; 3870 struct port_info *pi; 3871 struct vi_info *vi; 3872 struct sge *sge = &sc->sge; 3873 struct sge_rxq *rxq; 3874#ifdef TCP_OFFLOAD 3875 struct sge_ofld_rxq *ofld_rxq; 3876#endif 3877#ifdef DEV_NETMAP 3878 struct sge_nm_rxq *nm_rxq; 3879#endif 3880#ifdef RSS 3881 int nbuckets = rss_getnumbuckets(); 3882#endif 3883 3884 /* 3885 * Setup interrupts. 3886 */ 3887 irq = &sc->irq[0]; 3888 rid = sc->intr_type == INTR_INTX ? 0 : 1; 3889 if (sc->intr_count == 1) 3890 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 3891 3892 /* Multiple interrupts. 
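	 * Layout: the first vector is for adapter-wide errors, the second for
	 * the firmware event queue, and the rest are spread over the
	 * interrupt-driven rx queues of each VI, in port/VI order.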
*/ 3893 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 3894 ("%s: too few intr.", __func__)); 3895 3896 /* The first one is always error intr */ 3897 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 3898 if (rc != 0) 3899 return (rc); 3900 irq++; 3901 rid++; 3902 3903 /* The second one is always the firmware event queue */ 3904 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); 3905 if (rc != 0) 3906 return (rc); 3907 irq++; 3908 rid++; 3909 3910 for_each_port(sc, p) { 3911 pi = sc->port[p]; 3912 for_each_vi(pi, v, vi) { 3913 vi->first_intr = rid - 1; 3914 3915 if (vi->nnmrxq > 0) { 3916 int n = max(vi->nrxq, vi->nnmrxq); 3917 3918 MPASS(vi->flags & INTR_RXQ); 3919 3920 rxq = &sge->rxq[vi->first_rxq]; 3921#ifdef DEV_NETMAP 3922 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; 3923#endif 3924 for (q = 0; q < n; q++) { 3925 snprintf(s, sizeof(s), "%x%c%x", p, 3926 'a' + v, q); 3927 if (q < vi->nrxq) 3928 irq->rxq = rxq++; 3929#ifdef DEV_NETMAP 3930 if (q < vi->nnmrxq) 3931 irq->nm_rxq = nm_rxq++; 3932#endif 3933 rc = t4_alloc_irq(sc, irq, rid, 3934 t4_vi_intr, irq, s); 3935 if (rc != 0) 3936 return (rc); 3937 irq++; 3938 rid++; 3939 vi->nintr++; 3940 } 3941 } else if (vi->flags & INTR_RXQ) { 3942 for_each_rxq(vi, q, rxq) { 3943 snprintf(s, sizeof(s), "%x%c%x", p, 3944 'a' + v, q); 3945 rc = t4_alloc_irq(sc, irq, rid, 3946 t4_intr, rxq, s); 3947 if (rc != 0) 3948 return (rc); 3949#ifdef RSS 3950 bus_bind_intr(sc->dev, irq->res, 3951 rss_getcpu(q % nbuckets)); 3952#endif 3953 irq++; 3954 rid++; 3955 vi->nintr++; 3956 } 3957 } 3958#ifdef TCP_OFFLOAD 3959 if (vi->flags & INTR_OFLD_RXQ) { 3960 for_each_ofld_rxq(vi, q, ofld_rxq) { 3961 snprintf(s, sizeof(s), "%x%c%x", p, 3962 'A' + v, q); 3963 rc = t4_alloc_irq(sc, irq, rid, 3964 t4_intr, ofld_rxq, s); 3965 if (rc != 0) 3966 return (rc); 3967 irq++; 3968 rid++; 3969 vi->nintr++; 3970 } 3971 } 3972#endif 3973 } 3974 } 3975 MPASS(irq == &sc->irq[sc->intr_count]); 3976 3977 return (0); 3978} 3979 3980int 3981adapter_full_init(struct adapter *sc) 3982{ 3983 int rc, i; 3984 3985 ASSERT_SYNCHRONIZED_OP(sc); 3986 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3987 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 3988 ("%s: FULL_INIT_DONE already", __func__)); 3989 3990 /* 3991 * queues that belong to the adapter (not any particular port). 
3992 */ 3993 rc = t4_setup_adapter_queues(sc); 3994 if (rc != 0) 3995 goto done; 3996 3997 for (i = 0; i < nitems(sc->tq); i++) { 3998 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 3999 taskqueue_thread_enqueue, &sc->tq[i]); 4000 if (sc->tq[i] == NULL) { 4001 device_printf(sc->dev, 4002 "failed to allocate task queue %d\n", i); 4003 rc = ENOMEM; 4004 goto done; 4005 } 4006 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 4007 device_get_nameunit(sc->dev), i); 4008 } 4009 4010 t4_intr_enable(sc); 4011 sc->flags |= FULL_INIT_DONE; 4012done: 4013 if (rc != 0) 4014 adapter_full_uninit(sc); 4015 4016 return (rc); 4017} 4018 4019int 4020adapter_full_uninit(struct adapter *sc) 4021{ 4022 int i; 4023 4024 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4025 4026 t4_teardown_adapter_queues(sc); 4027 4028 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 4029 taskqueue_free(sc->tq[i]); 4030 sc->tq[i] = NULL; 4031 } 4032 4033 sc->flags &= ~FULL_INIT_DONE; 4034 4035 return (0); 4036} 4037 4038#ifdef RSS 4039#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 4040 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 4041 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 4042 RSS_HASHTYPE_RSS_UDP_IPV6) 4043 4044/* Translates kernel hash types to hardware. */ 4045static int 4046hashconfig_to_hashen(int hashconfig) 4047{ 4048 int hashen = 0; 4049 4050 if (hashconfig & RSS_HASHTYPE_RSS_IPV4) 4051 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 4052 if (hashconfig & RSS_HASHTYPE_RSS_IPV6) 4053 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 4054 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { 4055 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4056 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4057 } 4058 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { 4059 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4060 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4061 } 4062 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) 4063 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4064 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) 4065 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4066 4067 return (hashen); 4068} 4069 4070/* Translates hardware hash types to kernel. */ 4071static int 4072hashen_to_hashconfig(int hashen) 4073{ 4074 int hashconfig = 0; 4075 4076 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { 4077 /* 4078 * If UDP hashing was enabled it must have been enabled for 4079 * either IPv4 or IPv6 (inclusive or). Enabling UDP without 4080 * enabling any 4-tuple hash is nonsense configuration. 
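	 * For example, a request for RSS_HASHTYPE_RSS_UDP_IPV4 is translated
	 * by hashconfig_to_hashen() above into UDPEN | IP4FOURTUPEN, so seeing
	 * UDPEN here together with IP4FOURTUPEN (and/or IP6FOURTUPEN) is
	 * enough to report the corresponding UDP hash types back to the
	 * kernel.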
4081 */ 4082 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4083 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); 4084 4085 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4086 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; 4087 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4088 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; 4089 } 4090 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4091 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; 4092 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4093 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; 4094 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 4095 hashconfig |= RSS_HASHTYPE_RSS_IPV4; 4096 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 4097 hashconfig |= RSS_HASHTYPE_RSS_IPV6; 4098 4099 return (hashconfig); 4100} 4101#endif 4102 4103int 4104vi_full_init(struct vi_info *vi) 4105{ 4106 struct adapter *sc = vi->pi->adapter; 4107 struct ifnet *ifp = vi->ifp; 4108 uint16_t *rss; 4109 struct sge_rxq *rxq; 4110 int rc, i, j, hashen; 4111#ifdef RSS 4112 int nbuckets = rss_getnumbuckets(); 4113 int hashconfig = rss_gethashconfig(); 4114 int extra; 4115 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4116 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4117#endif 4118 4119 ASSERT_SYNCHRONIZED_OP(sc); 4120 KASSERT((vi->flags & VI_INIT_DONE) == 0, 4121 ("%s: VI_INIT_DONE already", __func__)); 4122 4123 sysctl_ctx_init(&vi->ctx); 4124 vi->flags |= VI_SYSCTL_CTX; 4125 4126 /* 4127 * Allocate tx/rx/fl queues for this VI. 4128 */ 4129 rc = t4_setup_vi_queues(vi); 4130 if (rc != 0) 4131 goto done; /* error message displayed already */ 4132 4133 /* 4134 * Setup RSS for this VI. Save a copy of the RSS table for later use. 4135 */ 4136 if (vi->nrxq > vi->rss_size) { 4137 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " 4138 "some queues will never receive traffic.\n", vi->nrxq, 4139 vi->rss_size); 4140 } else if (vi->rss_size % vi->nrxq) { 4141 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); " 4142 "expect uneven traffic distribution.\n", vi->nrxq, 4143 vi->rss_size); 4144 } 4145#ifdef RSS 4146 MPASS(RSS_KEYSIZE == 40); 4147 if (vi->nrxq != nbuckets) { 4148 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);" 4149 "performance will be impacted.\n", vi->nrxq, nbuckets); 4150 } 4151 4152 rss_getkey((void *)&raw_rss_key[0]); 4153 for (i = 0; i < nitems(rss_key); i++) { 4154 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); 4155 } 4156 t4_write_rss_key(sc, &rss_key[0], -1); 4157#endif 4158 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 4159 for (i = 0; i < vi->rss_size;) { 4160#ifdef RSS 4161 j = rss_get_indirection_to_bucket(i); 4162 j %= vi->nrxq; 4163 rxq = &sc->sge.rxq[vi->first_rxq + j]; 4164 rss[i++] = rxq->iq.abs_id; 4165#else 4166 for_each_rxq(vi, j, rxq) { 4167 rss[i++] = rxq->iq.abs_id; 4168 if (i == vi->rss_size) 4169 break; 4170 } 4171#endif 4172 } 4173 4174 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 4175 vi->rss_size); 4176 if (rc != 0) { 4177 if_printf(ifp, "rss_config failed: %d\n", rc); 4178 goto done; 4179 } 4180 4181#ifdef RSS 4182 hashen = hashconfig_to_hashen(hashconfig); 4183 4184 /* 4185 * We may have had to enable some hashes even though the global config 4186 * wants them disabled. This is a potential problem that must be 4187 * reported to the user. 4188 */ 4189 extra = hashen_to_hashconfig(hashen) ^ hashconfig; 4190 4191 /* 4192 * If we consider only the supported hash types, then the enabled hashes 4193 * are a superset of the requested hashes. 
In other words, there cannot 4194 * be any supported hash that was requested but not enabled, but there 4195 * can be hashes that were not requested but had to be enabled. 4196 */ 4197 extra &= SUPPORTED_RSS_HASHTYPES; 4198 MPASS((extra & hashconfig) == 0); 4199 4200 if (extra) { 4201 if_printf(ifp, 4202 "global RSS config (0x%x) cannot be accommodated.\n", 4203 hashconfig); 4204 } 4205 if (extra & RSS_HASHTYPE_RSS_IPV4) 4206 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 4207 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 4208 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 4209 if (extra & RSS_HASHTYPE_RSS_IPV6) 4210 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 4211 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 4212 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 4213 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 4214 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 4215 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 4216 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 4217#else 4218 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 4219 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 4220 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4221 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 4222#endif 4223 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0]); 4224 if (rc != 0) { 4225 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 4226 goto done; 4227 } 4228 4229 vi->rss = rss; 4230 vi->flags |= VI_INIT_DONE; 4231done: 4232 if (rc != 0) 4233 vi_full_uninit(vi); 4234 4235 return (rc); 4236} 4237 4238/* 4239 * Idempotent. 4240 */ 4241int 4242vi_full_uninit(struct vi_info *vi) 4243{ 4244 struct port_info *pi = vi->pi; 4245 struct adapter *sc = pi->adapter; 4246 int i; 4247 struct sge_rxq *rxq; 4248 struct sge_txq *txq; 4249#ifdef TCP_OFFLOAD 4250 struct sge_ofld_rxq *ofld_rxq; 4251 struct sge_wrq *ofld_txq; 4252#endif 4253 4254 if (vi->flags & VI_INIT_DONE) { 4255 4256 /* Need to quiesce queues. */ 4257 4258 /* XXX: Only for the first VI? */ 4259 if (IS_MAIN_VI(vi)) 4260 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 4261 4262 for_each_txq(vi, i, txq) { 4263 quiesce_txq(sc, txq); 4264 } 4265 4266#ifdef TCP_OFFLOAD 4267 for_each_ofld_txq(vi, i, ofld_txq) { 4268 quiesce_wrq(sc, ofld_txq); 4269 } 4270#endif 4271 4272 for_each_rxq(vi, i, rxq) { 4273 quiesce_iq(sc, &rxq->iq); 4274 quiesce_fl(sc, &rxq->fl); 4275 } 4276 4277#ifdef TCP_OFFLOAD 4278 for_each_ofld_rxq(vi, i, ofld_rxq) { 4279 quiesce_iq(sc, &ofld_rxq->iq); 4280 quiesce_fl(sc, &ofld_rxq->fl); 4281 } 4282#endif 4283 free(vi->rss, M_CXGBE); 4284 free(vi->nm_rss, M_CXGBE); 4285 } 4286 4287 t4_teardown_vi_queues(vi); 4288 vi->flags &= ~VI_INIT_DONE; 4289 4290 return (0); 4291} 4292 4293static void 4294quiesce_txq(struct adapter *sc, struct sge_txq *txq) 4295{ 4296 struct sge_eq *eq = &txq->eq; 4297 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4298 4299 (void) sc; /* unused */ 4300 4301#ifdef INVARIANTS 4302 TXQ_LOCK(txq); 4303 MPASS((eq->flags & EQ_ENABLED) == 0); 4304 TXQ_UNLOCK(txq); 4305#endif 4306 4307 /* Wait for the mp_ring to empty. */ 4308 while (!mp_ring_is_idle(txq->r)) { 4309 mp_ring_check_drainage(txq->r, 0); 4310 pause("rquiesce", 1); 4311 } 4312 4313 /* Then wait for the hardware to finish. */ 4314 while (spg->cidx != htobe16(eq->pidx)) 4315 pause("equiesce", 1); 4316 4317 /* Finally, wait for the driver to reclaim all descriptors. 
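	 * The quiesce is ordered: once the mp_ring is idle no new descriptors
	 * can be posted, once the status page's cidx matches the driver's pidx
	 * the hardware has fetched everything that was posted, and once
	 * eq->cidx catches up with eq->pidx below the driver has reclaimed the
	 * credits for all of it.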
*/ 4318 while (eq->cidx != eq->pidx) 4319 pause("dquiesce", 1); 4320} 4321 4322static void 4323quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4324{ 4325 4326 /* XXXTX */ 4327} 4328 4329static void 4330quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4331{ 4332 (void) sc; /* unused */ 4333 4334 /* Synchronize with the interrupt handler */ 4335 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4336 pause("iqfree", 1); 4337} 4338 4339static void 4340quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4341{ 4342 mtx_lock(&sc->sfl_lock); 4343 FL_LOCK(fl); 4344 fl->flags |= FL_DOOMED; 4345 FL_UNLOCK(fl); 4346 callout_stop(&sc->sfl_callout); 4347 mtx_unlock(&sc->sfl_lock); 4348 4349 KASSERT((fl->flags & FL_STARVING) == 0, 4350 ("%s: still starving", __func__)); 4351} 4352 4353static int 4354t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4355 driver_intr_t *handler, void *arg, char *name) 4356{ 4357 int rc; 4358 4359 irq->rid = rid; 4360 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4361 RF_SHAREABLE | RF_ACTIVE); 4362 if (irq->res == NULL) { 4363 device_printf(sc->dev, 4364 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4365 return (ENOMEM); 4366 } 4367 4368 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4369 NULL, handler, arg, &irq->tag); 4370 if (rc != 0) { 4371 device_printf(sc->dev, 4372 "failed to setup interrupt for rid %d, name %s: %d\n", 4373 rid, name, rc); 4374 } else if (name) 4375 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 4376 4377 return (rc); 4378} 4379 4380static int 4381t4_free_irq(struct adapter *sc, struct irq *irq) 4382{ 4383 if (irq->tag) 4384 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4385 if (irq->res) 4386 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4387 4388 bzero(irq, sizeof(*irq)); 4389 4390 return (0); 4391} 4392 4393static void 4394get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4395{ 4396 4397 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4398 t4_get_regs(sc, buf, regs->len); 4399} 4400 4401#define A_PL_INDIR_CMD 0x1f8 4402 4403#define S_PL_AUTOINC 31 4404#define M_PL_AUTOINC 0x1U 4405#define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4406#define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4407 4408#define S_PL_VFID 20 4409#define M_PL_VFID 0xffU 4410#define V_PL_VFID(x) ((x) << S_PL_VFID) 4411#define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4412 4413#define S_PL_ADDR 0 4414#define M_PL_ADDR 0xfffffU 4415#define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4416#define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4417 4418#define A_PL_INDIR_DATA 0x1fc 4419 4420static uint64_t 4421read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4422{ 4423 u32 stats[2]; 4424 4425 mtx_assert(&sc->reg_lock, MA_OWNED); 4426 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4427 V_PL_VFID(G_FW_VIID_VIN(viid)) | V_PL_ADDR(VF_MPS_REG(reg))); 4428 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4429 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4430 return (((uint64_t)stats[1]) << 32 | stats[0]); 4431} 4432 4433static void 4434t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4435 struct fw_vi_stats_vf *stats) 4436{ 4437 4438#define GET_STAT(name) \ 4439 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4440 4441 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4442 stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4443 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4444 stats->tx_mcast_frames = 
GET_STAT(TX_VF_MCAST_FRAMES); 4445 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4446 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4447 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4448 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4449 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4450 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4451 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4452 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4453 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4454 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4455 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4456 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4457 4458#undef GET_STAT 4459} 4460 4461static void 4462t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 4463{ 4464 int reg; 4465 4466 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4467 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4468 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 4469 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 4470 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 4471 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 4472} 4473 4474static void 4475vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 4476{ 4477 struct timeval tv; 4478 const struct timeval interval = {0, 250000}; /* 250ms */ 4479 4480 if (!(vi->flags & VI_INIT_DONE)) 4481 return; 4482 4483 getmicrotime(&tv); 4484 timevalsub(&tv, &interval); 4485 if (timevalcmp(&tv, &vi->last_refreshed, <)) 4486 return; 4487 4488 mtx_lock(&sc->reg_lock); 4489 t4_get_vi_stats(sc, vi->viid, &vi->stats); 4490 getmicrotime(&vi->last_refreshed); 4491 mtx_unlock(&sc->reg_lock); 4492} 4493 4494static void 4495cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4496{ 4497 int i; 4498 u_int v, tnl_cong_drops; 4499 struct timeval tv; 4500 const struct timeval interval = {0, 250000}; /* 250ms */ 4501 4502 getmicrotime(&tv); 4503 timevalsub(&tv, &interval); 4504 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4505 return; 4506 4507 tnl_cong_drops = 0; 4508 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 4509 for (i = 0; i < sc->chip_params->nchan; i++) { 4510 if (pi->rx_chan_map & (1 << i)) { 4511 mtx_lock(&sc->reg_lock); 4512 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4513 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4514 mtx_unlock(&sc->reg_lock); 4515 tnl_cong_drops += v; 4516 } 4517 } 4518 pi->tnl_cong_drops = tnl_cong_drops; 4519 getmicrotime(&pi->last_refreshed); 4520} 4521 4522static void 4523cxgbe_tick(void *arg) 4524{ 4525 struct port_info *pi = arg; 4526 struct adapter *sc = pi->adapter; 4527 4528 PORT_LOCK_ASSERT_OWNED(pi); 4529 cxgbe_refresh_stats(sc, pi); 4530 4531 callout_schedule(&pi->tick, hz); 4532} 4533 4534void 4535vi_tick(void *arg) 4536{ 4537 struct vi_info *vi = arg; 4538 struct adapter *sc = vi->pi->adapter; 4539 4540 vi_refresh_stats(sc, vi); 4541 4542 callout_schedule(&vi->tick, hz); 4543} 4544 4545static void 4546cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4547{ 4548 struct ifnet *vlan; 4549 4550 if (arg != ifp || ifp->if_type != IFT_ETHER) 4551 return; 4552 4553 vlan = VLAN_DEVAT(ifp, vid); 4554 VLAN_SETCOOKIE(vlan, ifp); 4555} 4556 4557/* 4558 * Should match fw_caps_config_<foo> enums in t4fw_interface.h 4559 */ 4560static char *caps_decoder[] = { 4561 "\20\001IPMI\002NCSI", /* 0: NBM */ 4562 "\20\001PPP\002QFC\003DCBX", /* 1: link */ 4563 "\20\001INGRESS\002EGRESS", /* 2: switch */ 4564 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ 4565 
"\006HASHFILTER\007ETHOFLD", 4566 "\20\001TOE", /* 4: TOE */ 4567 "\20\001RDDP\002RDMAC", /* 5: RDMA */ 4568 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ 4569 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" 4570 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" 4571 "\007T10DIF" 4572 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", 4573 "\20\00KEYS", /* 7: TLS */ 4574 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ 4575 "\004PO_INITIATOR\005PO_TARGET", 4576}; 4577 4578static void 4579t4_sysctls(struct adapter *sc) 4580{ 4581 struct sysctl_ctx_list *ctx; 4582 struct sysctl_oid *oid; 4583 struct sysctl_oid_list *children, *c0; 4584 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 4585 4586 ctx = device_get_sysctl_ctx(sc->dev); 4587 4588 /* 4589 * dev.t4nex.X. 4590 */ 4591 oid = device_get_sysctl_tree(sc->dev); 4592 c0 = children = SYSCTL_CHILDREN(oid); 4593 4594 sc->sc_do_rxcopy = 1; 4595 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 4596 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 4597 4598 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 4599 sc->params.nports, "# of ports"); 4600 4601 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 4602 NULL, chip_rev(sc), "chip hardware revision"); 4603 4604 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", 4605 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); 4606 4607 if (sc->params.exprom_vers != 0) { 4608 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "exprom_version", 4609 CTLFLAG_RD, sc->exprom_version, 0, "expansion ROM version"); 4610 } 4611 4612 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 4613 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 4614 4615 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 4616 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 4617 4618 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 4619 sc->cfcsum, "config file checksum"); 4620 4621 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 4622 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 4623 sysctl_bitfield, "A", "available doorbells"); 4624 4625#define SYSCTL_CAP(name, n, text) \ 4626 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ 4627 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \ 4628 sysctl_bitfield, "A", "available " text "capabilities") 4629 4630 SYSCTL_CAP(nbmcaps, 0, "NBM"); 4631 SYSCTL_CAP(linkcaps, 1, "link"); 4632 SYSCTL_CAP(switchcaps, 2, "switch"); 4633 SYSCTL_CAP(niccaps, 3, "NIC"); 4634 SYSCTL_CAP(toecaps, 4, "TCP offload"); 4635 SYSCTL_CAP(rdmacaps, 5, "RDMA"); 4636 SYSCTL_CAP(iscsicaps, 6, "iSCSI"); 4637 SYSCTL_CAP(tlscaps, 7, "TLS"); 4638 SYSCTL_CAP(fcoecaps, 8, "FCoE"); 4639#undef SYSCTL_CAP 4640 4641 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 4642 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 4643 4644 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 4645 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 4646 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 4647 "interrupt holdoff timer values (us)"); 4648 4649 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 4650 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 4651 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 4652 "interrupt holdoff packet counter values"); 4653 4654 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 4655 NULL, sc->tids.nftids, "number of filters"); 4656 4657 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT 
| 4658 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 4659 "chip temperature (in Celsius)"); 4660 4661 t4_sge_sysctls(sc, ctx, children); 4662 4663 sc->lro_timeout = 100; 4664 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 4665 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 4666 4667 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "debug_flags", CTLFLAG_RW, 4668 &sc->debug_flags, 0, "flags to enable runtime debugging"); 4669 4670#ifdef SBUF_DRAIN 4671 /* 4672 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 4673 */ 4674 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4675 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4676 "logs and miscellaneous information"); 4677 children = SYSCTL_CHILDREN(oid); 4678 4679 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4680 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4681 sysctl_cctrl, "A", "congestion control"); 4682 4683 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4684 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4685 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4686 4687 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4688 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4689 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4690 4691 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4692 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4693 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4694 4695 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4696 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4697 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4698 4699 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 4700 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 4701 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 4702 4703 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 4704 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 4705 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 4706 4707 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 4708 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4709 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_cim_la : sysctl_cim_la_t6, 4710 "A", "CIM logic analyzer"); 4711 4712 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 4713 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4714 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 4715 4716 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 4717 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 4718 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 4719 4720 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 4721 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 4722 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 4723 4724 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 4725 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 4726 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 4727 4728 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 4729 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 4730 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 4731 4732 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 4733 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 4734 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 4735 4736 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 4737 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 4738 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 4739 4740 if (chip_id(sc) > CHELSIO_T4) { 4741 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 4742 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 4743 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 4744 4745 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 4746 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 4747 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 4748 } 4749 4750 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 4751 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4752 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 4753 4754 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 4755 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4756 sysctl_cim_qcfg, "A", "CIM queue configuration"); 4757 4758 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 4759 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4760 sysctl_cpl_stats, "A", "CPL statistics"); 4761 4762 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 4763 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4764 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 4765 4766 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 4767 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4768 sysctl_devlog, "A", "firmware's device log"); 4769 4770 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 4771 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4772 sysctl_fcoe_stats, "A", "FCoE statistics"); 4773 4774 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 4775 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4776 sysctl_hw_sched, "A", "hardware scheduler "); 4777 4778 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 4779 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4780 sysctl_l2t, "A", "hardware L2 table"); 4781 4782 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 4783 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4784 sysctl_lb_stats, "A", "loopback statistics"); 4785 4786 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 4787 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4788 sysctl_meminfo, "A", "memory regions"); 4789 4790 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 4791 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4792 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, 4793 "A", "MPS TCAM entries"); 4794 4795 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 4796 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4797 sysctl_path_mtus, "A", "path MTUs"); 4798 4799 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 4800 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4801 sysctl_pm_stats, "A", "PM statistics"); 4802 4803 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 4804 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4805 sysctl_rdma_stats, "A", "RDMA statistics"); 4806 4807 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 4808 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4809 sysctl_tcp_stats, "A", "TCP statistics"); 4810 4811 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 4812 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4813 sysctl_tids, "A", "TID information"); 4814 4815 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 4816 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4817 sysctl_tp_err_stats, "A", "TP error statistics"); 4818 4819 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", 4820 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", 4821 "TP logic analyzer event capture mask"); 4822 4823 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 4824 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4825 sysctl_tp_la, "A", "TP logic analyzer"); 4826 4827 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 4828 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4829 sysctl_tx_rate, "A", "Tx rate"); 4830 4831 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 4832 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4833 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 4834 4835 if (is_t5(sc)) { 4836 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 4837 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4838 sysctl_wcwr_stats, "A", "write combined work requests"); 4839 } 4840#endif 4841 4842#ifdef TCP_OFFLOAD 4843 if (is_offload(sc)) { 4844 /* 4845 * dev.t4nex.X.toe. 
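		 * Adapter-wide TOE tunables, e.g. dev.t4nex.0.toe.ddp or
		 * dev.t4nex.0.toe.rx_coalesce.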
4846 */ 4847 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 4848 NULL, "TOE parameters"); 4849 children = SYSCTL_CHILDREN(oid); 4850 4851 sc->tt.sndbuf = 256 * 1024; 4852 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 4853 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 4854 4855 sc->tt.ddp = 0; 4856 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 4857 &sc->tt.ddp, 0, "DDP allowed"); 4858 4859 sc->tt.rx_coalesce = 1; 4860 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 4861 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 4862 4863 sc->tt.tx_align = 1; 4864 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 4865 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 4866 4867 sc->tt.tx_zcopy = 0; 4868 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy", 4869 CTLFLAG_RW, &sc->tt.tx_zcopy, 0, 4870 "Enable zero-copy aio_write(2)"); 4871 4872 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", 4873 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A", 4874 "TP timer tick (us)"); 4875 4876 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", 4877 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A", 4878 "TCP timestamp tick (us)"); 4879 4880 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", 4881 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A", 4882 "DACK tick (us)"); 4883 4884 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", 4885 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer, 4886 "IU", "DACK timer (us)"); 4887 4888 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", 4889 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN, 4890 sysctl_tp_timer, "LU", "Retransmit min (us)"); 4891 4892 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", 4893 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX, 4894 sysctl_tp_timer, "LU", "Retransmit max (us)"); 4895 4896 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", 4897 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN, 4898 sysctl_tp_timer, "LU", "Persist timer min (us)"); 4899 4900 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", 4901 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX, 4902 sysctl_tp_timer, "LU", "Persist timer max (us)"); 4903 4904 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", 4905 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE, 4906 sysctl_tp_timer, "LU", "Keepidle idle timer (us)"); 4907 4908 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_intvl", 4909 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL, 4910 sysctl_tp_timer, "LU", "Keepidle interval (us)"); 4911 4912 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", 4913 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT, 4914 sysctl_tp_timer, "LU", "Initial SRTT (us)"); 4915 4916 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", 4917 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER, 4918 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); 4919 } 4920#endif 4921} 4922 4923void 4924vi_sysctls(struct vi_info *vi) 4925{ 4926 struct sysctl_ctx_list *ctx; 4927 struct sysctl_oid *oid; 4928 struct sysctl_oid_list *children; 4929 4930 ctx = device_get_sysctl_ctx(vi->dev); 4931 4932 /* 4933 * dev.v?(cxgbe|cxl).X. 
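	 * There is one such tree per VI.  The main VI of each port shows up as
	 * dev.cxgbe.X (dev.cxl.X on T5 cards) and any extra VIs show up as
	 * dev.vcxgbe.X / dev.vcxl.X, hence the "v?" above.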
4934 */ 4935 oid = device_get_sysctl_tree(vi->dev); 4936 children = SYSCTL_CHILDREN(oid); 4937 4938 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, 4939 vi->viid, "VI identifer"); 4940 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 4941 &vi->nrxq, 0, "# of rx queues"); 4942 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 4943 &vi->ntxq, 0, "# of tx queues"); 4944 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 4945 &vi->first_rxq, 0, "index of first rx queue"); 4946 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 4947 &vi->first_txq, 0, "index of first tx queue"); 4948 4949 if (IS_MAIN_VI(vi)) { 4950 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", 4951 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", 4952 "Reserve queue 0 for non-flowid packets"); 4953 } 4954 4955#ifdef TCP_OFFLOAD 4956 if (vi->nofldrxq != 0) { 4957 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 4958 &vi->nofldrxq, 0, 4959 "# of rx queues for offloaded TCP connections"); 4960 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 4961 &vi->nofldtxq, 0, 4962 "# of tx queues for offloaded TCP connections"); 4963 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 4964 CTLFLAG_RD, &vi->first_ofld_rxq, 0, 4965 "index of first TOE rx queue"); 4966 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 4967 CTLFLAG_RD, &vi->first_ofld_txq, 0, 4968 "index of first TOE tx queue"); 4969 } 4970#endif 4971#ifdef DEV_NETMAP 4972 if (vi->nnmrxq != 0) { 4973 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 4974 &vi->nnmrxq, 0, "# of netmap rx queues"); 4975 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 4976 &vi->nnmtxq, 0, "# of netmap tx queues"); 4977 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 4978 CTLFLAG_RD, &vi->first_nm_rxq, 0, 4979 "index of first netmap rx queue"); 4980 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 4981 CTLFLAG_RD, &vi->first_nm_txq, 0, 4982 "index of first netmap tx queue"); 4983 } 4984#endif 4985 4986 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 4987 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 4988 "holdoff timer index"); 4989 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 4990 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 4991 "holdoff packet counter index"); 4992 4993 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 4994 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 4995 "rx queue size"); 4996 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 4997 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 4998 "tx queue size"); 4999} 5000 5001static void 5002cxgbe_sysctls(struct port_info *pi) 5003{ 5004 struct sysctl_ctx_list *ctx; 5005 struct sysctl_oid *oid; 5006 struct sysctl_oid_list *children, *children2; 5007 struct adapter *sc = pi->adapter; 5008 int i; 5009 char name[16]; 5010 5011 ctx = device_get_sysctl_ctx(pi->dev); 5012 5013 /* 5014 * dev.cxgbe.X. 
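	 * Port-level nodes such as dev.cxgbe.0.pause_settings or
	 * dev.cxgbe.0.max_speed; the per-queue knobs live under the per-VI
	 * tree created in vi_sysctls above.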
5015 */ 5016 oid = device_get_sysctl_tree(pi->dev); 5017 children = SYSCTL_CHILDREN(oid); 5018 5019 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 5020 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 5021 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 5022 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 5023 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 5024 "PHY temperature (in Celsius)"); 5025 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 5026 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 5027 "PHY firmware version"); 5028 } 5029 5030 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 5031 CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings, 5032 "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 5033 5034 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 5035 port_top_speed(pi), "max speed (in Gbps)"); 5036 5037 /* 5038 * dev.(cxgbe|cxl).X.tc. 5039 */ 5040 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL, 5041 "Tx scheduler traffic classes"); 5042 for (i = 0; i < sc->chip_params->nsched_cls; i++) { 5043 struct tx_sched_class *tc = &pi->tc[i]; 5044 5045 snprintf(name, sizeof(name), "%d", i); 5046 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, 5047 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, 5048 "traffic class")); 5049 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD, 5050 &tc->flags, 0, "flags"); 5051 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", 5052 CTLFLAG_RD, &tc->refcount, 0, "references to this class"); 5053#ifdef SBUF_DRAIN 5054 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", 5055 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i, 5056 sysctl_tc_params, "A", "traffic class parameters"); 5057#endif 5058 } 5059 5060 /* 5061 * dev.cxgbe.X.stats. 
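	 * Port statistics, e.g. dev.cxgbe.0.stats.tx_frames.  Most of these
	 * read the MPS port-statistics registers directly via
	 * sysctl_handle_t4_reg64, so each sysctl read reflects the hardware
	 * counter at that moment.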
5062 */ 5063 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 5064 NULL, "port statistics"); 5065 children = SYSCTL_CHILDREN(oid); 5066 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 5067 &pi->tx_parse_error, 0, 5068 "# of tx packets with invalid length or # of segments"); 5069 5070#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 5071 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 5072 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 5073 sysctl_handle_t4_reg64, "QU", desc) 5074 5075 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 5076 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 5077 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 5078 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 5079 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 5080 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 5081 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 5082 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 5083 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 5084 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 5085 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 5086 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 5087 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 5088 "# of tx frames in this range", 5089 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 5090 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 5091 "# of tx frames in this range", 5092 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 5093 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 5094 "# of tx frames in this range", 5095 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 5096 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 5097 "# of tx frames in this range", 5098 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 5099 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 5100 "# of tx frames in this range", 5101 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 5102 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 5103 "# of tx frames in this range", 5104 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 5105 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 5106 "# of tx frames in this range", 5107 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 5108 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 5109 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 5110 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 5111 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 5112 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 5113 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 5114 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 5115 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 5116 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 5117 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 5118 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 5119 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 5120 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 5121 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 5122 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 5123 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 5124 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames 
transmitted", 5125 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 5126 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 5127 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 5128 5129 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 5130 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 5131 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 5132 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 5133 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 5134 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 5135 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 5136 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 5137 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 5138 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 5139 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 5140 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 5141 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5142 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5143 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5144 "# of frames received with bad FCS", 5145 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5146 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5147 "# of frames received with length error", 5148 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5149 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5150 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5151 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5152 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5153 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5154 "# of rx frames in this range", 5155 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5156 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5157 "# of rx frames in this range", 5158 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5159 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5160 "# of rx frames in this range", 5161 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5162 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5163 "# of rx frames in this range", 5164 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5165 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5166 "# of rx frames in this range", 5167 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5168 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5169 "# of rx frames in this range", 5170 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5171 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5172 "# of rx frames in this range", 5173 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5174 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5175 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5176 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5177 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5178 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5179 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5180 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5181 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5182 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5183 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5184 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5185 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 5186 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 5187 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 5188 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 5189 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 5190 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 5191 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 5192 5193#undef SYSCTL_ADD_T4_REG64 5194 5195#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 5196 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 5197 &pi->stats.name, desc) 5198 5199 /* We get these from port_stats and they may be stale by up to 1s */ 5200 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 5201 "# drops due to buffer-group 0 overflows"); 5202 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 5203 "# drops due to buffer-group 1 overflows"); 5204 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 5205 "# drops due to buffer-group 2 overflows"); 5206 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 5207 "# drops due to buffer-group 3 overflows"); 5208 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 5209 "# of buffer-group 0 truncated packets"); 5210 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 5211 "# of buffer-group 1 truncated packets"); 5212 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 5213 "# of buffer-group 2 truncated packets"); 5214 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 5215 "# of buffer-group 3 truncated packets"); 5216 5217#undef SYSCTL_ADD_T4_PORTSTAT 5218} 5219 5220static int 5221sysctl_int_array(SYSCTL_HANDLER_ARGS) 5222{ 5223 int rc, *i, space = 0; 5224 struct sbuf sb; 5225 5226 sbuf_new_for_sysctl(&sb, NULL, 64, req); 5227 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { 5228 if (space) 5229 sbuf_printf(&sb, " "); 5230 sbuf_printf(&sb, "%d", *i); 5231 space = 1; 5232 } 5233 rc = sbuf_finish(&sb); 5234 sbuf_delete(&sb); 5235 return (rc); 5236} 5237 5238static int 5239sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5240{ 5241 int rc; 5242 struct sbuf *sb; 5243 5244 rc = sysctl_wire_old_buffer(req, 0); 5245 if (rc != 0) 5246 return(rc); 5247 5248 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5249 if (sb == NULL) 5250 return (ENOMEM); 5251 5252 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5253 rc = sbuf_finish(sb); 5254 sbuf_delete(sb); 5255 5256 return (rc); 5257} 5258 5259static int 5260sysctl_btphy(SYSCTL_HANDLER_ARGS) 5261{ 5262 struct port_info *pi = arg1; 5263 int op = arg2; 5264 struct adapter *sc = pi->adapter; 5265 u_int v; 5266 int rc; 5267 5268 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); 5269 if (rc) 5270 return (rc); 5271 /* XXX: magic numbers */ 5272 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 
0x20 : 0xc820, 5273 &v); 5274 end_synchronized_op(sc, 0); 5275 if (rc) 5276 return (rc); 5277 if (op == 0) 5278 v /= 256; 5279 5280 rc = sysctl_handle_int(oidp, &v, 0, req); 5281 return (rc); 5282} 5283 5284static int 5285sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5286{ 5287 struct vi_info *vi = arg1; 5288 int rc, val; 5289 5290 val = vi->rsrv_noflowq; 5291 rc = sysctl_handle_int(oidp, &val, 0, req); 5292 if (rc != 0 || req->newptr == NULL) 5293 return (rc); 5294 5295 if ((val >= 1) && (vi->ntxq > 1)) 5296 vi->rsrv_noflowq = 1; 5297 else 5298 vi->rsrv_noflowq = 0; 5299 5300 return (rc); 5301} 5302 5303static int 5304sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5305{ 5306 struct vi_info *vi = arg1; 5307 struct adapter *sc = vi->pi->adapter; 5308 int idx, rc, i; 5309 struct sge_rxq *rxq; 5310#ifdef TCP_OFFLOAD 5311 struct sge_ofld_rxq *ofld_rxq; 5312#endif 5313 uint8_t v; 5314 5315 idx = vi->tmr_idx; 5316 5317 rc = sysctl_handle_int(oidp, &idx, 0, req); 5318 if (rc != 0 || req->newptr == NULL) 5319 return (rc); 5320 5321 if (idx < 0 || idx >= SGE_NTIMERS) 5322 return (EINVAL); 5323 5324 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5325 "t4tmr"); 5326 if (rc) 5327 return (rc); 5328 5329 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 5330 for_each_rxq(vi, i, rxq) { 5331#ifdef atomic_store_rel_8 5332 atomic_store_rel_8(&rxq->iq.intr_params, v); 5333#else 5334 rxq->iq.intr_params = v; 5335#endif 5336 } 5337#ifdef TCP_OFFLOAD 5338 for_each_ofld_rxq(vi, i, ofld_rxq) { 5339#ifdef atomic_store_rel_8 5340 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5341#else 5342 ofld_rxq->iq.intr_params = v; 5343#endif 5344 } 5345#endif 5346 vi->tmr_idx = idx; 5347 5348 end_synchronized_op(sc, LOCK_HELD); 5349 return (0); 5350} 5351 5352static int 5353sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5354{ 5355 struct vi_info *vi = arg1; 5356 struct adapter *sc = vi->pi->adapter; 5357 int idx, rc; 5358 5359 idx = vi->pktc_idx; 5360 5361 rc = sysctl_handle_int(oidp, &idx, 0, req); 5362 if (rc != 0 || req->newptr == NULL) 5363 return (rc); 5364 5365 if (idx < -1 || idx >= SGE_NCOUNTERS) 5366 return (EINVAL); 5367 5368 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5369 "t4pktc"); 5370 if (rc) 5371 return (rc); 5372 5373 if (vi->flags & VI_INIT_DONE) 5374 rc = EBUSY; /* cannot be changed once the queues are created */ 5375 else 5376 vi->pktc_idx = idx; 5377 5378 end_synchronized_op(sc, LOCK_HELD); 5379 return (rc); 5380} 5381 5382static int 5383sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5384{ 5385 struct vi_info *vi = arg1; 5386 struct adapter *sc = vi->pi->adapter; 5387 int qsize, rc; 5388 5389 qsize = vi->qsize_rxq; 5390 5391 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5392 if (rc != 0 || req->newptr == NULL) 5393 return (rc); 5394 5395 if (qsize < 128 || (qsize & 7)) 5396 return (EINVAL); 5397 5398 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5399 "t4rxqs"); 5400 if (rc) 5401 return (rc); 5402 5403 if (vi->flags & VI_INIT_DONE) 5404 rc = EBUSY; /* cannot be changed once the queues are created */ 5405 else 5406 vi->qsize_rxq = qsize; 5407 5408 end_synchronized_op(sc, LOCK_HELD); 5409 return (rc); 5410} 5411 5412static int 5413sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5414{ 5415 struct vi_info *vi = arg1; 5416 struct adapter *sc = vi->pi->adapter; 5417 int qsize, rc; 5418 5419 qsize = vi->qsize_txq; 5420 5421 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5422 if (rc != 0 || req->newptr == NULL) 5423 return (rc); 5424 5425 if (qsize < 128 || qsize > 
65536) 5426 return (EINVAL); 5427 5428 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5429 "t4txqs"); 5430 if (rc) 5431 return (rc); 5432 5433 if (vi->flags & VI_INIT_DONE) 5434 rc = EBUSY; /* cannot be changed once the queues are created */ 5435 else 5436 vi->qsize_txq = qsize; 5437 5438 end_synchronized_op(sc, LOCK_HELD); 5439 return (rc); 5440} 5441 5442static int 5443sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5444{ 5445 struct port_info *pi = arg1; 5446 struct adapter *sc = pi->adapter; 5447 struct link_config *lc = &pi->link_cfg; 5448 int rc; 5449 5450 if (req->newptr == NULL) { 5451 struct sbuf *sb; 5452 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5453 5454 rc = sysctl_wire_old_buffer(req, 0); 5455 if (rc != 0) 5456 return(rc); 5457 5458 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5459 if (sb == NULL) 5460 return (ENOMEM); 5461 5462 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5463 rc = sbuf_finish(sb); 5464 sbuf_delete(sb); 5465 } else { 5466 char s[2]; 5467 int n; 5468 5469 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5470 s[1] = 0; 5471 5472 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5473 if (rc != 0) 5474 return(rc); 5475 5476 if (s[1] != 0) 5477 return (EINVAL); 5478 if (s[0] < '0' || s[0] > '9') 5479 return (EINVAL); /* not a number */ 5480 n = s[0] - '0'; 5481 if (n & ~(PAUSE_TX | PAUSE_RX)) 5482 return (EINVAL); /* some other bit is set too */ 5483 5484 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5485 "t4PAUSE"); 5486 if (rc) 5487 return (rc); 5488 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5489 int link_ok = lc->link_ok; 5490 5491 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5492 lc->requested_fc |= n; 5493 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5494 lc->link_ok = link_ok; /* restore */ 5495 } 5496 end_synchronized_op(sc, 0); 5497 } 5498 5499 return (rc); 5500} 5501 5502static int 5503sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 5504{ 5505 struct adapter *sc = arg1; 5506 int reg = arg2; 5507 uint64_t val; 5508 5509 val = t4_read_reg64(sc, reg); 5510 5511 return (sysctl_handle_64(oidp, &val, 0, req)); 5512} 5513 5514static int 5515sysctl_temperature(SYSCTL_HANDLER_ARGS) 5516{ 5517 struct adapter *sc = arg1; 5518 int rc, t; 5519 uint32_t param, val; 5520 5521 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 5522 if (rc) 5523 return (rc); 5524 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5525 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 5526 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 5527 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 5528 end_synchronized_op(sc, 0); 5529 if (rc) 5530 return (rc); 5531 5532 /* unknown is returned as 0 but we display -1 in that case */ 5533 t = val == 0 ? 
-1 : val; 5534 5535 rc = sysctl_handle_int(oidp, &t, 0, req); 5536 return (rc); 5537} 5538 5539#ifdef SBUF_DRAIN 5540static int 5541sysctl_cctrl(SYSCTL_HANDLER_ARGS) 5542{ 5543 struct adapter *sc = arg1; 5544 struct sbuf *sb; 5545 int rc, i; 5546 uint16_t incr[NMTUS][NCCTRL_WIN]; 5547 static const char *dec_fac[] = { 5548 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 5549 "0.9375" 5550 }; 5551 5552 rc = sysctl_wire_old_buffer(req, 0); 5553 if (rc != 0) 5554 return (rc); 5555 5556 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5557 if (sb == NULL) 5558 return (ENOMEM); 5559 5560 t4_read_cong_tbl(sc, incr); 5561 5562 for (i = 0; i < NCCTRL_WIN; ++i) { 5563 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 5564 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 5565 incr[5][i], incr[6][i], incr[7][i]); 5566 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 5567 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 5568 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 5569 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 5570 } 5571 5572 rc = sbuf_finish(sb); 5573 sbuf_delete(sb); 5574 5575 return (rc); 5576} 5577 5578static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 5579 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 5580 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 5581 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 5582}; 5583 5584static int 5585sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 5586{ 5587 struct adapter *sc = arg1; 5588 struct sbuf *sb; 5589 int rc, i, n, qid = arg2; 5590 uint32_t *buf, *p; 5591 char *qtype; 5592 u_int cim_num_obq = sc->chip_params->cim_num_obq; 5593 5594 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 5595 ("%s: bad qid %d\n", __func__, qid)); 5596 5597 if (qid < CIM_NUM_IBQ) { 5598 /* inbound queue */ 5599 qtype = "IBQ"; 5600 n = 4 * CIM_IBQ_SIZE; 5601 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5602 rc = t4_read_cim_ibq(sc, qid, buf, n); 5603 } else { 5604 /* outbound queue */ 5605 qtype = "OBQ"; 5606 qid -= CIM_NUM_IBQ; 5607 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 5608 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5609 rc = t4_read_cim_obq(sc, qid, buf, n); 5610 } 5611 5612 if (rc < 0) { 5613 rc = -rc; 5614 goto done; 5615 } 5616 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 5617 5618 rc = sysctl_wire_old_buffer(req, 0); 5619 if (rc != 0) 5620 goto done; 5621 5622 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5623 if (sb == NULL) { 5624 rc = ENOMEM; 5625 goto done; 5626 } 5627 5628 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); 5629 for (i = 0, p = buf; i < n; i += 16, p += 4) 5630 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 5631 p[2], p[3]); 5632 5633 rc = sbuf_finish(sb); 5634 sbuf_delete(sb); 5635done: 5636 free(buf, M_CXGBE); 5637 return (rc); 5638} 5639 5640static int 5641sysctl_cim_la(SYSCTL_HANDLER_ARGS) 5642{ 5643 struct adapter *sc = arg1; 5644 u_int cfg; 5645 struct sbuf *sb; 5646 uint32_t *buf, *p; 5647 int rc; 5648 5649 MPASS(chip_id(sc) <= CHELSIO_T5); 5650 5651 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5652 if (rc != 0) 5653 return (rc); 5654 5655 rc = sysctl_wire_old_buffer(req, 0); 5656 if (rc != 0) 5657 return (rc); 5658 5659 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5660 if (sb == NULL) 5661 return (ENOMEM); 5662 5663 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5664 M_ZERO | M_WAITOK); 5665 5666 rc = -t4_cim_read_la(sc, 
buf, NULL); 5667 if (rc != 0) 5668 goto done; 5669 5670 sbuf_printf(sb, "Status Data PC%s", 5671 cfg & F_UPDBGLACAPTPCONLY ? "" : 5672 " LS0Stat LS0Addr LS0Data"); 5673 5674 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { 5675 if (cfg & F_UPDBGLACAPTPCONLY) { 5676 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 5677 p[6], p[7]); 5678 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 5679 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 5680 p[4] & 0xff, p[5] >> 8); 5681 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 5682 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5683 p[1] & 0xf, p[2] >> 4); 5684 } else { 5685 sbuf_printf(sb, 5686 "\n %02x %x%07x %x%07x %08x %08x " 5687 "%08x%08x%08x%08x", 5688 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5689 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 5690 p[6], p[7]); 5691 } 5692 } 5693 5694 rc = sbuf_finish(sb); 5695 sbuf_delete(sb); 5696done: 5697 free(buf, M_CXGBE); 5698 return (rc); 5699} 5700 5701static int 5702sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS) 5703{ 5704 struct adapter *sc = arg1; 5705 u_int cfg; 5706 struct sbuf *sb; 5707 uint32_t *buf, *p; 5708 int rc; 5709 5710 MPASS(chip_id(sc) > CHELSIO_T5); 5711 5712 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5713 if (rc != 0) 5714 return (rc); 5715 5716 rc = sysctl_wire_old_buffer(req, 0); 5717 if (rc != 0) 5718 return (rc); 5719 5720 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5721 if (sb == NULL) 5722 return (ENOMEM); 5723 5724 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5725 M_ZERO | M_WAITOK); 5726 5727 rc = -t4_cim_read_la(sc, buf, NULL); 5728 if (rc != 0) 5729 goto done; 5730 5731 sbuf_printf(sb, "Status Inst Data PC%s", 5732 cfg & F_UPDBGLACAPTPCONLY ? "" : 5733 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); 5734 5735 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { 5736 if (cfg & F_UPDBGLACAPTPCONLY) { 5737 sbuf_printf(sb, "\n %02x %08x %08x %08x", 5738 p[3] & 0xff, p[2], p[1], p[0]); 5739 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", 5740 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, 5741 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); 5742 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", 5743 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, 5744 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, 5745 p[6] >> 16); 5746 } else { 5747 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " 5748 "%08x %08x %08x %08x %08x %08x", 5749 (p[9] >> 16) & 0xff, 5750 p[9] & 0xffff, p[8] >> 16, 5751 p[8] & 0xffff, p[7] >> 16, 5752 p[7] & 0xffff, p[6] >> 16, 5753 p[2], p[1], p[0], p[5], p[4], p[3]); 5754 } 5755 } 5756 5757 rc = sbuf_finish(sb); 5758 sbuf_delete(sb); 5759done: 5760 free(buf, M_CXGBE); 5761 return (rc); 5762} 5763 5764static int 5765sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 5766{ 5767 struct adapter *sc = arg1; 5768 u_int i; 5769 struct sbuf *sb; 5770 uint32_t *buf, *p; 5771 int rc; 5772 5773 rc = sysctl_wire_old_buffer(req, 0); 5774 if (rc != 0) 5775 return (rc); 5776 5777 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5778 if (sb == NULL) 5779 return (ENOMEM); 5780 5781 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 5782 M_ZERO | M_WAITOK); 5783 5784 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 5785 p = buf; 5786 5787 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5788 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 5789 p[1], p[0]); 5790 } 5791 5792 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 5793 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5794 sbuf_printf(sb, "\n%3u %2u %x %u 
%08x%08x %u %u", 5795 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 5796 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 5797 (p[1] >> 2) | ((p[2] & 3) << 30), 5798 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 5799 p[0] & 1); 5800 } 5801 5802 rc = sbuf_finish(sb); 5803 sbuf_delete(sb); 5804 free(buf, M_CXGBE); 5805 return (rc); 5806} 5807 5808static int 5809sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 5810{ 5811 struct adapter *sc = arg1; 5812 u_int i; 5813 struct sbuf *sb; 5814 uint32_t *buf, *p; 5815 int rc; 5816 5817 rc = sysctl_wire_old_buffer(req, 0); 5818 if (rc != 0) 5819 return (rc); 5820 5821 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5822 if (sb == NULL) 5823 return (ENOMEM); 5824 5825 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 5826 M_ZERO | M_WAITOK); 5827 5828 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 5829 p = buf; 5830 5831 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 5832 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5833 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 5834 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 5835 p[4], p[3], p[2], p[1], p[0]); 5836 } 5837 5838 sbuf_printf(sb, "\n\nCntl ID Data"); 5839 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5840 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 5841 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 5842 } 5843 5844 rc = sbuf_finish(sb); 5845 sbuf_delete(sb); 5846 free(buf, M_CXGBE); 5847 return (rc); 5848} 5849 5850static int 5851sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 5852{ 5853 struct adapter *sc = arg1; 5854 struct sbuf *sb; 5855 int rc, i; 5856 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5857 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5858 uint16_t thres[CIM_NUM_IBQ]; 5859 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 5860 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 5861 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 5862 5863 cim_num_obq = sc->chip_params->cim_num_obq; 5864 if (is_t4(sc)) { 5865 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 5866 obq_rdaddr = A_UP_OBQ_0_REALADDR; 5867 } else { 5868 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 5869 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 5870 } 5871 nq = CIM_NUM_IBQ + cim_num_obq; 5872 5873 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 5874 if (rc == 0) 5875 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 5876 if (rc != 0) 5877 return (rc); 5878 5879 t4_read_cimq_cfg(sc, base, size, thres); 5880 5881 rc = sysctl_wire_old_buffer(req, 0); 5882 if (rc != 0) 5883 return (rc); 5884 5885 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5886 if (sb == NULL) 5887 return (ENOMEM); 5888 5889 sbuf_printf(sb, "Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 5890 5891 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 5892 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 5893 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 5894 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5895 G_QUEREMFLITS(p[2]) * 16); 5896 for ( ; i < nq; i++, p += 4, wr += 2) 5897 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 5898 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 5899 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5900 G_QUEREMFLITS(p[2]) * 16); 5901 5902 rc = sbuf_finish(sb); 5903 sbuf_delete(sb); 5904 5905 return (rc); 5906} 5907 5908static int 5909sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 5910{ 5911 struct adapter *sc = arg1; 5912 struct sbuf *sb; 5913 int rc; 5914 struct tp_cpl_stats stats; 5915 5916 rc = sysctl_wire_old_buffer(req, 0); 5917 if (rc != 
0) 5918 return (rc); 5919 5920 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5921 if (sb == NULL) 5922 return (ENOMEM); 5923 5924 mtx_lock(&sc->reg_lock); 5925 t4_tp_get_cpl_stats(sc, &stats); 5926 mtx_unlock(&sc->reg_lock); 5927 5928 if (sc->chip_params->nchan > 2) { 5929 sbuf_printf(sb, " channel 0 channel 1" 5930 " channel 2 channel 3"); 5931 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", 5932 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 5933 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", 5934 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 5935 } else { 5936 sbuf_printf(sb, " channel 0 channel 1"); 5937 sbuf_printf(sb, "\nCPL requests: %10u %10u", 5938 stats.req[0], stats.req[1]); 5939 sbuf_printf(sb, "\nCPL responses: %10u %10u", 5940 stats.rsp[0], stats.rsp[1]); 5941 } 5942 5943 rc = sbuf_finish(sb); 5944 sbuf_delete(sb); 5945 5946 return (rc); 5947} 5948 5949static int 5950sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 5951{ 5952 struct adapter *sc = arg1; 5953 struct sbuf *sb; 5954 int rc; 5955 struct tp_usm_stats stats; 5956 5957 rc = sysctl_wire_old_buffer(req, 0); 5958 if (rc != 0) 5959 return(rc); 5960 5961 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5962 if (sb == NULL) 5963 return (ENOMEM); 5964 5965 t4_get_usm_stats(sc, &stats); 5966 5967 sbuf_printf(sb, "Frames: %u\n", stats.frames); 5968 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 5969 sbuf_printf(sb, "Drops: %u", stats.drops); 5970 5971 rc = sbuf_finish(sb); 5972 sbuf_delete(sb); 5973 5974 return (rc); 5975} 5976 5977static const char * const devlog_level_strings[] = { 5978 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 5979 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 5980 [FW_DEVLOG_LEVEL_ERR] = "ERR", 5981 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 5982 [FW_DEVLOG_LEVEL_INFO] = "INFO", 5983 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 5984}; 5985 5986static const char * const devlog_facility_strings[] = { 5987 [FW_DEVLOG_FACILITY_CORE] = "CORE", 5988 [FW_DEVLOG_FACILITY_CF] = "CF", 5989 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 5990 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 5991 [FW_DEVLOG_FACILITY_RES] = "RES", 5992 [FW_DEVLOG_FACILITY_HW] = "HW", 5993 [FW_DEVLOG_FACILITY_FLR] = "FLR", 5994 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 5995 [FW_DEVLOG_FACILITY_PHY] = "PHY", 5996 [FW_DEVLOG_FACILITY_MAC] = "MAC", 5997 [FW_DEVLOG_FACILITY_PORT] = "PORT", 5998 [FW_DEVLOG_FACILITY_VI] = "VI", 5999 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 6000 [FW_DEVLOG_FACILITY_ACL] = "ACL", 6001 [FW_DEVLOG_FACILITY_TM] = "TM", 6002 [FW_DEVLOG_FACILITY_QFC] = "QFC", 6003 [FW_DEVLOG_FACILITY_DCB] = "DCB", 6004 [FW_DEVLOG_FACILITY_ETH] = "ETH", 6005 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 6006 [FW_DEVLOG_FACILITY_RI] = "RI", 6007 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 6008 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 6009 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 6010 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", 6011 [FW_DEVLOG_FACILITY_CHNET] = "CHNET", 6012}; 6013 6014static int 6015sysctl_devlog(SYSCTL_HANDLER_ARGS) 6016{ 6017 struct adapter *sc = arg1; 6018 struct devlog_params *dparams = &sc->params.devlog; 6019 struct fw_devlog_e *buf, *e; 6020 int i, j, rc, nentries, first = 0; 6021 struct sbuf *sb; 6022 uint64_t ftstamp = UINT64_MAX; 6023 6024 if (dparams->addr == 0) 6025 return (ENXIO); 6026 6027 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 6028 if (buf == NULL) 6029 return (ENOMEM); 6030 6031 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); 6032 if (rc != 0) 6033 goto done; 6034 6035 nentries = dparams->size / sizeof(struct fw_devlog_e); 6036 
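	/*
	 * Descriptive note (not in the original listing): the firmware device
	 * log is a circular buffer.  The first pass below byte-swaps each
	 * entry and finds the oldest one (the smallest timestamp); the display
	 * loop further down starts at that entry and wraps around at
	 * nentries.  The resulting text is typically read via the sysctl OID
	 * registered elsewhere in this file, e.g. something like
	 * "sysctl dev.t4nex.<unit>.misc.devlog" (exact OID path assumed here).
	 */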
for (i = 0; i < nentries; i++) { 6037 e = &buf[i]; 6038 6039 if (e->timestamp == 0) 6040 break; /* end */ 6041 6042 e->timestamp = be64toh(e->timestamp); 6043 e->seqno = be32toh(e->seqno); 6044 for (j = 0; j < 8; j++) 6045 e->params[j] = be32toh(e->params[j]); 6046 6047 if (e->timestamp < ftstamp) { 6048 ftstamp = e->timestamp; 6049 first = i; 6050 } 6051 } 6052 6053 if (buf[first].timestamp == 0) 6054 goto done; /* nothing in the log */ 6055 6056 rc = sysctl_wire_old_buffer(req, 0); 6057 if (rc != 0) 6058 goto done; 6059 6060 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6061 if (sb == NULL) { 6062 rc = ENOMEM; 6063 goto done; 6064 } 6065 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 6066 "Seq#", "Tstamp", "Level", "Facility", "Message"); 6067 6068 i = first; 6069 do { 6070 e = &buf[i]; 6071 if (e->timestamp == 0) 6072 break; /* end */ 6073 6074 sbuf_printf(sb, "%10d %15ju %8s %8s ", 6075 e->seqno, e->timestamp, 6076 (e->level < nitems(devlog_level_strings) ? 6077 devlog_level_strings[e->level] : "UNKNOWN"), 6078 (e->facility < nitems(devlog_facility_strings) ? 6079 devlog_facility_strings[e->facility] : "UNKNOWN")); 6080 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 6081 e->params[2], e->params[3], e->params[4], 6082 e->params[5], e->params[6], e->params[7]); 6083 6084 if (++i == nentries) 6085 i = 0; 6086 } while (i != first); 6087 6088 rc = sbuf_finish(sb); 6089 sbuf_delete(sb); 6090done: 6091 free(buf, M_CXGBE); 6092 return (rc); 6093} 6094 6095static int 6096sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 6097{ 6098 struct adapter *sc = arg1; 6099 struct sbuf *sb; 6100 int rc; 6101 struct tp_fcoe_stats stats[MAX_NCHAN]; 6102 int i, nchan = sc->chip_params->nchan; 6103 6104 rc = sysctl_wire_old_buffer(req, 0); 6105 if (rc != 0) 6106 return (rc); 6107 6108 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6109 if (sb == NULL) 6110 return (ENOMEM); 6111 6112 for (i = 0; i < nchan; i++) 6113 t4_get_fcoe_stats(sc, i, &stats[i]); 6114 6115 if (nchan > 2) { 6116 sbuf_printf(sb, " channel 0 channel 1" 6117 " channel 2 channel 3"); 6118 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", 6119 stats[0].octets_ddp, stats[1].octets_ddp, 6120 stats[2].octets_ddp, stats[3].octets_ddp); 6121 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", 6122 stats[0].frames_ddp, stats[1].frames_ddp, 6123 stats[2].frames_ddp, stats[3].frames_ddp); 6124 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", 6125 stats[0].frames_drop, stats[1].frames_drop, 6126 stats[2].frames_drop, stats[3].frames_drop); 6127 } else { 6128 sbuf_printf(sb, " channel 0 channel 1"); 6129 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", 6130 stats[0].octets_ddp, stats[1].octets_ddp); 6131 sbuf_printf(sb, "\nframesDDP: %16u %16u", 6132 stats[0].frames_ddp, stats[1].frames_ddp); 6133 sbuf_printf(sb, "\nframesDrop: %16u %16u", 6134 stats[0].frames_drop, stats[1].frames_drop); 6135 } 6136 6137 rc = sbuf_finish(sb); 6138 sbuf_delete(sb); 6139 6140 return (rc); 6141} 6142 6143static int 6144sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 6145{ 6146 struct adapter *sc = arg1; 6147 struct sbuf *sb; 6148 int rc, i; 6149 unsigned int map, kbps, ipg, mode; 6150 unsigned int pace_tab[NTX_SCHED]; 6151 6152 rc = sysctl_wire_old_buffer(req, 0); 6153 if (rc != 0) 6154 return (rc); 6155 6156 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6157 if (sb == NULL) 6158 return (ENOMEM); 6159 6160 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 6161 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 6162 t4_read_pace_tbl(sc, pace_tab); 6163 6164 sbuf_printf(sb, 
"Scheduler Mode Channel Rate (Kbps) " 6165 "Class IPG (0.1 ns) Flow IPG (us)"); 6166 6167 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 6168 t4_get_tx_sched(sc, i, &kbps, &ipg); 6169 sbuf_printf(sb, "\n %u %-5s %u ", i, 6170 (mode & (1 << i)) ? "flow" : "class", map & 3); 6171 if (kbps) 6172 sbuf_printf(sb, "%9u ", kbps); 6173 else 6174 sbuf_printf(sb, " disabled "); 6175 6176 if (ipg) 6177 sbuf_printf(sb, "%13u ", ipg); 6178 else 6179 sbuf_printf(sb, " disabled "); 6180 6181 if (pace_tab[i]) 6182 sbuf_printf(sb, "%10u", pace_tab[i]); 6183 else 6184 sbuf_printf(sb, " disabled"); 6185 } 6186 6187 rc = sbuf_finish(sb); 6188 sbuf_delete(sb); 6189 6190 return (rc); 6191} 6192 6193static int 6194sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 6195{ 6196 struct adapter *sc = arg1; 6197 struct sbuf *sb; 6198 int rc, i, j; 6199 uint64_t *p0, *p1; 6200 struct lb_port_stats s[2]; 6201 static const char *stat_name[] = { 6202 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 6203 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 6204 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 6205 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 6206 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 6207 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 6208 "BG2FramesTrunc:", "BG3FramesTrunc:" 6209 }; 6210 6211 rc = sysctl_wire_old_buffer(req, 0); 6212 if (rc != 0) 6213 return (rc); 6214 6215 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6216 if (sb == NULL) 6217 return (ENOMEM); 6218 6219 memset(s, 0, sizeof(s)); 6220 6221 for (i = 0; i < sc->chip_params->nchan; i += 2) { 6222 t4_get_lb_stats(sc, i, &s[0]); 6223 t4_get_lb_stats(sc, i + 1, &s[1]); 6224 6225 p0 = &s[0].octets; 6226 p1 = &s[1].octets; 6227 sbuf_printf(sb, "%s Loopback %u" 6228 " Loopback %u", i == 0 ? 
"" : "\n", i, i + 1); 6229 6230 for (j = 0; j < nitems(stat_name); j++) 6231 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6232 *p0++, *p1++); 6233 } 6234 6235 rc = sbuf_finish(sb); 6236 sbuf_delete(sb); 6237 6238 return (rc); 6239} 6240 6241static int 6242sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6243{ 6244 int rc = 0; 6245 struct port_info *pi = arg1; 6246 struct sbuf *sb; 6247 6248 rc = sysctl_wire_old_buffer(req, 0); 6249 if (rc != 0) 6250 return(rc); 6251 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6252 if (sb == NULL) 6253 return (ENOMEM); 6254 6255 if (pi->linkdnrc < 0) 6256 sbuf_printf(sb, "n/a"); 6257 else 6258 sbuf_printf(sb, "%s", t4_link_down_rc_str(pi->linkdnrc)); 6259 6260 rc = sbuf_finish(sb); 6261 sbuf_delete(sb); 6262 6263 return (rc); 6264} 6265 6266struct mem_desc { 6267 unsigned int base; 6268 unsigned int limit; 6269 unsigned int idx; 6270}; 6271 6272static int 6273mem_desc_cmp(const void *a, const void *b) 6274{ 6275 return ((const struct mem_desc *)a)->base - 6276 ((const struct mem_desc *)b)->base; 6277} 6278 6279static void 6280mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6281 unsigned int to) 6282{ 6283 unsigned int size; 6284 6285 if (from == to) 6286 return; 6287 6288 size = to - from + 1; 6289 if (size == 0) 6290 return; 6291 6292 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6293 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6294} 6295 6296static int 6297sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6298{ 6299 struct adapter *sc = arg1; 6300 struct sbuf *sb; 6301 int rc, i, n; 6302 uint32_t lo, hi, used, alloc; 6303 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6304 static const char *region[] = { 6305 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6306 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6307 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", 6308 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6309 "RQUDP region:", "PBL region:", "TXPBL region:", 6310 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6311 "On-chip queues:" 6312 }; 6313 struct mem_desc avail[4]; 6314 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6315 struct mem_desc *md = mem; 6316 6317 rc = sysctl_wire_old_buffer(req, 0); 6318 if (rc != 0) 6319 return (rc); 6320 6321 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6322 if (sb == NULL) 6323 return (ENOMEM); 6324 6325 for (i = 0; i < nitems(mem); i++) { 6326 mem[i].limit = 0; 6327 mem[i].idx = i; 6328 } 6329 6330 /* Find and sort the populated memory ranges */ 6331 i = 0; 6332 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6333 if (lo & F_EDRAM0_ENABLE) { 6334 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6335 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6336 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6337 avail[i].idx = 0; 6338 i++; 6339 } 6340 if (lo & F_EDRAM1_ENABLE) { 6341 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 6342 avail[i].base = G_EDRAM1_BASE(hi) << 20; 6343 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 6344 avail[i].idx = 1; 6345 i++; 6346 } 6347 if (lo & F_EXT_MEM_ENABLE) { 6348 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 6349 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 6350 avail[i].limit = avail[i].base + 6351 (G_EXT_MEM_SIZE(hi) << 20); 6352 avail[i].idx = is_t5(sc) ? 
3 : 2; /* Call it MC0 for T5 */ 6353 i++; 6354 } 6355 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { 6356 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 6357 avail[i].base = G_EXT_MEM1_BASE(hi) << 20; 6358 avail[i].limit = avail[i].base + 6359 (G_EXT_MEM1_SIZE(hi) << 20); 6360 avail[i].idx = 4; 6361 i++; 6362 } 6363 if (!i) /* no memory available */ 6364 return 0; 6365 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); 6366 6367 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); 6368 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); 6369 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); 6370 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 6371 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); 6372 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); 6373 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); 6374 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); 6375 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); 6376 6377 /* the next few have explicit upper bounds */ 6378 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); 6379 md->limit = md->base - 1 + 6380 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * 6381 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); 6382 md++; 6383 6384 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); 6385 md->limit = md->base - 1 + 6386 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * 6387 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); 6388 md++; 6389 6390 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6391 if (chip_id(sc) <= CHELSIO_T5) 6392 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 6393 else 6394 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); 6395 md->limit = 0; 6396 } else { 6397 md->base = 0; 6398 md->idx = nitems(region); /* hide it */ 6399 } 6400 md++; 6401 6402#define ulp_region(reg) \ 6403 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ 6404 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) 6405 6406 ulp_region(RX_ISCSI); 6407 ulp_region(RX_TDDP); 6408 ulp_region(TX_TPT); 6409 ulp_region(RX_STAG); 6410 ulp_region(RX_RQ); 6411 ulp_region(RX_RQUDP); 6412 ulp_region(RX_PBL); 6413 ulp_region(TX_PBL); 6414#undef ulp_region 6415 6416 md->base = 0; 6417 md->idx = nitems(region); 6418 if (!is_t4(sc)) { 6419 uint32_t size = 0; 6420 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); 6421 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); 6422 6423 if (is_t5(sc)) { 6424 if (sge_ctrl & F_VFIFO_ENABLE) 6425 size = G_DBVFIFO_SIZE(fifo_size); 6426 } else 6427 size = G_T6_DBVFIFO_SIZE(fifo_size); 6428 6429 if (size) { 6430 md->base = G_BASEADDR(t4_read_reg(sc, 6431 A_SGE_DBVFIFO_BADDR)); 6432 md->limit = md->base + (size << 2) - 1; 6433 } 6434 } 6435 md++; 6436 6437 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 6438 md->limit = 0; 6439 md++; 6440 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 6441 md->limit = 0; 6442 md++; 6443 6444 md->base = sc->vres.ocq.start; 6445 if (sc->vres.ocq.size) 6446 md->limit = md->base + sc->vres.ocq.size - 1; 6447 else 6448 md->idx = nitems(region); /* hide it */ 6449 md++; 6450 6451 /* add any address-space holes, there can be up to 3 */ 6452 for (n = 0; n < i - 1; n++) 6453 if (avail[n].limit < avail[n + 1].base) 6454 (md++)->base = avail[n].limit; 6455 if (avail[n].limit) 6456 (md++)->base = avail[n].limit; 6457 6458 n = md - mem; 6459 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 6460 6461 for (lo = 0; lo < i; lo++) 6462 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 6463 avail[lo].limit - 1); 6464 6465 sbuf_printf(sb, "\n"); 6466 for (i = 
0; i < n; i++) { 6467 if (mem[i].idx >= nitems(region)) 6468 continue; /* skip holes */ 6469 if (!mem[i].limit) 6470 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 6471 mem_region_show(sb, region[mem[i].idx], mem[i].base, 6472 mem[i].limit); 6473 } 6474 6475 sbuf_printf(sb, "\n"); 6476 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 6477 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 6478 mem_region_show(sb, "uP RAM:", lo, hi); 6479 6480 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 6481 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 6482 mem_region_show(sb, "uP Extmem2:", lo, hi); 6483 6484 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 6485 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 6486 G_PMRXMAXPAGE(lo), 6487 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 6488 (lo & F_PMRXNUMCHN) ? 2 : 1); 6489 6490 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 6491 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 6492 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 6493 G_PMTXMAXPAGE(lo), 6494 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 6495 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 6496 sbuf_printf(sb, "%u p-structs\n", 6497 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 6498 6499 for (i = 0; i < 4; i++) { 6500 if (chip_id(sc) > CHELSIO_T5) 6501 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); 6502 else 6503 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 6504 if (is_t5(sc)) { 6505 used = G_T5_USED(lo); 6506 alloc = G_T5_ALLOC(lo); 6507 } else { 6508 used = G_USED(lo); 6509 alloc = G_ALLOC(lo); 6510 } 6511 /* For T6 these are MAC buffer groups */ 6512 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 6513 i, used, alloc); 6514 } 6515 for (i = 0; i < sc->chip_params->nchan; i++) { 6516 if (chip_id(sc) > CHELSIO_T5) 6517 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); 6518 else 6519 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 6520 if (is_t5(sc)) { 6521 used = G_T5_USED(lo); 6522 alloc = G_T5_ALLOC(lo); 6523 } else { 6524 used = G_USED(lo); 6525 alloc = G_ALLOC(lo); 6526 } 6527 /* For T6 these are MAC buffer groups */ 6528 sbuf_printf(sb, 6529 "\nLoopback %d using %u pages out of %u allocated", 6530 i, used, alloc); 6531 } 6532 6533 rc = sbuf_finish(sb); 6534 sbuf_delete(sb); 6535 6536 return (rc); 6537} 6538 6539static inline void 6540tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 6541{ 6542 *mask = x | y; 6543 y = htobe64(y); 6544 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 6545} 6546 6547static int 6548sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 6549{ 6550 struct adapter *sc = arg1; 6551 struct sbuf *sb; 6552 int rc, i; 6553 6554 MPASS(chip_id(sc) <= CHELSIO_T5); 6555 6556 rc = sysctl_wire_old_buffer(req, 0); 6557 if (rc != 0) 6558 return (rc); 6559 6560 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6561 if (sb == NULL) 6562 return (ENOMEM); 6563 6564 sbuf_printf(sb, 6565 "Idx Ethernet address Mask Vld Ports PF" 6566 " VF Replication P0 P1 P2 P3 ML"); 6567 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6568 uint64_t tcamx, tcamy, mask; 6569 uint32_t cls_lo, cls_hi; 6570 uint8_t addr[ETHER_ADDR_LEN]; 6571 6572 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 6573 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 6574 if (tcamx & tcamy) 6575 continue; 6576 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6577 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6578 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6579 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 6580 " %c %#x%4u%4d", i, 
addr[0], addr[1], addr[2], 6581 addr[3], addr[4], addr[5], (uintmax_t)mask, 6582 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 6583 G_PORTMAP(cls_hi), G_PF(cls_lo), 6584 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1); 6585 6586 if (cls_lo & F_REPLICATE) { 6587 struct fw_ldst_cmd ldst_cmd; 6588 6589 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6590 ldst_cmd.op_to_addrspace = 6591 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6592 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6593 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6594 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6595 ldst_cmd.u.mps.rplc.fid_idx = 6596 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6597 V_FW_LDST_CMD_IDX(i)); 6598 6599 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6600 "t4mps"); 6601 if (rc) 6602 break; 6603 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6604 sizeof(ldst_cmd), &ldst_cmd); 6605 end_synchronized_op(sc, 0); 6606 6607 if (rc != 0) { 6608 sbuf_printf(sb, "%36d", rc); 6609 rc = 0; 6610 } else { 6611 sbuf_printf(sb, " %08x %08x %08x %08x", 6612 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6613 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6614 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6615 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6616 } 6617 } else 6618 sbuf_printf(sb, "%36s", ""); 6619 6620 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 6621 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 6622 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 6623 } 6624 6625 if (rc) 6626 (void) sbuf_finish(sb); 6627 else 6628 rc = sbuf_finish(sb); 6629 sbuf_delete(sb); 6630 6631 return (rc); 6632} 6633 6634static int 6635sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) 6636{ 6637 struct adapter *sc = arg1; 6638 struct sbuf *sb; 6639 int rc, i; 6640 6641 MPASS(chip_id(sc) > CHELSIO_T5); 6642 6643 rc = sysctl_wire_old_buffer(req, 0); 6644 if (rc != 0) 6645 return (rc); 6646 6647 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6648 if (sb == NULL) 6649 return (ENOMEM); 6650 6651 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" 6652 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" 6653 " Replication" 6654 " P0 P1 P2 P3 ML\n"); 6655 6656 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6657 uint8_t dip_hit, vlan_vld, lookup_type, port_num; 6658 uint16_t ivlan; 6659 uint64_t tcamx, tcamy, val, mask; 6660 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; 6661 uint8_t addr[ETHER_ADDR_LEN]; 6662 6663 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); 6664 if (i < 256) 6665 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); 6666 else 6667 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); 6668 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6669 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6670 tcamy = G_DMACH(val) << 32; 6671 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6672 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6673 lookup_type = G_DATALKPTYPE(data2); 6674 port_num = G_DATAPORTNUM(data2); 6675 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6676 /* Inner header VNI */ 6677 vniy = ((data2 & F_DATAVIDH2) << 23) | 6678 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6679 dip_hit = data2 & F_DATADIPHIT; 6680 vlan_vld = 0; 6681 } else { 6682 vniy = 0; 6683 dip_hit = 0; 6684 vlan_vld = data2 & F_DATAVIDH2; 6685 ivlan = G_VIDL(val); 6686 } 6687 6688 ctl |= V_CTLXYBITSEL(1); 6689 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6690 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6691 tcamx = G_DMACH(val) << 32; 6692 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6693 data2 = t4_read_reg(sc, 
A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6694 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6695 /* Inner header VNI mask */ 6696 vnix = ((data2 & F_DATAVIDH2) << 23) | 6697 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6698 } else 6699 vnix = 0; 6700 6701 if (tcamx & tcamy) 6702 continue; 6703 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6704 6705 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6706 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6707 6708 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6709 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6710 "%012jx %06x %06x - - %3c" 6711 " 'I' %4x %3c %#x%4u%4d", i, addr[0], 6712 addr[1], addr[2], addr[3], addr[4], addr[5], 6713 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', 6714 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6715 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6716 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6717 } else { 6718 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6719 "%012jx - - ", i, addr[0], addr[1], 6720 addr[2], addr[3], addr[4], addr[5], 6721 (uintmax_t)mask); 6722 6723 if (vlan_vld) 6724 sbuf_printf(sb, "%4u Y ", ivlan); 6725 else 6726 sbuf_printf(sb, " - N "); 6727 6728 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", 6729 lookup_type ? 'I' : 'O', port_num, 6730 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6731 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6732 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6733 } 6734 6735 6736 if (cls_lo & F_T6_REPLICATE) { 6737 struct fw_ldst_cmd ldst_cmd; 6738 6739 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6740 ldst_cmd.op_to_addrspace = 6741 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6742 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6743 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6744 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6745 ldst_cmd.u.mps.rplc.fid_idx = 6746 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6747 V_FW_LDST_CMD_IDX(i)); 6748 6749 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6750 "t6mps"); 6751 if (rc) 6752 break; 6753 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6754 sizeof(ldst_cmd), &ldst_cmd); 6755 end_synchronized_op(sc, 0); 6756 6757 if (rc != 0) { 6758 sbuf_printf(sb, "%72d", rc); 6759 rc = 0; 6760 } else { 6761 sbuf_printf(sb, " %08x %08x %08x %08x" 6762 " %08x %08x %08x %08x", 6763 be32toh(ldst_cmd.u.mps.rplc.rplc255_224), 6764 be32toh(ldst_cmd.u.mps.rplc.rplc223_192), 6765 be32toh(ldst_cmd.u.mps.rplc.rplc191_160), 6766 be32toh(ldst_cmd.u.mps.rplc.rplc159_128), 6767 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6768 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6769 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6770 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6771 } 6772 } else 6773 sbuf_printf(sb, "%72s", ""); 6774 6775 sbuf_printf(sb, "%4u%3u%3u%3u %#x", 6776 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), 6777 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), 6778 (cls_lo >> S_T6_MULTILISTEN0) & 0xf); 6779 } 6780 6781 if (rc) 6782 (void) sbuf_finish(sb); 6783 else 6784 rc = sbuf_finish(sb); 6785 sbuf_delete(sb); 6786 6787 return (rc); 6788} 6789 6790static int 6791sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 6792{ 6793 struct adapter *sc = arg1; 6794 struct sbuf *sb; 6795 int rc; 6796 uint16_t mtus[NMTUS]; 6797 6798 rc = sysctl_wire_old_buffer(req, 0); 6799 if (rc != 0) 6800 return (rc); 6801 6802 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6803 if (sb == NULL) 6804 return (ENOMEM); 6805 6806 t4_read_mtu_tbl(sc, mtus, NULL); 6807 6808 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 6809 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 6810 
mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 6811 mtus[14], mtus[15]); 6812 6813 rc = sbuf_finish(sb); 6814 sbuf_delete(sb); 6815 6816 return (rc); 6817} 6818 6819static int 6820sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 6821{ 6822 struct adapter *sc = arg1; 6823 struct sbuf *sb; 6824 int rc, i; 6825 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; 6826 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; 6827 static const char *tx_stats[MAX_PM_NSTATS] = { 6828 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", 6829 "Tx FIFO wait", NULL, "Tx latency" 6830 }; 6831 static const char *rx_stats[MAX_PM_NSTATS] = { 6832 "Read:", "Write bypass:", "Write mem:", "Flush:", 6833 " Rx FIFO wait", NULL, "Rx latency" 6834 }; 6835 6836 rc = sysctl_wire_old_buffer(req, 0); 6837 if (rc != 0) 6838 return (rc); 6839 6840 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6841 if (sb == NULL) 6842 return (ENOMEM); 6843 6844 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); 6845 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); 6846 6847 sbuf_printf(sb, " Tx pcmds Tx bytes"); 6848 for (i = 0; i < 4; i++) { 6849 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6850 tx_cyc[i]); 6851 } 6852 6853 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 6854 for (i = 0; i < 4; i++) { 6855 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6856 rx_cyc[i]); 6857 } 6858 6859 if (chip_id(sc) > CHELSIO_T5) { 6860 sbuf_printf(sb, 6861 "\n Total wait Total occupancy"); 6862 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6863 tx_cyc[i]); 6864 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6865 rx_cyc[i]); 6866 6867 i += 2; 6868 MPASS(i < nitems(tx_stats)); 6869 6870 sbuf_printf(sb, 6871 "\n Reads Total wait"); 6872 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6873 tx_cyc[i]); 6874 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6875 rx_cyc[i]); 6876 } 6877 6878 rc = sbuf_finish(sb); 6879 sbuf_delete(sb); 6880 6881 return (rc); 6882} 6883 6884static int 6885sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 6886{ 6887 struct adapter *sc = arg1; 6888 struct sbuf *sb; 6889 int rc; 6890 struct tp_rdma_stats stats; 6891 6892 rc = sysctl_wire_old_buffer(req, 0); 6893 if (rc != 0) 6894 return (rc); 6895 6896 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6897 if (sb == NULL) 6898 return (ENOMEM); 6899 6900 mtx_lock(&sc->reg_lock); 6901 t4_tp_get_rdma_stats(sc, &stats); 6902 mtx_unlock(&sc->reg_lock); 6903 6904 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); 6905 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); 6906 6907 rc = sbuf_finish(sb); 6908 sbuf_delete(sb); 6909 6910 return (rc); 6911} 6912 6913static int 6914sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 6915{ 6916 struct adapter *sc = arg1; 6917 struct sbuf *sb; 6918 int rc; 6919 struct tp_tcp_stats v4, v6; 6920 6921 rc = sysctl_wire_old_buffer(req, 0); 6922 if (rc != 0) 6923 return (rc); 6924 6925 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6926 if (sb == NULL) 6927 return (ENOMEM); 6928 6929 mtx_lock(&sc->reg_lock); 6930 t4_tp_get_tcp_stats(sc, &v4, &v6); 6931 mtx_unlock(&sc->reg_lock); 6932 6933 sbuf_printf(sb, 6934 " IP IPv6\n"); 6935 sbuf_printf(sb, "OutRsts: %20u %20u\n", 6936 v4.tcp_out_rsts, v6.tcp_out_rsts); 6937 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 6938 v4.tcp_in_segs, v6.tcp_in_segs); 6939 sbuf_printf(sb, "OutSegs: %20ju %20ju\n", 6940 v4.tcp_out_segs, v6.tcp_out_segs); 6941 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 6942 v4.tcp_retrans_segs, v6.tcp_retrans_segs); 6943 
6944 rc = sbuf_finish(sb); 6945 sbuf_delete(sb); 6946 6947 return (rc); 6948} 6949 6950static int 6951sysctl_tids(SYSCTL_HANDLER_ARGS) 6952{ 6953 struct adapter *sc = arg1; 6954 struct sbuf *sb; 6955 int rc; 6956 struct tid_info *t = &sc->tids; 6957 6958 rc = sysctl_wire_old_buffer(req, 0); 6959 if (rc != 0) 6960 return (rc); 6961 6962 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6963 if (sb == NULL) 6964 return (ENOMEM); 6965 6966 if (t->natids) { 6967 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 6968 t->atids_in_use); 6969 } 6970 6971 if (t->ntids) { 6972 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6973 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 6974 6975 if (b) { 6976 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1, 6977 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6978 t->ntids - 1); 6979 } else { 6980 sbuf_printf(sb, "TID range: %u-%u", 6981 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6982 t->ntids - 1); 6983 } 6984 } else 6985 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1); 6986 sbuf_printf(sb, ", in use: %u\n", 6987 atomic_load_acq_int(&t->tids_in_use)); 6988 } 6989 6990 if (t->nstids) { 6991 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 6992 t->stid_base + t->nstids - 1, t->stids_in_use); 6993 } 6994 6995 if (t->nftids) { 6996 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 6997 t->ftid_base + t->nftids - 1); 6998 } 6999 7000 if (t->netids) { 7001 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 7002 t->etid_base + t->netids - 1); 7003 } 7004 7005 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 7006 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 7007 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 7008 7009 rc = sbuf_finish(sb); 7010 sbuf_delete(sb); 7011 7012 return (rc); 7013} 7014 7015static int 7016sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 7017{ 7018 struct adapter *sc = arg1; 7019 struct sbuf *sb; 7020 int rc; 7021 struct tp_err_stats stats; 7022 7023 rc = sysctl_wire_old_buffer(req, 0); 7024 if (rc != 0) 7025 return (rc); 7026 7027 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7028 if (sb == NULL) 7029 return (ENOMEM); 7030 7031 mtx_lock(&sc->reg_lock); 7032 t4_tp_get_err_stats(sc, &stats); 7033 mtx_unlock(&sc->reg_lock); 7034 7035 if (sc->chip_params->nchan > 2) { 7036 sbuf_printf(sb, " channel 0 channel 1" 7037 " channel 2 channel 3\n"); 7038 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 7039 stats.mac_in_errs[0], stats.mac_in_errs[1], 7040 stats.mac_in_errs[2], stats.mac_in_errs[3]); 7041 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 7042 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 7043 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 7044 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 7045 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 7046 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 7047 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 7048 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 7049 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 7050 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 7051 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 7052 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 7053 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 7054 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 7055 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 7056 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 7057 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], 7058 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 7059 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u 
%10u\n\n", 7060 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 7061 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); 7062 } else { 7063 sbuf_printf(sb, " channel 0 channel 1\n"); 7064 sbuf_printf(sb, "macInErrs: %10u %10u\n", 7065 stats.mac_in_errs[0], stats.mac_in_errs[1]); 7066 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 7067 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 7068 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 7069 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 7070 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 7071 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 7072 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 7073 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 7074 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 7075 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 7076 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 7077 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 7078 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 7079 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 7080 } 7081 7082 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 7083 stats.ofld_no_neigh, stats.ofld_cong_defer); 7084 7085 rc = sbuf_finish(sb); 7086 sbuf_delete(sb); 7087 7088 return (rc); 7089} 7090 7091static int 7092sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) 7093{ 7094 struct adapter *sc = arg1; 7095 struct tp_params *tpp = &sc->params.tp; 7096 u_int mask; 7097 int rc; 7098 7099 mask = tpp->la_mask >> 16; 7100 rc = sysctl_handle_int(oidp, &mask, 0, req); 7101 if (rc != 0 || req->newptr == NULL) 7102 return (rc); 7103 if (mask > 0xffff) 7104 return (EINVAL); 7105 tpp->la_mask = mask << 16; 7106 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); 7107 7108 return (0); 7109} 7110 7111struct field_desc { 7112 const char *name; 7113 u_int start; 7114 u_int width; 7115}; 7116 7117static void 7118field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 7119{ 7120 char buf[32]; 7121 int line_size = 0; 7122 7123 while (f->name) { 7124 uint64_t mask = (1ULL << f->width) - 1; 7125 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 7126 ((uintmax_t)v >> f->start) & mask); 7127 7128 if (line_size + len >= 79) { 7129 line_size = 8; 7130 sbuf_printf(sb, "\n "); 7131 } 7132 sbuf_printf(sb, "%s ", buf); 7133 line_size += len + 1; 7134 f++; 7135 } 7136 sbuf_printf(sb, "\n"); 7137} 7138 7139static const struct field_desc tp_la0[] = { 7140 { "RcfOpCodeOut", 60, 4 }, 7141 { "State", 56, 4 }, 7142 { "WcfState", 52, 4 }, 7143 { "RcfOpcSrcOut", 50, 2 }, 7144 { "CRxError", 49, 1 }, 7145 { "ERxError", 48, 1 }, 7146 { "SanityFailed", 47, 1 }, 7147 { "SpuriousMsg", 46, 1 }, 7148 { "FlushInputMsg", 45, 1 }, 7149 { "FlushInputCpl", 44, 1 }, 7150 { "RssUpBit", 43, 1 }, 7151 { "RssFilterHit", 42, 1 }, 7152 { "Tid", 32, 10 }, 7153 { "InitTcb", 31, 1 }, 7154 { "LineNumber", 24, 7 }, 7155 { "Emsg", 23, 1 }, 7156 { "EdataOut", 22, 1 }, 7157 { "Cmsg", 21, 1 }, 7158 { "CdataOut", 20, 1 }, 7159 { "EreadPdu", 19, 1 }, 7160 { "CreadPdu", 18, 1 }, 7161 { "TunnelPkt", 17, 1 }, 7162 { "RcfPeerFin", 16, 1 }, 7163 { "RcfReasonOut", 12, 4 }, 7164 { "TxCchannel", 10, 2 }, 7165 { "RcfTxChannel", 8, 2 }, 7166 { "RxEchannel", 6, 2 }, 7167 { "RcfRxChannel", 5, 1 }, 7168 { "RcfDataOutSrdy", 4, 1 }, 7169 { "RxDvld", 3, 1 }, 7170 { "RxOoDvld", 2, 1 }, 7171 { "RxCongestion", 1, 1 }, 7172 { "TxCongestion", 0, 1 }, 7173 { NULL } 7174}; 7175 7176static const struct field_desc tp_la1[] = { 7177 { "CplCmdIn", 56, 8 }, 7178 { "CplCmdOut", 48, 8 }, 7179 { "ESynOut", 47, 1 }, 7180 { "EAckOut", 46, 1 }, 7181 { "EFinOut", 
45, 1 }, 7182 { "ERstOut", 44, 1 }, 7183 { "SynIn", 43, 1 }, 7184 { "AckIn", 42, 1 }, 7185 { "FinIn", 41, 1 }, 7186 { "RstIn", 40, 1 }, 7187 { "DataIn", 39, 1 }, 7188 { "DataInVld", 38, 1 }, 7189 { "PadIn", 37, 1 }, 7190 { "RxBufEmpty", 36, 1 }, 7191 { "RxDdp", 35, 1 }, 7192 { "RxFbCongestion", 34, 1 }, 7193 { "TxFbCongestion", 33, 1 }, 7194 { "TxPktSumSrdy", 32, 1 }, 7195 { "RcfUlpType", 28, 4 }, 7196 { "Eread", 27, 1 }, 7197 { "Ebypass", 26, 1 }, 7198 { "Esave", 25, 1 }, 7199 { "Static0", 24, 1 }, 7200 { "Cread", 23, 1 }, 7201 { "Cbypass", 22, 1 }, 7202 { "Csave", 21, 1 }, 7203 { "CPktOut", 20, 1 }, 7204 { "RxPagePoolFull", 18, 2 }, 7205 { "RxLpbkPkt", 17, 1 }, 7206 { "TxLpbkPkt", 16, 1 }, 7207 { "RxVfValid", 15, 1 }, 7208 { "SynLearned", 14, 1 }, 7209 { "SetDelEntry", 13, 1 }, 7210 { "SetInvEntry", 12, 1 }, 7211 { "CpcmdDvld", 11, 1 }, 7212 { "CpcmdSave", 10, 1 }, 7213 { "RxPstructsFull", 8, 2 }, 7214 { "EpcmdDvld", 7, 1 }, 7215 { "EpcmdFlush", 6, 1 }, 7216 { "EpcmdTrimPrefix", 5, 1 }, 7217 { "EpcmdTrimPostfix", 4, 1 }, 7218 { "ERssIp4Pkt", 3, 1 }, 7219 { "ERssIp6Pkt", 2, 1 }, 7220 { "ERssTcpUdpPkt", 1, 1 }, 7221 { "ERssFceFipPkt", 0, 1 }, 7222 { NULL } 7223}; 7224 7225static const struct field_desc tp_la2[] = { 7226 { "CplCmdIn", 56, 8 }, 7227 { "MpsVfVld", 55, 1 }, 7228 { "MpsPf", 52, 3 }, 7229 { "MpsVf", 44, 8 }, 7230 { "SynIn", 43, 1 }, 7231 { "AckIn", 42, 1 }, 7232 { "FinIn", 41, 1 }, 7233 { "RstIn", 40, 1 }, 7234 { "DataIn", 39, 1 }, 7235 { "DataInVld", 38, 1 }, 7236 { "PadIn", 37, 1 }, 7237 { "RxBufEmpty", 36, 1 }, 7238 { "RxDdp", 35, 1 }, 7239 { "RxFbCongestion", 34, 1 }, 7240 { "TxFbCongestion", 33, 1 }, 7241 { "TxPktSumSrdy", 32, 1 }, 7242 { "RcfUlpType", 28, 4 }, 7243 { "Eread", 27, 1 }, 7244 { "Ebypass", 26, 1 }, 7245 { "Esave", 25, 1 }, 7246 { "Static0", 24, 1 }, 7247 { "Cread", 23, 1 }, 7248 { "Cbypass", 22, 1 }, 7249 { "Csave", 21, 1 }, 7250 { "CPktOut", 20, 1 }, 7251 { "RxPagePoolFull", 18, 2 }, 7252 { "RxLpbkPkt", 17, 1 }, 7253 { "TxLpbkPkt", 16, 1 }, 7254 { "RxVfValid", 15, 1 }, 7255 { "SynLearned", 14, 1 }, 7256 { "SetDelEntry", 13, 1 }, 7257 { "SetInvEntry", 12, 1 }, 7258 { "CpcmdDvld", 11, 1 }, 7259 { "CpcmdSave", 10, 1 }, 7260 { "RxPstructsFull", 8, 2 }, 7261 { "EpcmdDvld", 7, 1 }, 7262 { "EpcmdFlush", 6, 1 }, 7263 { "EpcmdTrimPrefix", 5, 1 }, 7264 { "EpcmdTrimPostfix", 4, 1 }, 7265 { "ERssIp4Pkt", 3, 1 }, 7266 { "ERssIp6Pkt", 2, 1 }, 7267 { "ERssTcpUdpPkt", 1, 1 }, 7268 { "ERssFceFipPkt", 0, 1 }, 7269 { NULL } 7270}; 7271 7272static void 7273tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 7274{ 7275 7276 field_desc_show(sb, *p, tp_la0); 7277} 7278 7279static void 7280tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 7281{ 7282 7283 if (idx) 7284 sbuf_printf(sb, "\n"); 7285 field_desc_show(sb, p[0], tp_la0); 7286 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7287 field_desc_show(sb, p[1], tp_la0); 7288} 7289 7290static void 7291tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 7292{ 7293 7294 if (idx) 7295 sbuf_printf(sb, "\n"); 7296 field_desc_show(sb, p[0], tp_la0); 7297 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7298 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 7299} 7300 7301static int 7302sysctl_tp_la(SYSCTL_HANDLER_ARGS) 7303{ 7304 struct adapter *sc = arg1; 7305 struct sbuf *sb; 7306 uint64_t *buf, *p; 7307 int rc; 7308 u_int i, inc; 7309 void (*show_func)(struct sbuf *, uint64_t *, int); 7310 7311 rc = sysctl_wire_old_buffer(req, 0); 7312 if (rc != 0) 7313 return (rc); 7314 7315 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7316 if (sb == NULL) 7317 return (ENOMEM); 7318 7319 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 7320 7321 t4_tp_read_la(sc, buf, NULL); 7322 p = buf; 7323 7324 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 7325 case 2: 7326 inc = 2; 7327 show_func = tp_la_show2; 7328 break; 7329 case 3: 7330 inc = 2; 7331 show_func = tp_la_show3; 7332 break; 7333 default: 7334 inc = 1; 7335 show_func = tp_la_show; 7336 } 7337 7338 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 7339 (*show_func)(sb, p, i); 7340 7341 rc = sbuf_finish(sb); 7342 sbuf_delete(sb); 7343 free(buf, M_CXGBE); 7344 return (rc); 7345} 7346 7347static int 7348sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 7349{ 7350 struct adapter *sc = arg1; 7351 struct sbuf *sb; 7352 int rc; 7353 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 7354 7355 rc = sysctl_wire_old_buffer(req, 0); 7356 if (rc != 0) 7357 return (rc); 7358 7359 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7360 if (sb == NULL) 7361 return (ENOMEM); 7362 7363 t4_get_chan_txrate(sc, nrate, orate); 7364 7365 if (sc->chip_params->nchan > 2) { 7366 sbuf_printf(sb, " channel 0 channel 1" 7367 " channel 2 channel 3\n"); 7368 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 7369 nrate[0], nrate[1], nrate[2], nrate[3]); 7370 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 7371 orate[0], orate[1], orate[2], orate[3]); 7372 } else { 7373 sbuf_printf(sb, " channel 0 channel 1\n"); 7374 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 7375 nrate[0], nrate[1]); 7376 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 7377 orate[0], orate[1]); 7378 } 7379 7380 rc = sbuf_finish(sb); 7381 sbuf_delete(sb); 7382 7383 return (rc); 7384} 7385 7386static int 7387sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 7388{ 7389 struct adapter *sc = arg1; 7390 struct sbuf *sb; 7391 uint32_t *buf, *p; 7392 int rc, i; 7393 7394 rc = sysctl_wire_old_buffer(req, 0); 7395 if (rc != 0) 7396 return (rc); 7397 7398 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7399 if (sb == NULL) 7400 return (ENOMEM); 7401 7402 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 7403 M_ZERO | M_WAITOK); 7404 7405 t4_ulprx_read_la(sc, buf); 7406 p = buf; 7407 7408 sbuf_printf(sb, " Pcmd Type Message" 7409 " Data"); 7410 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 7411 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 7412 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 7413 } 7414 7415 rc = sbuf_finish(sb); 7416 sbuf_delete(sb); 7417 free(buf, M_CXGBE); 7418 return (rc); 7419} 7420 7421static int 7422sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 7423{ 7424 struct adapter *sc = arg1; 7425 struct sbuf *sb; 7426 int rc, v; 7427 7428 rc = sysctl_wire_old_buffer(req, 0); 7429 if (rc != 0) 7430 return (rc); 7431 7432 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7433 if (sb == NULL) 7434 return (ENOMEM); 7435 7436 v = t4_read_reg(sc, A_SGE_STAT_CFG); 7437 if (G_STATSOURCE_T5(v) == 7) { 7438 if (G_STATMODE(v) == 0) { 7439 sbuf_printf(sb, "total %d, incomplete %d", 7440 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7441 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7442 } else if (G_STATMODE(v) == 1) { 7443 sbuf_printf(sb, "total %d, data 
overflow %d", 7444 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7445 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7446 } 7447 } 7448 rc = sbuf_finish(sb); 7449 sbuf_delete(sb); 7450 7451 return (rc); 7452} 7453 7454static int 7455sysctl_tc_params(SYSCTL_HANDLER_ARGS) 7456{ 7457 struct adapter *sc = arg1; 7458 struct tx_sched_class *tc; 7459 struct t4_sched_class_params p; 7460 struct sbuf *sb; 7461 int i, rc, port_id, flags, mbps, gbps; 7462 7463 rc = sysctl_wire_old_buffer(req, 0); 7464 if (rc != 0) 7465 return (rc); 7466 7467 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7468 if (sb == NULL) 7469 return (ENOMEM); 7470 7471 port_id = arg2 >> 16; 7472 MPASS(port_id < sc->params.nports); 7473 MPASS(sc->port[port_id] != NULL); 7474 i = arg2 & 0xffff; 7475 MPASS(i < sc->chip_params->nsched_cls); 7476 tc = &sc->port[port_id]->tc[i]; 7477 7478 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7479 "t4tc_p"); 7480 if (rc) 7481 goto done; 7482 flags = tc->flags; 7483 p = tc->params; 7484 end_synchronized_op(sc, LOCK_HELD); 7485 7486 if ((flags & TX_SC_OK) == 0) { 7487 sbuf_printf(sb, "none"); 7488 goto done; 7489 } 7490 7491 if (p.level == SCHED_CLASS_LEVEL_CL_WRR) { 7492 sbuf_printf(sb, "cl-wrr weight %u", p.weight); 7493 goto done; 7494 } else if (p.level == SCHED_CLASS_LEVEL_CL_RL) 7495 sbuf_printf(sb, "cl-rl"); 7496 else if (p.level == SCHED_CLASS_LEVEL_CH_RL) 7497 sbuf_printf(sb, "ch-rl"); 7498 else { 7499 rc = ENXIO; 7500 goto done; 7501 } 7502 7503 if (p.ratemode == SCHED_CLASS_RATEMODE_REL) { 7504 /* XXX: top speed or actual link speed? */ 7505 gbps = port_top_speed(sc->port[port_id]); 7506 sbuf_printf(sb, " %u%% of %uGbps", p.maxrate, gbps); 7507 } 7508 else if (p.ratemode == SCHED_CLASS_RATEMODE_ABS) { 7509 switch (p.rateunit) { 7510 case SCHED_CLASS_RATEUNIT_BITS: 7511 mbps = p.maxrate / 1000; 7512 gbps = p.maxrate / 1000000; 7513 if (p.maxrate == gbps * 1000000) 7514 sbuf_printf(sb, " %uGbps", gbps); 7515 else if (p.maxrate == mbps * 1000) 7516 sbuf_printf(sb, " %uMbps", mbps); 7517 else 7518 sbuf_printf(sb, " %uKbps", p.maxrate); 7519 break; 7520 case SCHED_CLASS_RATEUNIT_PKTS: 7521 sbuf_printf(sb, " %upps", p.maxrate); 7522 break; 7523 default: 7524 rc = ENXIO; 7525 goto done; 7526 } 7527 } 7528 7529 switch (p.mode) { 7530 case SCHED_CLASS_MODE_CLASS: 7531 sbuf_printf(sb, " aggregate"); 7532 break; 7533 case SCHED_CLASS_MODE_FLOW: 7534 sbuf_printf(sb, " per-flow"); 7535 break; 7536 default: 7537 rc = ENXIO; 7538 goto done; 7539 } 7540 7541done: 7542 if (rc == 0) 7543 rc = sbuf_finish(sb); 7544 sbuf_delete(sb); 7545 7546 return (rc); 7547} 7548#endif 7549 7550#ifdef TCP_OFFLOAD 7551static void 7552unit_conv(char *buf, size_t len, u_int val, u_int factor) 7553{ 7554 u_int rem = val % factor; 7555 7556 if (rem == 0) 7557 snprintf(buf, len, "%u", val / factor); 7558 else { 7559 while (rem % 10 == 0) 7560 rem /= 10; 7561 snprintf(buf, len, "%u.%u", val / factor, rem); 7562 } 7563} 7564 7565static int 7566sysctl_tp_tick(SYSCTL_HANDLER_ARGS) 7567{ 7568 struct adapter *sc = arg1; 7569 char buf[16]; 7570 u_int res, re; 7571 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7572 7573 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 7574 switch (arg2) { 7575 case 0: 7576 /* timer_tick */ 7577 re = G_TIMERRESOLUTION(res); 7578 break; 7579 case 1: 7580 /* TCP timestamp tick */ 7581 re = G_TIMESTAMPRESOLUTION(res); 7582 break; 7583 case 2: 7584 /* DACK tick */ 7585 re = G_DELAYEDACKRESOLUTION(res); 7586 break; 7587 default: 7588 return (EDOOFUS); 7589 } 7590 7591 unit_conv(buf, sizeof(buf), 
(cclk_ps << re), 1000000); 7592 7593 return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); 7594} 7595 7596static int 7597sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) 7598{ 7599 struct adapter *sc = arg1; 7600 u_int res, dack_re, v; 7601 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7602 7603 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 7604 dack_re = G_DELAYEDACKRESOLUTION(res); 7605 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER); 7606 7607 return (sysctl_handle_int(oidp, &v, 0, req)); 7608} 7609 7610static int 7611sysctl_tp_timer(SYSCTL_HANDLER_ARGS) 7612{ 7613 struct adapter *sc = arg1; 7614 int reg = arg2; 7615 u_int tre; 7616 u_long tp_tick_us, v; 7617 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7618 7619 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || 7620 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || 7621 reg == A_TP_KEEP_IDLE || A_TP_KEEP_INTVL || reg == A_TP_INIT_SRTT || 7622 reg == A_TP_FINWAIT2_TIMER); 7623 7624 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); 7625 tp_tick_us = (cclk_ps << tre) / 1000000; 7626 7627 if (reg == A_TP_INIT_SRTT) 7628 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); 7629 else 7630 v = tp_tick_us * t4_read_reg(sc, reg); 7631 7632 return (sysctl_handle_long(oidp, &v, 0, req)); 7633} 7634#endif 7635 7636static uint32_t 7637fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf) 7638{ 7639 uint32_t mode; 7640 7641 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 7642 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 7643 7644 if (fconf & F_FRAGMENTATION) 7645 mode |= T4_FILTER_IP_FRAGMENT; 7646 7647 if (fconf & F_MPSHITTYPE) 7648 mode |= T4_FILTER_MPS_HIT_TYPE; 7649 7650 if (fconf & F_MACMATCH) 7651 mode |= T4_FILTER_MAC_IDX; 7652 7653 if (fconf & F_ETHERTYPE) 7654 mode |= T4_FILTER_ETH_TYPE; 7655 7656 if (fconf & F_PROTOCOL) 7657 mode |= T4_FILTER_IP_PROTO; 7658 7659 if (fconf & F_TOS) 7660 mode |= T4_FILTER_IP_TOS; 7661 7662 if (fconf & F_VLAN) 7663 mode |= T4_FILTER_VLAN; 7664 7665 if (fconf & F_VNIC_ID) { 7666 mode |= T4_FILTER_VNIC; 7667 if (iconf & F_VNIC) 7668 mode |= T4_FILTER_IC_VNIC; 7669 } 7670 7671 if (fconf & F_PORT) 7672 mode |= T4_FILTER_PORT; 7673 7674 if (fconf & F_FCOE) 7675 mode |= T4_FILTER_FCoE; 7676 7677 return (mode); 7678} 7679 7680static uint32_t 7681mode_to_fconf(uint32_t mode) 7682{ 7683 uint32_t fconf = 0; 7684 7685 if (mode & T4_FILTER_IP_FRAGMENT) 7686 fconf |= F_FRAGMENTATION; 7687 7688 if (mode & T4_FILTER_MPS_HIT_TYPE) 7689 fconf |= F_MPSHITTYPE; 7690 7691 if (mode & T4_FILTER_MAC_IDX) 7692 fconf |= F_MACMATCH; 7693 7694 if (mode & T4_FILTER_ETH_TYPE) 7695 fconf |= F_ETHERTYPE; 7696 7697 if (mode & T4_FILTER_IP_PROTO) 7698 fconf |= F_PROTOCOL; 7699 7700 if (mode & T4_FILTER_IP_TOS) 7701 fconf |= F_TOS; 7702 7703 if (mode & T4_FILTER_VLAN) 7704 fconf |= F_VLAN; 7705 7706 if (mode & T4_FILTER_VNIC) 7707 fconf |= F_VNIC_ID; 7708 7709 if (mode & T4_FILTER_PORT) 7710 fconf |= F_PORT; 7711 7712 if (mode & T4_FILTER_FCoE) 7713 fconf |= F_FCOE; 7714 7715 return (fconf); 7716} 7717 7718static uint32_t 7719mode_to_iconf(uint32_t mode) 7720{ 7721 7722 if (mode & T4_FILTER_IC_VNIC) 7723 return (F_VNIC); 7724 return (0); 7725} 7726 7727static int check_fspec_against_fconf_iconf(struct adapter *sc, 7728 struct t4_filter_specification *fs) 7729{ 7730 struct tp_params *tpp = &sc->params.tp; 7731 uint32_t fconf = 0; 7732 7733 if (fs->val.frag || fs->mask.frag) 7734 fconf |= F_FRAGMENTATION; 7735 7736 if (fs->val.matchtype || fs->mask.matchtype) 7737 fconf 
|= F_MPSHITTYPE; 7738 7739 if (fs->val.macidx || fs->mask.macidx) 7740 fconf |= F_MACMATCH; 7741 7742 if (fs->val.ethtype || fs->mask.ethtype) 7743 fconf |= F_ETHERTYPE; 7744 7745 if (fs->val.proto || fs->mask.proto) 7746 fconf |= F_PROTOCOL; 7747 7748 if (fs->val.tos || fs->mask.tos) 7749 fconf |= F_TOS; 7750 7751 if (fs->val.vlan_vld || fs->mask.vlan_vld) 7752 fconf |= F_VLAN; 7753 7754 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 7755 fconf |= F_VNIC_ID; 7756 if (tpp->ingress_config & F_VNIC) 7757 return (EINVAL); 7758 } 7759 7760 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 7761 fconf |= F_VNIC_ID; 7762 if ((tpp->ingress_config & F_VNIC) == 0) 7763 return (EINVAL); 7764 } 7765 7766 if (fs->val.iport || fs->mask.iport) 7767 fconf |= F_PORT; 7768 7769 if (fs->val.fcoe || fs->mask.fcoe) 7770 fconf |= F_FCOE; 7771 7772 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 7773 return (E2BIG); 7774 7775 return (0); 7776} 7777 7778static int 7779get_filter_mode(struct adapter *sc, uint32_t *mode) 7780{ 7781 struct tp_params *tpp = &sc->params.tp; 7782 7783 /* 7784 * We trust the cached values of the relevant TP registers. This means 7785 * things work reliably only if writes to those registers are always via 7786 * t4_set_filter_mode. 7787 */ 7788 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 7789 7790 return (0); 7791} 7792 7793static int 7794set_filter_mode(struct adapter *sc, uint32_t mode) 7795{ 7796 struct tp_params *tpp = &sc->params.tp; 7797 uint32_t fconf, iconf; 7798 int rc; 7799 7800 iconf = mode_to_iconf(mode); 7801 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 7802 /* 7803 * For now we just complain if A_TP_INGRESS_CONFIG is not 7804 * already set to the correct value for the requested filter 7805 * mode. It's not clear if it's safe to write to this register 7806 * on the fly. (And we trust the cached value of the register). 7807 */ 7808 return (EBUSY); 7809 } 7810 7811 fconf = mode_to_fconf(mode); 7812 7813 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7814 "t4setfm"); 7815 if (rc) 7816 return (rc); 7817 7818 if (sc->tids.ftids_in_use > 0) { 7819 rc = EBUSY; 7820 goto done; 7821 } 7822 7823#ifdef TCP_OFFLOAD 7824 if (uld_active(sc, ULD_TOM)) { 7825 rc = EBUSY; 7826 goto done; 7827 } 7828#endif 7829 7830 rc = -t4_set_filter_mode(sc, fconf); 7831done: 7832 end_synchronized_op(sc, LOCK_HELD); 7833 return (rc); 7834} 7835 7836static inline uint64_t 7837get_filter_hits(struct adapter *sc, uint32_t fid) 7838{ 7839 uint32_t tcb_addr; 7840 7841 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 7842 (fid + sc->tids.ftid_base) * TCB_SIZE; 7843 7844 if (is_t4(sc)) { 7845 uint64_t hits; 7846 7847 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); 7848 return (be64toh(hits)); 7849 } else { 7850 uint32_t hits; 7851 7852 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); 7853 return (be32toh(hits)); 7854 } 7855} 7856 7857static int 7858get_filter(struct adapter *sc, struct t4_filter *t) 7859{ 7860 int i, rc, nfilters = sc->tids.nftids; 7861 struct filter_entry *f; 7862 7863 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7864 "t4getf"); 7865 if (rc) 7866 return (rc); 7867 7868 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 7869 t->idx >= nfilters) { 7870 t->idx = 0xffffffff; 7871 goto done; 7872 } 7873 7874 f = &sc->tids.ftid_tab[t->idx]; 7875 for (i = t->idx; i < nfilters; i++, f++) { 7876 if (f->valid) { 7877 t->idx = i; 7878 t->l2tidx = f->l2t ? 
f->l2t->idx : 0; 7879 t->smtidx = f->smtidx; 7880 if (f->fs.hitcnts) 7881 t->hits = get_filter_hits(sc, t->idx); 7882 else 7883 t->hits = UINT64_MAX; 7884 t->fs = f->fs; 7885 7886 goto done; 7887 } 7888 } 7889 7890 t->idx = 0xffffffff; 7891done: 7892 end_synchronized_op(sc, LOCK_HELD); 7893 return (0); 7894} 7895 7896static int 7897set_filter(struct adapter *sc, struct t4_filter *t) 7898{ 7899 unsigned int nfilters, nports; 7900 struct filter_entry *f; 7901 int i, rc; 7902 7903 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 7904 if (rc) 7905 return (rc); 7906 7907 nfilters = sc->tids.nftids; 7908 nports = sc->params.nports; 7909 7910 if (nfilters == 0) { 7911 rc = ENOTSUP; 7912 goto done; 7913 } 7914 7915 if (!(sc->flags & FULL_INIT_DONE)) { 7916 rc = EAGAIN; 7917 goto done; 7918 } 7919 7920 if (t->idx >= nfilters) { 7921 rc = EINVAL; 7922 goto done; 7923 } 7924 7925 /* Validate against the global filter mode and ingress config */ 7926 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 7927 if (rc != 0) 7928 goto done; 7929 7930 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 7931 rc = EINVAL; 7932 goto done; 7933 } 7934 7935 if (t->fs.val.iport >= nports) { 7936 rc = EINVAL; 7937 goto done; 7938 } 7939 7940 /* Can't specify an iq if not steering to it */ 7941 if (!t->fs.dirsteer && t->fs.iq) { 7942 rc = EINVAL; 7943 goto done; 7944 } 7945 7946 /* IPv6 filter idx must be 4 aligned */ 7947 if (t->fs.type == 1 && 7948 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 7949 rc = EINVAL; 7950 goto done; 7951 } 7952 7953 if (sc->tids.ftid_tab == NULL) { 7954 KASSERT(sc->tids.ftids_in_use == 0, 7955 ("%s: no memory allocated but filters_in_use > 0", 7956 __func__)); 7957 7958 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 7959 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 7960 if (sc->tids.ftid_tab == NULL) { 7961 rc = ENOMEM; 7962 goto done; 7963 } 7964 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 7965 } 7966 7967 for (i = 0; i < 4; i++) { 7968 f = &sc->tids.ftid_tab[t->idx + i]; 7969 7970 if (f->pending || f->valid) { 7971 rc = EBUSY; 7972 goto done; 7973 } 7974 if (f->locked) { 7975 rc = EPERM; 7976 goto done; 7977 } 7978 7979 if (t->fs.type == 0) 7980 break; 7981 } 7982 7983 f = &sc->tids.ftid_tab[t->idx]; 7984 f->fs = t->fs; 7985 7986 rc = set_filter_wr(sc, t->idx); 7987done: 7988 end_synchronized_op(sc, 0); 7989 7990 if (rc == 0) { 7991 mtx_lock(&sc->tids.ftid_lock); 7992 for (;;) { 7993 if (f->pending == 0) { 7994 rc = f->valid ? 
0 : EIO; 7995 break; 7996 } 7997 7998 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 7999 PCATCH, "t4setfw", 0)) { 8000 rc = EINPROGRESS; 8001 break; 8002 } 8003 } 8004 mtx_unlock(&sc->tids.ftid_lock); 8005 } 8006 return (rc); 8007} 8008 8009static int 8010del_filter(struct adapter *sc, struct t4_filter *t) 8011{ 8012 unsigned int nfilters; 8013 struct filter_entry *f; 8014 int rc; 8015 8016 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 8017 if (rc) 8018 return (rc); 8019 8020 nfilters = sc->tids.nftids; 8021 8022 if (nfilters == 0) { 8023 rc = ENOTSUP; 8024 goto done; 8025 } 8026 8027 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 8028 t->idx >= nfilters) { 8029 rc = EINVAL; 8030 goto done; 8031 } 8032 8033 if (!(sc->flags & FULL_INIT_DONE)) { 8034 rc = EAGAIN; 8035 goto done; 8036 } 8037 8038 f = &sc->tids.ftid_tab[t->idx]; 8039 8040 if (f->pending) { 8041 rc = EBUSY; 8042 goto done; 8043 } 8044 if (f->locked) { 8045 rc = EPERM; 8046 goto done; 8047 } 8048 8049 if (f->valid) { 8050 t->fs = f->fs; /* extra info for the caller */ 8051 rc = del_filter_wr(sc, t->idx); 8052 } 8053 8054done: 8055 end_synchronized_op(sc, 0); 8056 8057 if (rc == 0) { 8058 mtx_lock(&sc->tids.ftid_lock); 8059 for (;;) { 8060 if (f->pending == 0) { 8061 rc = f->valid ? EIO : 0; 8062 break; 8063 } 8064 8065 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8066 PCATCH, "t4delfw", 0)) { 8067 rc = EINPROGRESS; 8068 break; 8069 } 8070 } 8071 mtx_unlock(&sc->tids.ftid_lock); 8072 } 8073 8074 return (rc); 8075} 8076 8077static void 8078clear_filter(struct filter_entry *f) 8079{ 8080 if (f->l2t) 8081 t4_l2t_release(f->l2t); 8082 8083 bzero(f, sizeof (*f)); 8084} 8085 8086static int 8087set_filter_wr(struct adapter *sc, int fidx) 8088{ 8089 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8090 struct fw_filter_wr *fwr; 8091 unsigned int ftid, vnic_vld, vnic_vld_mask; 8092 struct wrq_cookie cookie; 8093 8094 ASSERT_SYNCHRONIZED_OP(sc); 8095 8096 if (f->fs.newdmac || f->fs.newvlan) { 8097 /* This filter needs an L2T entry; allocate one. 
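		 * A switching entry holds the dmac, vlan, and egress port that
		 * t4_l2t_set_switching() programs below.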
*/ 8098 f->l2t = t4_l2t_alloc_switching(sc->l2t); 8099 if (f->l2t == NULL) 8100 return (EAGAIN); 8101 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 8102 f->fs.dmac)) { 8103 t4_l2t_release(f->l2t); 8104 f->l2t = NULL; 8105 return (ENOMEM); 8106 } 8107 } 8108 8109 /* Already validated against fconf, iconf */ 8110 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0); 8111 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0); 8112 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld) 8113 vnic_vld = 1; 8114 else 8115 vnic_vld = 0; 8116 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld) 8117 vnic_vld_mask = 1; 8118 else 8119 vnic_vld_mask = 0; 8120 8121 ftid = sc->tids.ftid_base + fidx; 8122 8123 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8124 if (fwr == NULL) 8125 return (ENOMEM); 8126 bzero(fwr, sizeof(*fwr)); 8127 8128 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 8129 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 8130 fwr->tid_to_iq = 8131 htobe32(V_FW_FILTER_WR_TID(ftid) | 8132 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 8133 V_FW_FILTER_WR_NOREPLY(0) | 8134 V_FW_FILTER_WR_IQ(f->fs.iq)); 8135 fwr->del_filter_to_l2tix = 8136 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 8137 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 8138 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 8139 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 8140 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 8141 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 8142 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 8143 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 8144 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 8145 f->fs.newvlan == VLAN_REWRITE) | 8146 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 8147 f->fs.newvlan == VLAN_REWRITE) | 8148 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 8149 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 8150 V_FW_FILTER_WR_PRIO(f->fs.prio) | 8151 V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); 8152 fwr->ethtype = htobe16(f->fs.val.ethtype); 8153 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 8154 fwr->frag_to_ovlan_vldm = 8155 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 8156 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 8157 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 8158 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | 8159 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 8160 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); 8161 fwr->smac_sel = 0; 8162 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 8163 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 8164 fwr->maci_to_matchtypem = 8165 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 8166 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 8167 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 8168 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 8169 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 8170 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 8171 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 8172 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 8173 fwr->ptcl = f->fs.val.proto; 8174 fwr->ptclm = f->fs.mask.proto; 8175 fwr->ttyp = f->fs.val.tos; 8176 fwr->ttypm = f->fs.mask.tos; 8177 fwr->ivlan = htobe16(f->fs.val.vlan); 8178 fwr->ivlanm = htobe16(f->fs.mask.vlan); 8179 fwr->ovlan = htobe16(f->fs.val.vnic); 8180 fwr->ovlanm = htobe16(f->fs.mask.vnic); 8181 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 8182 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 8183 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 8184 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 8185 fwr->lp = htobe16(f->fs.val.dport); 8186 fwr->lpm = htobe16(f->fs.mask.dport); 8187 fwr->fp = htobe16(f->fs.val.sport); 8188 fwr->fpm = htobe16(f->fs.mask.sport); 8189 if (f->fs.newsmac) 8190 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 8191 8192 f->pending = 1; 8193 sc->tids.ftids_in_use++; 8194 8195 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8196 return (0); 8197} 8198 8199static int 8200del_filter_wr(struct adapter *sc, int fidx) 8201{ 8202 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8203 struct fw_filter_wr *fwr; 8204 unsigned int ftid; 8205 struct wrq_cookie cookie; 8206 8207 ftid = sc->tids.ftid_base + fidx; 8208 8209 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8210 if (fwr == NULL) 8211 return (ENOMEM); 8212 bzero(fwr, sizeof (*fwr)); 8213 8214 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 8215 8216 f->pending = 1; 8217 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8218 return (0); 8219} 8220 8221int 8222t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8223{ 8224 struct adapter *sc = iq->adapter; 8225 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 8226 unsigned int idx = GET_TID(rpl); 8227 unsigned int rc; 8228 struct filter_entry *f; 8229 8230 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 8231 rss->opcode)); 8232 MPASS(iq == &sc->sge.fwq); 8233 MPASS(is_ftid(sc, idx)); 8234 8235 idx -= sc->tids.ftid_base; 8236 f = &sc->tids.ftid_tab[idx]; 8237 rc = G_COOKIE(rpl->cookie); 8238 8239 mtx_lock(&sc->tids.ftid_lock); 8240 if (rc == FW_FILTER_WR_FLT_ADDED) { 8241 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 8242 __func__, idx)); 8243 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 8244 f->pending = 0; /* asynchronous setup completed */ 8245 f->valid = 1; 8246 } else { 8247 if (rc != FW_FILTER_WR_FLT_DELETED) { 8248 /* Add or delete failed, display an error */ 8249 log(LOG_ERR, 8250 "filter %u setup failed with error %u\n", 8251 idx, rc); 8252 } 8253 8254 clear_filter(f); 
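		/* A failed add and a completed delete both leave the slot empty. */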
8255 sc->tids.ftids_in_use--; 8256 } 8257 wakeup(&sc->tids.ftid_tab); 8258 mtx_unlock(&sc->tids.ftid_lock); 8259 8260 return (0); 8261} 8262 8263static int 8264set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8265{ 8266 8267 MPASS(iq->set_tcb_rpl != NULL); 8268 return (iq->set_tcb_rpl(iq, rss, m)); 8269} 8270 8271static int 8272l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8273{ 8274 8275 MPASS(iq->l2t_write_rpl != NULL); 8276 return (iq->l2t_write_rpl(iq, rss, m)); 8277} 8278 8279static int 8280get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 8281{ 8282 int rc; 8283 8284 if (cntxt->cid > M_CTXTQID) 8285 return (EINVAL); 8286 8287 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 8288 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 8289 return (EINVAL); 8290 8291 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 8292 if (rc) 8293 return (rc); 8294 8295 if (sc->flags & FW_OK) { 8296 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 8297 &cntxt->data[0]); 8298 if (rc == 0) 8299 goto done; 8300 } 8301 8302 /* 8303 * Read via firmware failed or wasn't even attempted. Read directly via 8304 * the backdoor. 8305 */ 8306 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 8307done: 8308 end_synchronized_op(sc, 0); 8309 return (rc); 8310} 8311 8312static int 8313load_fw(struct adapter *sc, struct t4_data *fw) 8314{ 8315 int rc; 8316 uint8_t *fw_data; 8317 8318 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 8319 if (rc) 8320 return (rc); 8321 8322 if (sc->flags & FULL_INIT_DONE) { 8323 rc = EBUSY; 8324 goto done; 8325 } 8326 8327 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 8328 if (fw_data == NULL) { 8329 rc = ENOMEM; 8330 goto done; 8331 } 8332 8333 rc = copyin(fw->data, fw_data, fw->len); 8334 if (rc == 0) 8335 rc = -t4_load_fw(sc, fw_data, fw->len); 8336 8337 free(fw_data, M_CXGBE); 8338done: 8339 end_synchronized_op(sc, 0); 8340 return (rc); 8341} 8342 8343#define MAX_READ_BUF_SIZE (128 * 1024) 8344static int 8345read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 8346{ 8347 uint32_t addr, remaining, n; 8348 uint32_t *buf; 8349 int rc; 8350 uint8_t *dst; 8351 8352 rc = validate_mem_range(sc, mr->addr, mr->len); 8353 if (rc != 0) 8354 return (rc); 8355 8356 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); 8357 addr = mr->addr; 8358 remaining = mr->len; 8359 dst = (void *)mr->data; 8360 8361 while (remaining) { 8362 n = min(remaining, MAX_READ_BUF_SIZE); 8363 read_via_memwin(sc, 2, addr, buf, n); 8364 8365 rc = copyout(buf, dst, n); 8366 if (rc != 0) 8367 break; 8368 8369 dst += n; 8370 remaining -= n; 8371 addr += n; 8372 } 8373 8374 free(buf, M_CXGBE); 8375 return (rc); 8376} 8377#undef MAX_READ_BUF_SIZE 8378 8379static int 8380read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 8381{ 8382 int rc; 8383 8384 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 8385 return (EINVAL); 8386 8387 if (i2cd->len > sizeof(i2cd->data)) 8388 return (EFBIG); 8389 8390 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 8391 if (rc) 8392 return (rc); 8393 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 8394 i2cd->offset, i2cd->len, &i2cd->data[0]); 8395 end_synchronized_op(sc, 0); 8396 8397 return (rc); 8398} 8399 8400static int 8401in_range(int val, int lo, int hi) 8402{ 8403 8404 return (val < 0 || (val <= hi && val >= lo)); 8405} 8406 8407static int 
8408set_sched_class_config(struct adapter *sc, int minmax) 8409{ 8410 int rc; 8411 8412 if (minmax < 0) 8413 return (EINVAL); 8414 8415 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc"); 8416 if (rc) 8417 return (rc); 8418 rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1); 8419 end_synchronized_op(sc, 0); 8420 8421 return (rc); 8422} 8423 8424static int 8425set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p, 8426 int sleep_ok) 8427{ 8428 int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode; 8429 struct port_info *pi; 8430 struct tx_sched_class *tc; 8431 8432 if (p->level == SCHED_CLASS_LEVEL_CL_RL) 8433 fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL; 8434 else if (p->level == SCHED_CLASS_LEVEL_CL_WRR) 8435 fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR; 8436 else if (p->level == SCHED_CLASS_LEVEL_CH_RL) 8437 fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL; 8438 else 8439 return (EINVAL); 8440 8441 if (p->mode == SCHED_CLASS_MODE_CLASS) 8442 fw_mode = FW_SCHED_PARAMS_MODE_CLASS; 8443 else if (p->mode == SCHED_CLASS_MODE_FLOW) 8444 fw_mode = FW_SCHED_PARAMS_MODE_FLOW; 8445 else 8446 return (EINVAL); 8447 8448 if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS) 8449 fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE; 8450 else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS) 8451 fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE; 8452 else 8453 return (EINVAL); 8454 8455 if (p->ratemode == SCHED_CLASS_RATEMODE_REL) 8456 fw_ratemode = FW_SCHED_PARAMS_RATE_REL; 8457 else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS) 8458 fw_ratemode = FW_SCHED_PARAMS_RATE_ABS; 8459 else 8460 return (EINVAL); 8461 8462 /* Vet our parameters ... */ 8463 if (!in_range(p->channel, 0, sc->chip_params->nchan - 1)) 8464 return (ERANGE); 8465 8466 pi = sc->port[sc->chan_map[p->channel]]; 8467 if (pi == NULL) 8468 return (ENXIO); 8469 MPASS(pi->tx_chan == p->channel); 8470 top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */ 8471 8472 if (!in_range(p->cl, 0, sc->chip_params->nsched_cls - 1) || 8473 !in_range(p->minrate, 0, top_speed) || 8474 !in_range(p->maxrate, 0, top_speed) || 8475 !in_range(p->weight, 0, 100)) 8476 return (ERANGE); 8477 8478 /* 8479 * Translate any unset parameters into the firmware's 8480 * nomenclature and/or fail the call if the parameters 8481 * are required ... 8482 */ 8483 if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0) 8484 return (EINVAL); 8485 8486 if (p->minrate < 0) 8487 p->minrate = 0; 8488 if (p->maxrate < 0) { 8489 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 8490 p->level == SCHED_CLASS_LEVEL_CH_RL) 8491 return (EINVAL); 8492 else 8493 p->maxrate = 0; 8494 } 8495 if (p->weight < 0) { 8496 if (p->level == SCHED_CLASS_LEVEL_CL_WRR) 8497 return (EINVAL); 8498 else 8499 p->weight = 0; 8500 } 8501 if (p->pktsize < 0) { 8502 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 8503 p->level == SCHED_CLASS_LEVEL_CH_RL) 8504 return (EINVAL); 8505 else 8506 p->pktsize = 0; 8507 } 8508 8509 rc = begin_synchronized_op(sc, NULL, 8510 sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp"); 8511 if (rc) 8512 return (rc); 8513 tc = &pi->tc[p->cl]; 8514 tc->params = *p; 8515 rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode, 8516 fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate, 8517 p->weight, p->pktsize, sleep_ok); 8518 if (rc == 0) 8519 tc->flags |= TX_SC_OK; 8520 else { 8521 /* 8522 * Unknown state at this point, see tc->params for what was 8523 * attempted. 
8524 */ 8525 tc->flags &= ~TX_SC_OK; 8526 } 8527 end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD); 8528 8529 return (rc); 8530} 8531 8532static int 8533set_sched_class(struct adapter *sc, struct t4_sched_params *p) 8534{ 8535 8536 if (p->type != SCHED_CLASS_TYPE_PACKET) 8537 return (EINVAL); 8538 8539 if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG) 8540 return (set_sched_class_config(sc, p->u.config.minmax)); 8541 8542 if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS) 8543 return (set_sched_class_params(sc, &p->u.params, 1)); 8544 8545 return (EINVAL); 8546} 8547 8548static int 8549set_sched_queue(struct adapter *sc, struct t4_sched_queue *p) 8550{ 8551 struct port_info *pi = NULL; 8552 struct vi_info *vi; 8553 struct sge_txq *txq; 8554 uint32_t fw_mnem, fw_queue, fw_class; 8555 int i, rc; 8556 8557 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq"); 8558 if (rc) 8559 return (rc); 8560 8561 if (p->port >= sc->params.nports) { 8562 rc = EINVAL; 8563 goto done; 8564 } 8565 8566 /* XXX: Only supported for the main VI. */ 8567 pi = sc->port[p->port]; 8568 vi = &pi->vi[0]; 8569 if (!(vi->flags & VI_INIT_DONE)) { 8570 /* tx queues not set up yet */ 8571 rc = EAGAIN; 8572 goto done; 8573 } 8574 8575 if (!in_range(p->queue, 0, vi->ntxq - 1) || 8576 !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) { 8577 rc = EINVAL; 8578 goto done; 8579 } 8580 8581 /* 8582 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX 8583 * Scheduling Class in this case). 8584 */ 8585 fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 8586 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH)); 8587 fw_class = p->cl < 0 ? 0xffffffff : p->cl; 8588 8589 /* 8590 * If op.queue is non-negative, then we're only changing the scheduling 8591 * on a single specified TX queue. 8592 */ 8593 if (p->queue >= 0) { 8594 txq = &sc->sge.txq[vi->first_txq + p->queue]; 8595 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 8596 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 8597 &fw_class); 8598 goto done; 8599 } 8600 8601 /* 8602 * Change the scheduling on all the TX queues for the 8603 * interface. 8604 */ 8605 for_each_txq(vi, i, txq) { 8606 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 8607 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 8608 &fw_class); 8609 if (rc) 8610 goto done; 8611 } 8612 8613 rc = 0; 8614done: 8615 end_synchronized_op(sc, 0); 8616 return (rc); 8617} 8618 8619int 8620t4_os_find_pci_capability(struct adapter *sc, int cap) 8621{ 8622 int i; 8623 8624 return (pci_find_cap(sc->dev, cap, &i) == 0 ? 
i : 0); 8625} 8626 8627int 8628t4_os_pci_save_state(struct adapter *sc) 8629{ 8630 device_t dev; 8631 struct pci_devinfo *dinfo; 8632 8633 dev = sc->dev; 8634 dinfo = device_get_ivars(dev); 8635 8636 pci_cfg_save(dev, dinfo, 0); 8637 return (0); 8638} 8639 8640int 8641t4_os_pci_restore_state(struct adapter *sc) 8642{ 8643 device_t dev; 8644 struct pci_devinfo *dinfo; 8645 8646 dev = sc->dev; 8647 dinfo = device_get_ivars(dev); 8648 8649 pci_cfg_restore(dev, dinfo); 8650 return (0); 8651} 8652 8653void 8654t4_os_portmod_changed(const struct adapter *sc, int idx) 8655{ 8656 struct port_info *pi = sc->port[idx]; 8657 struct vi_info *vi; 8658 struct ifnet *ifp; 8659 int v; 8660 static const char *mod_str[] = { 8661 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 8662 }; 8663 8664 for_each_vi(pi, v, vi) { 8665 build_medialist(pi, &vi->media); 8666 } 8667 8668 ifp = pi->vi[0].ifp; 8669 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 8670 if_printf(ifp, "transceiver unplugged.\n"); 8671 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 8672 if_printf(ifp, "unknown transceiver inserted.\n"); 8673 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 8674 if_printf(ifp, "unsupported transceiver inserted.\n"); 8675 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 8676 if_printf(ifp, "%s transceiver inserted.\n", 8677 mod_str[pi->mod_type]); 8678 } else { 8679 if_printf(ifp, "transceiver (type %d) inserted.\n", 8680 pi->mod_type); 8681 } 8682} 8683 8684void 8685t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason) 8686{ 8687 struct port_info *pi = sc->port[idx]; 8688 struct vi_info *vi; 8689 struct ifnet *ifp; 8690 int v; 8691 8692 if (link_stat) 8693 pi->linkdnrc = -1; 8694 else { 8695 if (reason >= 0) 8696 pi->linkdnrc = reason; 8697 } 8698 for_each_vi(pi, v, vi) { 8699 ifp = vi->ifp; 8700 if (ifp == NULL) 8701 continue; 8702 8703 if (link_stat) { 8704 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed); 8705 if_link_state_change(ifp, LINK_STATE_UP); 8706 } else { 8707 if_link_state_change(ifp, LINK_STATE_DOWN); 8708 } 8709 } 8710} 8711 8712void 8713t4_iterate(void (*func)(struct adapter *, void *), void *arg) 8714{ 8715 struct adapter *sc; 8716 8717 sx_slock(&t4_list_lock); 8718 SLIST_FOREACH(sc, &t4_list, link) { 8719 /* 8720 * func should not make any assumptions about what state sc is 8721 * in - the only guarantee is that sc->sc_lock is a valid lock. 
8722 */ 8723 func(sc, arg); 8724 } 8725 sx_sunlock(&t4_list_lock); 8726} 8727 8728static int 8729t4_open(struct cdev *dev, int flags, int type, struct thread *td) 8730{ 8731 return (0); 8732} 8733 8734static int 8735t4_close(struct cdev *dev, int flags, int type, struct thread *td) 8736{ 8737 return (0); 8738} 8739 8740static int 8741t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 8742 struct thread *td) 8743{ 8744 int rc; 8745 struct adapter *sc = dev->si_drv1; 8746 8747 rc = priv_check(td, PRIV_DRIVER); 8748 if (rc != 0) 8749 return (rc); 8750 8751 switch (cmd) { 8752 case CHELSIO_T4_GETREG: { 8753 struct t4_reg *edata = (struct t4_reg *)data; 8754 8755 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 8756 return (EFAULT); 8757 8758 if (edata->size == 4) 8759 edata->val = t4_read_reg(sc, edata->addr); 8760 else if (edata->size == 8) 8761 edata->val = t4_read_reg64(sc, edata->addr); 8762 else 8763 return (EINVAL); 8764 8765 break; 8766 } 8767 case CHELSIO_T4_SETREG: { 8768 struct t4_reg *edata = (struct t4_reg *)data; 8769 8770 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 8771 return (EFAULT); 8772 8773 if (edata->size == 4) { 8774 if (edata->val & 0xffffffff00000000) 8775 return (EINVAL); 8776 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 8777 } else if (edata->size == 8) 8778 t4_write_reg64(sc, edata->addr, edata->val); 8779 else 8780 return (EINVAL); 8781 break; 8782 } 8783 case CHELSIO_T4_REGDUMP: { 8784 struct t4_regdump *regs = (struct t4_regdump *)data; 8785 int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE; 8786 uint8_t *buf; 8787 8788 if (regs->len < reglen) { 8789 regs->len = reglen; /* hint to the caller */ 8790 return (ENOBUFS); 8791 } 8792 8793 regs->len = reglen; 8794 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 8795 get_regs(sc, regs, buf); 8796 rc = copyout(buf, regs->data, reglen); 8797 free(buf, M_CXGBE); 8798 break; 8799 } 8800 case CHELSIO_T4_GET_FILTER_MODE: 8801 rc = get_filter_mode(sc, (uint32_t *)data); 8802 break; 8803 case CHELSIO_T4_SET_FILTER_MODE: 8804 rc = set_filter_mode(sc, *(uint32_t *)data); 8805 break; 8806 case CHELSIO_T4_GET_FILTER: 8807 rc = get_filter(sc, (struct t4_filter *)data); 8808 break; 8809 case CHELSIO_T4_SET_FILTER: 8810 rc = set_filter(sc, (struct t4_filter *)data); 8811 break; 8812 case CHELSIO_T4_DEL_FILTER: 8813 rc = del_filter(sc, (struct t4_filter *)data); 8814 break; 8815 case CHELSIO_T4_GET_SGE_CONTEXT: 8816 rc = get_sge_context(sc, (struct t4_sge_context *)data); 8817 break; 8818 case CHELSIO_T4_LOAD_FW: 8819 rc = load_fw(sc, (struct t4_data *)data); 8820 break; 8821 case CHELSIO_T4_GET_MEM: 8822 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 8823 break; 8824 case CHELSIO_T4_GET_I2C: 8825 rc = read_i2c(sc, (struct t4_i2c_data *)data); 8826 break; 8827 case CHELSIO_T4_CLEAR_STATS: { 8828 int i, v; 8829 u_int port_id = *(uint32_t *)data; 8830 struct port_info *pi; 8831 struct vi_info *vi; 8832 8833 if (port_id >= sc->params.nports) 8834 return (EINVAL); 8835 pi = sc->port[port_id]; 8836 8837 /* MAC stats */ 8838 t4_clr_port_stats(sc, pi->tx_chan); 8839 pi->tx_parse_error = 0; 8840 mtx_lock(&sc->reg_lock); 8841 for_each_vi(pi, v, vi) { 8842 if (vi->flags & VI_INIT_DONE) 8843 t4_clr_vi_stats(sc, vi->viid); 8844 } 8845 mtx_unlock(&sc->reg_lock); 8846 8847 /* 8848 * Since this command accepts a port, clear stats for 8849 * all VIs on this port. 
8850 */ 8851 for_each_vi(pi, v, vi) { 8852 if (vi->flags & VI_INIT_DONE) { 8853 struct sge_rxq *rxq; 8854 struct sge_txq *txq; 8855 struct sge_wrq *wrq; 8856 8857 for_each_rxq(vi, i, rxq) { 8858#if defined(INET) || defined(INET6) 8859 rxq->lro.lro_queued = 0; 8860 rxq->lro.lro_flushed = 0; 8861#endif 8862 rxq->rxcsum = 0; 8863 rxq->vlan_extraction = 0; 8864 } 8865 8866 for_each_txq(vi, i, txq) { 8867 txq->txcsum = 0; 8868 txq->tso_wrs = 0; 8869 txq->vlan_insertion = 0; 8870 txq->imm_wrs = 0; 8871 txq->sgl_wrs = 0; 8872 txq->txpkt_wrs = 0; 8873 txq->txpkts0_wrs = 0; 8874 txq->txpkts1_wrs = 0; 8875 txq->txpkts0_pkts = 0; 8876 txq->txpkts1_pkts = 0; 8877 mp_ring_reset_stats(txq->r); 8878 } 8879 8880#ifdef TCP_OFFLOAD 8881 /* nothing to clear for each ofld_rxq */ 8882 8883 for_each_ofld_txq(vi, i, wrq) { 8884 wrq->tx_wrs_direct = 0; 8885 wrq->tx_wrs_copied = 0; 8886 } 8887#endif 8888 8889 if (IS_MAIN_VI(vi)) { 8890 wrq = &sc->sge.ctrlq[pi->port_id]; 8891 wrq->tx_wrs_direct = 0; 8892 wrq->tx_wrs_copied = 0; 8893 } 8894 } 8895 } 8896 break; 8897 } 8898 case CHELSIO_T4_SCHED_CLASS: 8899 rc = set_sched_class(sc, (struct t4_sched_params *)data); 8900 break; 8901 case CHELSIO_T4_SCHED_QUEUE: 8902 rc = set_sched_queue(sc, (struct t4_sched_queue *)data); 8903 break; 8904 case CHELSIO_T4_GET_TRACER: 8905 rc = t4_get_tracer(sc, (struct t4_tracer *)data); 8906 break; 8907 case CHELSIO_T4_SET_TRACER: 8908 rc = t4_set_tracer(sc, (struct t4_tracer *)data); 8909 break; 8910 default: 8911 rc = EINVAL; 8912 } 8913 8914 return (rc); 8915} 8916 8917void 8918t4_db_full(struct adapter *sc) 8919{ 8920 8921 CXGBE_UNIMPLEMENTED(__func__); 8922} 8923 8924void 8925t4_db_dropped(struct adapter *sc) 8926{ 8927 8928 CXGBE_UNIMPLEMENTED(__func__); 8929} 8930 8931#ifdef TCP_OFFLOAD 8932void 8933t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order) 8934{ 8935 8936 t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask); 8937 t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) | 8938 V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) | 8939 V_HPZ3(pgsz_order[3])); 8940} 8941 8942static int 8943toe_capability(struct vi_info *vi, int enable) 8944{ 8945 int rc; 8946 struct port_info *pi = vi->pi; 8947 struct adapter *sc = pi->adapter; 8948 8949 ASSERT_SYNCHRONIZED_OP(sc); 8950 8951 if (!is_offload(sc)) 8952 return (ENODEV); 8953 8954 if (enable) { 8955 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { 8956 /* TOE is already enabled. */ 8957 return (0); 8958 } 8959 8960 /* 8961 * We need the port's queues around so that we're able to send 8962 * and receive CPLs to/from the TOE even if the ifnet for this 8963 * port has never been UP'd administratively. 8964 */ 8965 if (!(vi->flags & VI_INIT_DONE)) { 8966 rc = vi_full_init(vi); 8967 if (rc) 8968 return (rc); 8969 } 8970 if (!(pi->vi[0].flags & VI_INIT_DONE)) { 8971 rc = vi_full_init(&pi->vi[0]); 8972 if (rc) 8973 return (rc); 8974 } 8975 8976 if (isset(&sc->offload_map, pi->port_id)) { 8977 /* TOE is enabled on another VI of this port. 
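			 * Just take another reference on the port's shared
			 * offload state.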
*/ 8978 pi->uld_vis++; 8979 return (0); 8980 } 8981 8982 if (!uld_active(sc, ULD_TOM)) { 8983 rc = t4_activate_uld(sc, ULD_TOM); 8984 if (rc == EAGAIN) { 8985 log(LOG_WARNING, 8986 "You must kldload t4_tom.ko before trying " 8987 "to enable TOE on a cxgbe interface.\n"); 8988 } 8989 if (rc != 0) 8990 return (rc); 8991 KASSERT(sc->tom_softc != NULL, 8992 ("%s: TOM activated but softc NULL", __func__)); 8993 KASSERT(uld_active(sc, ULD_TOM), 8994 ("%s: TOM activated but flag not set", __func__)); 8995 } 8996 8997 /* Activate iWARP and iSCSI too, if the modules are loaded. */ 8998 if (!uld_active(sc, ULD_IWARP)) 8999 (void) t4_activate_uld(sc, ULD_IWARP); 9000 if (!uld_active(sc, ULD_ISCSI)) 9001 (void) t4_activate_uld(sc, ULD_ISCSI); 9002 9003 pi->uld_vis++; 9004 setbit(&sc->offload_map, pi->port_id); 9005 } else { 9006 pi->uld_vis--; 9007 9008 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0) 9009 return (0); 9010 9011 KASSERT(uld_active(sc, ULD_TOM), 9012 ("%s: TOM never initialized?", __func__)); 9013 clrbit(&sc->offload_map, pi->port_id); 9014 } 9015 9016 return (0); 9017} 9018 9019/* 9020 * Add an upper layer driver to the global list. 9021 */ 9022int 9023t4_register_uld(struct uld_info *ui) 9024{ 9025 int rc = 0; 9026 struct uld_info *u; 9027 9028 sx_xlock(&t4_uld_list_lock); 9029 SLIST_FOREACH(u, &t4_uld_list, link) { 9030 if (u->uld_id == ui->uld_id) { 9031 rc = EEXIST; 9032 goto done; 9033 } 9034 } 9035 9036 SLIST_INSERT_HEAD(&t4_uld_list, ui, link); 9037 ui->refcount = 0; 9038done: 9039 sx_xunlock(&t4_uld_list_lock); 9040 return (rc); 9041} 9042 9043int 9044t4_unregister_uld(struct uld_info *ui) 9045{ 9046 int rc = EINVAL; 9047 struct uld_info *u; 9048 9049 sx_xlock(&t4_uld_list_lock); 9050 9051 SLIST_FOREACH(u, &t4_uld_list, link) { 9052 if (u == ui) { 9053 if (ui->refcount > 0) { 9054 rc = EBUSY; 9055 goto done; 9056 } 9057 9058 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); 9059 rc = 0; 9060 goto done; 9061 } 9062 } 9063done: 9064 sx_xunlock(&t4_uld_list_lock); 9065 return (rc); 9066} 9067 9068int 9069t4_activate_uld(struct adapter *sc, int id) 9070{ 9071 int rc; 9072 struct uld_info *ui; 9073 9074 ASSERT_SYNCHRONIZED_OP(sc); 9075 9076 if (id < 0 || id > ULD_MAX) 9077 return (EINVAL); 9078 rc = EAGAIN; /* kldload the module with this ULD and try again. 
*/ 9079 9080 sx_slock(&t4_uld_list_lock); 9081 9082 SLIST_FOREACH(ui, &t4_uld_list, link) { 9083 if (ui->uld_id == id) { 9084 if (!(sc->flags & FULL_INIT_DONE)) { 9085 rc = adapter_full_init(sc); 9086 if (rc != 0) 9087 break; 9088 } 9089 9090 rc = ui->activate(sc); 9091 if (rc == 0) { 9092 setbit(&sc->active_ulds, id); 9093 ui->refcount++; 9094 } 9095 break; 9096 } 9097 } 9098 9099 sx_sunlock(&t4_uld_list_lock); 9100 9101 return (rc); 9102} 9103 9104int 9105t4_deactivate_uld(struct adapter *sc, int id) 9106{ 9107 int rc; 9108 struct uld_info *ui; 9109 9110 ASSERT_SYNCHRONIZED_OP(sc); 9111 9112 if (id < 0 || id > ULD_MAX) 9113 return (EINVAL); 9114 rc = ENXIO; 9115 9116 sx_slock(&t4_uld_list_lock); 9117 9118 SLIST_FOREACH(ui, &t4_uld_list, link) { 9119 if (ui->uld_id == id) { 9120 rc = ui->deactivate(sc); 9121 if (rc == 0) { 9122 clrbit(&sc->active_ulds, id); 9123 ui->refcount--; 9124 } 9125 break; 9126 } 9127 } 9128 9129 sx_sunlock(&t4_uld_list_lock); 9130 9131 return (rc); 9132} 9133 9134int 9135uld_active(struct adapter *sc, int uld_id) 9136{ 9137 9138 MPASS(uld_id >= 0 && uld_id <= ULD_MAX); 9139 9140 return (isset(&sc->active_ulds, uld_id)); 9141} 9142#endif 9143 9144/* 9145 * Come up with reasonable defaults for some of the tunables, provided they're 9146 * not set by the user (in which case we'll use the values as is). 9147 */ 9148static void 9149tweak_tunables(void) 9150{ 9151 int nc = mp_ncpus; /* our snapshot of the number of CPUs */ 9152 9153 if (t4_ntxq10g < 1) { 9154#ifdef RSS 9155 t4_ntxq10g = rss_getnumbuckets(); 9156#else 9157 t4_ntxq10g = min(nc, NTXQ_10G); 9158#endif 9159 } 9160 9161 if (t4_ntxq1g < 1) { 9162#ifdef RSS 9163 /* XXX: way too many for 1GbE? */ 9164 t4_ntxq1g = rss_getnumbuckets(); 9165#else 9166 t4_ntxq1g = min(nc, NTXQ_1G); 9167#endif 9168 } 9169 9170 if (t4_ntxq_vi < 1) 9171 t4_ntxq_vi = min(nc, NTXQ_VI); 9172 9173 if (t4_nrxq10g < 1) { 9174#ifdef RSS 9175 t4_nrxq10g = rss_getnumbuckets(); 9176#else 9177 t4_nrxq10g = min(nc, NRXQ_10G); 9178#endif 9179 } 9180 9181 if (t4_nrxq1g < 1) { 9182#ifdef RSS 9183 /* XXX: way too many for 1GbE? 
*/ 9184 t4_nrxq1g = rss_getnumbuckets(); 9185#else 9186 t4_nrxq1g = min(nc, NRXQ_1G); 9187#endif 9188 } 9189 9190 if (t4_nrxq_vi < 1) 9191 t4_nrxq_vi = min(nc, NRXQ_VI); 9192 9193#ifdef TCP_OFFLOAD 9194 if (t4_nofldtxq10g < 1) 9195 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G); 9196 9197 if (t4_nofldtxq1g < 1) 9198 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G); 9199 9200 if (t4_nofldtxq_vi < 1) 9201 t4_nofldtxq_vi = min(nc, NOFLDTXQ_VI); 9202 9203 if (t4_nofldrxq10g < 1) 9204 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G); 9205 9206 if (t4_nofldrxq1g < 1) 9207 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G); 9208 9209 if (t4_nofldrxq_vi < 1) 9210 t4_nofldrxq_vi = min(nc, NOFLDRXQ_VI); 9211 9212 if (t4_toecaps_allowed == -1) 9213 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; 9214 9215 if (t4_rdmacaps_allowed == -1) { 9216 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | 9217 FW_CAPS_CONFIG_RDMA_RDMAC; 9218 } 9219 9220 if (t4_iscsicaps_allowed == -1) { 9221 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | 9222 FW_CAPS_CONFIG_ISCSI_TARGET_PDU | 9223 FW_CAPS_CONFIG_ISCSI_T10DIF; 9224 } 9225#else 9226 if (t4_toecaps_allowed == -1) 9227 t4_toecaps_allowed = 0; 9228 9229 if (t4_rdmacaps_allowed == -1) 9230 t4_rdmacaps_allowed = 0; 9231 9232 if (t4_iscsicaps_allowed == -1) 9233 t4_iscsicaps_allowed = 0; 9234#endif 9235 9236#ifdef DEV_NETMAP 9237 if (t4_nnmtxq_vi < 1) 9238 t4_nnmtxq_vi = min(nc, NNMTXQ_VI); 9239 9240 if (t4_nnmrxq_vi < 1) 9241 t4_nnmrxq_vi = min(nc, NNMRXQ_VI); 9242#endif 9243 9244 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS) 9245 t4_tmr_idx_10g = TMR_IDX_10G; 9246 9247 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS) 9248 t4_pktc_idx_10g = PKTC_IDX_10G; 9249 9250 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS) 9251 t4_tmr_idx_1g = TMR_IDX_1G; 9252 9253 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS) 9254 t4_pktc_idx_1g = PKTC_IDX_1G; 9255 9256 if (t4_qsize_txq < 128) 9257 t4_qsize_txq = 128; 9258 9259 if (t4_qsize_rxq < 128) 9260 t4_qsize_rxq = 128; 9261 while (t4_qsize_rxq & 7) 9262 t4_qsize_rxq++; 9263 9264 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; 9265} 9266 9267#ifdef DDB 9268static void 9269t4_dump_tcb(struct adapter *sc, int tid) 9270{ 9271 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos; 9272 9273 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); 9274 save = t4_read_reg(sc, reg); 9275 base = sc->memwin[2].mw_base; 9276 9277 /* Dump TCB for the tid */ 9278 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 9279 tcb_addr += tid * TCB_SIZE; 9280 9281 if (is_t4(sc)) { 9282 pf = 0; 9283 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */ 9284 } else { 9285 pf = V_PFNUM(sc->pf); 9286 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */ 9287 } 9288 t4_write_reg(sc, reg, win_pos | pf); 9289 t4_read_reg(sc, reg); 9290 9291 off = tcb_addr - win_pos; 9292 for (i = 0; i < 4; i++) { 9293 uint32_t buf[8]; 9294 for (j = 0; j < 8; j++, off += 4) 9295 buf[j] = htonl(t4_read_reg(sc, base + off)); 9296 9297 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", 9298 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 9299 buf[7]); 9300 } 9301 9302 t4_write_reg(sc, reg, save); 9303 t4_read_reg(sc, reg); 9304} 9305 9306static void 9307t4_dump_devlog(struct adapter *sc) 9308{ 9309 struct devlog_params *dparams = &sc->params.devlog; 9310 struct fw_devlog_e e; 9311 int i, first, j, m, nentries, rc; 9312 uint64_t ftstamp = UINT64_MAX; 9313 9314 if (dparams->start == 0) { 9315 db_printf("devlog params not valid\n"); 9316 return; 9317 } 9318 
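	/*
	 * The devlog is a circular buffer in adapter memory.  Find the
	 * oldest entry (lowest timestamp) and then dump the entries in
	 * order, wrapping around at the end of the buffer.
	 */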
9319 nentries = dparams->size / sizeof(struct fw_devlog_e); 9320 m = fwmtype_to_hwmtype(dparams->memtype); 9321 9322 /* Find the first entry. */ 9323 first = -1; 9324 for (i = 0; i < nentries && !db_pager_quit; i++) { 9325 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9326 sizeof(e), (void *)&e); 9327 if (rc != 0) 9328 break; 9329 9330 if (e.timestamp == 0) 9331 break; 9332 9333 e.timestamp = be64toh(e.timestamp); 9334 if (e.timestamp < ftstamp) { 9335 ftstamp = e.timestamp; 9336 first = i; 9337 } 9338 } 9339 9340 if (first == -1) 9341 return; 9342 9343 i = first; 9344 do { 9345 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9346 sizeof(e), (void *)&e); 9347 if (rc != 0) 9348 return; 9349 9350 if (e.timestamp == 0) 9351 return; 9352 9353 e.timestamp = be64toh(e.timestamp); 9354 e.seqno = be32toh(e.seqno); 9355 for (j = 0; j < 8; j++) 9356 e.params[j] = be32toh(e.params[j]); 9357 9358 db_printf("%10d %15ju %8s %8s ", 9359 e.seqno, e.timestamp, 9360 (e.level < nitems(devlog_level_strings) ? 9361 devlog_level_strings[e.level] : "UNKNOWN"), 9362 (e.facility < nitems(devlog_facility_strings) ? 9363 devlog_facility_strings[e.facility] : "UNKNOWN")); 9364 db_printf(e.fmt, e.params[0], e.params[1], e.params[2], 9365 e.params[3], e.params[4], e.params[5], e.params[6], 9366 e.params[7]); 9367 9368 if (++i == nentries) 9369 i = 0; 9370 } while (i != first && !db_pager_quit); 9371} 9372 9373static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table); 9374_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table); 9375 9376DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL) 9377{ 9378 device_t dev; 9379 int t; 9380 bool valid; 9381 9382 valid = false; 9383 t = db_read_token(); 9384 if (t == tIDENT) { 9385 dev = device_lookup_by_name(db_tok_string); 9386 valid = true; 9387 } 9388 db_skip_to_eol(); 9389 if (!valid) { 9390 db_printf("usage: show t4 devlog <nexus>\n"); 9391 return; 9392 } 9393 9394 if (dev == NULL) { 9395 db_printf("device not found\n"); 9396 return; 9397 } 9398 9399 t4_dump_devlog(device_get_softc(dev)); 9400} 9401 9402DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL) 9403{ 9404 device_t dev; 9405 int radix, tid, t; 9406 bool valid; 9407 9408 valid = false; 9409 radix = db_radix; 9410 db_radix = 10; 9411 t = db_read_token(); 9412 if (t == tIDENT) { 9413 dev = device_lookup_by_name(db_tok_string); 9414 t = db_read_token(); 9415 if (t == tNUMBER) { 9416 tid = db_tok_number; 9417 valid = true; 9418 } 9419 } 9420 db_radix = radix; 9421 db_skip_to_eol(); 9422 if (!valid) { 9423 db_printf("usage: show t4 tcb <nexus> <tid>\n"); 9424 return; 9425 } 9426 9427 if (dev == NULL) { 9428 db_printf("device not found\n"); 9429 return; 9430 } 9431 if (tid < 0) { 9432 db_printf("invalid tid\n"); 9433 return; 9434 } 9435 9436 t4_dump_tcb(device_get_softc(dev), tid); 9437} 9438#endif 9439 9440static struct sx mlu; /* mod load unload */ 9441SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); 9442 9443static int 9444mod_event(module_t mod, int cmd, void *arg) 9445{ 9446 int rc = 0; 9447 static int loaded = 0; 9448 9449 switch (cmd) { 9450 case MOD_LOAD: 9451 sx_xlock(&mlu); 9452 if (loaded++ == 0) { 9453 t4_sge_modload(); 9454 t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl); 9455 t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl); 9456 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); 9457 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); 9458 sx_init(&t4_list_lock, "T4/T5 adapters"); 9459 SLIST_INIT(&t4_list); 9460#ifdef TCP_OFFLOAD 
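			/* ULDs (TOE, iWARP, iSCSI) add themselves to this list via t4_register_uld() when their modules load. */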
9461 sx_init(&t4_uld_list_lock, "T4/T5 ULDs"); 9462 SLIST_INIT(&t4_uld_list); 9463#endif 9464 t4_tracer_modload(); 9465 tweak_tunables(); 9466 } 9467 sx_xunlock(&mlu); 9468 break; 9469 9470 case MOD_UNLOAD: 9471 sx_xlock(&mlu); 9472 if (--loaded == 0) { 9473 int tries; 9474 9475 sx_slock(&t4_list_lock); 9476 if (!SLIST_EMPTY(&t4_list)) { 9477 rc = EBUSY; 9478 sx_sunlock(&t4_list_lock); 9479 goto done_unload; 9480 } 9481#ifdef TCP_OFFLOAD 9482 sx_slock(&t4_uld_list_lock); 9483 if (!SLIST_EMPTY(&t4_uld_list)) { 9484 rc = EBUSY; 9485 sx_sunlock(&t4_uld_list_lock); 9486 sx_sunlock(&t4_list_lock); 9487 goto done_unload; 9488 } 9489#endif 9490 tries = 0; 9491 while (tries++ < 5 && t4_sge_extfree_refs() != 0) { 9492 uprintf("%ju clusters with custom free routine " 9493 "still in use.\n", t4_sge_extfree_refs()); 9494 pause("t4unload", 2 * hz); 9495 } 9496#ifdef TCP_OFFLOAD 9497 sx_sunlock(&t4_uld_list_lock); 9498#endif 9499 sx_sunlock(&t4_list_lock); 9500 9501 if (t4_sge_extfree_refs() == 0) { 9502 t4_tracer_modunload(); 9503#ifdef TCP_OFFLOAD 9504 sx_destroy(&t4_uld_list_lock); 9505#endif 9506 sx_destroy(&t4_list_lock); 9507 t4_sge_modunload(); 9508 loaded = 0; 9509 } else { 9510 rc = EBUSY; 9511 loaded++; /* undo earlier decrement */ 9512 } 9513 } 9514done_unload: 9515 sx_xunlock(&mlu); 9516 break; 9517 } 9518 9519 return (rc); 9520} 9521 9522static devclass_t t4_devclass, t5_devclass; 9523static devclass_t cxgbe_devclass, cxl_devclass; 9524static devclass_t vcxgbe_devclass, vcxl_devclass; 9525 9526DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0); 9527MODULE_VERSION(t4nex, 1); 9528MODULE_DEPEND(t4nex, firmware, 1, 1, 1); 9529#ifdef DEV_NETMAP 9530MODULE_DEPEND(t4nex, netmap, 1, 1, 1); 9531#endif /* DEV_NETMAP */ 9532 9533 9534DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0); 9535MODULE_VERSION(t5nex, 1); 9536MODULE_DEPEND(t5nex, firmware, 1, 1, 1); 9537#ifdef DEV_NETMAP 9538MODULE_DEPEND(t5nex, netmap, 1, 1, 1); 9539#endif /* DEV_NETMAP */ 9540 9541DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 9542MODULE_VERSION(cxgbe, 1); 9543 9544DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0); 9545MODULE_VERSION(cxl, 1); 9546 9547DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0); 9548MODULE_VERSION(vcxgbe, 1); 9549 9550DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0); 9551MODULE_VERSION(vcxl, 1); 9552
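
/*
 * Illustrative only and not compiled into the driver: a minimal userland
 * sketch of exercising the t4_ioctl() interface above.  It assumes the
 * nexus character device is /dev/t4nex0 and that t4_ioctl.h is reachable
 * via the include below; adjust both for the system at hand.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

#include "t4_ioctl.h"	/* CHELSIO_T4_* commands and the T4_FILTER_* bits */

int
main(void)
{
	uint32_t mode;
	int fd;

	/* Requires root; t4_ioctl() checks PRIV_DRIVER before doing anything. */
	fd = open("/dev/t4nex0", O_RDWR);
	if (fd < 0)
		err(1, "open");

	/*
	 * CHELSIO_T4_GET_FILTER_MODE returns the filter mode that
	 * get_filter_mode() derives from the cached TP vlan_pri_map and
	 * ingress_config values.
	 */
	if (ioctl(fd, CHELSIO_T4_GET_FILTER_MODE, &mode) < 0)
		err(1, "CHELSIO_T4_GET_FILTER_MODE");
	printf("filter mode 0x%08x (VLAN match %sabled)\n", mode,
	    (mode & T4_FILTER_VLAN) ? "en" : "dis");

	return (0);
}
#endif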