t4_main.c revision 309459
1/*- 2 * Copyright (c) 2011 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_main.c 309459 2016-12-03 01:10:45Z jhb $"); 30 31#include "opt_ddb.h" 32#include "opt_inet.h" 33#include "opt_inet6.h" 34 35#include <sys/param.h> 36#include <sys/conf.h> 37#include <sys/priv.h> 38#include <sys/kernel.h> 39#include <sys/bus.h> 40#include <sys/systm.h> 41#include <sys/counter.h> 42#include <sys/module.h> 43#include <sys/malloc.h> 44#include <sys/queue.h> 45#include <sys/taskqueue.h> 46#include <sys/pciio.h> 47#include <dev/pci/pcireg.h> 48#include <dev/pci/pcivar.h> 49#include <dev/pci/pci_private.h> 50#include <sys/firmware.h> 51#include <sys/sbuf.h> 52#include <sys/smp.h> 53#include <sys/socket.h> 54#include <sys/sockio.h> 55#include <sys/sysctl.h> 56#include <net/ethernet.h> 57#include <net/if.h> 58#include <net/if_types.h> 59#include <net/if_dl.h> 60#include <net/if_vlan_var.h> 61#ifdef RSS 62#include <net/rss_config.h> 63#endif 64#if defined(__i386__) || defined(__amd64__) 65#include <vm/vm.h> 66#include <vm/pmap.h> 67#endif 68#ifdef DDB 69#include <ddb/ddb.h> 70#include <ddb/db_lex.h> 71#endif 72 73#include "common/common.h" 74#include "common/t4_msg.h" 75#include "common/t4_regs.h" 76#include "common/t4_regs_values.h" 77#include "t4_ioctl.h" 78#include "t4_l2t.h" 79#include "t4_mp_ring.h" 80 81/* T4 bus driver interface */ 82static int t4_probe(device_t); 83static int t4_attach(device_t); 84static int t4_detach(device_t); 85static device_method_t t4_methods[] = { 86 DEVMETHOD(device_probe, t4_probe), 87 DEVMETHOD(device_attach, t4_attach), 88 DEVMETHOD(device_detach, t4_detach), 89 90 DEVMETHOD_END 91}; 92static driver_t t4_driver = { 93 "t4nex", 94 t4_methods, 95 sizeof(struct adapter) 96}; 97 98 99/* T4 port (cxgbe) interface */ 100static int cxgbe_probe(device_t); 101static int cxgbe_attach(device_t); 102static int cxgbe_detach(device_t); 103device_method_t cxgbe_methods[] = { 104 DEVMETHOD(device_probe, cxgbe_probe), 105 DEVMETHOD(device_attach, cxgbe_attach), 106 DEVMETHOD(device_detach, cxgbe_detach), 107 { 0, 0 } 108}; 109static driver_t cxgbe_driver = { 110 
"cxgbe", 111 cxgbe_methods, 112 sizeof(struct port_info) 113}; 114 115/* T4 VI (vcxgbe) interface */ 116static int vcxgbe_probe(device_t); 117static int vcxgbe_attach(device_t); 118static int vcxgbe_detach(device_t); 119static device_method_t vcxgbe_methods[] = { 120 DEVMETHOD(device_probe, vcxgbe_probe), 121 DEVMETHOD(device_attach, vcxgbe_attach), 122 DEVMETHOD(device_detach, vcxgbe_detach), 123 { 0, 0 } 124}; 125static driver_t vcxgbe_driver = { 126 "vcxgbe", 127 vcxgbe_methods, 128 sizeof(struct vi_info) 129}; 130 131static d_ioctl_t t4_ioctl; 132 133static struct cdevsw t4_cdevsw = { 134 .d_version = D_VERSION, 135 .d_ioctl = t4_ioctl, 136 .d_name = "t4nex", 137}; 138 139/* T5 bus driver interface */ 140static int t5_probe(device_t); 141static device_method_t t5_methods[] = { 142 DEVMETHOD(device_probe, t5_probe), 143 DEVMETHOD(device_attach, t4_attach), 144 DEVMETHOD(device_detach, t4_detach), 145 146 DEVMETHOD_END 147}; 148static driver_t t5_driver = { 149 "t5nex", 150 t5_methods, 151 sizeof(struct adapter) 152}; 153 154 155/* T5 port (cxl) interface */ 156static driver_t cxl_driver = { 157 "cxl", 158 cxgbe_methods, 159 sizeof(struct port_info) 160}; 161 162/* T5 VI (vcxl) interface */ 163static driver_t vcxl_driver = { 164 "vcxl", 165 vcxgbe_methods, 166 sizeof(struct vi_info) 167}; 168 169/* ifnet + media interface */ 170static void cxgbe_init(void *); 171static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t); 172static int cxgbe_transmit(struct ifnet *, struct mbuf *); 173static void cxgbe_qflush(struct ifnet *); 174static int cxgbe_media_change(struct ifnet *); 175static void cxgbe_media_status(struct ifnet *, struct ifmediareq *); 176 177MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services"); 178 179/* 180 * Correct lock order when you need to acquire multiple locks is t4_list_lock, 181 * then ADAPTER_LOCK, then t4_uld_list_lock. 182 */ 183static struct sx t4_list_lock; 184SLIST_HEAD(, adapter) t4_list; 185#ifdef TCP_OFFLOAD 186static struct sx t4_uld_list_lock; 187SLIST_HEAD(, uld_info) t4_uld_list; 188#endif 189 190/* 191 * Tunables. See tweak_tunables() too. 192 * 193 * Each tunable is set to a default value here if it's known at compile-time. 194 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should 195 * provide a reasonable default when the driver is loaded. 196 * 197 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to 198 * T5 are under hw.cxl. 199 */ 200 201/* 202 * Number of queues for tx and rx, 10G and 1G, NIC and offload. 
203 */ 204#define NTXQ_10G 16 205int t4_ntxq10g = -1; 206TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g); 207 208#define NRXQ_10G 8 209int t4_nrxq10g = -1; 210TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g); 211 212#define NTXQ_1G 4 213int t4_ntxq1g = -1; 214TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g); 215 216#define NRXQ_1G 2 217int t4_nrxq1g = -1; 218TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g); 219 220#define NTXQ_VI 1 221static int t4_ntxq_vi = -1; 222TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi); 223 224#define NRXQ_VI 1 225static int t4_nrxq_vi = -1; 226TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi); 227 228static int t4_rsrv_noflowq = 0; 229TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq); 230 231#ifdef TCP_OFFLOAD 232#define NOFLDTXQ_10G 8 233static int t4_nofldtxq10g = -1; 234TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g); 235 236#define NOFLDRXQ_10G 2 237static int t4_nofldrxq10g = -1; 238TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g); 239 240#define NOFLDTXQ_1G 2 241static int t4_nofldtxq1g = -1; 242TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g); 243 244#define NOFLDRXQ_1G 1 245static int t4_nofldrxq1g = -1; 246TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g); 247 248#define NOFLDTXQ_VI 1 249static int t4_nofldtxq_vi = -1; 250TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi); 251 252#define NOFLDRXQ_VI 1 253static int t4_nofldrxq_vi = -1; 254TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi); 255#endif 256 257#ifdef DEV_NETMAP 258#define NNMTXQ_VI 2 259static int t4_nnmtxq_vi = -1; 260TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi); 261 262#define NNMRXQ_VI 2 263static int t4_nnmrxq_vi = -1; 264TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi); 265#endif 266 267/* 268 * Holdoff parameters for 10G and 1G ports. 269 */ 270#define TMR_IDX_10G 1 271int t4_tmr_idx_10g = TMR_IDX_10G; 272TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g); 273 274#define PKTC_IDX_10G (-1) 275int t4_pktc_idx_10g = PKTC_IDX_10G; 276TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g); 277 278#define TMR_IDX_1G 1 279int t4_tmr_idx_1g = TMR_IDX_1G; 280TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g); 281 282#define PKTC_IDX_1G (-1) 283int t4_pktc_idx_1g = PKTC_IDX_1G; 284TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g); 285 286/* 287 * Size (# of entries) of each tx and rx queue. 288 */ 289unsigned int t4_qsize_txq = TX_EQ_QSIZE; 290TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq); 291 292unsigned int t4_qsize_rxq = RX_IQ_QSIZE; 293TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq); 294 295/* 296 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively). 297 */ 298int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX; 299TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types); 300 301/* 302 * Configuration file. 303 */ 304#define DEFAULT_CF "default" 305#define FLASH_CF "flash" 306#define UWIRE_CF "uwire" 307#define FPGA_CF "fpga" 308static char t4_cfg_file[32] = DEFAULT_CF; 309TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file)); 310 311/* 312 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively). 313 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them. 314 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water 315 * mark or when signalled to do so, 0 to never emit PAUSE. 
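 * (For example, hw.cxgbe.pause_settings="0" in /boot/loader.conf disables both
 * rx_pause and tx_pause; this is an illustrative loader tunable setting, not
 * part of the original source.)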
316 */ 317static int t4_pause_settings = PAUSE_TX | PAUSE_RX; 318TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings); 319 320/* 321 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed, 322 * encouraged respectively). 323 */ 324static unsigned int t4_fw_install = 1; 325TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install); 326 327/* 328 * ASIC features that will be used. Disable the ones you don't want so that the 329 * chip resources aren't wasted on features that will not be used. 330 */ 331static int t4_nbmcaps_allowed = 0; 332TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed); 333 334static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */ 335TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed); 336 337static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS | 338 FW_CAPS_CONFIG_SWITCH_EGRESS; 339TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed); 340 341static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC; 342TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed); 343 344static int t4_toecaps_allowed = -1; 345TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed); 346 347static int t4_rdmacaps_allowed = -1; 348TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed); 349 350static int t4_tlscaps_allowed = 0; 351TUNABLE_INT("hw.cxgbe.tlscaps_allowed", &t4_tlscaps_allowed); 352 353static int t4_iscsicaps_allowed = -1; 354TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed); 355 356static int t4_fcoecaps_allowed = 0; 357TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed); 358 359static int t5_write_combine = 0; 360TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine); 361 362static int t4_num_vis = 1; 363TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis); 364 365/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */ 366static int vi_mac_funcs[] = { 367 FW_VI_FUNC_OFLD, 368 FW_VI_FUNC_IWARP, 369 FW_VI_FUNC_OPENISCSI, 370 FW_VI_FUNC_OPENFCOE, 371 FW_VI_FUNC_FOISCSI, 372 FW_VI_FUNC_FOFCOE, 373}; 374 375struct intrs_and_queues { 376 uint16_t intr_type; /* INTx, MSI, or MSI-X */ 377 uint16_t nirq; /* Total # of vectors */ 378 uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */ 379 uint16_t intr_flags_1g; /* Interrupt flags for each 1G port */ 380 uint16_t ntxq10g; /* # of NIC txq's for each 10G port */ 381 uint16_t nrxq10g; /* # of NIC rxq's for each 10G port */ 382 uint16_t ntxq1g; /* # of NIC txq's for each 1G port */ 383 uint16_t nrxq1g; /* # of NIC rxq's for each 1G port */ 384 uint16_t rsrv_noflowq; /* Flag whether to reserve queue 0 */ 385 uint16_t nofldtxq10g; /* # of TOE txq's for each 10G port */ 386 uint16_t nofldrxq10g; /* # of TOE rxq's for each 10G port */ 387 uint16_t nofldtxq1g; /* # of TOE txq's for each 1G port */ 388 uint16_t nofldrxq1g; /* # of TOE rxq's for each 1G port */ 389 390 /* The vcxgbe/vcxl interfaces use these and not the ones above. 
*/ 391 uint16_t ntxq_vi; /* # of NIC txq's */ 392 uint16_t nrxq_vi; /* # of NIC rxq's */ 393 uint16_t nofldtxq_vi; /* # of TOE txq's */ 394 uint16_t nofldrxq_vi; /* # of TOE rxq's */ 395 uint16_t nnmtxq_vi; /* # of netmap txq's */ 396 uint16_t nnmrxq_vi; /* # of netmap rxq's */ 397}; 398 399struct filter_entry { 400 uint32_t valid:1; /* filter allocated and valid */ 401 uint32_t locked:1; /* filter is administratively locked */ 402 uint32_t pending:1; /* filter action is pending firmware reply */ 403 uint32_t smtidx:8; /* Source MAC Table index for smac */ 404 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */ 405 406 struct t4_filter_specification fs; 407}; 408 409static void setup_memwin(struct adapter *); 410static void position_memwin(struct adapter *, int, uint32_t); 411static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int); 412static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *, 413 int); 414static inline int write_via_memwin(struct adapter *, int, uint32_t, 415 const uint32_t *, int); 416static int validate_mem_range(struct adapter *, uint32_t, int); 417static int fwmtype_to_hwmtype(int); 418static int validate_mt_off_len(struct adapter *, int, uint32_t, int, 419 uint32_t *); 420static int fixup_devlog_params(struct adapter *); 421static int cfg_itype_and_nqueues(struct adapter *, int, int, int, 422 struct intrs_and_queues *); 423static int prep_firmware(struct adapter *); 424static int partition_resources(struct adapter *, const struct firmware *, 425 const char *); 426static int get_params__pre_init(struct adapter *); 427static int get_params__post_init(struct adapter *); 428static int set_params__post_init(struct adapter *); 429static void t4_set_desc(struct adapter *); 430static void build_medialist(struct port_info *, struct ifmedia *); 431static int cxgbe_init_synchronized(struct vi_info *); 432static int cxgbe_uninit_synchronized(struct vi_info *); 433static void quiesce_txq(struct adapter *, struct sge_txq *); 434static void quiesce_wrq(struct adapter *, struct sge_wrq *); 435static void quiesce_iq(struct adapter *, struct sge_iq *); 436static void quiesce_fl(struct adapter *, struct sge_fl *); 437static int t4_alloc_irq(struct adapter *, struct irq *, int rid, 438 driver_intr_t *, void *, char *); 439static int t4_free_irq(struct adapter *, struct irq *); 440static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *); 441static void vi_refresh_stats(struct adapter *, struct vi_info *); 442static void cxgbe_refresh_stats(struct adapter *, struct port_info *); 443static void cxgbe_tick(void *); 444static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t); 445static void cxgbe_sysctls(struct port_info *); 446static int sysctl_int_array(SYSCTL_HANDLER_ARGS); 447static int sysctl_bitfield(SYSCTL_HANDLER_ARGS); 448static int sysctl_btphy(SYSCTL_HANDLER_ARGS); 449static int sysctl_noflowq(SYSCTL_HANDLER_ARGS); 450static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS); 451static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS); 452static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS); 453static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS); 454static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS); 455static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS); 456static int sysctl_temperature(SYSCTL_HANDLER_ARGS); 457#ifdef SBUF_DRAIN 458static int sysctl_cctrl(SYSCTL_HANDLER_ARGS); 459static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS); 460static int sysctl_cim_la(SYSCTL_HANDLER_ARGS); 461static int 
sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS); 462static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS); 463static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS); 464static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS); 465static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS); 466static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS); 467static int sysctl_devlog(SYSCTL_HANDLER_ARGS); 468static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS); 469static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS); 470static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS); 471static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS); 472static int sysctl_meminfo(SYSCTL_HANDLER_ARGS); 473static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS); 474static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS); 475static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS); 476static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS); 477static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS); 478static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS); 479static int sysctl_tids(SYSCTL_HANDLER_ARGS); 480static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS); 481static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS); 482static int sysctl_tp_la(SYSCTL_HANDLER_ARGS); 483static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS); 484static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS); 485static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS); 486static int sysctl_tc_params(SYSCTL_HANDLER_ARGS); 487#endif 488#ifdef TCP_OFFLOAD 489static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS); 490static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS); 491static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS); 492#endif 493static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t); 494static uint32_t mode_to_fconf(uint32_t); 495static uint32_t mode_to_iconf(uint32_t); 496static int check_fspec_against_fconf_iconf(struct adapter *, 497 struct t4_filter_specification *); 498static int get_filter_mode(struct adapter *, uint32_t *); 499static int set_filter_mode(struct adapter *, uint32_t); 500static inline uint64_t get_filter_hits(struct adapter *, uint32_t); 501static int get_filter(struct adapter *, struct t4_filter *); 502static int set_filter(struct adapter *, struct t4_filter *); 503static int del_filter(struct adapter *, struct t4_filter *); 504static void clear_filter(struct filter_entry *); 505static int set_filter_wr(struct adapter *, int); 506static int del_filter_wr(struct adapter *, int); 507static int set_tcb_rpl(struct sge_iq *, const struct rss_header *, 508 struct mbuf *); 509static int get_sge_context(struct adapter *, struct t4_sge_context *); 510static int load_fw(struct adapter *, struct t4_data *); 511static int read_card_mem(struct adapter *, int, struct t4_mem_range *); 512static int read_i2c(struct adapter *, struct t4_i2c_data *); 513#ifdef TCP_OFFLOAD 514static int toe_capability(struct vi_info *, int); 515#endif 516static int mod_event(module_t, int, void *); 517 518struct { 519 uint16_t device; 520 char *desc; 521} t4_pciids[] = { 522 {0xa000, "Chelsio Terminator 4 FPGA"}, 523 {0x4400, "Chelsio T440-dbg"}, 524 {0x4401, "Chelsio T420-CR"}, 525 {0x4402, "Chelsio T422-CR"}, 526 {0x4403, "Chelsio T440-CR"}, 527 {0x4404, "Chelsio T420-BCH"}, 528 {0x4405, "Chelsio T440-BCH"}, 529 {0x4406, "Chelsio T440-CH"}, 530 {0x4407, "Chelsio T420-SO"}, 531 {0x4408, "Chelsio T420-CX"}, 532 {0x4409, "Chelsio T420-BT"}, 533 {0x440a, "Chelsio T404-BT"}, 534 {0x440e, "Chelsio T440-LP-CR"}, 535}, t5_pciids[] = { 536 {0xb000, "Chelsio Terminator 5 FPGA"}, 537 {0x5400, "Chelsio T580-dbg"}, 538 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */ 539 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G 
*/ 540 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */ 541 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */ 542 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */ 543 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */ 544 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */ 545 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */ 546 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */ 547 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */ 548 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */ 549 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */ 550 {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */ 551#ifdef notyet 552 {0x5404, "Chelsio T520-BCH"}, 553 {0x5405, "Chelsio T540-BCH"}, 554 {0x5406, "Chelsio T540-CH"}, 555 {0x5408, "Chelsio T520-CX"}, 556 {0x540b, "Chelsio B520-SR"}, 557 {0x540c, "Chelsio B504-BT"}, 558 {0x540f, "Chelsio Amsterdam"}, 559 {0x5413, "Chelsio T580-CHR"}, 560#endif 561}; 562 563#ifdef TCP_OFFLOAD 564/* 565 * service_iq() has an iq and needs the fl. Offset of fl from the iq should be 566 * exactly the same for both rxq and ofld_rxq. 567 */ 568CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq)); 569CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl)); 570#endif 571CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE); 572 573static int 574t4_probe(device_t dev) 575{ 576 int i; 577 uint16_t v = pci_get_vendor(dev); 578 uint16_t d = pci_get_device(dev); 579 uint8_t f = pci_get_function(dev); 580 581 if (v != PCI_VENDOR_ID_CHELSIO) 582 return (ENXIO); 583 584 /* Attach only to PF0 of the FPGA */ 585 if (d == 0xa000 && f != 0) 586 return (ENXIO); 587 588 for (i = 0; i < nitems(t4_pciids); i++) { 589 if (d == t4_pciids[i].device) { 590 device_set_desc(dev, t4_pciids[i].desc); 591 return (BUS_PROBE_DEFAULT); 592 } 593 } 594 595 return (ENXIO); 596} 597 598static int 599t5_probe(device_t dev) 600{ 601 int i; 602 uint16_t v = pci_get_vendor(dev); 603 uint16_t d = pci_get_device(dev); 604 uint8_t f = pci_get_function(dev); 605 606 if (v != PCI_VENDOR_ID_CHELSIO) 607 return (ENXIO); 608 609 /* Attach only to PF0 of the FPGA */ 610 if (d == 0xb000 && f != 0) 611 return (ENXIO); 612 613 for (i = 0; i < nitems(t5_pciids); i++) { 614 if (d == t5_pciids[i].device) { 615 device_set_desc(dev, t5_pciids[i].desc); 616 return (BUS_PROBE_DEFAULT); 617 } 618 } 619 620 return (ENXIO); 621} 622 623static void 624t5_attribute_workaround(device_t dev) 625{ 626 device_t root_port; 627 uint32_t v; 628 629 /* 630 * The T5 chips do not properly echo the No Snoop and Relaxed 631 * Ordering attributes when replying to a TLP from a Root 632 * Port. As a workaround, find the parent Root Port and 633 * disable No Snoop and Relaxed Ordering. Note that this 634 * affects all devices under this root port. 
635 */ 636 root_port = pci_find_pcie_root_port(dev); 637 if (root_port == NULL) { 638 device_printf(dev, "Unable to find parent root port\n"); 639 return; 640 } 641 642 v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL, 643 PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2); 644 if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) != 645 0) 646 device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n", 647 device_get_nameunit(root_port)); 648} 649 650static int 651t4_attach(device_t dev) 652{ 653 struct adapter *sc; 654 int rc = 0, i, j, n10g, n1g, rqidx, tqidx; 655 struct make_dev_args mda; 656 struct intrs_and_queues iaq; 657 struct sge *s; 658 uint8_t *buf; 659#ifdef TCP_OFFLOAD 660 int ofld_rqidx, ofld_tqidx; 661#endif 662#ifdef DEV_NETMAP 663 int nm_rqidx, nm_tqidx; 664#endif 665 int num_vis; 666 667 sc = device_get_softc(dev); 668 sc->dev = dev; 669 TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags); 670 671 if ((pci_get_device(dev) & 0xff00) == 0x5400) 672 t5_attribute_workaround(dev); 673 pci_enable_busmaster(dev); 674 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { 675 uint32_t v; 676 677 pci_set_max_read_req(dev, 4096); 678 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2); 679 v |= PCIEM_CTL_RELAXED_ORD_ENABLE; 680 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2); 681 682 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5); 683 } 684 685 sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS); 686 sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL); 687 sc->traceq = -1; 688 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF); 689 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer", 690 device_get_nameunit(dev)); 691 692 snprintf(sc->lockname, sizeof(sc->lockname), "%s", 693 device_get_nameunit(dev)); 694 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF); 695 t4_add_adapter(sc); 696 697 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF); 698 TAILQ_INIT(&sc->sfl); 699 callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0); 700 701 mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF); 702 703 rc = t4_map_bars_0_and_4(sc); 704 if (rc != 0) 705 goto done; /* error message displayed already */ 706 707 /* 708 * This is the real PF# to which we're attaching. Works from within PCI 709 * passthrough environments too, where pci_get_function() could return a 710 * different PF# depending on the passthrough configuration. We need to 711 * use the real PF# in all our communication with the firmware. 712 */ 713 sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI)); 714 sc->mbox = sc->pf; 715 716 memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); 717 718 /* Prepare the adapter for operation. */ 719 buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK); 720 rc = -t4_prep_adapter(sc, buf); 721 free(buf, M_CXGBE); 722 if (rc != 0) { 723 device_printf(dev, "failed to prepare adapter: %d.\n", rc); 724 goto done; 725 } 726 727 /* 728 * Do this really early, with the memory windows set up even before the 729 * character device. The userland tool's register i/o and mem read 730 * will work even in "recovery mode". 
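 * (The userland tool referred to here is cxgbetool, which operates on the
 * t4nex character device created just below; added note for context.)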
731 */ 732 setup_memwin(sc); 733 if (t4_init_devlog_params(sc, 0) == 0) 734 fixup_devlog_params(sc); 735 make_dev_args_init(&mda); 736 mda.mda_devsw = &t4_cdevsw; 737 mda.mda_uid = UID_ROOT; 738 mda.mda_gid = GID_WHEEL; 739 mda.mda_mode = 0600; 740 mda.mda_si_drv1 = sc; 741 rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev)); 742 if (rc != 0) 743 device_printf(dev, "failed to create nexus char device: %d.\n", 744 rc); 745 746 /* Go no further if recovery mode has been requested. */ 747 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) { 748 device_printf(dev, "recovery mode.\n"); 749 goto done; 750 } 751 752#if defined(__i386__) 753 if ((cpu_feature & CPUID_CX8) == 0) { 754 device_printf(dev, "64 bit atomics not available.\n"); 755 rc = ENOTSUP; 756 goto done; 757 } 758#endif 759 760 /* Prepare the firmware for operation */ 761 rc = prep_firmware(sc); 762 if (rc != 0) 763 goto done; /* error message displayed already */ 764 765 rc = get_params__post_init(sc); 766 if (rc != 0) 767 goto done; /* error message displayed already */ 768 769 rc = set_params__post_init(sc); 770 if (rc != 0) 771 goto done; /* error message displayed already */ 772 773 rc = t4_map_bar_2(sc); 774 if (rc != 0) 775 goto done; /* error message displayed already */ 776 777 rc = t4_create_dma_tag(sc); 778 if (rc != 0) 779 goto done; /* error message displayed already */ 780 781 /* 782 * Number of VIs to create per-port. The first VI is the "main" regular 783 * VI for the port. The rest are additional virtual interfaces on the 784 * same physical port. Note that the main VI does not have native 785 * netmap support but the extra VIs do. 786 * 787 * Limit the number of VIs per port to the number of available 788 * MAC addresses per port. 789 */ 790 if (t4_num_vis >= 1) 791 num_vis = t4_num_vis; 792 else 793 num_vis = 1; 794 if (num_vis > nitems(vi_mac_funcs)) { 795 num_vis = nitems(vi_mac_funcs); 796 device_printf(dev, "Number of VIs limited to %d\n", num_vis); 797 } 798 799 /* 800 * First pass over all the ports - allocate VIs and initialize some 801 * basic parameters like mac address, port type, etc. We also figure 802 * out whether a port is 10G or 1G and use that information when 803 * calculating how many interrupts to attempt to allocate. 804 */ 805 n10g = n1g = 0; 806 for_each_port(sc, i) { 807 struct port_info *pi; 808 809 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK); 810 sc->port[i] = pi; 811 812 /* These must be set before t4_port_init */ 813 pi->adapter = sc; 814 pi->port_id = i; 815 /* 816 * XXX: vi[0] is special so we can't delay this allocation until 817 * pi->nvi's final value is known. 818 */ 819 pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE, 820 M_ZERO | M_WAITOK); 821 822 /* 823 * Allocate the "main" VI and initialize parameters 824 * like mac addr. 
825 */ 826 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); 827 if (rc != 0) { 828 device_printf(dev, "unable to initialize port %d: %d\n", 829 i, rc); 830 free(pi->vi, M_CXGBE); 831 free(pi, M_CXGBE); 832 sc->port[i] = NULL; 833 goto done; 834 } 835 836 pi->link_cfg.requested_fc &= ~(PAUSE_TX | PAUSE_RX); 837 pi->link_cfg.requested_fc |= t4_pause_settings; 838 pi->link_cfg.fc &= ~(PAUSE_TX | PAUSE_RX); 839 pi->link_cfg.fc |= t4_pause_settings; 840 841 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, &pi->link_cfg); 842 if (rc != 0) { 843 device_printf(dev, "port %d l1cfg failed: %d\n", i, rc); 844 free(pi->vi, M_CXGBE); 845 free(pi, M_CXGBE); 846 sc->port[i] = NULL; 847 goto done; 848 } 849 850 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d", 851 device_get_nameunit(dev), i); 852 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF); 853 sc->chan_map[pi->tx_chan] = i; 854 855 pi->tc = malloc(sizeof(struct tx_sched_class) * 856 sc->chip_params->nsched_cls, M_CXGBE, M_ZERO | M_WAITOK); 857 858 if (is_10G_port(pi) || is_40G_port(pi)) { 859 n10g++; 860 } else { 861 n1g++; 862 } 863 864 pi->linkdnrc = -1; 865 866 pi->dev = device_add_child(dev, is_t4(sc) ? "cxgbe" : "cxl", -1); 867 if (pi->dev == NULL) { 868 device_printf(dev, 869 "failed to add device for port %d.\n", i); 870 rc = ENXIO; 871 goto done; 872 } 873 pi->vi[0].dev = pi->dev; 874 device_set_softc(pi->dev, pi); 875 } 876 877 /* 878 * Interrupt type, # of interrupts, # of rx/tx queues, etc. 879 */ 880 rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq); 881 if (rc != 0) 882 goto done; /* error message displayed already */ 883 if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0) 884 num_vis = 1; 885 886 sc->intr_type = iaq.intr_type; 887 sc->intr_count = iaq.nirq; 888 889 s = &sc->sge; 890 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g; 891 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g; 892 if (num_vis > 1) { 893 s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi; 894 s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi; 895 } 896 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */ 897 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */ 898 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ 899#ifdef TCP_OFFLOAD 900 if (is_offload(sc)) { 901 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g; 902 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g; 903 if (num_vis > 1) { 904 s->nofldrxq += (n10g + n1g) * (num_vis - 1) * 905 iaq.nofldrxq_vi; 906 s->nofldtxq += (n10g + n1g) * (num_vis - 1) * 907 iaq.nofldtxq_vi; 908 } 909 s->neq += s->nofldtxq + s->nofldrxq; 910 s->niq += s->nofldrxq; 911 912 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq), 913 M_CXGBE, M_ZERO | M_WAITOK); 914 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq), 915 M_CXGBE, M_ZERO | M_WAITOK); 916 } 917#endif 918#ifdef DEV_NETMAP 919 if (num_vis > 1) { 920 s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi; 921 s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi; 922 } 923 s->neq += s->nnmtxq + s->nnmrxq; 924 s->niq += s->nnmrxq; 925 926 s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq), 927 M_CXGBE, M_ZERO | M_WAITOK); 928 s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq), 929 M_CXGBE, M_ZERO | M_WAITOK); 930#endif 931 932 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE, 933 M_ZERO | M_WAITOK); 934 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, 935 M_ZERO | M_WAITOK); 936 s->txq = malloc(s->ntxq * sizeof(struct 
sge_txq), M_CXGBE, 937 M_ZERO | M_WAITOK); 938 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE, 939 M_ZERO | M_WAITOK); 940 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE, 941 M_ZERO | M_WAITOK); 942 943 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, 944 M_ZERO | M_WAITOK); 945 946 t4_init_l2t(sc, M_WAITOK); 947 948 /* 949 * Second pass over the ports. This time we know the number of rx and 950 * tx queues that each port should get. 951 */ 952 rqidx = tqidx = 0; 953#ifdef TCP_OFFLOAD 954 ofld_rqidx = ofld_tqidx = 0; 955#endif 956#ifdef DEV_NETMAP 957 nm_rqidx = nm_tqidx = 0; 958#endif 959 for_each_port(sc, i) { 960 struct port_info *pi = sc->port[i]; 961 struct vi_info *vi; 962 963 if (pi == NULL) 964 continue; 965 966 pi->nvi = num_vis; 967 for_each_vi(pi, j, vi) { 968 vi->pi = pi; 969 vi->qsize_rxq = t4_qsize_rxq; 970 vi->qsize_txq = t4_qsize_txq; 971 972 vi->first_rxq = rqidx; 973 vi->first_txq = tqidx; 974 if (is_10G_port(pi) || is_40G_port(pi)) { 975 vi->tmr_idx = t4_tmr_idx_10g; 976 vi->pktc_idx = t4_pktc_idx_10g; 977 vi->flags |= iaq.intr_flags_10g & INTR_RXQ; 978 vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi; 979 vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi; 980 } else { 981 vi->tmr_idx = t4_tmr_idx_1g; 982 vi->pktc_idx = t4_pktc_idx_1g; 983 vi->flags |= iaq.intr_flags_1g & INTR_RXQ; 984 vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi; 985 vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi; 986 } 987 rqidx += vi->nrxq; 988 tqidx += vi->ntxq; 989 990 if (j == 0 && vi->ntxq > 1) 991 vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0; 992 else 993 vi->rsrv_noflowq = 0; 994 995#ifdef TCP_OFFLOAD 996 vi->first_ofld_rxq = ofld_rqidx; 997 vi->first_ofld_txq = ofld_tqidx; 998 if (is_10G_port(pi) || is_40G_port(pi)) { 999 vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ; 1000 vi->nofldrxq = j == 0 ? iaq.nofldrxq10g : 1001 iaq.nofldrxq_vi; 1002 vi->nofldtxq = j == 0 ? iaq.nofldtxq10g : 1003 iaq.nofldtxq_vi; 1004 } else { 1005 vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ; 1006 vi->nofldrxq = j == 0 ? iaq.nofldrxq1g : 1007 iaq.nofldrxq_vi; 1008 vi->nofldtxq = j == 0 ? iaq.nofldtxq1g : 1009 iaq.nofldtxq_vi; 1010 } 1011 ofld_rqidx += vi->nofldrxq; 1012 ofld_tqidx += vi->nofldtxq; 1013#endif 1014#ifdef DEV_NETMAP 1015 if (j > 0) { 1016 vi->first_nm_rxq = nm_rqidx; 1017 vi->first_nm_txq = nm_tqidx; 1018 vi->nnmrxq = iaq.nnmrxq_vi; 1019 vi->nnmtxq = iaq.nnmtxq_vi; 1020 nm_rqidx += vi->nnmrxq; 1021 nm_tqidx += vi->nnmtxq; 1022 } 1023#endif 1024 } 1025 } 1026 1027 rc = t4_setup_intr_handlers(sc); 1028 if (rc != 0) { 1029 device_printf(dev, 1030 "failed to setup interrupt handlers: %d\n", rc); 1031 goto done; 1032 } 1033 1034 rc = bus_generic_attach(dev); 1035 if (rc != 0) { 1036 device_printf(dev, 1037 "failed to attach all child ports: %d\n", rc); 1038 goto done; 1039 } 1040 1041 device_printf(dev, 1042 "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n", 1043 sc->params.pci.speed, sc->params.pci.width, sc->params.nports, 1044 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : 1045 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), 1046 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq); 1047 1048 t4_set_desc(sc); 1049 1050done: 1051 if (rc != 0 && sc->cdev) { 1052 /* cdev was created and so cxgbetool works; recover that way. 
*/ 1053 device_printf(dev, 1054 "error during attach, adapter is now in recovery mode.\n"); 1055 rc = 0; 1056 } 1057 1058 if (rc != 0) 1059 t4_detach_common(dev); 1060 else 1061 t4_sysctls(sc); 1062 1063 return (rc); 1064} 1065 1066/* 1067 * Idempotent 1068 */ 1069static int 1070t4_detach(device_t dev) 1071{ 1072 struct adapter *sc; 1073 1074 sc = device_get_softc(dev); 1075 1076 return (t4_detach_common(dev)); 1077} 1078 1079int 1080t4_detach_common(device_t dev) 1081{ 1082 struct adapter *sc; 1083 struct port_info *pi; 1084 int i, rc; 1085 1086 sc = device_get_softc(dev); 1087 1088 if (sc->flags & FULL_INIT_DONE) { 1089 if (!(sc->flags & IS_VF)) 1090 t4_intr_disable(sc); 1091 } 1092 1093 if (sc->cdev) { 1094 destroy_dev(sc->cdev); 1095 sc->cdev = NULL; 1096 } 1097 1098 if (device_is_attached(dev)) { 1099 rc = bus_generic_detach(dev); 1100 if (rc) { 1101 device_printf(dev, 1102 "failed to detach child devices: %d\n", rc); 1103 return (rc); 1104 } 1105 } 1106 1107 for (i = 0; i < sc->intr_count; i++) 1108 t4_free_irq(sc, &sc->irq[i]); 1109 1110 for (i = 0; i < MAX_NPORTS; i++) { 1111 pi = sc->port[i]; 1112 if (pi) { 1113 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid); 1114 if (pi->dev) 1115 device_delete_child(dev, pi->dev); 1116 1117 mtx_destroy(&pi->pi_lock); 1118 free(pi->vi, M_CXGBE); 1119 free(pi->tc, M_CXGBE); 1120 free(pi, M_CXGBE); 1121 } 1122 } 1123 1124 if (sc->flags & FULL_INIT_DONE) 1125 adapter_full_uninit(sc); 1126 1127 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) 1128 t4_fw_bye(sc, sc->mbox); 1129 1130 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) 1131 pci_release_msi(dev); 1132 1133 if (sc->regs_res) 1134 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, 1135 sc->regs_res); 1136 1137 if (sc->udbs_res) 1138 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid, 1139 sc->udbs_res); 1140 1141 if (sc->msix_res) 1142 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, 1143 sc->msix_res); 1144 1145 if (sc->l2t) 1146 t4_free_l2t(sc->l2t); 1147 1148#ifdef TCP_OFFLOAD 1149 free(sc->sge.ofld_rxq, M_CXGBE); 1150 free(sc->sge.ofld_txq, M_CXGBE); 1151#endif 1152#ifdef DEV_NETMAP 1153 free(sc->sge.nm_rxq, M_CXGBE); 1154 free(sc->sge.nm_txq, M_CXGBE); 1155#endif 1156 free(sc->irq, M_CXGBE); 1157 free(sc->sge.rxq, M_CXGBE); 1158 free(sc->sge.txq, M_CXGBE); 1159 free(sc->sge.ctrlq, M_CXGBE); 1160 free(sc->sge.iqmap, M_CXGBE); 1161 free(sc->sge.eqmap, M_CXGBE); 1162 free(sc->tids.ftid_tab, M_CXGBE); 1163 t4_destroy_dma_tag(sc); 1164 if (mtx_initialized(&sc->sc_lock)) { 1165 sx_xlock(&t4_list_lock); 1166 SLIST_REMOVE(&t4_list, sc, adapter, link); 1167 sx_xunlock(&t4_list_lock); 1168 mtx_destroy(&sc->sc_lock); 1169 } 1170 1171 callout_drain(&sc->sfl_callout); 1172 if (mtx_initialized(&sc->tids.ftid_lock)) 1173 mtx_destroy(&sc->tids.ftid_lock); 1174 if (mtx_initialized(&sc->sfl_lock)) 1175 mtx_destroy(&sc->sfl_lock); 1176 if (mtx_initialized(&sc->ifp_lock)) 1177 mtx_destroy(&sc->ifp_lock); 1178 if (mtx_initialized(&sc->reg_lock)) 1179 mtx_destroy(&sc->reg_lock); 1180 1181 for (i = 0; i < NUM_MEMWIN; i++) { 1182 struct memwin *mw = &sc->memwin[i]; 1183 1184 if (rw_initialized(&mw->mw_lock)) 1185 rw_destroy(&mw->mw_lock); 1186 } 1187 1188 bzero(sc, sizeof(*sc)); 1189 1190 return (0); 1191} 1192 1193static int 1194cxgbe_probe(device_t dev) 1195{ 1196 char buf[128]; 1197 struct port_info *pi = device_get_softc(dev); 1198 1199 snprintf(buf, sizeof(buf), "port %d", pi->port_id); 1200 device_set_desc_copy(dev, buf); 1201 1202 return (BUS_PROBE_DEFAULT); 1203} 1204 
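/* Added note: ifnet capabilities advertised by cxgbe/cxl interfaces; T4_CAP_ENABLE is the subset enabled by default at attach (currently all of T4_CAP). */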
1205#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ 1206 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ 1207 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS) 1208#define T4_CAP_ENABLE (T4_CAP) 1209 1210static int 1211cxgbe_vi_attach(device_t dev, struct vi_info *vi) 1212{ 1213 struct ifnet *ifp; 1214 struct sbuf *sb; 1215 1216 vi->xact_addr_filt = -1; 1217 callout_init(&vi->tick, 1); 1218 1219 /* Allocate an ifnet and set it up */ 1220 ifp = if_alloc(IFT_ETHER); 1221 if (ifp == NULL) { 1222 device_printf(dev, "Cannot allocate ifnet\n"); 1223 return (ENOMEM); 1224 } 1225 vi->ifp = ifp; 1226 ifp->if_softc = vi; 1227 1228 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1229 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1230 1231 ifp->if_init = cxgbe_init; 1232 ifp->if_ioctl = cxgbe_ioctl; 1233 ifp->if_transmit = cxgbe_transmit; 1234 ifp->if_qflush = cxgbe_qflush; 1235 1236 ifp->if_capabilities = T4_CAP; 1237#ifdef TCP_OFFLOAD 1238 if (vi->nofldrxq != 0) 1239 ifp->if_capabilities |= IFCAP_TOE; 1240#endif 1241 ifp->if_capenable = T4_CAP_ENABLE; 1242 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO | 1243 CSUM_UDP_IPV6 | CSUM_TCP_IPV6; 1244 1245 ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 1246 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS; 1247 ifp->if_hw_tsomaxsegsize = 65536; 1248 1249 /* Initialize ifmedia for this VI */ 1250 ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change, 1251 cxgbe_media_status); 1252 build_medialist(vi->pi, &vi->media); 1253 1254 vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp, 1255 EVENTHANDLER_PRI_ANY); 1256 1257 ether_ifattach(ifp, vi->hw_addr); 1258#ifdef DEV_NETMAP 1259 if (vi->nnmrxq != 0) 1260 cxgbe_nm_attach(vi); 1261#endif 1262 sb = sbuf_new_auto(); 1263 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq); 1264#ifdef TCP_OFFLOAD 1265 if (ifp->if_capabilities & IFCAP_TOE) 1266 sbuf_printf(sb, "; %d txq, %d rxq (TOE)", 1267 vi->nofldtxq, vi->nofldrxq); 1268#endif 1269#ifdef DEV_NETMAP 1270 if (ifp->if_capabilities & IFCAP_NETMAP) 1271 sbuf_printf(sb, "; %d txq, %d rxq (netmap)", 1272 vi->nnmtxq, vi->nnmrxq); 1273#endif 1274 sbuf_finish(sb); 1275 device_printf(dev, "%s\n", sbuf_data(sb)); 1276 sbuf_delete(sb); 1277 1278 vi_sysctls(vi); 1279 1280 return (0); 1281} 1282 1283static int 1284cxgbe_attach(device_t dev) 1285{ 1286 struct port_info *pi = device_get_softc(dev); 1287 struct vi_info *vi; 1288 int i, rc; 1289 1290 callout_init_mtx(&pi->tick, &pi->pi_lock, 0); 1291 1292 rc = cxgbe_vi_attach(dev, &pi->vi[0]); 1293 if (rc) 1294 return (rc); 1295 1296 for_each_vi(pi, i, vi) { 1297 if (i == 0) 1298 continue; 1299 vi->dev = device_add_child(dev, is_t4(pi->adapter) ? 1300 "vcxgbe" : "vcxl", -1); 1301 if (vi->dev == NULL) { 1302 device_printf(dev, "failed to add VI %d\n", i); 1303 continue; 1304 } 1305 device_set_softc(vi->dev, vi); 1306 } 1307 1308 cxgbe_sysctls(pi); 1309 1310 bus_generic_attach(dev); 1311 1312 return (0); 1313} 1314 1315static void 1316cxgbe_vi_detach(struct vi_info *vi) 1317{ 1318 struct ifnet *ifp = vi->ifp; 1319 1320 ether_ifdetach(ifp); 1321 1322 if (vi->vlan_c) 1323 EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c); 1324 1325 /* Let detach proceed even if these fail. 
*/ 1326#ifdef DEV_NETMAP 1327 if (ifp->if_capabilities & IFCAP_NETMAP) 1328 cxgbe_nm_detach(vi); 1329#endif 1330 cxgbe_uninit_synchronized(vi); 1331 callout_drain(&vi->tick); 1332 vi_full_uninit(vi); 1333 1334 ifmedia_removeall(&vi->media); 1335 if_free(vi->ifp); 1336 vi->ifp = NULL; 1337} 1338 1339static int 1340cxgbe_detach(device_t dev) 1341{ 1342 struct port_info *pi = device_get_softc(dev); 1343 struct adapter *sc = pi->adapter; 1344 int rc; 1345 1346 /* Detach the extra VIs first. */ 1347 rc = bus_generic_detach(dev); 1348 if (rc) 1349 return (rc); 1350 device_delete_children(dev); 1351 1352 doom_vi(sc, &pi->vi[0]); 1353 1354 if (pi->flags & HAS_TRACEQ) { 1355 sc->traceq = -1; /* cloner should not create ifnet */ 1356 t4_tracer_port_detach(sc); 1357 } 1358 1359 cxgbe_vi_detach(&pi->vi[0]); 1360 callout_drain(&pi->tick); 1361 1362 end_synchronized_op(sc, 0); 1363 1364 return (0); 1365} 1366 1367static void 1368cxgbe_init(void *arg) 1369{ 1370 struct vi_info *vi = arg; 1371 struct adapter *sc = vi->pi->adapter; 1372 1373 if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0) 1374 return; 1375 cxgbe_init_synchronized(vi); 1376 end_synchronized_op(sc, 0); 1377} 1378 1379static int 1380cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data) 1381{ 1382 int rc = 0, mtu, flags, can_sleep; 1383 struct vi_info *vi = ifp->if_softc; 1384 struct adapter *sc = vi->pi->adapter; 1385 struct ifreq *ifr = (struct ifreq *)data; 1386 uint32_t mask; 1387 1388 switch (cmd) { 1389 case SIOCSIFMTU: 1390 mtu = ifr->ifr_mtu; 1391 if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) 1392 return (EINVAL); 1393 1394 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu"); 1395 if (rc) 1396 return (rc); 1397 ifp->if_mtu = mtu; 1398 if (vi->flags & VI_INIT_DONE) { 1399 t4_update_fl_bufsize(ifp); 1400 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1401 rc = update_mac_settings(ifp, XGMAC_MTU); 1402 } 1403 end_synchronized_op(sc, 0); 1404 break; 1405 1406 case SIOCSIFFLAGS: 1407 can_sleep = 0; 1408redo_sifflags: 1409 rc = begin_synchronized_op(sc, vi, 1410 can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg"); 1411 if (rc) 1412 return (rc); 1413 1414 if (ifp->if_flags & IFF_UP) { 1415 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1416 flags = vi->if_flags; 1417 if ((ifp->if_flags ^ flags) & 1418 (IFF_PROMISC | IFF_ALLMULTI)) { 1419 if (can_sleep == 1) { 1420 end_synchronized_op(sc, 0); 1421 can_sleep = 0; 1422 goto redo_sifflags; 1423 } 1424 rc = update_mac_settings(ifp, 1425 XGMAC_PROMISC | XGMAC_ALLMULTI); 1426 } 1427 } else { 1428 if (can_sleep == 0) { 1429 end_synchronized_op(sc, LOCK_HELD); 1430 can_sleep = 1; 1431 goto redo_sifflags; 1432 } 1433 rc = cxgbe_init_synchronized(vi); 1434 } 1435 vi->if_flags = ifp->if_flags; 1436 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1437 if (can_sleep == 0) { 1438 end_synchronized_op(sc, LOCK_HELD); 1439 can_sleep = 1; 1440 goto redo_sifflags; 1441 } 1442 rc = cxgbe_uninit_synchronized(vi); 1443 } 1444 end_synchronized_op(sc, can_sleep ? 
0 : LOCK_HELD); 1445 break; 1446 1447 case SIOCADDMULTI: 1448 case SIOCDELMULTI: /* these two are called with a mutex held :-( */ 1449 rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi"); 1450 if (rc) 1451 return (rc); 1452 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1453 rc = update_mac_settings(ifp, XGMAC_MCADDRS); 1454 end_synchronized_op(sc, LOCK_HELD); 1455 break; 1456 1457 case SIOCSIFCAP: 1458 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap"); 1459 if (rc) 1460 return (rc); 1461 1462 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1463 if (mask & IFCAP_TXCSUM) { 1464 ifp->if_capenable ^= IFCAP_TXCSUM; 1465 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 1466 1467 if (IFCAP_TSO4 & ifp->if_capenable && 1468 !(IFCAP_TXCSUM & ifp->if_capenable)) { 1469 ifp->if_capenable &= ~IFCAP_TSO4; 1470 if_printf(ifp, 1471 "tso4 disabled due to -txcsum.\n"); 1472 } 1473 } 1474 if (mask & IFCAP_TXCSUM_IPV6) { 1475 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 1476 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 1477 1478 if (IFCAP_TSO6 & ifp->if_capenable && 1479 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 1480 ifp->if_capenable &= ~IFCAP_TSO6; 1481 if_printf(ifp, 1482 "tso6 disabled due to -txcsum6.\n"); 1483 } 1484 } 1485 if (mask & IFCAP_RXCSUM) 1486 ifp->if_capenable ^= IFCAP_RXCSUM; 1487 if (mask & IFCAP_RXCSUM_IPV6) 1488 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 1489 1490 /* 1491 * Note that we leave CSUM_TSO alone (it is always set). The 1492 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before 1493 * sending a TSO request our way, so it's sufficient to toggle 1494 * IFCAP_TSOx only. 1495 */ 1496 if (mask & IFCAP_TSO4) { 1497 if (!(IFCAP_TSO4 & ifp->if_capenable) && 1498 !(IFCAP_TXCSUM & ifp->if_capenable)) { 1499 if_printf(ifp, "enable txcsum first.\n"); 1500 rc = EAGAIN; 1501 goto fail; 1502 } 1503 ifp->if_capenable ^= IFCAP_TSO4; 1504 } 1505 if (mask & IFCAP_TSO6) { 1506 if (!(IFCAP_TSO6 & ifp->if_capenable) && 1507 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 1508 if_printf(ifp, "enable txcsum6 first.\n"); 1509 rc = EAGAIN; 1510 goto fail; 1511 } 1512 ifp->if_capenable ^= IFCAP_TSO6; 1513 } 1514 if (mask & IFCAP_LRO) { 1515#if defined(INET) || defined(INET6) 1516 int i; 1517 struct sge_rxq *rxq; 1518 1519 ifp->if_capenable ^= IFCAP_LRO; 1520 for_each_rxq(vi, i, rxq) { 1521 if (ifp->if_capenable & IFCAP_LRO) 1522 rxq->iq.flags |= IQ_LRO_ENABLED; 1523 else 1524 rxq->iq.flags &= ~IQ_LRO_ENABLED; 1525 } 1526#endif 1527 } 1528#ifdef TCP_OFFLOAD 1529 if (mask & IFCAP_TOE) { 1530 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE; 1531 1532 rc = toe_capability(vi, enable); 1533 if (rc != 0) 1534 goto fail; 1535 1536 ifp->if_capenable ^= mask; 1537 } 1538#endif 1539 if (mask & IFCAP_VLAN_HWTAGGING) { 1540 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1541 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1542 rc = update_mac_settings(ifp, XGMAC_VLANEX); 1543 } 1544 if (mask & IFCAP_VLAN_MTU) { 1545 ifp->if_capenable ^= IFCAP_VLAN_MTU; 1546 1547 /* Need to find out how to disable auto-mtu-inflation */ 1548 } 1549 if (mask & IFCAP_VLAN_HWTSO) 1550 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1551 if (mask & IFCAP_VLAN_HWCSUM) 1552 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1553 1554#ifdef VLAN_CAPABILITIES 1555 VLAN_CAPABILITIES(ifp); 1556#endif 1557fail: 1558 end_synchronized_op(sc, 0); 1559 break; 1560 1561 case SIOCSIFMEDIA: 1562 case SIOCGIFMEDIA: 1563 ifmedia_ioctl(ifp, ifr, &vi->media, cmd); 1564 break; 1565 1566 case SIOCGI2C: { 1567 struct ifi2creq i2c; 1568 1569 rc = 
copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 1570 if (rc != 0) 1571 break; 1572 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 1573 rc = EPERM; 1574 break; 1575 } 1576 if (i2c.len > sizeof(i2c.data)) { 1577 rc = EINVAL; 1578 break; 1579 } 1580 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c"); 1581 if (rc) 1582 return (rc); 1583 rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr, 1584 i2c.offset, i2c.len, &i2c.data[0]); 1585 end_synchronized_op(sc, 0); 1586 if (rc == 0) 1587 rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 1588 break; 1589 } 1590 1591 default: 1592 rc = ether_ioctl(ifp, cmd, data); 1593 } 1594 1595 return (rc); 1596} 1597 1598static int 1599cxgbe_transmit(struct ifnet *ifp, struct mbuf *m) 1600{ 1601 struct vi_info *vi = ifp->if_softc; 1602 struct port_info *pi = vi->pi; 1603 struct adapter *sc = pi->adapter; 1604 struct sge_txq *txq; 1605 void *items[1]; 1606 int rc; 1607 1608 M_ASSERTPKTHDR(m); 1609 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */ 1610 1611 if (__predict_false(pi->link_cfg.link_ok == 0)) { 1612 m_freem(m); 1613 return (ENETDOWN); 1614 } 1615 1616 rc = parse_pkt(sc, &m); 1617 if (__predict_false(rc != 0)) { 1618 MPASS(m == NULL); /* was freed already */ 1619 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */ 1620 return (rc); 1621 } 1622 1623 /* Select a txq. */ 1624 txq = &sc->sge.txq[vi->first_txq]; 1625 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 1626 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + 1627 vi->rsrv_noflowq); 1628 1629 items[0] = m; 1630 rc = mp_ring_enqueue(txq->r, items, 1, 4096); 1631 if (__predict_false(rc != 0)) 1632 m_freem(m); 1633 1634 return (rc); 1635} 1636 1637static void 1638cxgbe_qflush(struct ifnet *ifp) 1639{ 1640 struct vi_info *vi = ifp->if_softc; 1641 struct sge_txq *txq; 1642 int i; 1643 1644 /* queues do not exist if !VI_INIT_DONE. */ 1645 if (vi->flags & VI_INIT_DONE) { 1646 for_each_txq(vi, i, txq) { 1647 TXQ_LOCK(txq); 1648 txq->eq.flags &= ~EQ_ENABLED; 1649 TXQ_UNLOCK(txq); 1650 while (!mp_ring_is_idle(txq->r)) { 1651 mp_ring_check_drainage(txq->r, 0); 1652 pause("qflush", 1); 1653 } 1654 } 1655 } 1656 if_qflush(ifp); 1657} 1658 1659static int 1660cxgbe_media_change(struct ifnet *ifp) 1661{ 1662 struct vi_info *vi = ifp->if_softc; 1663 1664 device_printf(vi->dev, "%s unimplemented.\n", __func__); 1665 1666 return (EOPNOTSUPP); 1667} 1668 1669static void 1670cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1671{ 1672 struct vi_info *vi = ifp->if_softc; 1673 struct port_info *pi = vi->pi; 1674 struct ifmedia_entry *cur; 1675 int speed = pi->link_cfg.speed; 1676 1677 cur = vi->media.ifm_cur; 1678 1679 ifmr->ifm_status = IFM_AVALID; 1680 if (!pi->link_cfg.link_ok) 1681 return; 1682 1683 ifmr->ifm_status |= IFM_ACTIVE; 1684 1685 /* active and current will differ iff current media is autoselect. 
*/ 1686 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) 1687 return; 1688 1689 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 1690 if (speed == 10000) 1691 ifmr->ifm_active |= IFM_10G_T; 1692 else if (speed == 1000) 1693 ifmr->ifm_active |= IFM_1000_T; 1694 else if (speed == 100) 1695 ifmr->ifm_active |= IFM_100_TX; 1696 else if (speed == 10) 1697 ifmr->ifm_active |= IFM_10_T; 1698 else 1699 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__, 1700 speed)); 1701} 1702 1703static int 1704vcxgbe_probe(device_t dev) 1705{ 1706 char buf[128]; 1707 struct vi_info *vi = device_get_softc(dev); 1708 1709 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id, 1710 vi - vi->pi->vi); 1711 device_set_desc_copy(dev, buf); 1712 1713 return (BUS_PROBE_DEFAULT); 1714} 1715 1716static int 1717vcxgbe_attach(device_t dev) 1718{ 1719 struct vi_info *vi; 1720 struct port_info *pi; 1721 struct adapter *sc; 1722 int func, index, rc; 1723 u32 param, val; 1724 1725 vi = device_get_softc(dev); 1726 pi = vi->pi; 1727 sc = pi->adapter; 1728 1729 index = vi - pi->vi; 1730 KASSERT(index < nitems(vi_mac_funcs), 1731 ("%s: VI %s doesn't have a MAC func", __func__, 1732 device_get_nameunit(dev))); 1733 func = vi_mac_funcs[index]; 1734 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, 1735 vi->hw_addr, &vi->rss_size, func, 0); 1736 if (rc < 0) { 1737 device_printf(dev, "Failed to allocate virtual interface " 1738 "for port %d: %d\n", pi->port_id, -rc); 1739 return (-rc); 1740 } 1741 vi->viid = rc; 1742 1743 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 1744 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | 1745 V_FW_PARAMS_PARAM_YZ(vi->viid); 1746 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 1747 if (rc) 1748 vi->rss_base = 0xffff; 1749 else { 1750 /* MPASS((val >> 16) == rss_size); */ 1751 vi->rss_base = val & 0xffff; 1752 } 1753 1754 rc = cxgbe_vi_attach(dev, vi); 1755 if (rc) { 1756 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 1757 return (rc); 1758 } 1759 return (0); 1760} 1761 1762static int 1763vcxgbe_detach(device_t dev) 1764{ 1765 struct vi_info *vi; 1766 struct adapter *sc; 1767 1768 vi = device_get_softc(dev); 1769 sc = vi->pi->adapter; 1770 1771 doom_vi(sc, vi); 1772 1773 cxgbe_vi_detach(vi); 1774 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 1775 1776 end_synchronized_op(sc, 0); 1777 1778 return (0); 1779} 1780 1781void 1782t4_fatal_err(struct adapter *sc) 1783{ 1784 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 1785 t4_intr_disable(sc); 1786 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n", 1787 device_get_nameunit(sc->dev)); 1788} 1789 1790void 1791t4_add_adapter(struct adapter *sc) 1792{ 1793 sx_xlock(&t4_list_lock); 1794 SLIST_INSERT_HEAD(&t4_list, sc, link); 1795 sx_xunlock(&t4_list_lock); 1796} 1797 1798int 1799t4_map_bars_0_and_4(struct adapter *sc) 1800{ 1801 sc->regs_rid = PCIR_BAR(0); 1802 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1803 &sc->regs_rid, RF_ACTIVE); 1804 if (sc->regs_res == NULL) { 1805 device_printf(sc->dev, "cannot map registers.\n"); 1806 return (ENXIO); 1807 } 1808 sc->bt = rman_get_bustag(sc->regs_res); 1809 sc->bh = rman_get_bushandle(sc->regs_res); 1810 sc->mmio_len = rman_get_size(sc->regs_res); 1811 setbit(&sc->doorbells, DOORBELL_KDB); 1812 1813 sc->msix_rid = PCIR_BAR(4); 1814 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1815 &sc->msix_rid, RF_ACTIVE); 1816 if (sc->msix_res == NULL) { 1817 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 1818 return (ENXIO); 1819 } 1820 1821
return (0); 1822} 1823 1824int 1825t4_map_bar_2(struct adapter *sc) 1826{ 1827 1828 /* 1829 * T4: only iWARP driver uses the userspace doorbells. There is no need 1830 * to map it if RDMA is disabled. 1831 */ 1832 if (is_t4(sc) && sc->rdmacaps == 0) 1833 return (0); 1834 1835 sc->udbs_rid = PCIR_BAR(2); 1836 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1837 &sc->udbs_rid, RF_ACTIVE); 1838 if (sc->udbs_res == NULL) { 1839 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 1840 return (ENXIO); 1841 } 1842 sc->udbs_base = rman_get_virtual(sc->udbs_res); 1843 1844 if (is_t5(sc)) { 1845 setbit(&sc->doorbells, DOORBELL_UDB); 1846#if defined(__i386__) || defined(__amd64__) 1847 if (t5_write_combine) { 1848 int rc; 1849 1850 /* 1851 * Enable write combining on BAR2. This is the 1852 * userspace doorbell BAR and is split into 128B 1853 * (UDBS_SEG_SIZE) doorbell regions, each associated 1854 * with an egress queue. The first 64B has the doorbell 1855 * and the second 64B can be used to submit a tx work 1856 * request with an implicit doorbell. 1857 */ 1858 1859 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 1860 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 1861 if (rc == 0) { 1862 clrbit(&sc->doorbells, DOORBELL_UDB); 1863 setbit(&sc->doorbells, DOORBELL_WCWR); 1864 setbit(&sc->doorbells, DOORBELL_UDBWC); 1865 } else { 1866 device_printf(sc->dev, 1867 "couldn't enable write combining: %d\n", 1868 rc); 1869 } 1870 1871 t4_write_reg(sc, A_SGE_STAT_CFG, 1872 V_STATSOURCE_T5(7) | V_STATMODE(0)); 1873 } 1874#endif 1875 } 1876 1877 return (0); 1878} 1879 1880struct memwin_init { 1881 uint32_t base; 1882 uint32_t aperture; 1883}; 1884 1885static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 1886 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1887 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1888 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 1889}; 1890 1891static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 1892 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1893 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1894 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 1895}; 1896 1897static void 1898setup_memwin(struct adapter *sc) 1899{ 1900 const struct memwin_init *mw_init; 1901 struct memwin *mw; 1902 int i; 1903 uint32_t bar0; 1904 1905 if (is_t4(sc)) { 1906 /* 1907 * Read low 32b of bar0 indirectly via the hardware backdoor 1908 * mechanism. Works from within PCI passthrough environments 1909 * too, where rman_get_start() can return a different value. We 1910 * need to program the T4 memory window decoders with the actual 1911 * addresses that will be coming across the PCIe link. 
1912 */ 1913 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 1914 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 1915 1916 mw_init = &t4_memwin[0]; 1917 } else { 1918 /* T5+ use the relative offset inside the PCIe BAR */ 1919 bar0 = 0; 1920 1921 mw_init = &t5_memwin[0]; 1922 } 1923 1924 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 1925 rw_init(&mw->mw_lock, "memory window access"); 1926 mw->mw_base = mw_init->base; 1927 mw->mw_aperture = mw_init->aperture; 1928 mw->mw_curpos = 0; 1929 t4_write_reg(sc, 1930 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 1931 (mw->mw_base + bar0) | V_BIR(0) | 1932 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 1933 rw_wlock(&mw->mw_lock); 1934 position_memwin(sc, i, 0); 1935 rw_wunlock(&mw->mw_lock); 1936 } 1937 1938 /* flush */ 1939 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 1940} 1941 1942/* 1943 * Positions the memory window at the given address in the card's address space. 1944 * There are some alignment requirements and the actual position may be at an 1945 * address prior to the requested address. mw->mw_curpos always has the actual 1946 * position of the window. 1947 */ 1948static void 1949position_memwin(struct adapter *sc, int idx, uint32_t addr) 1950{ 1951 struct memwin *mw; 1952 uint32_t pf; 1953 uint32_t reg; 1954 1955 MPASS(idx >= 0 && idx < NUM_MEMWIN); 1956 mw = &sc->memwin[idx]; 1957 rw_assert(&mw->mw_lock, RA_WLOCKED); 1958 1959 if (is_t4(sc)) { 1960 pf = 0; 1961 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 1962 } else { 1963 pf = V_PFNUM(sc->pf); 1964 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 1965 } 1966 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 1967 t4_write_reg(sc, reg, mw->mw_curpos | pf); 1968 t4_read_reg(sc, reg); /* flush */ 1969} 1970 1971static int 1972rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 1973 int len, int rw) 1974{ 1975 struct memwin *mw; 1976 uint32_t mw_end, v; 1977 1978 MPASS(idx >= 0 && idx < NUM_MEMWIN); 1979 1980 /* Memory can only be accessed in naturally aligned 4 byte units */ 1981 if (addr & 3 || len & 3 || len <= 0) 1982 return (EINVAL); 1983 1984 mw = &sc->memwin[idx]; 1985 while (len > 0) { 1986 rw_rlock(&mw->mw_lock); 1987 mw_end = mw->mw_curpos + mw->mw_aperture; 1988 if (addr >= mw_end || addr < mw->mw_curpos) { 1989 /* Will need to reposition the window */ 1990 if (!rw_try_upgrade(&mw->mw_lock)) { 1991 rw_runlock(&mw->mw_lock); 1992 rw_wlock(&mw->mw_lock); 1993 } 1994 rw_assert(&mw->mw_lock, RA_WLOCKED); 1995 position_memwin(sc, idx, addr); 1996 rw_downgrade(&mw->mw_lock); 1997 mw_end = mw->mw_curpos + mw->mw_aperture; 1998 } 1999 rw_assert(&mw->mw_lock, RA_RLOCKED); 2000 while (addr < mw_end && len > 0) { 2001 if (rw == 0) { 2002 v = t4_read_reg(sc, mw->mw_base + addr - 2003 mw->mw_curpos); 2004 *val++ = le32toh(v); 2005 } else { 2006 v = *val++; 2007 t4_write_reg(sc, mw->mw_base + addr - 2008 mw->mw_curpos, htole32(v));; 2009 } 2010 addr += 4; 2011 len -= 4; 2012 } 2013 rw_runlock(&mw->mw_lock); 2014 } 2015 2016 return (0); 2017} 2018 2019static inline int 2020read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2021 int len) 2022{ 2023 2024 return (rw_via_memwin(sc, idx, addr, val, len, 0)); 2025} 2026 2027static inline int 2028write_via_memwin(struct adapter *sc, int idx, uint32_t addr, 2029 const uint32_t *val, int len) 2030{ 2031 2032 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); 2033} 2034 2035static int 2036t4_range_cmp(const void *a, 
const void *b) 2037{ 2038 return ((const struct t4_range *)a)->start - 2039 ((const struct t4_range *)b)->start; 2040} 2041 2042/* 2043 * Verify that the memory range specified by the addr/len pair is valid within 2044 * the card's address space. 2045 */ 2046static int 2047validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2048{ 2049 struct t4_range mem_ranges[4], *r, *next; 2050 uint32_t em, addr_len; 2051 int i, n, remaining; 2052 2053 /* Memory can only be accessed in naturally aligned 4 byte units */ 2054 if (addr & 3 || len & 3 || len <= 0) 2055 return (EINVAL); 2056 2057 /* Enabled memories */ 2058 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2059 2060 r = &mem_ranges[0]; 2061 n = 0; 2062 bzero(r, sizeof(mem_ranges)); 2063 if (em & F_EDRAM0_ENABLE) { 2064 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2065 r->size = G_EDRAM0_SIZE(addr_len) << 20; 2066 if (r->size > 0) { 2067 r->start = G_EDRAM0_BASE(addr_len) << 20; 2068 if (addr >= r->start && 2069 addr + len <= r->start + r->size) 2070 return (0); 2071 r++; 2072 n++; 2073 } 2074 } 2075 if (em & F_EDRAM1_ENABLE) { 2076 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2077 r->size = G_EDRAM1_SIZE(addr_len) << 20; 2078 if (r->size > 0) { 2079 r->start = G_EDRAM1_BASE(addr_len) << 20; 2080 if (addr >= r->start && 2081 addr + len <= r->start + r->size) 2082 return (0); 2083 r++; 2084 n++; 2085 } 2086 } 2087 if (em & F_EXT_MEM_ENABLE) { 2088 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2089 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 2090 if (r->size > 0) { 2091 r->start = G_EXT_MEM_BASE(addr_len) << 20; 2092 if (addr >= r->start && 2093 addr + len <= r->start + r->size) 2094 return (0); 2095 r++; 2096 n++; 2097 } 2098 } 2099 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 2100 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2101 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 2102 if (r->size > 0) { 2103 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 2104 if (addr >= r->start && 2105 addr + len <= r->start + r->size) 2106 return (0); 2107 r++; 2108 n++; 2109 } 2110 } 2111 MPASS(n <= nitems(mem_ranges)); 2112 2113 if (n > 1) { 2114 /* Sort and merge the ranges. */ 2115 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 2116 2117 /* Start from index 0 and examine the next n - 1 entries. */ 2118 r = &mem_ranges[0]; 2119 for (remaining = n - 1; remaining > 0; remaining--, r++) { 2120 2121 MPASS(r->size > 0); /* r is a valid entry. */ 2122 next = r + 1; 2123 MPASS(next->size > 0); /* and so is the next one. */ 2124 2125 while (r->start + r->size >= next->start) { 2126 /* Merge the next one into the current entry. */ 2127 r->size = max(r->start + r->size, 2128 next->start + next->size) - r->start; 2129 n--; /* One fewer entry in total. */ 2130 if (--remaining == 0) 2131 goto done; /* short circuit */ 2132 next++; 2133 } 2134 if (next != r + 1) { 2135 /* 2136 * Some entries were merged into r and next 2137 * points to the first valid entry that couldn't 2138 * be merged. 2139 */ 2140 MPASS(next->size > 0); /* must be valid */ 2141 memcpy(r + 1, next, remaining * sizeof(*r)); 2142#ifdef INVARIANTS 2143 /* 2144 * This so that the foo->size assertion in the 2145 * next iteration of the loop do the right 2146 * thing for entries that were pulled up and are 2147 * no longer valid. 2148 */ 2149 MPASS(n < nitems(mem_ranges)); 2150 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 2151 sizeof(struct t4_range)); 2152#endif 2153 } 2154 } 2155done: 2156 /* Done merging the ranges. 
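 * Adjacent or overlapping memories collapse into a single entry so that a
 * range spanning them still validates; e.g. an EDC0 covering [0, 64MB) next
 * to an EDC1 covering [64MB, 128MB) would be treated as one [0, 128MB)
 * range here.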
*/ 2157 MPASS(n > 0); 2158 r = &mem_ranges[0]; 2159 for (i = 0; i < n; i++, r++) { 2160 if (addr >= r->start && 2161 addr + len <= r->start + r->size) 2162 return (0); 2163 } 2164 } 2165 2166 return (EFAULT); 2167} 2168 2169static int 2170fwmtype_to_hwmtype(int mtype) 2171{ 2172 2173 switch (mtype) { 2174 case FW_MEMTYPE_EDC0: 2175 return (MEM_EDC0); 2176 case FW_MEMTYPE_EDC1: 2177 return (MEM_EDC1); 2178 case FW_MEMTYPE_EXTMEM: 2179 return (MEM_MC0); 2180 case FW_MEMTYPE_EXTMEM1: 2181 return (MEM_MC1); 2182 default: 2183 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2184 } 2185} 2186 2187/* 2188 * Verify that the memory range specified by the memtype/offset/len pair is 2189 * valid and lies entirely within the memtype specified. The global address of 2190 * the start of the range is returned in addr. 2191 */ 2192static int 2193validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2194 uint32_t *addr) 2195{ 2196 uint32_t em, addr_len, maddr; 2197 2198 /* Memory can only be accessed in naturally aligned 4 byte units */ 2199 if (off & 3 || len & 3 || len == 0) 2200 return (EINVAL); 2201 2202 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2203 switch (fwmtype_to_hwmtype(mtype)) { 2204 case MEM_EDC0: 2205 if (!(em & F_EDRAM0_ENABLE)) 2206 return (EINVAL); 2207 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2208 maddr = G_EDRAM0_BASE(addr_len) << 20; 2209 break; 2210 case MEM_EDC1: 2211 if (!(em & F_EDRAM1_ENABLE)) 2212 return (EINVAL); 2213 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2214 maddr = G_EDRAM1_BASE(addr_len) << 20; 2215 break; 2216 case MEM_MC: 2217 if (!(em & F_EXT_MEM_ENABLE)) 2218 return (EINVAL); 2219 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2220 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2221 break; 2222 case MEM_MC1: 2223 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 2224 return (EINVAL); 2225 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2226 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2227 break; 2228 default: 2229 return (EINVAL); 2230 } 2231 2232 *addr = maddr + off; /* global address */ 2233 return (validate_mem_range(sc, *addr, len)); 2234} 2235 2236static int 2237fixup_devlog_params(struct adapter *sc) 2238{ 2239 struct devlog_params *dparams = &sc->params.devlog; 2240 int rc; 2241 2242 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 2243 dparams->size, &dparams->addr); 2244 2245 return (rc); 2246} 2247 2248static int 2249cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis, 2250 struct intrs_and_queues *iaq) 2251{ 2252 int rc, itype, navail, nrxq10g, nrxq1g, n; 2253 int nofldrxq10g = 0, nofldrxq1g = 0; 2254 2255 bzero(iaq, sizeof(*iaq)); 2256 2257 iaq->ntxq10g = t4_ntxq10g; 2258 iaq->ntxq1g = t4_ntxq1g; 2259 iaq->ntxq_vi = t4_ntxq_vi; 2260 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 2261 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 2262 iaq->nrxq_vi = t4_nrxq_vi; 2263 iaq->rsrv_noflowq = t4_rsrv_noflowq; 2264#ifdef TCP_OFFLOAD 2265 if (is_offload(sc)) { 2266 iaq->nofldtxq10g = t4_nofldtxq10g; 2267 iaq->nofldtxq1g = t4_nofldtxq1g; 2268 iaq->nofldtxq_vi = t4_nofldtxq_vi; 2269 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 2270 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 2271 iaq->nofldrxq_vi = t4_nofldrxq_vi; 2272 } 2273#endif 2274#ifdef DEV_NETMAP 2275 iaq->nnmtxq_vi = t4_nnmtxq_vi; 2276 iaq->nnmrxq_vi = t4_nnmrxq_vi; 2277#endif 2278 2279 for (itype = INTR_MSIX; itype; itype >>= 1) { 2280 2281 if ((itype & t4_intr_types) == 0) 2282 continue; /* not allowed */ 2283 2284 if (itype == INTR_MSIX) 2285 navail = 
pci_msix_count(sc->dev); 2286 else if (itype == INTR_MSI) 2287 navail = pci_msi_count(sc->dev); 2288 else 2289 navail = 1; 2290restart: 2291 if (navail == 0) 2292 continue; 2293 2294 iaq->intr_type = itype; 2295 iaq->intr_flags_10g = 0; 2296 iaq->intr_flags_1g = 0; 2297 2298 /* 2299 * Best option: an interrupt vector for errors, one for the 2300 * firmware event queue, and one for every rxq (NIC and TOE) of 2301 * every VI. The VIs that support netmap use the same 2302 * interrupts for the NIC rx queues and the netmap rx queues 2303 * because only one set of queues is active at a time. 2304 */ 2305 iaq->nirq = T4_EXTRA_INTR; 2306 iaq->nirq += n10g * (nrxq10g + nofldrxq10g); 2307 iaq->nirq += n1g * (nrxq1g + nofldrxq1g); 2308 iaq->nirq += (n10g + n1g) * (num_vis - 1) * 2309 max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */ 2310 iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi; 2311 if (iaq->nirq <= navail && 2312 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2313 iaq->intr_flags_10g = INTR_ALL; 2314 iaq->intr_flags_1g = INTR_ALL; 2315 goto allocate; 2316 } 2317 2318 /* Disable the VIs (and netmap) if there aren't enough intrs */ 2319 if (num_vis > 1) { 2320 device_printf(sc->dev, "virtual interfaces disabled " 2321 "because num_vis=%u with current settings " 2322 "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, " 2323 "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, " 2324 "nnmrxq_vi=%u) would need %u interrupts but " 2325 "only %u are available.\n", num_vis, nrxq10g, 2326 nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi, 2327 iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq, 2328 navail); 2329 num_vis = 1; 2330 iaq->ntxq_vi = iaq->nrxq_vi = 0; 2331 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; 2332 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; 2333 goto restart; 2334 } 2335 2336 /* 2337 * Second best option: a vector for errors, one for the firmware 2338 * event queue, and vectors for either all the NIC rx queues or 2339 * all the TOE rx queues. The queues that don't get vectors 2340 * will forward their interrupts to those that do. 2341 */ 2342 iaq->nirq = T4_EXTRA_INTR; 2343 if (nrxq10g >= nofldrxq10g) { 2344 iaq->intr_flags_10g = INTR_RXQ; 2345 iaq->nirq += n10g * nrxq10g; 2346 } else { 2347 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2348 iaq->nirq += n10g * nofldrxq10g; 2349 } 2350 if (nrxq1g >= nofldrxq1g) { 2351 iaq->intr_flags_1g = INTR_RXQ; 2352 iaq->nirq += n1g * nrxq1g; 2353 } else { 2354 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2355 iaq->nirq += n1g * nofldrxq1g; 2356 } 2357 if (iaq->nirq <= navail && 2358 (itype != INTR_MSI || powerof2(iaq->nirq))) 2359 goto allocate; 2360 2361 /* 2362 * Next best option: an interrupt vector for errors, one for the 2363 * firmware event queue, and at least one per main-VI. At this 2364 * point we know we'll have to downsize nrxq and/or nofldrxq to 2365 * fit what's available to us. 2366 */ 2367 iaq->nirq = T4_EXTRA_INTR; 2368 iaq->nirq += n10g + n1g; 2369 if (iaq->nirq <= navail) { 2370 int leftover = navail - iaq->nirq; 2371 2372 if (n10g > 0) { 2373 int target = max(nrxq10g, nofldrxq10g); 2374 2375 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 2376 INTR_RXQ : INTR_OFLD_RXQ; 2377 2378 n = 1; 2379 while (n < target && leftover >= n10g) { 2380 leftover -= n10g; 2381 iaq->nirq += n10g; 2382 n++; 2383 } 2384 iaq->nrxq10g = min(n, nrxq10g); 2385#ifdef TCP_OFFLOAD 2386 iaq->nofldrxq10g = min(n, nofldrxq10g); 2387#endif 2388 } 2389 2390 if (n1g > 0) { 2391 int target = max(nrxq1g, nofldrxq1g); 2392 2393 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 
2394 INTR_RXQ : INTR_OFLD_RXQ; 2395 2396 n = 1; 2397 while (n < target && leftover >= n1g) { 2398 leftover -= n1g; 2399 iaq->nirq += n1g; 2400 n++; 2401 } 2402 iaq->nrxq1g = min(n, nrxq1g); 2403#ifdef TCP_OFFLOAD 2404 iaq->nofldrxq1g = min(n, nofldrxq1g); 2405#endif 2406 } 2407 2408 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2409 goto allocate; 2410 } 2411 2412 /* 2413 * Least desirable option: one interrupt vector for everything. 2414 */ 2415 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2416 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2417#ifdef TCP_OFFLOAD 2418 if (is_offload(sc)) 2419 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2420#endif 2421allocate: 2422 navail = iaq->nirq; 2423 rc = 0; 2424 if (itype == INTR_MSIX) 2425 rc = pci_alloc_msix(sc->dev, &navail); 2426 else if (itype == INTR_MSI) 2427 rc = pci_alloc_msi(sc->dev, &navail); 2428 2429 if (rc == 0) { 2430 if (navail == iaq->nirq) 2431 return (0); 2432 2433 /* 2434 * Didn't get the number requested. Use whatever number 2435 * the kernel is willing to allocate (it's in navail). 2436 */ 2437 device_printf(sc->dev, "fewer vectors than requested, " 2438 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2439 itype, iaq->nirq, navail); 2440 pci_release_msi(sc->dev); 2441 goto restart; 2442 } 2443 2444 device_printf(sc->dev, 2445 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 2446 itype, rc, iaq->nirq, navail); 2447 } 2448 2449 device_printf(sc->dev, 2450 "failed to find a usable interrupt type. " 2451 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 2452 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2453 2454 return (ENXIO); 2455} 2456 2457#define FW_VERSION(chip) ( \ 2458 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2459 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2460 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2461 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2462#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2463 2464struct fw_info { 2465 uint8_t chip; 2466 char *kld_name; 2467 char *fw_mod_name; 2468 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2469} fw_info[] = { 2470 { 2471 .chip = CHELSIO_T4, 2472 .kld_name = "t4fw_cfg", 2473 .fw_mod_name = "t4fw", 2474 .fw_hdr = { 2475 .chip = FW_HDR_CHIP_T4, 2476 .fw_ver = htobe32_const(FW_VERSION(T4)), 2477 .intfver_nic = FW_INTFVER(T4, NIC), 2478 .intfver_vnic = FW_INTFVER(T4, VNIC), 2479 .intfver_ofld = FW_INTFVER(T4, OFLD), 2480 .intfver_ri = FW_INTFVER(T4, RI), 2481 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2482 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2483 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2484 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2485 }, 2486 }, { 2487 .chip = CHELSIO_T5, 2488 .kld_name = "t5fw_cfg", 2489 .fw_mod_name = "t5fw", 2490 .fw_hdr = { 2491 .chip = FW_HDR_CHIP_T5, 2492 .fw_ver = htobe32_const(FW_VERSION(T5)), 2493 .intfver_nic = FW_INTFVER(T5, NIC), 2494 .intfver_vnic = FW_INTFVER(T5, VNIC), 2495 .intfver_ofld = FW_INTFVER(T5, OFLD), 2496 .intfver_ri = FW_INTFVER(T5, RI), 2497 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2498 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2499 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2500 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2501 }, 2502 } 2503}; 2504 2505static struct fw_info * 2506find_fw_info(int chip) 2507{ 2508 int i; 2509 2510 for (i = 0; i < nitems(fw_info); i++) { 2511 if (fw_info[i].chip == chip) 2512 return (&fw_info[i]); 2513 } 2514 return (NULL); 2515} 2516 2517/* 2518 * Is the given firmware API compatible with the one the driver was 
compiled 2519 * with? 2520 */ 2521static int 2522fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2523{ 2524 2525 /* short circuit if it's the exact same firmware version */ 2526 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2527 return (1); 2528 2529 /* 2530 * XXX: Is this too conservative? Perhaps I should limit this to the 2531 * features that are supported in the driver. 2532 */ 2533#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2534 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2535 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2536 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2537 return (1); 2538#undef SAME_INTF 2539 2540 return (0); 2541} 2542 2543/* 2544 * The firmware in the KLD is usable, but should it be installed? This routine 2545 * explains itself in detail if it indicates the KLD firmware should be 2546 * installed. 2547 */ 2548static int 2549should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2550{ 2551 const char *reason; 2552 2553 if (!card_fw_usable) { 2554 reason = "incompatible or unusable"; 2555 goto install; 2556 } 2557 2558 if (k > c) { 2559 reason = "older than the version bundled with this driver"; 2560 goto install; 2561 } 2562 2563 if (t4_fw_install == 2 && k != c) { 2564 reason = "different than the version bundled with this driver"; 2565 goto install; 2566 } 2567 2568 return (0); 2569 2570install: 2571 if (t4_fw_install == 0) { 2572 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2573 "but the driver is prohibited from installing a different " 2574 "firmware on the card.\n", 2575 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2576 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2577 2578 return (0); 2579 } 2580 2581 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2582 "installing firmware %u.%u.%u.%u on card.\n", 2583 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2584 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2585 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2586 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2587 2588 return (1); 2589} 2590/* 2591 * Establish contact with the firmware and determine if we are the master driver 2592 * or not, and whether we are responsible for chip initialization. 2593 */ 2594static int 2595prep_firmware(struct adapter *sc) 2596{ 2597 const struct firmware *fw = NULL, *default_cfg; 2598 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2599 enum dev_state state; 2600 struct fw_info *fw_info; 2601 struct fw_hdr *card_fw; /* fw on the card */ 2602 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2603 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2604 against */ 2605 2606 /* Contact firmware. */ 2607 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2608 if (rc < 0 || state == DEV_STATE_ERR) { 2609 rc = -rc; 2610 device_printf(sc->dev, 2611 "failed to connect to the firmware: %d, %d.\n", rc, state); 2612 return (rc); 2613 } 2614 pf = rc; 2615 if (pf == sc->mbox) 2616 sc->flags |= MASTER_PF; 2617 else if (state == DEV_STATE_UNINIT) { 2618 /* 2619 * We didn't get to be the master so we definitely won't be 2620 * configuring the chip. It's a bug if someone else hasn't 2621 * configured it already. 
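 * In the usual multi-PF case the master has already brought the device to
 * DEV_STATE_INIT and this instance simply carries on below without touching
 * the chip configuration.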
2622 */ 2623 device_printf(sc->dev, "couldn't be master(%d), " 2624 "device not already initialized either(%d).\n", rc, state); 2625 return (EDOOFUS); 2626 } 2627 2628 /* This is the firmware whose headers the driver was compiled against */ 2629 fw_info = find_fw_info(chip_id(sc)); 2630 if (fw_info == NULL) { 2631 device_printf(sc->dev, 2632 "unable to look up firmware information for chip %d.\n", 2633 chip_id(sc)); 2634 return (EINVAL); 2635 } 2636 drv_fw = &fw_info->fw_hdr; 2637 2638 /* 2639 * The firmware KLD contains many modules. The KLD name is also the 2640 * name of the module that contains the default config file. 2641 */ 2642 default_cfg = firmware_get(fw_info->kld_name); 2643 2644 /* Read the header of the firmware on the card */ 2645 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 2646 rc = -t4_read_flash(sc, FLASH_FW_START, 2647 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 2648 if (rc == 0) 2649 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 2650 else { 2651 device_printf(sc->dev, 2652 "Unable to read card's firmware header: %d\n", rc); 2653 card_fw_usable = 0; 2654 } 2655 2656 /* This is the firmware in the KLD */ 2657 fw = firmware_get(fw_info->fw_mod_name); 2658 if (fw != NULL) { 2659 kld_fw = (const void *)fw->data; 2660 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 2661 } else { 2662 kld_fw = NULL; 2663 kld_fw_usable = 0; 2664 } 2665 2666 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2667 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 2668 /* 2669 * Common case: the firmware on the card is an exact match and 2670 * the KLD is an exact match too, or the KLD is 2671 * absent/incompatible. Note that t4_fw_install = 2 is ignored 2672 * here -- use cxgbetool loadfw if you want to reinstall the 2673 * same firmware as the one on the card. 2674 */ 2675 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 2676 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 2677 be32toh(card_fw->fw_ver))) { 2678 2679 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 2680 if (rc != 0) { 2681 device_printf(sc->dev, 2682 "failed to install firmware: %d\n", rc); 2683 goto done; 2684 } 2685 2686 /* Installed successfully, update the cached header too. */ 2687 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 2688 card_fw_usable = 1; 2689 need_fw_reset = 0; /* already reset as part of load_fw */ 2690 } 2691 2692 if (!card_fw_usable) { 2693 uint32_t d, c, k; 2694 2695 d = ntohl(drv_fw->fw_ver); 2696 c = ntohl(card_fw->fw_ver); 2697 k = kld_fw ? 
ntohl(kld_fw->fw_ver) : 0; 2698 2699 device_printf(sc->dev, "Cannot find a usable firmware: " 2700 "fw_install %d, chip state %d, " 2701 "driver compiled with %d.%d.%d.%d, " 2702 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 2703 t4_fw_install, state, 2704 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 2705 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 2706 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2707 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 2708 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2709 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2710 rc = EINVAL; 2711 goto done; 2712 } 2713 2714 /* Reset device */ 2715 if (need_fw_reset && 2716 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 2717 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 2718 if (rc != ETIMEDOUT && rc != EIO) 2719 t4_fw_bye(sc, sc->mbox); 2720 goto done; 2721 } 2722 sc->flags |= FW_OK; 2723 2724 rc = get_params__pre_init(sc); 2725 if (rc != 0) 2726 goto done; /* error message displayed already */ 2727 2728 /* Partition adapter resources as specified in the config file. */ 2729 if (state == DEV_STATE_UNINIT) { 2730 2731 KASSERT(sc->flags & MASTER_PF, 2732 ("%s: trying to change chip settings when not master.", 2733 __func__)); 2734 2735 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 2736 if (rc != 0) 2737 goto done; /* error message displayed already */ 2738 2739 t4_tweak_chip_settings(sc); 2740 2741 /* get basic stuff going */ 2742 rc = -t4_fw_initialize(sc, sc->mbox); 2743 if (rc != 0) { 2744 device_printf(sc->dev, "fw init failed: %d.\n", rc); 2745 goto done; 2746 } 2747 } else { 2748 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 2749 sc->cfcsum = 0; 2750 } 2751 2752done: 2753 free(card_fw, M_CXGBE); 2754 if (fw != NULL) 2755 firmware_put(fw, FIRMWARE_UNLOAD); 2756 if (default_cfg != NULL) 2757 firmware_put(default_cfg, FIRMWARE_UNLOAD); 2758 2759 return (rc); 2760} 2761 2762#define FW_PARAM_DEV(param) \ 2763 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 2764 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 2765#define FW_PARAM_PFVF(param) \ 2766 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 2767 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 2768 2769/* 2770 * Partition chip resources for use between various PFs, VFs, etc. 2771 */ 2772static int 2773partition_resources(struct adapter *sc, const struct firmware *default_cfg, 2774 const char *name_prefix) 2775{ 2776 const struct firmware *cfg = NULL; 2777 int rc = 0; 2778 struct fw_caps_config_cmd caps; 2779 uint32_t mtype, moff, finicsum, cfcsum; 2780 2781 /* 2782 * Figure out what configuration file to use. Pick the default config 2783 * file for the card if the user hasn't specified one explicitly. 2784 */ 2785 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 2786 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 2787 /* Card specific overrides go here. */ 2788 if (pci_get_device(sc->dev) == 0x440a) 2789 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 2790 if (is_fpga(sc)) 2791 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 2792 } 2793 2794 /* 2795 * We need to load another module if the profile is anything except 2796 * "default" or "flash". 
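 * The module name is the KLD prefix with the profile appended; e.g. a
 * profile called "uwire" on a T5 card would be loaded from a module named
 * "t5fw_cfg_uwire".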
 */
2798	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
2799	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2800		char s[32];
2801
2802		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
2803		cfg = firmware_get(s);
2804		if (cfg == NULL) {
2805			if (default_cfg != NULL) {
2806				device_printf(sc->dev,
2807				    "unable to load module \"%s\" for "
2808				    "configuration profile \"%s\", will use "
2809				    "the default config file instead.\n",
2810				    s, sc->cfg_file);
2811				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2812				    "%s", DEFAULT_CF);
2813			} else {
2814				device_printf(sc->dev,
2815				    "unable to load module \"%s\" for "
2816				    "configuration profile \"%s\", will use "
2817				    "the config file on the card's flash "
2818				    "instead.\n", s, sc->cfg_file);
2819				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
2820				    "%s", FLASH_CF);
2821			}
2822		}
2823	}
2824
2825	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
2826	    default_cfg == NULL) {
2827		device_printf(sc->dev,
2828		    "default config file not available, will use the config "
2829		    "file on the card's flash instead.\n");
2830		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
2831	}
2832
2833	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
2834		u_int cflen;
2835		const uint32_t *cfdata;
2836		uint32_t param, val, addr;
2837
2838		KASSERT(cfg != NULL || default_cfg != NULL,
2839		    ("%s: no config to upload", __func__));
2840
2841		/*
2842		 * Ask the firmware where it wants us to upload the config file.
2843		 */
2844		param = FW_PARAM_DEV(CF);
2845		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
2846		if (rc != 0) {
2847			/* No support for config file? Shouldn't happen. */
2848			device_printf(sc->dev,
2849			    "failed to query config file location: %d.\n", rc);
2850			goto done;
2851		}
2852		mtype = G_FW_PARAMS_PARAM_Y(val);
2853		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
2854
2855		/*
2856		 * XXX: sheer laziness. We deliberately added 4 bytes of
2857		 * useless stuffing/comments at the end of the config file so
2858		 * it's ok to simply throw away the last remaining bytes when
2859		 * the config file is not an exact multiple of 4. This also
2860		 * helps with the validate_mt_off_len check.
2861		 */
2862		if (cfg != NULL) {
2863			cflen = cfg->datasize & ~3;
2864			cfdata = cfg->data;
2865		} else {
2866			cflen = default_cfg->datasize & ~3;
2867			cfdata = default_cfg->data;
2868		}
2869
2870		if (cflen > FLASH_CFG_MAX_SIZE) {
2871			device_printf(sc->dev,
2872			    "config file too long (%d, max allowed is %d). "
2873			    "Will try to use the config on the card, if any.\n",
2874			    cflen, FLASH_CFG_MAX_SIZE);
2875			goto use_config_on_flash;
2876		}
2877
2878		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
2879		if (rc != 0) {
2880			device_printf(sc->dev,
2881			    "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 2882 "Will try to use the config on the card, if any.\n", 2883 __func__, mtype, moff, cflen, rc); 2884 goto use_config_on_flash; 2885 } 2886 write_via_memwin(sc, 2, addr, cfdata, cflen); 2887 } else { 2888use_config_on_flash: 2889 mtype = FW_MEMTYPE_FLASH; 2890 moff = t4_flash_cfg_addr(sc); 2891 } 2892 2893 bzero(&caps, sizeof(caps)); 2894 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2895 F_FW_CMD_REQUEST | F_FW_CMD_READ); 2896 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 2897 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 2898 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 2899 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 2900 if (rc != 0) { 2901 device_printf(sc->dev, 2902 "failed to pre-process config file: %d " 2903 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 2904 goto done; 2905 } 2906 2907 finicsum = be32toh(caps.finicsum); 2908 cfcsum = be32toh(caps.cfcsum); 2909 if (finicsum != cfcsum) { 2910 device_printf(sc->dev, 2911 "WARNING: config file checksum mismatch: %08x %08x\n", 2912 finicsum, cfcsum); 2913 } 2914 sc->cfcsum = cfcsum; 2915 2916#define LIMIT_CAPS(x) do { \ 2917 caps.x &= htobe16(t4_##x##_allowed); \ 2918} while (0) 2919 2920 /* 2921 * Let the firmware know what features will (not) be used so it can tune 2922 * things accordingly. 2923 */ 2924 LIMIT_CAPS(nbmcaps); 2925 LIMIT_CAPS(linkcaps); 2926 LIMIT_CAPS(switchcaps); 2927 LIMIT_CAPS(niccaps); 2928 LIMIT_CAPS(toecaps); 2929 LIMIT_CAPS(rdmacaps); 2930 LIMIT_CAPS(tlscaps); 2931 LIMIT_CAPS(iscsicaps); 2932 LIMIT_CAPS(fcoecaps); 2933#undef LIMIT_CAPS 2934 2935 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 2936 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 2937 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 2938 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 2939 if (rc != 0) { 2940 device_printf(sc->dev, 2941 "failed to process config file: %d.\n", rc); 2942 } 2943done: 2944 if (cfg != NULL) 2945 firmware_put(cfg, FIRMWARE_UNLOAD); 2946 return (rc); 2947} 2948 2949/* 2950 * Retrieve parameters that are needed (or nice to have) very early. 
2951 */ 2952static int 2953get_params__pre_init(struct adapter *sc) 2954{ 2955 int rc; 2956 uint32_t param[2], val[2]; 2957 2958 t4_get_version_info(sc); 2959 2960 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 2961 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2962 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2963 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2964 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 2965 2966 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", 2967 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), 2968 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), 2969 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), 2970 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); 2971 2972 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", 2973 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 2974 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 2975 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 2976 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 2977 2978 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", 2979 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), 2980 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), 2981 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), 2982 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); 2983 2984 param[0] = FW_PARAM_DEV(PORTVEC); 2985 param[1] = FW_PARAM_DEV(CCLK); 2986 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 2987 if (rc != 0) { 2988 device_printf(sc->dev, 2989 "failed to query parameters (pre_init): %d.\n", rc); 2990 return (rc); 2991 } 2992 2993 sc->params.portvec = val[0]; 2994 sc->params.nports = bitcount32(val[0]); 2995 sc->params.vpd.cclk = val[1]; 2996 2997 /* Read device log parameters. */ 2998 rc = -t4_init_devlog_params(sc, 1); 2999 if (rc == 0) 3000 fixup_devlog_params(sc); 3001 else { 3002 device_printf(sc->dev, 3003 "failed to get devlog parameters: %d.\n", rc); 3004 rc = 0; /* devlog isn't critical for device operation */ 3005 } 3006 3007 return (rc); 3008} 3009 3010/* 3011 * Retrieve various parameters that are of interest to the driver. The device 3012 * has been initialized by the firmware at this point. 
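 * Unlike get_params__pre_init(), most of what is read here (queue ranges,
 * filter and L2T regions, capabilities, offload resource ranges) reflects
 * how the chip was partitioned by the configuration file.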
3013 */ 3014static int 3015get_params__post_init(struct adapter *sc) 3016{ 3017 int rc; 3018 uint32_t param[7], val[7]; 3019 struct fw_caps_config_cmd caps; 3020 3021 param[0] = FW_PARAM_PFVF(IQFLINT_START); 3022 param[1] = FW_PARAM_PFVF(EQ_START); 3023 param[2] = FW_PARAM_PFVF(FILTER_START); 3024 param[3] = FW_PARAM_PFVF(FILTER_END); 3025 param[4] = FW_PARAM_PFVF(L2T_START); 3026 param[5] = FW_PARAM_PFVF(L2T_END); 3027 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3028 if (rc != 0) { 3029 device_printf(sc->dev, 3030 "failed to query parameters (post_init): %d.\n", rc); 3031 return (rc); 3032 } 3033 3034 sc->sge.iq_start = val[0]; 3035 sc->sge.eq_start = val[1]; 3036 sc->tids.ftid_base = val[2]; 3037 sc->tids.nftids = val[3] - val[2] + 1; 3038 sc->params.ftid_min = val[2]; 3039 sc->params.ftid_max = val[3]; 3040 sc->vres.l2t.start = val[4]; 3041 sc->vres.l2t.size = val[5] - val[4] + 1; 3042 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 3043 ("%s: L2 table size (%u) larger than expected (%u)", 3044 __func__, sc->vres.l2t.size, L2T_SIZE)); 3045 3046 /* get capabilites */ 3047 bzero(&caps, sizeof(caps)); 3048 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3049 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3050 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3051 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3052 if (rc != 0) { 3053 device_printf(sc->dev, 3054 "failed to get card capabilities: %d.\n", rc); 3055 return (rc); 3056 } 3057 3058#define READ_CAPS(x) do { \ 3059 sc->x = htobe16(caps.x); \ 3060} while (0) 3061 READ_CAPS(nbmcaps); 3062 READ_CAPS(linkcaps); 3063 READ_CAPS(switchcaps); 3064 READ_CAPS(niccaps); 3065 READ_CAPS(toecaps); 3066 READ_CAPS(rdmacaps); 3067 READ_CAPS(tlscaps); 3068 READ_CAPS(iscsicaps); 3069 READ_CAPS(fcoecaps); 3070 3071 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3072 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3073 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3074 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3075 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3076 if (rc != 0) { 3077 device_printf(sc->dev, 3078 "failed to query NIC parameters: %d.\n", rc); 3079 return (rc); 3080 } 3081 sc->tids.etid_base = val[0]; 3082 sc->params.etid_min = val[0]; 3083 sc->tids.netids = val[1] - val[0] + 1; 3084 sc->params.netids = sc->tids.netids; 3085 sc->params.eo_wr_cred = val[2]; 3086 sc->params.ethoffload = 1; 3087 } 3088 3089 if (sc->toecaps) { 3090 /* query offload-related parameters */ 3091 param[0] = FW_PARAM_DEV(NTID); 3092 param[1] = FW_PARAM_PFVF(SERVER_START); 3093 param[2] = FW_PARAM_PFVF(SERVER_END); 3094 param[3] = FW_PARAM_PFVF(TDDP_START); 3095 param[4] = FW_PARAM_PFVF(TDDP_END); 3096 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3097 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3098 if (rc != 0) { 3099 device_printf(sc->dev, 3100 "failed to query TOE parameters: %d.\n", rc); 3101 return (rc); 3102 } 3103 sc->tids.ntids = val[0]; 3104 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3105 sc->tids.stid_base = val[1]; 3106 sc->tids.nstids = val[2] - val[1] + 1; 3107 sc->vres.ddp.start = val[3]; 3108 sc->vres.ddp.size = val[4] - val[3] + 1; 3109 sc->params.ofldq_wr_cred = val[5]; 3110 sc->params.offload = 1; 3111 } 3112 if (sc->rdmacaps) { 3113 param[0] = FW_PARAM_PFVF(STAG_START); 3114 param[1] = FW_PARAM_PFVF(STAG_END); 3115 param[2] = FW_PARAM_PFVF(RQ_START); 3116 param[3] = FW_PARAM_PFVF(RQ_END); 3117 param[4] = FW_PARAM_PFVF(PBL_START); 3118 param[5] = FW_PARAM_PFVF(PBL_END); 3119 rc = 
-t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3120		if (rc != 0) {
3121			device_printf(sc->dev,
3122			    "failed to query RDMA parameters(1): %d.\n", rc);
3123			return (rc);
3124		}
3125		sc->vres.stag.start = val[0];
3126		sc->vres.stag.size = val[1] - val[0] + 1;
3127		sc->vres.rq.start = val[2];
3128		sc->vres.rq.size = val[3] - val[2] + 1;
3129		sc->vres.pbl.start = val[4];
3130		sc->vres.pbl.size = val[5] - val[4] + 1;
3131
3132		param[0] = FW_PARAM_PFVF(SQRQ_START);
3133		param[1] = FW_PARAM_PFVF(SQRQ_END);
3134		param[2] = FW_PARAM_PFVF(CQ_START);
3135		param[3] = FW_PARAM_PFVF(CQ_END);
3136		param[4] = FW_PARAM_PFVF(OCQ_START);
3137		param[5] = FW_PARAM_PFVF(OCQ_END);
3138		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
3139		if (rc != 0) {
3140			device_printf(sc->dev,
3141			    "failed to query RDMA parameters(2): %d.\n", rc);
3142			return (rc);
3143		}
3144		sc->vres.qp.start = val[0];
3145		sc->vres.qp.size = val[1] - val[0] + 1;
3146		sc->vres.cq.start = val[2];
3147		sc->vres.cq.size = val[3] - val[2] + 1;
3148		sc->vres.ocq.start = val[4];
3149		sc->vres.ocq.size = val[5] - val[4] + 1;
3150	}
3151	if (sc->iscsicaps) {
3152		param[0] = FW_PARAM_PFVF(ISCSI_START);
3153		param[1] = FW_PARAM_PFVF(ISCSI_END);
3154		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
3155		if (rc != 0) {
3156			device_printf(sc->dev,
3157			    "failed to query iSCSI parameters: %d.\n", rc);
3158			return (rc);
3159		}
3160		sc->vres.iscsi.start = val[0];
3161		sc->vres.iscsi.size = val[1] - val[0] + 1;
3162	}
3163
3164	t4_init_sge_params(sc);
3165
3166	/*
3167	 * We've got the params we wanted to query via the firmware. Now grab
3168	 * some others directly from the chip.
3169	 */
3170	rc = t4_read_chip_settings(sc);
3171
3172	return (rc);
3173}
3174
3175static int
3176set_params__post_init(struct adapter *sc)
3177{
3178	uint32_t param, val;
3179
3180	/* ask for encapsulated CPLs */
3181	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3182	val = 1;
3183	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3184
3185	return (0);
3186}
3187
3188#undef FW_PARAM_PFVF
3189#undef FW_PARAM_DEV
3190
3191static void
3192t4_set_desc(struct adapter *sc)
3193{
3194	char buf[128];
3195	struct adapter_params *p = &sc->params;
3196
3197	snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
3198
3199	device_set_desc_copy(sc->dev, buf);
3200}
3201
3202static void
3203build_medialist(struct port_info *pi, struct ifmedia *media)
3204{
3205	int m;
3206
3207	PORT_LOCK(pi);
3208
3209	ifmedia_removeall(media);
3210
3211	m = IFM_ETHER | IFM_FDX;
3212
3213	switch(pi->port_type) {
3214	case FW_PORT_TYPE_BT_XFI:
3215	case FW_PORT_TYPE_BT_XAUI:
3216		ifmedia_add(media, m | IFM_10G_T, 0, NULL);
3217		/* fall through */
3218
3219	case FW_PORT_TYPE_BT_SGMII:
3220		ifmedia_add(media, m | IFM_1000_T, 0, NULL);
3221		ifmedia_add(media, m | IFM_100_TX, 0, NULL);
3222		ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
3223		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
3224		break;
3225
3226	case FW_PORT_TYPE_CX4:
3227		ifmedia_add(media, m | IFM_10G_CX4, 0, NULL);
3228		ifmedia_set(media, m | IFM_10G_CX4);
3229		break;
3230
3231	case FW_PORT_TYPE_QSFP_10G:
3232	case FW_PORT_TYPE_SFP:
3233	case FW_PORT_TYPE_FIBER_XFI:
3234	case FW_PORT_TYPE_FIBER_XAUI:
3235		switch (pi->mod_type) {
3236
3237		case FW_PORT_MOD_TYPE_LR:
3238			ifmedia_add(media, m | IFM_10G_LR, 0, NULL);
3239			ifmedia_set(media, m | IFM_10G_LR);
3240			break;
3241
3242		case FW_PORT_MOD_TYPE_SR:
3243			ifmedia_add(media, m | IFM_10G_SR, 0, NULL);
3244			ifmedia_set(media, m | IFM_10G_SR);
3245			break;
3246
3247
case FW_PORT_MOD_TYPE_LRM: 3248 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3249 ifmedia_set(media, m | IFM_10G_LRM); 3250 break; 3251 3252 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3253 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3254 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3255 ifmedia_set(media, m | IFM_10G_TWINAX); 3256 break; 3257 3258 case FW_PORT_MOD_TYPE_NONE: 3259 m &= ~IFM_FDX; 3260 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3261 ifmedia_set(media, m | IFM_NONE); 3262 break; 3263 3264 case FW_PORT_MOD_TYPE_NA: 3265 case FW_PORT_MOD_TYPE_ER: 3266 default: 3267 device_printf(pi->dev, 3268 "unknown port_type (%d), mod_type (%d)\n", 3269 pi->port_type, pi->mod_type); 3270 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3271 ifmedia_set(media, m | IFM_UNKNOWN); 3272 break; 3273 } 3274 break; 3275 3276 case FW_PORT_TYPE_QSFP: 3277 switch (pi->mod_type) { 3278 3279 case FW_PORT_MOD_TYPE_LR: 3280 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3281 ifmedia_set(media, m | IFM_40G_LR4); 3282 break; 3283 3284 case FW_PORT_MOD_TYPE_SR: 3285 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3286 ifmedia_set(media, m | IFM_40G_SR4); 3287 break; 3288 3289 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3290 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3291 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3292 ifmedia_set(media, m | IFM_40G_CR4); 3293 break; 3294 3295 case FW_PORT_MOD_TYPE_NONE: 3296 m &= ~IFM_FDX; 3297 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3298 ifmedia_set(media, m | IFM_NONE); 3299 break; 3300 3301 default: 3302 device_printf(pi->dev, 3303 "unknown port_type (%d), mod_type (%d)\n", 3304 pi->port_type, pi->mod_type); 3305 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3306 ifmedia_set(media, m | IFM_UNKNOWN); 3307 break; 3308 } 3309 break; 3310 3311 default: 3312 device_printf(pi->dev, 3313 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3314 pi->mod_type); 3315 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3316 ifmedia_set(media, m | IFM_UNKNOWN); 3317 break; 3318 } 3319 3320 PORT_UNLOCK(pi); 3321} 3322 3323#define FW_MAC_EXACT_CHUNK 7 3324 3325/* 3326 * Program the port's XGMAC based on parameters in ifnet. The caller also 3327 * indicates which parameters should be programmed (the rest are left alone). 3328 */ 3329int 3330update_mac_settings(struct ifnet *ifp, int flags) 3331{ 3332 int rc = 0; 3333 struct vi_info *vi = ifp->if_softc; 3334 struct port_info *pi = vi->pi; 3335 struct adapter *sc = pi->adapter; 3336 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3337 3338 ASSERT_SYNCHRONIZED_OP(sc); 3339 KASSERT(flags, ("%s: not told what to update.", __func__)); 3340 3341 if (flags & XGMAC_MTU) 3342 mtu = ifp->if_mtu; 3343 3344 if (flags & XGMAC_PROMISC) 3345 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 3346 3347 if (flags & XGMAC_ALLMULTI) 3348 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 3349 3350 if (flags & XGMAC_VLANEX) 3351 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 
1 : 0; 3352 3353 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 3354 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 3355 allmulti, 1, vlanex, false); 3356 if (rc) { 3357 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 3358 rc); 3359 return (rc); 3360 } 3361 } 3362 3363 if (flags & XGMAC_UCADDR) { 3364 uint8_t ucaddr[ETHER_ADDR_LEN]; 3365 3366 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 3367 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 3368 ucaddr, true, true); 3369 if (rc < 0) { 3370 rc = -rc; 3371 if_printf(ifp, "change_mac failed: %d\n", rc); 3372 return (rc); 3373 } else { 3374 vi->xact_addr_filt = rc; 3375 rc = 0; 3376 } 3377 } 3378 3379 if (flags & XGMAC_MCADDRS) { 3380 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 3381 int del = 1; 3382 uint64_t hash = 0; 3383 struct ifmultiaddr *ifma; 3384 int i = 0, j; 3385 3386 if_maddr_rlock(ifp); 3387 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3388 if (ifma->ifma_addr->sa_family != AF_LINK) 3389 continue; 3390 mcaddr[i] = 3391 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 3392 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 3393 i++; 3394 3395 if (i == FW_MAC_EXACT_CHUNK) { 3396 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 3397 del, i, mcaddr, NULL, &hash, 0); 3398 if (rc < 0) { 3399 rc = -rc; 3400 for (j = 0; j < i; j++) { 3401 if_printf(ifp, 3402 "failed to add mc address" 3403 " %02x:%02x:%02x:" 3404 "%02x:%02x:%02x rc=%d\n", 3405 mcaddr[j][0], mcaddr[j][1], 3406 mcaddr[j][2], mcaddr[j][3], 3407 mcaddr[j][4], mcaddr[j][5], 3408 rc); 3409 } 3410 goto mcfail; 3411 } 3412 del = 0; 3413 i = 0; 3414 } 3415 } 3416 if (i > 0) { 3417 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 3418 mcaddr, NULL, &hash, 0); 3419 if (rc < 0) { 3420 rc = -rc; 3421 for (j = 0; j < i; j++) { 3422 if_printf(ifp, 3423 "failed to add mc address" 3424 " %02x:%02x:%02x:" 3425 "%02x:%02x:%02x rc=%d\n", 3426 mcaddr[j][0], mcaddr[j][1], 3427 mcaddr[j][2], mcaddr[j][3], 3428 mcaddr[j][4], mcaddr[j][5], 3429 rc); 3430 } 3431 goto mcfail; 3432 } 3433 } 3434 3435 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 3436 if (rc != 0) 3437 if_printf(ifp, "failed to set mc address hash: %d", rc); 3438mcfail: 3439 if_maddr_runlock(ifp); 3440 } 3441 3442 return (rc); 3443} 3444 3445/* 3446 * {begin|end}_synchronized_op must be called from the same thread. 3447 */ 3448int 3449begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 3450 char *wmesg) 3451{ 3452 int rc, pri; 3453 3454#ifdef WITNESS 3455 /* the caller thinks it's ok to sleep, but is it really? */ 3456 if (flags & SLEEP_OK) 3457 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3458 "begin_synchronized_op"); 3459#endif 3460 3461 if (INTR_OK) 3462 pri = PCATCH; 3463 else 3464 pri = 0; 3465 3466 ADAPTER_LOCK(sc); 3467 for (;;) { 3468 3469 if (vi && IS_DOOMED(vi)) { 3470 rc = ENXIO; 3471 goto done; 3472 } 3473 3474 if (!IS_BUSY(sc)) { 3475 rc = 0; 3476 break; 3477 } 3478 3479 if (!(flags & SLEEP_OK)) { 3480 rc = EBUSY; 3481 goto done; 3482 } 3483 3484 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 3485 rc = EINTR; 3486 goto done; 3487 } 3488 } 3489 3490 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 3491 SET_BUSY(sc); 3492#ifdef INVARIANTS 3493 sc->last_op = wmesg; 3494 sc->last_op_thr = curthread; 3495 sc->last_op_flags = flags; 3496#endif 3497 3498done: 3499 if (!(flags & HOLD_LOCK) || rc) 3500 ADAPTER_UNLOCK(sc); 3501 3502 return (rc); 3503} 3504 3505/* 3506 * Tell if_ioctl and if_init that the VI is going away. 
This is 3507 * special variant of begin_synchronized_op and must be paired with a 3508 * call to end_synchronized_op. 3509 */ 3510void 3511doom_vi(struct adapter *sc, struct vi_info *vi) 3512{ 3513 3514 ADAPTER_LOCK(sc); 3515 SET_DOOMED(vi); 3516 wakeup(&sc->flags); 3517 while (IS_BUSY(sc)) 3518 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 3519 SET_BUSY(sc); 3520#ifdef INVARIANTS 3521 sc->last_op = "t4detach"; 3522 sc->last_op_thr = curthread; 3523 sc->last_op_flags = 0; 3524#endif 3525 ADAPTER_UNLOCK(sc); 3526} 3527 3528/* 3529 * {begin|end}_synchronized_op must be called from the same thread. 3530 */ 3531void 3532end_synchronized_op(struct adapter *sc, int flags) 3533{ 3534 3535 if (flags & LOCK_HELD) 3536 ADAPTER_LOCK_ASSERT_OWNED(sc); 3537 else 3538 ADAPTER_LOCK(sc); 3539 3540 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 3541 CLR_BUSY(sc); 3542 wakeup(&sc->flags); 3543 ADAPTER_UNLOCK(sc); 3544} 3545 3546static int 3547cxgbe_init_synchronized(struct vi_info *vi) 3548{ 3549 struct port_info *pi = vi->pi; 3550 struct adapter *sc = pi->adapter; 3551 struct ifnet *ifp = vi->ifp; 3552 int rc = 0, i; 3553 struct sge_txq *txq; 3554 3555 ASSERT_SYNCHRONIZED_OP(sc); 3556 3557 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3558 return (0); /* already running */ 3559 3560 if (!(sc->flags & FULL_INIT_DONE) && 3561 ((rc = adapter_full_init(sc)) != 0)) 3562 return (rc); /* error message displayed already */ 3563 3564 if (!(vi->flags & VI_INIT_DONE) && 3565 ((rc = vi_full_init(vi)) != 0)) 3566 return (rc); /* error message displayed already */ 3567 3568 rc = update_mac_settings(ifp, XGMAC_ALL); 3569 if (rc) 3570 goto done; /* error message displayed already */ 3571 3572 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 3573 if (rc != 0) { 3574 if_printf(ifp, "enable_vi failed: %d\n", rc); 3575 goto done; 3576 } 3577 3578 /* 3579 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 3580 * if this changes. 3581 */ 3582 3583 for_each_txq(vi, i, txq) { 3584 TXQ_LOCK(txq); 3585 txq->eq.flags |= EQ_ENABLED; 3586 TXQ_UNLOCK(txq); 3587 } 3588 3589 /* 3590 * The first iq of the first port to come up is used for tracing. 3591 */ 3592 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 3593 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 3594 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : 3595 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 3596 V_QUEUENUMBER(sc->traceq)); 3597 pi->flags |= HAS_TRACEQ; 3598 } 3599 3600 /* all ok */ 3601 PORT_LOCK(pi); 3602 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3603 pi->up_vis++; 3604 3605 if (pi->nvi > 1 || sc->flags & IS_VF) 3606 callout_reset(&vi->tick, hz, vi_tick, vi); 3607 else 3608 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 3609 PORT_UNLOCK(pi); 3610done: 3611 if (rc != 0) 3612 cxgbe_uninit_synchronized(vi); 3613 3614 return (rc); 3615} 3616 3617/* 3618 * Idempotent. 3619 */ 3620static int 3621cxgbe_uninit_synchronized(struct vi_info *vi) 3622{ 3623 struct port_info *pi = vi->pi; 3624 struct adapter *sc = pi->adapter; 3625 struct ifnet *ifp = vi->ifp; 3626 int rc, i; 3627 struct sge_txq *txq; 3628 3629 ASSERT_SYNCHRONIZED_OP(sc); 3630 3631 if (!(vi->flags & VI_INIT_DONE)) { 3632 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 3633 ("uninited VI is running")); 3634 return (0); 3635 } 3636 3637 /* 3638 * Disable the VI so that all its data in either direction is discarded 3639 * by the MPS. 
Leave everything else (the queues, interrupts, and 1Hz 3640 * tick) intact as the TP can deliver negative advice or data that it's 3641 * holding in its RAM (for an offloaded connection) even after the VI is 3642 * disabled. 3643 */ 3644 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 3645 if (rc) { 3646 if_printf(ifp, "disable_vi failed: %d\n", rc); 3647 return (rc); 3648 } 3649 3650 for_each_txq(vi, i, txq) { 3651 TXQ_LOCK(txq); 3652 txq->eq.flags &= ~EQ_ENABLED; 3653 TXQ_UNLOCK(txq); 3654 } 3655 3656 PORT_LOCK(pi); 3657 if (pi->nvi > 1 || sc->flags & IS_VF) 3658 callout_stop(&vi->tick); 3659 else 3660 callout_stop(&pi->tick); 3661 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3662 PORT_UNLOCK(pi); 3663 return (0); 3664 } 3665 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3666 pi->up_vis--; 3667 if (pi->up_vis > 0) { 3668 PORT_UNLOCK(pi); 3669 return (0); 3670 } 3671 PORT_UNLOCK(pi); 3672 3673 pi->link_cfg.link_ok = 0; 3674 pi->link_cfg.speed = 0; 3675 pi->linkdnrc = -1; 3676 t4_os_link_changed(sc, pi->port_id, 0, -1); 3677 3678 return (0); 3679} 3680 3681/* 3682 * It is ok for this function to fail midway and return right away. t4_detach 3683 * will walk the entire sc->irq list and clean up whatever is valid. 3684 */ 3685int 3686t4_setup_intr_handlers(struct adapter *sc) 3687{ 3688 int rc, rid, p, q, v; 3689 char s[8]; 3690 struct irq *irq; 3691 struct port_info *pi; 3692 struct vi_info *vi; 3693 struct sge *sge = &sc->sge; 3694 struct sge_rxq *rxq; 3695#ifdef TCP_OFFLOAD 3696 struct sge_ofld_rxq *ofld_rxq; 3697#endif 3698#ifdef DEV_NETMAP 3699 struct sge_nm_rxq *nm_rxq; 3700#endif 3701 3702 /* 3703 * Setup interrupts. 3704 */ 3705 irq = &sc->irq[0]; 3706 rid = sc->intr_type == INTR_INTX ? 0 : 1; 3707 if (sc->intr_count == 1) 3708 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 3709 3710 /* Multiple interrupts. 
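 * On a PF the first vector is the error interrupt and the second the
 * firmware event queue; on a VF the error vector is absent and the firmware
 * event queue comes first.  Every remaining vector services one NIC/netmap
 * rx queue or one offload rx queue, as set up below.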
*/ 3711 if (sc->flags & IS_VF) 3712 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, 3713 ("%s: too few intr.", __func__)); 3714 else 3715 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 3716 ("%s: too few intr.", __func__)); 3717 3718 /* The first one is always error intr on PFs */ 3719 if (!(sc->flags & IS_VF)) { 3720 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 3721 if (rc != 0) 3722 return (rc); 3723 irq++; 3724 rid++; 3725 } 3726 3727 /* The second one is always the firmware event queue (first on VFs) */ 3728 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); 3729 if (rc != 0) 3730 return (rc); 3731 irq++; 3732 rid++; 3733 3734 for_each_port(sc, p) { 3735 pi = sc->port[p]; 3736 for_each_vi(pi, v, vi) { 3737 vi->first_intr = rid - 1; 3738 3739 if (vi->nnmrxq > 0) { 3740 int n = max(vi->nrxq, vi->nnmrxq); 3741 3742 MPASS(vi->flags & INTR_RXQ); 3743 3744 rxq = &sge->rxq[vi->first_rxq]; 3745#ifdef DEV_NETMAP 3746 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; 3747#endif 3748 for (q = 0; q < n; q++) { 3749 snprintf(s, sizeof(s), "%x%c%x", p, 3750 'a' + v, q); 3751 if (q < vi->nrxq) 3752 irq->rxq = rxq++; 3753#ifdef DEV_NETMAP 3754 if (q < vi->nnmrxq) 3755 irq->nm_rxq = nm_rxq++; 3756#endif 3757 rc = t4_alloc_irq(sc, irq, rid, 3758 t4_vi_intr, irq, s); 3759 if (rc != 0) 3760 return (rc); 3761 irq++; 3762 rid++; 3763 vi->nintr++; 3764 } 3765 } else if (vi->flags & INTR_RXQ) { 3766 for_each_rxq(vi, q, rxq) { 3767 snprintf(s, sizeof(s), "%x%c%x", p, 3768 'a' + v, q); 3769 rc = t4_alloc_irq(sc, irq, rid, 3770 t4_intr, rxq, s); 3771 if (rc != 0) 3772 return (rc); 3773 irq++; 3774 rid++; 3775 vi->nintr++; 3776 } 3777 } 3778#ifdef TCP_OFFLOAD 3779 if (vi->flags & INTR_OFLD_RXQ) { 3780 for_each_ofld_rxq(vi, q, ofld_rxq) { 3781 snprintf(s, sizeof(s), "%x%c%x", p, 3782 'A' + v, q); 3783 rc = t4_alloc_irq(sc, irq, rid, 3784 t4_intr, ofld_rxq, s); 3785 if (rc != 0) 3786 return (rc); 3787 irq++; 3788 rid++; 3789 vi->nintr++; 3790 } 3791 } 3792#endif 3793 } 3794 } 3795 MPASS(irq == &sc->irq[sc->intr_count]); 3796 3797 return (0); 3798} 3799 3800int 3801adapter_full_init(struct adapter *sc) 3802{ 3803 int rc, i; 3804 3805 ASSERT_SYNCHRONIZED_OP(sc); 3806 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3807 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 3808 ("%s: FULL_INIT_DONE already", __func__)); 3809 3810 /* 3811 * queues that belong to the adapter (not any particular port). 
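 * These include the firmware event queue; they are created by
 * t4_setup_adapter_queues() below.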
3812 */ 3813 rc = t4_setup_adapter_queues(sc); 3814 if (rc != 0) 3815 goto done; 3816 3817 for (i = 0; i < nitems(sc->tq); i++) { 3818 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 3819 taskqueue_thread_enqueue, &sc->tq[i]); 3820 if (sc->tq[i] == NULL) { 3821 device_printf(sc->dev, 3822 "failed to allocate task queue %d\n", i); 3823 rc = ENOMEM; 3824 goto done; 3825 } 3826 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 3827 device_get_nameunit(sc->dev), i); 3828 } 3829 3830 if (!(sc->flags & IS_VF)) 3831 t4_intr_enable(sc); 3832 sc->flags |= FULL_INIT_DONE; 3833done: 3834 if (rc != 0) 3835 adapter_full_uninit(sc); 3836 3837 return (rc); 3838} 3839 3840int 3841adapter_full_uninit(struct adapter *sc) 3842{ 3843 int i; 3844 3845 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 3846 3847 t4_teardown_adapter_queues(sc); 3848 3849 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 3850 taskqueue_free(sc->tq[i]); 3851 sc->tq[i] = NULL; 3852 } 3853 3854 sc->flags &= ~FULL_INIT_DONE; 3855 3856 return (0); 3857} 3858 3859#ifdef RSS 3860#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 3861 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 3862 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 3863 RSS_HASHTYPE_RSS_UDP_IPV6) 3864 3865/* Translates kernel hash types to hardware. */ 3866static int 3867hashconfig_to_hashen(int hashconfig) 3868{ 3869 int hashen = 0; 3870 3871 if (hashconfig & RSS_HASHTYPE_RSS_IPV4) 3872 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 3873 if (hashconfig & RSS_HASHTYPE_RSS_IPV6) 3874 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 3875 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { 3876 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 3877 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 3878 } 3879 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { 3880 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 3881 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 3882 } 3883 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) 3884 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 3885 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) 3886 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 3887 3888 return (hashen); 3889} 3890 3891/* Translates hardware hash types to kernel. */ 3892static int 3893hashen_to_hashconfig(int hashen) 3894{ 3895 int hashconfig = 0; 3896 3897 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { 3898 /* 3899 * If UDP hashing was enabled it must have been enabled for 3900 * either IPv4 or IPv6 (inclusive or). Enabling UDP without 3901 * enabling any 4-tuple hash is nonsense configuration. 
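 * For example, UDPEN together with IP4FOURTUPEN is reported back as
 * RSS_HASHTYPE_RSS_UDP_IPV4 by the translation below.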
3902 */ 3903 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 3904 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); 3905 3906 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 3907 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; 3908 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 3909 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; 3910 } 3911 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 3912 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; 3913 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 3914 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; 3915 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 3916 hashconfig |= RSS_HASHTYPE_RSS_IPV4; 3917 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 3918 hashconfig |= RSS_HASHTYPE_RSS_IPV6; 3919 3920 return (hashconfig); 3921} 3922#endif 3923 3924int 3925vi_full_init(struct vi_info *vi) 3926{ 3927 struct adapter *sc = vi->pi->adapter; 3928 struct ifnet *ifp = vi->ifp; 3929 uint16_t *rss; 3930 struct sge_rxq *rxq; 3931 int rc, i, j, hashen; 3932#ifdef RSS 3933 int nbuckets = rss_getnumbuckets(); 3934 int hashconfig = rss_gethashconfig(); 3935 int extra; 3936 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 3937 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 3938#endif 3939 3940 ASSERT_SYNCHRONIZED_OP(sc); 3941 KASSERT((vi->flags & VI_INIT_DONE) == 0, 3942 ("%s: VI_INIT_DONE already", __func__)); 3943 3944 sysctl_ctx_init(&vi->ctx); 3945 vi->flags |= VI_SYSCTL_CTX; 3946 3947 /* 3948 * Allocate tx/rx/fl queues for this VI. 3949 */ 3950 rc = t4_setup_vi_queues(vi); 3951 if (rc != 0) 3952 goto done; /* error message displayed already */ 3953 3954 /* 3955 * Setup RSS for this VI. Save a copy of the RSS table for later use. 3956 */ 3957 if (vi->nrxq > vi->rss_size) { 3958 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " 3959 "some queues will never receive traffic.\n", vi->nrxq, 3960 vi->rss_size); 3961 } else if (vi->rss_size % vi->nrxq) { 3962 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); " 3963 "expect uneven traffic distribution.\n", vi->nrxq, 3964 vi->rss_size); 3965 } 3966#ifdef RSS 3967 MPASS(RSS_KEYSIZE == 40); 3968 if (vi->nrxq != nbuckets) { 3969 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);" 3970 "performance will be impacted.\n", vi->nrxq, nbuckets); 3971 } 3972 3973 rss_getkey((void *)&raw_rss_key[0]); 3974 for (i = 0; i < nitems(rss_key); i++) { 3975 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); 3976 } 3977 t4_write_rss_key(sc, &rss_key[0], -1); 3978#endif 3979 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 3980 for (i = 0; i < vi->rss_size;) { 3981#ifdef RSS 3982 j = rss_get_indirection_to_bucket(i); 3983 j %= vi->nrxq; 3984 rxq = &sc->sge.rxq[vi->first_rxq + j]; 3985 rss[i++] = rxq->iq.abs_id; 3986#else 3987 for_each_rxq(vi, j, rxq) { 3988 rss[i++] = rxq->iq.abs_id; 3989 if (i == vi->rss_size) 3990 break; 3991 } 3992#endif 3993 } 3994 3995 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 3996 vi->rss_size); 3997 if (rc != 0) { 3998 if_printf(ifp, "rss_config failed: %d\n", rc); 3999 goto done; 4000 } 4001 4002#ifdef RSS 4003 hashen = hashconfig_to_hashen(hashconfig); 4004 4005 /* 4006 * We may have had to enable some hashes even though the global config 4007 * wants them disabled. This is a potential problem that must be 4008 * reported to the user. 4009 */ 4010 extra = hashen_to_hashconfig(hashen) ^ hashconfig; 4011 4012 /* 4013 * If we consider only the supported hash types, then the enabled hashes 4014 * are a superset of the requested hashes. 
In other words, there cannot 4015 * be any supported hash that was requested but not enabled, but there 4016 * can be hashes that were not requested but had to be enabled. 4017 */ 4018 extra &= SUPPORTED_RSS_HASHTYPES; 4019 MPASS((extra & hashconfig) == 0); 4020 4021 if (extra) { 4022 if_printf(ifp, 4023 "global RSS config (0x%x) cannot be accomodated.\n", 4024 hashconfig); 4025 } 4026 if (extra & RSS_HASHTYPE_RSS_IPV4) 4027 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 4028 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 4029 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 4030 if (extra & RSS_HASHTYPE_RSS_IPV6) 4031 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 4032 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 4033 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 4034 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 4035 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 4036 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 4037 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 4038#else 4039 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 4040 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 4041 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4042 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 4043#endif 4044 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0]); 4045 if (rc != 0) { 4046 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 4047 goto done; 4048 } 4049 4050 vi->rss = rss; 4051 vi->flags |= VI_INIT_DONE; 4052done: 4053 if (rc != 0) 4054 vi_full_uninit(vi); 4055 4056 return (rc); 4057} 4058 4059/* 4060 * Idempotent. 4061 */ 4062int 4063vi_full_uninit(struct vi_info *vi) 4064{ 4065 struct port_info *pi = vi->pi; 4066 struct adapter *sc = pi->adapter; 4067 int i; 4068 struct sge_rxq *rxq; 4069 struct sge_txq *txq; 4070#ifdef TCP_OFFLOAD 4071 struct sge_ofld_rxq *ofld_rxq; 4072 struct sge_wrq *ofld_txq; 4073#endif 4074 4075 if (vi->flags & VI_INIT_DONE) { 4076 4077 /* Need to quiesce queues. */ 4078 4079 /* XXX: Only for the first VI? */ 4080 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) 4081 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 4082 4083 for_each_txq(vi, i, txq) { 4084 quiesce_txq(sc, txq); 4085 } 4086 4087#ifdef TCP_OFFLOAD 4088 for_each_ofld_txq(vi, i, ofld_txq) { 4089 quiesce_wrq(sc, ofld_txq); 4090 } 4091#endif 4092 4093 for_each_rxq(vi, i, rxq) { 4094 quiesce_iq(sc, &rxq->iq); 4095 quiesce_fl(sc, &rxq->fl); 4096 } 4097 4098#ifdef TCP_OFFLOAD 4099 for_each_ofld_rxq(vi, i, ofld_rxq) { 4100 quiesce_iq(sc, &ofld_rxq->iq); 4101 quiesce_fl(sc, &ofld_rxq->fl); 4102 } 4103#endif 4104 free(vi->rss, M_CXGBE); 4105 free(vi->nm_rss, M_CXGBE); 4106 } 4107 4108 t4_teardown_vi_queues(vi); 4109 vi->flags &= ~VI_INIT_DONE; 4110 4111 return (0); 4112} 4113 4114static void 4115quiesce_txq(struct adapter *sc, struct sge_txq *txq) 4116{ 4117 struct sge_eq *eq = &txq->eq; 4118 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4119 4120 (void) sc; /* unused */ 4121 4122#ifdef INVARIANTS 4123 TXQ_LOCK(txq); 4124 MPASS((eq->flags & EQ_ENABLED) == 0); 4125 TXQ_UNLOCK(txq); 4126#endif 4127 4128 /* Wait for the mp_ring to empty. */ 4129 while (!mp_ring_is_idle(txq->r)) { 4130 mp_ring_check_drainage(txq->r, 0); 4131 pause("rquiesce", 1); 4132 } 4133 4134 /* Then wait for the hardware to finish. */ 4135 while (spg->cidx != htobe16(eq->pidx)) 4136 pause("equiesce", 1); 4137 4138 /* Finally, wait for the driver to reclaim all descriptors. 
*/ 4139 while (eq->cidx != eq->pidx) 4140 pause("dquiesce", 1); 4141} 4142 4143static void 4144quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4145{ 4146 4147 /* XXXTX */ 4148} 4149 4150static void 4151quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4152{ 4153 (void) sc; /* unused */ 4154 4155 /* Synchronize with the interrupt handler */ 4156 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4157 pause("iqfree", 1); 4158} 4159 4160static void 4161quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4162{ 4163 mtx_lock(&sc->sfl_lock); 4164 FL_LOCK(fl); 4165 fl->flags |= FL_DOOMED; 4166 FL_UNLOCK(fl); 4167 callout_stop(&sc->sfl_callout); 4168 mtx_unlock(&sc->sfl_lock); 4169 4170 KASSERT((fl->flags & FL_STARVING) == 0, 4171 ("%s: still starving", __func__)); 4172} 4173 4174static int 4175t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4176 driver_intr_t *handler, void *arg, char *name) 4177{ 4178 int rc; 4179 4180 irq->rid = rid; 4181 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4182 RF_SHAREABLE | RF_ACTIVE); 4183 if (irq->res == NULL) { 4184 device_printf(sc->dev, 4185 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4186 return (ENOMEM); 4187 } 4188 4189 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4190 NULL, handler, arg, &irq->tag); 4191 if (rc != 0) { 4192 device_printf(sc->dev, 4193 "failed to setup interrupt for rid %d, name %s: %d\n", 4194 rid, name, rc); 4195 } else if (name) 4196 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 4197 4198 return (rc); 4199} 4200 4201static int 4202t4_free_irq(struct adapter *sc, struct irq *irq) 4203{ 4204 if (irq->tag) 4205 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4206 if (irq->res) 4207 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4208 4209 bzero(irq, sizeof(*irq)); 4210 4211 return (0); 4212} 4213 4214static void 4215get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4216{ 4217 4218 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4219 t4_get_regs(sc, buf, regs->len); 4220} 4221 4222#define A_PL_INDIR_CMD 0x1f8 4223 4224#define S_PL_AUTOINC 31 4225#define M_PL_AUTOINC 0x1U 4226#define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4227#define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4228 4229#define S_PL_VFID 20 4230#define M_PL_VFID 0xffU 4231#define V_PL_VFID(x) ((x) << S_PL_VFID) 4232#define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4233 4234#define S_PL_ADDR 0 4235#define M_PL_ADDR 0xfffffU 4236#define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4237#define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4238 4239#define A_PL_INDIR_DATA 0x1fc 4240 4241static uint64_t 4242read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4243{ 4244 u32 stats[2]; 4245 4246 mtx_assert(&sc->reg_lock, MA_OWNED); 4247 if (sc->flags & IS_VF) { 4248 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); 4249 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); 4250 } else { 4251 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4252 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4253 V_PL_ADDR(VF_MPS_REG(reg))); 4254 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4255 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4256 } 4257 return (((uint64_t)stats[1]) << 32 | stats[0]); 4258} 4259 4260static void 4261t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4262 struct fw_vi_stats_vf *stats) 4263{ 4264 4265#define GET_STAT(name) \ 4266 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4267 4268 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4269 
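	/*
	 * Editorial note (illustration only): every statistic filled in here
	 * is a 64-bit counter that the MPS exposes as a low/high pair of
	 * 32-bit registers.  GET_STAT() names the _L register of a pair and
	 * read_vf_stat() above reads both halves -- directly through
	 * VF_MPS_REG() on a VF, or through the A_PL_INDIR_CMD/DATA window on
	 * the PF -- and combines them as
	 *
	 *	(uint64_t)stats[1] << 32 | stats[0]
	 */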
stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4270 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4271 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 4272 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4273 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4274 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4275 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4276 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4277 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4278 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4279 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4280 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4281 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4282 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4283 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4284 4285#undef GET_STAT 4286} 4287 4288static void 4289t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 4290{ 4291 int reg; 4292 4293 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4294 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4295 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 4296 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 4297 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 4298 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 4299} 4300 4301static void 4302vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 4303{ 4304 struct ifnet *ifp = vi->ifp; 4305 struct sge_txq *txq; 4306 int i, drops; 4307 struct fw_vi_stats_vf *s = &vi->stats; 4308 struct timeval tv; 4309 const struct timeval interval = {0, 250000}; /* 250ms */ 4310 4311 if (!(vi->flags & VI_INIT_DONE)) 4312 return; 4313 4314 getmicrotime(&tv); 4315 timevalsub(&tv, &interval); 4316 if (timevalcmp(&tv, &vi->last_refreshed, <)) 4317 return; 4318 4319 mtx_lock(&sc->reg_lock); 4320 t4_get_vi_stats(sc, vi->viid, &vi->stats); 4321 4322 ifp->if_ipackets = s->rx_bcast_frames + s->rx_mcast_frames + 4323 s->rx_ucast_frames; 4324 ifp->if_ierrors = s->rx_err_frames; 4325 ifp->if_opackets = s->tx_bcast_frames + s->tx_mcast_frames + 4326 s->tx_ucast_frames + s->tx_offload_frames; 4327 ifp->if_oerrors = s->tx_drop_frames; 4328 ifp->if_ibytes = s->rx_bcast_bytes + s->rx_mcast_bytes + 4329 s->rx_ucast_bytes; 4330 ifp->if_obytes = s->tx_bcast_bytes + s->tx_mcast_bytes + 4331 s->tx_ucast_bytes + s->tx_offload_bytes; 4332 ifp->if_imcasts = s->rx_mcast_frames; 4333 ifp->if_omcasts = s->tx_mcast_frames; 4334 4335 drops = 0; 4336 for_each_txq(vi, i, txq) 4337 drops += counter_u64_fetch(txq->r->drops); 4338 ifp->if_snd.ifq_drops = drops; 4339 4340 getmicrotime(&vi->last_refreshed); 4341 mtx_unlock(&sc->reg_lock); 4342} 4343 4344static void 4345cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4346{ 4347 struct vi_info *vi = &pi->vi[0]; 4348 struct ifnet *ifp = vi->ifp; 4349 struct sge_txq *txq; 4350 int i, drops; 4351 struct port_stats *s = &pi->stats; 4352 struct timeval tv; 4353 const struct timeval interval = {0, 250000}; /* 250ms */ 4354 4355 getmicrotime(&tv); 4356 timevalsub(&tv, &interval); 4357 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4358 return; 4359 4360 t4_get_port_stats(sc, pi->tx_chan, s); 4361 4362 ifp->if_opackets = s->tx_frames; 4363 ifp->if_ipackets = s->rx_frames; 4364 ifp->if_obytes = s->tx_octets; 4365 ifp->if_ibytes = s->rx_octets; 4366 ifp->if_omcasts = s->tx_mcast_frames; 4367 ifp->if_imcasts = s->rx_mcast_frames; 4368 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + 4369 s->rx_ovflow3 + s->rx_trunc0 + 
s->rx_trunc1 + s->rx_trunc2 + 4370 s->rx_trunc3; 4371 for (i = 0; i < sc->chip_params->nchan; i++) { 4372 if (pi->rx_chan_map & (1 << i)) { 4373 uint32_t v; 4374 4375 mtx_lock(&sc->reg_lock); 4376 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4377 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4378 mtx_unlock(&sc->reg_lock); 4379 ifp->if_iqdrops += v; 4380 } 4381 } 4382 4383 drops = s->tx_drop; 4384 for_each_txq(vi, i, txq) 4385 drops += counter_u64_fetch(txq->r->drops); 4386 ifp->if_snd.ifq_drops = drops; 4387 4388 ifp->if_oerrors = s->tx_error_frames; 4389 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long + 4390 s->rx_fcs_err + s->rx_len_err; 4391 4392 getmicrotime(&pi->last_refreshed); 4393} 4394 4395static void 4396cxgbe_tick(void *arg) 4397{ 4398 struct port_info *pi = arg; 4399 struct adapter *sc = pi->adapter; 4400 4401 PORT_LOCK_ASSERT_OWNED(pi); 4402 cxgbe_refresh_stats(sc, pi); 4403 4404 callout_schedule(&pi->tick, hz); 4405} 4406 4407void 4408vi_tick(void *arg) 4409{ 4410 struct vi_info *vi = arg; 4411 struct adapter *sc = vi->pi->adapter; 4412 4413 vi_refresh_stats(sc, vi); 4414 4415 callout_schedule(&vi->tick, hz); 4416} 4417 4418static void 4419cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4420{ 4421 struct ifnet *vlan; 4422 4423 if (arg != ifp || ifp->if_type != IFT_ETHER) 4424 return; 4425 4426 vlan = VLAN_DEVAT(ifp, vid); 4427 VLAN_SETCOOKIE(vlan, ifp); 4428} 4429 4430/* 4431 * Should match fw_caps_config_<foo> enums in t4fw_interface.h 4432 */ 4433static char *caps_decoder[] = { 4434 "\20\001IPMI\002NCSI", /* 0: NBM */ 4435 "\20\001PPP\002QFC\003DCBX", /* 1: link */ 4436 "\20\001INGRESS\002EGRESS", /* 2: switch */ 4437 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ 4438 "\006HASHFILTER\007ETHOFLD", 4439 "\20\001TOE", /* 4: TOE */ 4440 "\20\001RDDP\002RDMAC", /* 5: RDMA */ 4441 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ 4442 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" 4443 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" 4444 "\007T10DIF" 4445 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", 4446 "\20\00KEYS", /* 7: TLS */ 4447 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ 4448 "\004PO_INITIATOR\005PO_TARGET", 4449}; 4450 4451void 4452t4_sysctls(struct adapter *sc) 4453{ 4454 struct sysctl_ctx_list *ctx; 4455 struct sysctl_oid *oid; 4456 struct sysctl_oid_list *children, *c0; 4457 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 4458 4459 ctx = device_get_sysctl_ctx(sc->dev); 4460 4461 /* 4462 * dev.t4nex.X. 
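	 *
	 * Editorial illustration (adapter unit 0 assumed): the OIDs created
	 * below show up under the nexus device, e.g.
	 *
	 *	sysctl dev.t4nex.0.core_clock
	 *	sysctl dev.t4nex.0.misc.devlog
	 *
	 * The misc node is marked CTLFLAG_SKIP, so it is omitted from plain
	 * listings but its children remain readable by name, as above.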
4463 */ 4464 oid = device_get_sysctl_tree(sc->dev); 4465 c0 = children = SYSCTL_CHILDREN(oid); 4466 4467 sc->sc_do_rxcopy = 1; 4468 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 4469 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 4470 4471 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 4472 sc->params.nports, "# of ports"); 4473 4474 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 4475 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 4476 sysctl_bitfield, "A", "available doorbells"); 4477 4478 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 4479 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 4480 4481 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 4482 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 4483 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 4484 "interrupt holdoff timer values (us)"); 4485 4486 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 4487 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 4488 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 4489 "interrupt holdoff packet counter values"); 4490 4491 t4_sge_sysctls(sc, ctx, children); 4492 4493 sc->lro_timeout = 100; 4494 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 4495 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 4496 4497 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, 4498 &sc->debug_flags, 0, "flags to enable runtime debugging"); 4499 4500 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", 4501 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); 4502 4503 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 4504 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 4505 4506 if (sc->flags & IS_VF) 4507 return; 4508 4509 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 4510 NULL, chip_rev(sc), "chip hardware revision"); 4511 4512 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", 4513 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); 4514 4515 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", 4516 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); 4517 4518 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", 4519 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); 4520 4521 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", 4522 CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); 4523 4524 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, 4525 sc->er_version, 0, "expansion ROM version"); 4526 4527 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD, 4528 sc->bs_version, 0, "bootstrap firmware version"); 4529 4530 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, 4531 NULL, sc->params.scfg_vers, "serial config version"); 4532 4533 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, 4534 NULL, sc->params.vpd_vers, "VPD version"); 4535 4536 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 4537 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 4538 4539 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 4540 sc->cfcsum, "config file checksum"); 4541 4542#define SYSCTL_CAP(name, n, text) \ 4543 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ 4544 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \ 4545 sysctl_bitfield, "A", "available " text " capabilities") 4546 4547 SYSCTL_CAP(nbmcaps, 0, "NBM"); 4548 SYSCTL_CAP(linkcaps, 1, "link"); 4549 SYSCTL_CAP(switchcaps, 2, "switch"); 4550 
SYSCTL_CAP(niccaps, 3, "NIC"); 4551 SYSCTL_CAP(toecaps, 4, "TCP offload"); 4552 SYSCTL_CAP(rdmacaps, 5, "RDMA"); 4553 SYSCTL_CAP(iscsicaps, 6, "iSCSI"); 4554 SYSCTL_CAP(tlscaps, 7, "TLS"); 4555 SYSCTL_CAP(fcoecaps, 8, "FCoE"); 4556#undef SYSCTL_CAP 4557 4558 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 4559 NULL, sc->tids.nftids, "number of filters"); 4560 4561 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 4562 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 4563 "chip temperature (in Celsius)"); 4564 4565#ifdef SBUF_DRAIN 4566 /* 4567 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 4568 */ 4569 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4570 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4571 "logs and miscellaneous information"); 4572 children = SYSCTL_CHILDREN(oid); 4573 4574 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4575 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4576 sysctl_cctrl, "A", "congestion control"); 4577 4578 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4579 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4580 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4581 4582 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4583 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4584 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4585 4586 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4587 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4588 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4589 4590 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4591 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4592 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4593 4594 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 4595 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 4596 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 4597 4598 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 4599 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 4600 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 4601 4602 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 4603 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4604 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_cim_la : sysctl_cim_la_t6, 4605 "A", "CIM logic analyzer"); 4606 4607 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 4608 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4609 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 4610 4611 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 4612 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 4613 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 4614 4615 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 4616 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 4617 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 4618 4619 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 4620 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 4621 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 4622 4623 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 4624 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 4625 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 4626 4627 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 4628 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 4629 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 4630 4631 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 4632 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 4633 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 4634 4635 if (chip_id(sc) > CHELSIO_T4) { 4636 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 4637 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 4638 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 4639 4640 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 4641 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 4642 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 4643 } 4644 4645 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 4646 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4647 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 4648 4649 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 4650 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4651 sysctl_cim_qcfg, "A", "CIM queue configuration"); 4652 4653 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 4654 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4655 sysctl_cpl_stats, "A", "CPL statistics"); 4656 4657 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 4658 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4659 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 4660 4661 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 4662 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4663 sysctl_devlog, "A", "firmware's device log"); 4664 4665 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 4666 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4667 sysctl_fcoe_stats, "A", "FCoE statistics"); 4668 4669 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 4670 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4671 sysctl_hw_sched, "A", "hardware scheduler "); 4672 4673 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 4674 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4675 sysctl_l2t, "A", "hardware L2 table"); 4676 4677 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 4678 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4679 sysctl_lb_stats, "A", "loopback statistics"); 4680 4681 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 4682 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4683 sysctl_meminfo, "A", "memory regions"); 4684 4685 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 4686 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4687 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, 4688 "A", "MPS TCAM entries"); 4689 4690 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 4691 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4692 sysctl_path_mtus, "A", "path MTUs"); 4693 4694 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 4695 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4696 sysctl_pm_stats, "A", "PM statistics"); 4697 4698 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 4699 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4700 sysctl_rdma_stats, "A", "RDMA statistics"); 4701 4702 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 4703 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4704 sysctl_tcp_stats, "A", "TCP statistics"); 4705 4706 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 4707 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4708 sysctl_tids, "A", "TID information"); 4709 4710 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 4711 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4712 sysctl_tp_err_stats, "A", "TP error statistics"); 4713 4714 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", 4715 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", 4716 "TP logic analyzer event capture mask"); 4717 4718 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 4719 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4720 sysctl_tp_la, "A", "TP logic analyzer"); 4721 4722 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 4723 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4724 sysctl_tx_rate, "A", "Tx rate"); 4725 4726 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 4727 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4728 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 4729 4730 if (is_t5(sc)) { 4731 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 4732 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4733 sysctl_wcwr_stats, "A", "write combined work requests"); 4734 } 4735#endif 4736 4737#ifdef TCP_OFFLOAD 4738 if (is_offload(sc)) { 4739 /* 4740 * dev.t4nex.X.toe. 
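		 *
		 * Editorial illustration (unit 0 assumed): the per-adapter TOE
		 * tunables live under this node, e.g.
		 *
		 *	sysctl dev.t4nex.0.toe.rx_coalesce
		 *	sysctl dev.t4nex.0.toe.ddp=1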
4741 */ 4742 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 4743 NULL, "TOE parameters"); 4744 children = SYSCTL_CHILDREN(oid); 4745 4746 sc->tt.sndbuf = 256 * 1024; 4747 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 4748 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 4749 4750 sc->tt.ddp = 0; 4751 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 4752 &sc->tt.ddp, 0, "DDP allowed"); 4753 4754 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5)); 4755 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW, 4756 &sc->tt.indsz, 0, "DDP max indicate size allowed"); 4757 4758 sc->tt.ddp_thres = 4759 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)); 4760 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW, 4761 &sc->tt.ddp_thres, 0, "DDP threshold"); 4762 4763 sc->tt.rx_coalesce = 1; 4764 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 4765 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 4766 4767 sc->tt.tx_align = 1; 4768 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 4769 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 4770 4771 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", 4772 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A", 4773 "TP timer tick (us)"); 4774 4775 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", 4776 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A", 4777 "TCP timestamp tick (us)"); 4778 4779 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", 4780 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A", 4781 "DACK tick (us)"); 4782 4783 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", 4784 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer, 4785 "IU", "DACK timer (us)"); 4786 4787 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", 4788 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN, 4789 sysctl_tp_timer, "LU", "Retransmit min (us)"); 4790 4791 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", 4792 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX, 4793 sysctl_tp_timer, "LU", "Retransmit max (us)"); 4794 4795 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", 4796 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN, 4797 sysctl_tp_timer, "LU", "Persist timer min (us)"); 4798 4799 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", 4800 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX, 4801 sysctl_tp_timer, "LU", "Persist timer max (us)"); 4802 4803 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", 4804 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE, 4805 sysctl_tp_timer, "LU", "Keepidle idle timer (us)"); 4806 4807 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_intvl", 4808 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL, 4809 sysctl_tp_timer, "LU", "Keepidle interval (us)"); 4810 4811 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", 4812 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT, 4813 sysctl_tp_timer, "LU", "Initial SRTT (us)"); 4814 4815 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", 4816 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER, 4817 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); 4818 } 4819#endif 4820} 4821 4822void 4823vi_sysctls(struct vi_info *vi) 4824{ 4825 struct sysctl_ctx_list *ctx; 4826 struct sysctl_oid *oid; 4827 struct sysctl_oid_list *children; 4828 4829 ctx = device_get_sysctl_ctx(vi->dev); 4830 4831 /* 4832 * dev.v?(cxgbe|cxl).X. 
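	 *
	 * Editorial note: the main VI of each port attaches as dev.cxgbe.X
	 * (dev.cxl.X on T5 adapters) and any extra VIs as dev.vcxgbe.X /
	 * dev.vcxl.X, which is what the "v?" above stands for.  Example
	 * (unit 0 assumed):
	 *
	 *	sysctl dev.cxgbe.0.qsize_txq
	 *	sysctl dev.cxgbe.0.holdoff_tmr_idx=1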
4833 */ 4834 oid = device_get_sysctl_tree(vi->dev); 4835 children = SYSCTL_CHILDREN(oid); 4836 4837 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, 4838 vi->viid, "VI identifer"); 4839 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 4840 &vi->nrxq, 0, "# of rx queues"); 4841 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 4842 &vi->ntxq, 0, "# of tx queues"); 4843 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 4844 &vi->first_rxq, 0, "index of first rx queue"); 4845 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 4846 &vi->first_txq, 0, "index of first tx queue"); 4847 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL, 4848 vi->rss_size, "size of RSS indirection table"); 4849 4850 if (IS_MAIN_VI(vi)) { 4851 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", 4852 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", 4853 "Reserve queue 0 for non-flowid packets"); 4854 } 4855 4856#ifdef TCP_OFFLOAD 4857 if (vi->nofldrxq != 0) { 4858 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 4859 &vi->nofldrxq, 0, 4860 "# of rx queues for offloaded TCP connections"); 4861 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 4862 &vi->nofldtxq, 0, 4863 "# of tx queues for offloaded TCP connections"); 4864 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 4865 CTLFLAG_RD, &vi->first_ofld_rxq, 0, 4866 "index of first TOE rx queue"); 4867 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 4868 CTLFLAG_RD, &vi->first_ofld_txq, 0, 4869 "index of first TOE tx queue"); 4870 } 4871#endif 4872#ifdef DEV_NETMAP 4873 if (vi->nnmrxq != 0) { 4874 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 4875 &vi->nnmrxq, 0, "# of netmap rx queues"); 4876 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 4877 &vi->nnmtxq, 0, "# of netmap tx queues"); 4878 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 4879 CTLFLAG_RD, &vi->first_nm_rxq, 0, 4880 "index of first netmap rx queue"); 4881 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 4882 CTLFLAG_RD, &vi->first_nm_txq, 0, 4883 "index of first netmap tx queue"); 4884 } 4885#endif 4886 4887 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 4888 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 4889 "holdoff timer index"); 4890 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 4891 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 4892 "holdoff packet counter index"); 4893 4894 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 4895 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 4896 "rx queue size"); 4897 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 4898 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 4899 "tx queue size"); 4900} 4901 4902static void 4903cxgbe_sysctls(struct port_info *pi) 4904{ 4905 struct sysctl_ctx_list *ctx; 4906 struct sysctl_oid *oid; 4907 struct sysctl_oid_list *children, *children2; 4908 struct adapter *sc = pi->adapter; 4909 int i; 4910 char name[16]; 4911 4912 ctx = device_get_sysctl_ctx(pi->dev); 4913 4914 /* 4915 * dev.cxgbe.X. 
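	 *
	 * Editorial illustration (unit 0 assumed): port-level knobs, e.g.
	 *
	 *	sysctl dev.cxgbe.0.pause_settings	(read the current setting)
	 *	sysctl dev.cxgbe.0.pause_settings=3	(request rx and tx pause)
	 *
	 * with the written value interpreted by sysctl_pause_settings()
	 * further down (bit 0 = rx pause, bit 1 = tx pause).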
4916 */ 4917 oid = device_get_sysctl_tree(pi->dev); 4918 children = SYSCTL_CHILDREN(oid); 4919 4920 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 4921 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 4922 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 4923 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 4924 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 4925 "PHY temperature (in Celsius)"); 4926 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 4927 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 4928 "PHY firmware version"); 4929 } 4930 4931 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 4932 CTLTYPE_STRING | CTLFLAG_RW, pi, PAUSE_TX, sysctl_pause_settings, 4933 "A", "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 4934 4935 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 4936 port_top_speed(pi), "max speed (in Gbps)"); 4937 4938 if (sc->flags & IS_VF) 4939 return; 4940 4941 /* 4942 * dev.(cxgbe|cxl).X.tc. 4943 */ 4944 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL, 4945 "Tx scheduler traffic classes"); 4946 for (i = 0; i < sc->chip_params->nsched_cls; i++) { 4947 struct tx_sched_class *tc = &pi->tc[i]; 4948 4949 snprintf(name, sizeof(name), "%d", i); 4950 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, 4951 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, 4952 "traffic class")); 4953 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD, 4954 &tc->flags, 0, "flags"); 4955 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", 4956 CTLFLAG_RD, &tc->refcount, 0, "references to this class"); 4957#ifdef SBUF_DRAIN 4958 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", 4959 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i, 4960 sysctl_tc_params, "A", "traffic class parameters"); 4961#endif 4962 } 4963 4964 /* 4965 * dev.cxgbe.X.stats. 
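	 *
	 * Editorial note: most of the counters below are read straight from
	 * the MPS port registers via sysctl_handle_t4_reg64() and are always
	 * current.  The rx_ovflow and rx_trunc counters at the end come from
	 * the cached port_stats instead and, as noted there, can be up to a
	 * second stale.  Example (unit 0 assumed):
	 *
	 *	sysctl dev.cxgbe.0.stats.tx_frames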
4966 */ 4967 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 4968 NULL, "port statistics"); 4969 children = SYSCTL_CHILDREN(oid); 4970 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 4971 &pi->tx_parse_error, 0, 4972 "# of tx packets with invalid length or # of segments"); 4973 4974#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 4975 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 4976 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 4977 sysctl_handle_t4_reg64, "QU", desc) 4978 4979 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 4980 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 4981 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 4982 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 4983 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 4984 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 4985 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 4986 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 4987 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 4988 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 4989 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 4990 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 4991 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 4992 "# of tx frames in this range", 4993 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 4994 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 4995 "# of tx frames in this range", 4996 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 4997 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 4998 "# of tx frames in this range", 4999 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 5000 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 5001 "# of tx frames in this range", 5002 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 5003 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 5004 "# of tx frames in this range", 5005 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 5006 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 5007 "# of tx frames in this range", 5008 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 5009 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 5010 "# of tx frames in this range", 5011 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 5012 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 5013 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 5014 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 5015 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 5016 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 5017 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 5018 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 5019 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 5020 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 5021 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 5022 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 5023 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 5024 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 5025 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 5026 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 5027 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 5028 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames 
transmitted", 5029 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 5030 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 5031 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 5032 5033 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 5034 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 5035 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 5036 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 5037 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 5038 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 5039 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 5040 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 5041 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 5042 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 5043 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 5044 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 5045 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5046 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5047 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5048 "# of frames received with bad FCS", 5049 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5050 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5051 "# of frames received with length error", 5052 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5053 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5054 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5055 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5056 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5057 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5058 "# of rx frames in this range", 5059 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5060 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5061 "# of rx frames in this range", 5062 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5063 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5064 "# of rx frames in this range", 5065 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5066 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5067 "# of rx frames in this range", 5068 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5069 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5070 "# of rx frames in this range", 5071 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5072 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5073 "# of rx frames in this range", 5074 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5075 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5076 "# of rx frames in this range", 5077 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5078 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5079 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5080 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5081 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5082 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5083 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5084 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5085 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5086 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5087 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5088 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5089 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 5090 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 5091 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 5092 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 5093 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 5094 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 5095 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 5096 5097#undef SYSCTL_ADD_T4_REG64 5098 5099#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 5100 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 5101 &pi->stats.name, desc) 5102 5103 /* We get these from port_stats and they may be stale by upto 1s */ 5104 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 5105 "# drops due to buffer-group 0 overflows"); 5106 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 5107 "# drops due to buffer-group 1 overflows"); 5108 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 5109 "# drops due to buffer-group 2 overflows"); 5110 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 5111 "# drops due to buffer-group 3 overflows"); 5112 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 5113 "# of buffer-group 0 truncated packets"); 5114 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 5115 "# of buffer-group 1 truncated packets"); 5116 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 5117 "# of buffer-group 2 truncated packets"); 5118 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 5119 "# of buffer-group 3 truncated packets"); 5120 5121#undef SYSCTL_ADD_T4_PORTSTAT 5122} 5123 5124static int 5125sysctl_int_array(SYSCTL_HANDLER_ARGS) 5126{ 5127 int rc, *i, space = 0; 5128 struct sbuf sb; 5129 5130 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 5131 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { 5132 if (space) 5133 sbuf_printf(&sb, " "); 5134 sbuf_printf(&sb, "%d", *i); 5135 space = 1; 5136 } 5137 sbuf_finish(&sb); 5138 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 5139 sbuf_delete(&sb); 5140 return (rc); 5141} 5142 5143static int 5144sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5145{ 5146 int rc; 5147 struct sbuf *sb; 5148 5149 rc = sysctl_wire_old_buffer(req, 0); 5150 if (rc != 0) 5151 return(rc); 5152 5153 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5154 if (sb == NULL) 5155 return (ENOMEM); 5156 5157 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5158 rc = sbuf_finish(sb); 5159 sbuf_delete(sb); 5160 5161 return (rc); 5162} 5163 5164static int 5165sysctl_btphy(SYSCTL_HANDLER_ARGS) 5166{ 5167 struct port_info *pi = arg1; 5168 int op = arg2; 5169 struct adapter *sc = pi->adapter; 5170 u_int v; 5171 int rc; 5172 5173 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); 5174 if (rc) 5175 return (rc); 5176 /* XXX: magic numbers */ 5177 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 
0x20 : 0xc820, 5178 &v); 5179 end_synchronized_op(sc, 0); 5180 if (rc) 5181 return (rc); 5182 if (op == 0) 5183 v /= 256; 5184 5185 rc = sysctl_handle_int(oidp, &v, 0, req); 5186 return (rc); 5187} 5188 5189static int 5190sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5191{ 5192 struct vi_info *vi = arg1; 5193 int rc, val; 5194 5195 val = vi->rsrv_noflowq; 5196 rc = sysctl_handle_int(oidp, &val, 0, req); 5197 if (rc != 0 || req->newptr == NULL) 5198 return (rc); 5199 5200 if ((val >= 1) && (vi->ntxq > 1)) 5201 vi->rsrv_noflowq = 1; 5202 else 5203 vi->rsrv_noflowq = 0; 5204 5205 return (rc); 5206} 5207 5208static int 5209sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5210{ 5211 struct vi_info *vi = arg1; 5212 struct adapter *sc = vi->pi->adapter; 5213 int idx, rc, i; 5214 struct sge_rxq *rxq; 5215#ifdef TCP_OFFLOAD 5216 struct sge_ofld_rxq *ofld_rxq; 5217#endif 5218 uint8_t v; 5219 5220 idx = vi->tmr_idx; 5221 5222 rc = sysctl_handle_int(oidp, &idx, 0, req); 5223 if (rc != 0 || req->newptr == NULL) 5224 return (rc); 5225 5226 if (idx < 0 || idx >= SGE_NTIMERS) 5227 return (EINVAL); 5228 5229 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5230 "t4tmr"); 5231 if (rc) 5232 return (rc); 5233 5234 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 5235 for_each_rxq(vi, i, rxq) { 5236#ifdef atomic_store_rel_8 5237 atomic_store_rel_8(&rxq->iq.intr_params, v); 5238#else 5239 rxq->iq.intr_params = v; 5240#endif 5241 } 5242#ifdef TCP_OFFLOAD 5243 for_each_ofld_rxq(vi, i, ofld_rxq) { 5244#ifdef atomic_store_rel_8 5245 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5246#else 5247 ofld_rxq->iq.intr_params = v; 5248#endif 5249 } 5250#endif 5251 vi->tmr_idx = idx; 5252 5253 end_synchronized_op(sc, LOCK_HELD); 5254 return (0); 5255} 5256 5257static int 5258sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5259{ 5260 struct vi_info *vi = arg1; 5261 struct adapter *sc = vi->pi->adapter; 5262 int idx, rc; 5263 5264 idx = vi->pktc_idx; 5265 5266 rc = sysctl_handle_int(oidp, &idx, 0, req); 5267 if (rc != 0 || req->newptr == NULL) 5268 return (rc); 5269 5270 if (idx < -1 || idx >= SGE_NCOUNTERS) 5271 return (EINVAL); 5272 5273 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5274 "t4pktc"); 5275 if (rc) 5276 return (rc); 5277 5278 if (vi->flags & VI_INIT_DONE) 5279 rc = EBUSY; /* cannot be changed once the queues are created */ 5280 else 5281 vi->pktc_idx = idx; 5282 5283 end_synchronized_op(sc, LOCK_HELD); 5284 return (rc); 5285} 5286 5287static int 5288sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5289{ 5290 struct vi_info *vi = arg1; 5291 struct adapter *sc = vi->pi->adapter; 5292 int qsize, rc; 5293 5294 qsize = vi->qsize_rxq; 5295 5296 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5297 if (rc != 0 || req->newptr == NULL) 5298 return (rc); 5299 5300 if (qsize < 128 || (qsize & 7)) 5301 return (EINVAL); 5302 5303 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5304 "t4rxqs"); 5305 if (rc) 5306 return (rc); 5307 5308 if (vi->flags & VI_INIT_DONE) 5309 rc = EBUSY; /* cannot be changed once the queues are created */ 5310 else 5311 vi->qsize_rxq = qsize; 5312 5313 end_synchronized_op(sc, LOCK_HELD); 5314 return (rc); 5315} 5316 5317static int 5318sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5319{ 5320 struct vi_info *vi = arg1; 5321 struct adapter *sc = vi->pi->adapter; 5322 int qsize, rc; 5323 5324 qsize = vi->qsize_txq; 5325 5326 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5327 if (rc != 0 || req->newptr == NULL) 5328 return (rc); 5329 5330 if (qsize < 128 || qsize > 
65536) 5331 return (EINVAL); 5332 5333 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5334 "t4txqs"); 5335 if (rc) 5336 return (rc); 5337 5338 if (vi->flags & VI_INIT_DONE) 5339 rc = EBUSY; /* cannot be changed once the queues are created */ 5340 else 5341 vi->qsize_txq = qsize; 5342 5343 end_synchronized_op(sc, LOCK_HELD); 5344 return (rc); 5345} 5346 5347static int 5348sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5349{ 5350 struct port_info *pi = arg1; 5351 struct adapter *sc = pi->adapter; 5352 struct link_config *lc = &pi->link_cfg; 5353 int rc; 5354 5355 if (req->newptr == NULL) { 5356 struct sbuf *sb; 5357 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5358 5359 rc = sysctl_wire_old_buffer(req, 0); 5360 if (rc != 0) 5361 return(rc); 5362 5363 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5364 if (sb == NULL) 5365 return (ENOMEM); 5366 5367 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5368 rc = sbuf_finish(sb); 5369 sbuf_delete(sb); 5370 } else { 5371 char s[2]; 5372 int n; 5373 5374 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5375 s[1] = 0; 5376 5377 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5378 if (rc != 0) 5379 return(rc); 5380 5381 if (s[1] != 0) 5382 return (EINVAL); 5383 if (s[0] < '0' || s[0] > '9') 5384 return (EINVAL); /* not a number */ 5385 n = s[0] - '0'; 5386 if (n & ~(PAUSE_TX | PAUSE_RX)) 5387 return (EINVAL); /* some other bit is set too */ 5388 5389 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5390 "t4PAUSE"); 5391 if (rc) 5392 return (rc); 5393 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5394 int link_ok = lc->link_ok; 5395 5396 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5397 lc->requested_fc |= n; 5398 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5399 lc->link_ok = link_ok; /* restore */ 5400 } 5401 end_synchronized_op(sc, 0); 5402 } 5403 5404 return (rc); 5405} 5406 5407static int 5408sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 5409{ 5410 struct adapter *sc = arg1; 5411 int reg = arg2; 5412 uint64_t val; 5413 5414 val = t4_read_reg64(sc, reg); 5415 5416 return (sysctl_handle_64(oidp, &val, 0, req)); 5417} 5418 5419static int 5420sysctl_temperature(SYSCTL_HANDLER_ARGS) 5421{ 5422 struct adapter *sc = arg1; 5423 int rc, t; 5424 uint32_t param, val; 5425 5426 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 5427 if (rc) 5428 return (rc); 5429 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5430 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 5431 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 5432 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 5433 end_synchronized_op(sc, 0); 5434 if (rc) 5435 return (rc); 5436 5437 /* unknown is returned as 0 but we display -1 in that case */ 5438 t = val == 0 ? 
-1 : val; 5439 5440 rc = sysctl_handle_int(oidp, &t, 0, req); 5441 return (rc); 5442} 5443 5444#ifdef SBUF_DRAIN 5445static int 5446sysctl_cctrl(SYSCTL_HANDLER_ARGS) 5447{ 5448 struct adapter *sc = arg1; 5449 struct sbuf *sb; 5450 int rc, i; 5451 uint16_t incr[NMTUS][NCCTRL_WIN]; 5452 static const char *dec_fac[] = { 5453 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 5454 "0.9375" 5455 }; 5456 5457 rc = sysctl_wire_old_buffer(req, 0); 5458 if (rc != 0) 5459 return (rc); 5460 5461 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5462 if (sb == NULL) 5463 return (ENOMEM); 5464 5465 t4_read_cong_tbl(sc, incr); 5466 5467 for (i = 0; i < NCCTRL_WIN; ++i) { 5468 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 5469 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 5470 incr[5][i], incr[6][i], incr[7][i]); 5471 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 5472 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 5473 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 5474 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 5475 } 5476 5477 rc = sbuf_finish(sb); 5478 sbuf_delete(sb); 5479 5480 return (rc); 5481} 5482 5483static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 5484 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 5485 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 5486 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 5487}; 5488 5489static int 5490sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 5491{ 5492 struct adapter *sc = arg1; 5493 struct sbuf *sb; 5494 int rc, i, n, qid = arg2; 5495 uint32_t *buf, *p; 5496 char *qtype; 5497 u_int cim_num_obq = sc->chip_params->cim_num_obq; 5498 5499 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 5500 ("%s: bad qid %d\n", __func__, qid)); 5501 5502 if (qid < CIM_NUM_IBQ) { 5503 /* inbound queue */ 5504 qtype = "IBQ"; 5505 n = 4 * CIM_IBQ_SIZE; 5506 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5507 rc = t4_read_cim_ibq(sc, qid, buf, n); 5508 } else { 5509 /* outbound queue */ 5510 qtype = "OBQ"; 5511 qid -= CIM_NUM_IBQ; 5512 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 5513 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5514 rc = t4_read_cim_obq(sc, qid, buf, n); 5515 } 5516 5517 if (rc < 0) { 5518 rc = -rc; 5519 goto done; 5520 } 5521 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 5522 5523 rc = sysctl_wire_old_buffer(req, 0); 5524 if (rc != 0) 5525 goto done; 5526 5527 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5528 if (sb == NULL) { 5529 rc = ENOMEM; 5530 goto done; 5531 } 5532 5533 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); 5534 for (i = 0, p = buf; i < n; i += 16, p += 4) 5535 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 5536 p[2], p[3]); 5537 5538 rc = sbuf_finish(sb); 5539 sbuf_delete(sb); 5540done: 5541 free(buf, M_CXGBE); 5542 return (rc); 5543} 5544 5545static int 5546sysctl_cim_la(SYSCTL_HANDLER_ARGS) 5547{ 5548 struct adapter *sc = arg1; 5549 u_int cfg; 5550 struct sbuf *sb; 5551 uint32_t *buf, *p; 5552 int rc; 5553 5554 MPASS(chip_id(sc) <= CHELSIO_T5); 5555 5556 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5557 if (rc != 0) 5558 return (rc); 5559 5560 rc = sysctl_wire_old_buffer(req, 0); 5561 if (rc != 0) 5562 return (rc); 5563 5564 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5565 if (sb == NULL) 5566 return (ENOMEM); 5567 5568 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5569 M_ZERO | M_WAITOK); 5570 5571 rc = -t4_cim_read_la(sc, 
buf, NULL); 5572 if (rc != 0) 5573 goto done; 5574 5575 sbuf_printf(sb, "Status Data PC%s", 5576 cfg & F_UPDBGLACAPTPCONLY ? "" : 5577 " LS0Stat LS0Addr LS0Data"); 5578 5579 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { 5580 if (cfg & F_UPDBGLACAPTPCONLY) { 5581 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 5582 p[6], p[7]); 5583 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 5584 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 5585 p[4] & 0xff, p[5] >> 8); 5586 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 5587 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5588 p[1] & 0xf, p[2] >> 4); 5589 } else { 5590 sbuf_printf(sb, 5591 "\n %02x %x%07x %x%07x %08x %08x " 5592 "%08x%08x%08x%08x", 5593 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5594 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 5595 p[6], p[7]); 5596 } 5597 } 5598 5599 rc = sbuf_finish(sb); 5600 sbuf_delete(sb); 5601done: 5602 free(buf, M_CXGBE); 5603 return (rc); 5604} 5605 5606static int 5607sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS) 5608{ 5609 struct adapter *sc = arg1; 5610 u_int cfg; 5611 struct sbuf *sb; 5612 uint32_t *buf, *p; 5613 int rc; 5614 5615 MPASS(chip_id(sc) > CHELSIO_T5); 5616 5617 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5618 if (rc != 0) 5619 return (rc); 5620 5621 rc = sysctl_wire_old_buffer(req, 0); 5622 if (rc != 0) 5623 return (rc); 5624 5625 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5626 if (sb == NULL) 5627 return (ENOMEM); 5628 5629 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5630 M_ZERO | M_WAITOK); 5631 5632 rc = -t4_cim_read_la(sc, buf, NULL); 5633 if (rc != 0) 5634 goto done; 5635 5636 sbuf_printf(sb, "Status Inst Data PC%s", 5637 cfg & F_UPDBGLACAPTPCONLY ? "" : 5638 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); 5639 5640 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { 5641 if (cfg & F_UPDBGLACAPTPCONLY) { 5642 sbuf_printf(sb, "\n %02x %08x %08x %08x", 5643 p[3] & 0xff, p[2], p[1], p[0]); 5644 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", 5645 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, 5646 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); 5647 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", 5648 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, 5649 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, 5650 p[6] >> 16); 5651 } else { 5652 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " 5653 "%08x %08x %08x %08x %08x %08x", 5654 (p[9] >> 16) & 0xff, 5655 p[9] & 0xffff, p[8] >> 16, 5656 p[8] & 0xffff, p[7] >> 16, 5657 p[7] & 0xffff, p[6] >> 16, 5658 p[2], p[1], p[0], p[5], p[4], p[3]); 5659 } 5660 } 5661 5662 rc = sbuf_finish(sb); 5663 sbuf_delete(sb); 5664done: 5665 free(buf, M_CXGBE); 5666 return (rc); 5667} 5668 5669static int 5670sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 5671{ 5672 struct adapter *sc = arg1; 5673 u_int i; 5674 struct sbuf *sb; 5675 uint32_t *buf, *p; 5676 int rc; 5677 5678 rc = sysctl_wire_old_buffer(req, 0); 5679 if (rc != 0) 5680 return (rc); 5681 5682 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5683 if (sb == NULL) 5684 return (ENOMEM); 5685 5686 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 5687 M_ZERO | M_WAITOK); 5688 5689 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 5690 p = buf; 5691 5692 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5693 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 5694 p[1], p[0]); 5695 } 5696 5697 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 5698 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 5699 sbuf_printf(sb, "\n%3u %2u %x %u 
%08x%08x %u %u", 5700 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 5701 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 5702 (p[1] >> 2) | ((p[2] & 3) << 30), 5703 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 5704 p[0] & 1); 5705 } 5706 5707 rc = sbuf_finish(sb); 5708 sbuf_delete(sb); 5709 free(buf, M_CXGBE); 5710 return (rc); 5711} 5712 5713static int 5714sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 5715{ 5716 struct adapter *sc = arg1; 5717 u_int i; 5718 struct sbuf *sb; 5719 uint32_t *buf, *p; 5720 int rc; 5721 5722 rc = sysctl_wire_old_buffer(req, 0); 5723 if (rc != 0) 5724 return (rc); 5725 5726 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5727 if (sb == NULL) 5728 return (ENOMEM); 5729 5730 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 5731 M_ZERO | M_WAITOK); 5732 5733 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 5734 p = buf; 5735 5736 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 5737 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5738 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 5739 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 5740 p[4], p[3], p[2], p[1], p[0]); 5741 } 5742 5743 sbuf_printf(sb, "\n\nCntl ID Data"); 5744 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 5745 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 5746 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 5747 } 5748 5749 rc = sbuf_finish(sb); 5750 sbuf_delete(sb); 5751 free(buf, M_CXGBE); 5752 return (rc); 5753} 5754 5755static int 5756sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 5757{ 5758 struct adapter *sc = arg1; 5759 struct sbuf *sb; 5760 int rc, i; 5761 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5762 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 5763 uint16_t thres[CIM_NUM_IBQ]; 5764 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 5765 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 5766 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 5767 5768 cim_num_obq = sc->chip_params->cim_num_obq; 5769 if (is_t4(sc)) { 5770 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 5771 obq_rdaddr = A_UP_OBQ_0_REALADDR; 5772 } else { 5773 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 5774 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 5775 } 5776 nq = CIM_NUM_IBQ + cim_num_obq; 5777 5778 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 5779 if (rc == 0) 5780 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 5781 if (rc != 0) 5782 return (rc); 5783 5784 t4_read_cimq_cfg(sc, base, size, thres); 5785 5786 rc = sysctl_wire_old_buffer(req, 0); 5787 if (rc != 0) 5788 return (rc); 5789 5790 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5791 if (sb == NULL) 5792 return (ENOMEM); 5793 5794 sbuf_printf(sb, 5795 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 5796 5797 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 5798 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 5799 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 5800 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5801 G_QUEREMFLITS(p[2]) * 16); 5802 for ( ; i < nq; i++, p += 4, wr += 2) 5803 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 5804 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 5805 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 5806 G_QUEREMFLITS(p[2]) * 16); 5807 5808 rc = sbuf_finish(sb); 5809 sbuf_delete(sb); 5810 5811 return (rc); 5812} 5813 5814static int 5815sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 5816{ 5817 struct adapter *sc = arg1; 5818 struct sbuf *sb; 5819 int rc; 5820 struct tp_cpl_stats stats; 5821 5822 rc = sysctl_wire_old_buffer(req, 0); 5823 if 
(rc != 0) 5824 return (rc); 5825 5826 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5827 if (sb == NULL) 5828 return (ENOMEM); 5829 5830 mtx_lock(&sc->reg_lock); 5831 t4_tp_get_cpl_stats(sc, &stats); 5832 mtx_unlock(&sc->reg_lock); 5833 5834 if (sc->chip_params->nchan > 2) { 5835 sbuf_printf(sb, " channel 0 channel 1" 5836 " channel 2 channel 3"); 5837 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", 5838 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 5839 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", 5840 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 5841 } else { 5842 sbuf_printf(sb, " channel 0 channel 1"); 5843 sbuf_printf(sb, "\nCPL requests: %10u %10u", 5844 stats.req[0], stats.req[1]); 5845 sbuf_printf(sb, "\nCPL responses: %10u %10u", 5846 stats.rsp[0], stats.rsp[1]); 5847 } 5848 5849 rc = sbuf_finish(sb); 5850 sbuf_delete(sb); 5851 5852 return (rc); 5853} 5854 5855static int 5856sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 5857{ 5858 struct adapter *sc = arg1; 5859 struct sbuf *sb; 5860 int rc; 5861 struct tp_usm_stats stats; 5862 5863 rc = sysctl_wire_old_buffer(req, 0); 5864 if (rc != 0) 5865 return(rc); 5866 5867 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 5868 if (sb == NULL) 5869 return (ENOMEM); 5870 5871 t4_get_usm_stats(sc, &stats); 5872 5873 sbuf_printf(sb, "Frames: %u\n", stats.frames); 5874 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 5875 sbuf_printf(sb, "Drops: %u", stats.drops); 5876 5877 rc = sbuf_finish(sb); 5878 sbuf_delete(sb); 5879 5880 return (rc); 5881} 5882 5883static const char * const devlog_level_strings[] = { 5884 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 5885 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 5886 [FW_DEVLOG_LEVEL_ERR] = "ERR", 5887 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 5888 [FW_DEVLOG_LEVEL_INFO] = "INFO", 5889 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 5890}; 5891 5892static const char * const devlog_facility_strings[] = { 5893 [FW_DEVLOG_FACILITY_CORE] = "CORE", 5894 [FW_DEVLOG_FACILITY_CF] = "CF", 5895 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 5896 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 5897 [FW_DEVLOG_FACILITY_RES] = "RES", 5898 [FW_DEVLOG_FACILITY_HW] = "HW", 5899 [FW_DEVLOG_FACILITY_FLR] = "FLR", 5900 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 5901 [FW_DEVLOG_FACILITY_PHY] = "PHY", 5902 [FW_DEVLOG_FACILITY_MAC] = "MAC", 5903 [FW_DEVLOG_FACILITY_PORT] = "PORT", 5904 [FW_DEVLOG_FACILITY_VI] = "VI", 5905 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 5906 [FW_DEVLOG_FACILITY_ACL] = "ACL", 5907 [FW_DEVLOG_FACILITY_TM] = "TM", 5908 [FW_DEVLOG_FACILITY_QFC] = "QFC", 5909 [FW_DEVLOG_FACILITY_DCB] = "DCB", 5910 [FW_DEVLOG_FACILITY_ETH] = "ETH", 5911 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 5912 [FW_DEVLOG_FACILITY_RI] = "RI", 5913 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 5914 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 5915 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 5916 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", 5917 [FW_DEVLOG_FACILITY_CHNET] = "CHNET", 5918}; 5919 5920static int 5921sysctl_devlog(SYSCTL_HANDLER_ARGS) 5922{ 5923 struct adapter *sc = arg1; 5924 struct devlog_params *dparams = &sc->params.devlog; 5925 struct fw_devlog_e *buf, *e; 5926 int i, j, rc, nentries, first = 0; 5927 struct sbuf *sb; 5928 uint64_t ftstamp = UINT64_MAX; 5929 5930 if (dparams->addr == 0) 5931 return (ENXIO); 5932 5933 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 5934 if (buf == NULL) 5935 return (ENOMEM); 5936 5937 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); 5938 if (rc != 0) 5939 goto done; 5940 5941 nentries = dparams->size / sizeof(struct 
fw_devlog_e); 5942 for (i = 0; i < nentries; i++) { 5943 e = &buf[i]; 5944 5945 if (e->timestamp == 0) 5946 break; /* end */ 5947 5948 e->timestamp = be64toh(e->timestamp); 5949 e->seqno = be32toh(e->seqno); 5950 for (j = 0; j < 8; j++) 5951 e->params[j] = be32toh(e->params[j]); 5952 5953 if (e->timestamp < ftstamp) { 5954 ftstamp = e->timestamp; 5955 first = i; 5956 } 5957 } 5958 5959 if (buf[first].timestamp == 0) 5960 goto done; /* nothing in the log */ 5961 5962 rc = sysctl_wire_old_buffer(req, 0); 5963 if (rc != 0) 5964 goto done; 5965 5966 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5967 if (sb == NULL) { 5968 rc = ENOMEM; 5969 goto done; 5970 } 5971 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 5972 "Seq#", "Tstamp", "Level", "Facility", "Message"); 5973 5974 i = first; 5975 do { 5976 e = &buf[i]; 5977 if (e->timestamp == 0) 5978 break; /* end */ 5979 5980 sbuf_printf(sb, "%10d %15ju %8s %8s ", 5981 e->seqno, e->timestamp, 5982 (e->level < nitems(devlog_level_strings) ? 5983 devlog_level_strings[e->level] : "UNKNOWN"), 5984 (e->facility < nitems(devlog_facility_strings) ? 5985 devlog_facility_strings[e->facility] : "UNKNOWN")); 5986 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 5987 e->params[2], e->params[3], e->params[4], 5988 e->params[5], e->params[6], e->params[7]); 5989 5990 if (++i == nentries) 5991 i = 0; 5992 } while (i != first); 5993 5994 rc = sbuf_finish(sb); 5995 sbuf_delete(sb); 5996done: 5997 free(buf, M_CXGBE); 5998 return (rc); 5999} 6000 6001static int 6002sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 6003{ 6004 struct adapter *sc = arg1; 6005 struct sbuf *sb; 6006 int rc; 6007 struct tp_fcoe_stats stats[MAX_NCHAN]; 6008 int i, nchan = sc->chip_params->nchan; 6009 6010 rc = sysctl_wire_old_buffer(req, 0); 6011 if (rc != 0) 6012 return (rc); 6013 6014 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6015 if (sb == NULL) 6016 return (ENOMEM); 6017 6018 for (i = 0; i < nchan; i++) 6019 t4_get_fcoe_stats(sc, i, &stats[i]); 6020 6021 if (nchan > 2) { 6022 sbuf_printf(sb, " channel 0 channel 1" 6023 " channel 2 channel 3"); 6024 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", 6025 stats[0].octets_ddp, stats[1].octets_ddp, 6026 stats[2].octets_ddp, stats[3].octets_ddp); 6027 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", 6028 stats[0].frames_ddp, stats[1].frames_ddp, 6029 stats[2].frames_ddp, stats[3].frames_ddp); 6030 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", 6031 stats[0].frames_drop, stats[1].frames_drop, 6032 stats[2].frames_drop, stats[3].frames_drop); 6033 } else { 6034 sbuf_printf(sb, " channel 0 channel 1"); 6035 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", 6036 stats[0].octets_ddp, stats[1].octets_ddp); 6037 sbuf_printf(sb, "\nframesDDP: %16u %16u", 6038 stats[0].frames_ddp, stats[1].frames_ddp); 6039 sbuf_printf(sb, "\nframesDrop: %16u %16u", 6040 stats[0].frames_drop, stats[1].frames_drop); 6041 } 6042 6043 rc = sbuf_finish(sb); 6044 sbuf_delete(sb); 6045 6046 return (rc); 6047} 6048 6049static int 6050sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 6051{ 6052 struct adapter *sc = arg1; 6053 struct sbuf *sb; 6054 int rc, i; 6055 unsigned int map, kbps, ipg, mode; 6056 unsigned int pace_tab[NTX_SCHED]; 6057 6058 rc = sysctl_wire_old_buffer(req, 0); 6059 if (rc != 0) 6060 return (rc); 6061 6062 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6063 if (sb == NULL) 6064 return (ENOMEM); 6065 6066 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 6067 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 6068 t4_read_pace_tbl(sc, pace_tab); 6069 6070 
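/*
 * One row per hardware Tx scheduler: its mode (class or flow pacing), the
 * channel it is mapped to, and the rate, class IPG, and pace-table values
 * read above; a value of zero is reported as "disabled".
 */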
sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 6071 "Class IPG (0.1 ns) Flow IPG (us)"); 6072 6073 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 6074 t4_get_tx_sched(sc, i, &kbps, &ipg); 6075 sbuf_printf(sb, "\n %u %-5s %u ", i, 6076 (mode & (1 << i)) ? "flow" : "class", map & 3); 6077 if (kbps) 6078 sbuf_printf(sb, "%9u ", kbps); 6079 else 6080 sbuf_printf(sb, " disabled "); 6081 6082 if (ipg) 6083 sbuf_printf(sb, "%13u ", ipg); 6084 else 6085 sbuf_printf(sb, " disabled "); 6086 6087 if (pace_tab[i]) 6088 sbuf_printf(sb, "%10u", pace_tab[i]); 6089 else 6090 sbuf_printf(sb, " disabled"); 6091 } 6092 6093 rc = sbuf_finish(sb); 6094 sbuf_delete(sb); 6095 6096 return (rc); 6097} 6098 6099static int 6100sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 6101{ 6102 struct adapter *sc = arg1; 6103 struct sbuf *sb; 6104 int rc, i, j; 6105 uint64_t *p0, *p1; 6106 struct lb_port_stats s[2]; 6107 static const char *stat_name[] = { 6108 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 6109 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 6110 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 6111 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 6112 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 6113 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 6114 "BG2FramesTrunc:", "BG3FramesTrunc:" 6115 }; 6116 6117 rc = sysctl_wire_old_buffer(req, 0); 6118 if (rc != 0) 6119 return (rc); 6120 6121 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6122 if (sb == NULL) 6123 return (ENOMEM); 6124 6125 memset(s, 0, sizeof(s)); 6126 6127 for (i = 0; i < sc->chip_params->nchan; i += 2) { 6128 t4_get_lb_stats(sc, i, &s[0]); 6129 t4_get_lb_stats(sc, i + 1, &s[1]); 6130 6131 p0 = &s[0].octets; 6132 p1 = &s[1].octets; 6133 sbuf_printf(sb, "%s Loopback %u" 6134 " Loopback %u", i == 0 ? 
"" : "\n", i, i + 1); 6135 6136 for (j = 0; j < nitems(stat_name); j++) 6137 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6138 *p0++, *p1++); 6139 } 6140 6141 rc = sbuf_finish(sb); 6142 sbuf_delete(sb); 6143 6144 return (rc); 6145} 6146 6147static int 6148sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6149{ 6150 int rc = 0; 6151 struct port_info *pi = arg1; 6152 struct sbuf *sb; 6153 6154 rc = sysctl_wire_old_buffer(req, 0); 6155 if (rc != 0) 6156 return(rc); 6157 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6158 if (sb == NULL) 6159 return (ENOMEM); 6160 6161 if (pi->linkdnrc < 0) 6162 sbuf_printf(sb, "n/a"); 6163 else 6164 sbuf_printf(sb, "%s", t4_link_down_rc_str(pi->linkdnrc)); 6165 6166 rc = sbuf_finish(sb); 6167 sbuf_delete(sb); 6168 6169 return (rc); 6170} 6171 6172struct mem_desc { 6173 unsigned int base; 6174 unsigned int limit; 6175 unsigned int idx; 6176}; 6177 6178static int 6179mem_desc_cmp(const void *a, const void *b) 6180{ 6181 return ((const struct mem_desc *)a)->base - 6182 ((const struct mem_desc *)b)->base; 6183} 6184 6185static void 6186mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6187 unsigned int to) 6188{ 6189 unsigned int size; 6190 6191 if (from == to) 6192 return; 6193 6194 size = to - from + 1; 6195 if (size == 0) 6196 return; 6197 6198 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6199 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6200} 6201 6202static int 6203sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6204{ 6205 struct adapter *sc = arg1; 6206 struct sbuf *sb; 6207 int rc, i, n; 6208 uint32_t lo, hi, used, alloc; 6209 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6210 static const char *region[] = { 6211 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6212 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6213 "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:", 6214 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6215 "RQUDP region:", "PBL region:", "TXPBL region:", 6216 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6217 "On-chip queues:" 6218 }; 6219 struct mem_desc avail[4]; 6220 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6221 struct mem_desc *md = mem; 6222 6223 rc = sysctl_wire_old_buffer(req, 0); 6224 if (rc != 0) 6225 return (rc); 6226 6227 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6228 if (sb == NULL) 6229 return (ENOMEM); 6230 6231 for (i = 0; i < nitems(mem); i++) { 6232 mem[i].limit = 0; 6233 mem[i].idx = i; 6234 } 6235 6236 /* Find and sort the populated memory ranges */ 6237 i = 0; 6238 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6239 if (lo & F_EDRAM0_ENABLE) { 6240 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6241 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6242 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6243 avail[i].idx = 0; 6244 i++; 6245 } 6246 if (lo & F_EDRAM1_ENABLE) { 6247 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 6248 avail[i].base = G_EDRAM1_BASE(hi) << 20; 6249 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 6250 avail[i].idx = 1; 6251 i++; 6252 } 6253 if (lo & F_EXT_MEM_ENABLE) { 6254 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 6255 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 6256 avail[i].limit = avail[i].base + 6257 (G_EXT_MEM_SIZE(hi) << 20); 6258 avail[i].idx = is_t5(sc) ? 
3 : 2; /* Call it MC0 for T5 */ 6259 i++; 6260 } 6261 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { 6262 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 6263 avail[i].base = G_EXT_MEM1_BASE(hi) << 20; 6264 avail[i].limit = avail[i].base + 6265 (G_EXT_MEM1_SIZE(hi) << 20); 6266 avail[i].idx = 4; 6267 i++; 6268 } 6269 if (!i) /* no memory available */ 6270 return 0; 6271 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); 6272 6273 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); 6274 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); 6275 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); 6276 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 6277 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); 6278 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); 6279 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); 6280 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); 6281 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); 6282 6283 /* the next few have explicit upper bounds */ 6284 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); 6285 md->limit = md->base - 1 + 6286 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * 6287 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); 6288 md++; 6289 6290 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); 6291 md->limit = md->base - 1 + 6292 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * 6293 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); 6294 md++; 6295 6296 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6297 if (chip_id(sc) <= CHELSIO_T5) 6298 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 6299 else 6300 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); 6301 md->limit = 0; 6302 } else { 6303 md->base = 0; 6304 md->idx = nitems(region); /* hide it */ 6305 } 6306 md++; 6307 6308#define ulp_region(reg) \ 6309 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ 6310 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) 6311 6312 ulp_region(RX_ISCSI); 6313 ulp_region(RX_TDDP); 6314 ulp_region(TX_TPT); 6315 ulp_region(RX_STAG); 6316 ulp_region(RX_RQ); 6317 ulp_region(RX_RQUDP); 6318 ulp_region(RX_PBL); 6319 ulp_region(TX_PBL); 6320#undef ulp_region 6321 6322 md->base = 0; 6323 md->idx = nitems(region); 6324 if (!is_t4(sc)) { 6325 uint32_t size = 0; 6326 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); 6327 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); 6328 6329 if (is_t5(sc)) { 6330 if (sge_ctrl & F_VFIFO_ENABLE) 6331 size = G_DBVFIFO_SIZE(fifo_size); 6332 } else 6333 size = G_T6_DBVFIFO_SIZE(fifo_size); 6334 6335 if (size) { 6336 md->base = G_BASEADDR(t4_read_reg(sc, 6337 A_SGE_DBVFIFO_BADDR)); 6338 md->limit = md->base + (size << 2) - 1; 6339 } 6340 } 6341 md++; 6342 6343 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 6344 md->limit = 0; 6345 md++; 6346 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 6347 md->limit = 0; 6348 md++; 6349 6350 md->base = sc->vres.ocq.start; 6351 if (sc->vres.ocq.size) 6352 md->limit = md->base + sc->vres.ocq.size - 1; 6353 else 6354 md->idx = nitems(region); /* hide it */ 6355 md++; 6356 6357 /* add any address-space holes, there can be up to 3 */ 6358 for (n = 0; n < i - 1; n++) 6359 if (avail[n].limit < avail[n + 1].base) 6360 (md++)->base = avail[n].limit; 6361 if (avail[n].limit) 6362 (md++)->base = avail[n].limit; 6363 6364 n = md - mem; 6365 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 6366 6367 for (lo = 0; lo < i; lo++) 6368 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 6369 avail[lo].limit - 1); 6370 6371 sbuf_printf(sb, "\n"); 6372 for (i = 
0; i < n; i++) { 6373 if (mem[i].idx >= nitems(region)) 6374 continue; /* skip holes */ 6375 if (!mem[i].limit) 6376 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 6377 mem_region_show(sb, region[mem[i].idx], mem[i].base, 6378 mem[i].limit); 6379 } 6380 6381 sbuf_printf(sb, "\n"); 6382 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 6383 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 6384 mem_region_show(sb, "uP RAM:", lo, hi); 6385 6386 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 6387 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 6388 mem_region_show(sb, "uP Extmem2:", lo, hi); 6389 6390 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 6391 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 6392 G_PMRXMAXPAGE(lo), 6393 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 6394 (lo & F_PMRXNUMCHN) ? 2 : 1); 6395 6396 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 6397 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 6398 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 6399 G_PMTXMAXPAGE(lo), 6400 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 6401 hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 6402 sbuf_printf(sb, "%u p-structs\n", 6403 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 6404 6405 for (i = 0; i < 4; i++) { 6406 if (chip_id(sc) > CHELSIO_T5) 6407 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); 6408 else 6409 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 6410 if (is_t5(sc)) { 6411 used = G_T5_USED(lo); 6412 alloc = G_T5_ALLOC(lo); 6413 } else { 6414 used = G_USED(lo); 6415 alloc = G_ALLOC(lo); 6416 } 6417 /* For T6 these are MAC buffer groups */ 6418 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 6419 i, used, alloc); 6420 } 6421 for (i = 0; i < sc->chip_params->nchan; i++) { 6422 if (chip_id(sc) > CHELSIO_T5) 6423 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); 6424 else 6425 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 6426 if (is_t5(sc)) { 6427 used = G_T5_USED(lo); 6428 alloc = G_T5_ALLOC(lo); 6429 } else { 6430 used = G_USED(lo); 6431 alloc = G_ALLOC(lo); 6432 } 6433 /* For T6 these are MAC buffer groups */ 6434 sbuf_printf(sb, 6435 "\nLoopback %d using %u pages out of %u allocated", 6436 i, used, alloc); 6437 } 6438 6439 rc = sbuf_finish(sb); 6440 sbuf_delete(sb); 6441 6442 return (rc); 6443} 6444 6445static inline void 6446tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 6447{ 6448 *mask = x | y; 6449 y = htobe64(y); 6450 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 6451} 6452 6453static int 6454sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 6455{ 6456 struct adapter *sc = arg1; 6457 struct sbuf *sb; 6458 int rc, i; 6459 6460 MPASS(chip_id(sc) <= CHELSIO_T5); 6461 6462 rc = sysctl_wire_old_buffer(req, 0); 6463 if (rc != 0) 6464 return (rc); 6465 6466 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6467 if (sb == NULL) 6468 return (ENOMEM); 6469 6470 sbuf_printf(sb, 6471 "Idx Ethernet address Mask Vld Ports PF" 6472 " VF Replication P0 P1 P2 P3 ML"); 6473 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6474 uint64_t tcamx, tcamy, mask; 6475 uint32_t cls_lo, cls_hi; 6476 uint8_t addr[ETHER_ADDR_LEN]; 6477 6478 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 6479 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 6480 if (tcamx & tcamy) 6481 continue; 6482 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6483 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6484 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6485 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 6486 " %c %#x%4u%4d", i, 
addr[0], addr[1], addr[2], 6487 addr[3], addr[4], addr[5], (uintmax_t)mask, 6488 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 6489 G_PORTMAP(cls_hi), G_PF(cls_lo), 6490 (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1); 6491 6492 if (cls_lo & F_REPLICATE) { 6493 struct fw_ldst_cmd ldst_cmd; 6494 6495 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6496 ldst_cmd.op_to_addrspace = 6497 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6498 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6499 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6500 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6501 ldst_cmd.u.mps.rplc.fid_idx = 6502 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6503 V_FW_LDST_CMD_IDX(i)); 6504 6505 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6506 "t4mps"); 6507 if (rc) 6508 break; 6509 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6510 sizeof(ldst_cmd), &ldst_cmd); 6511 end_synchronized_op(sc, 0); 6512 6513 if (rc != 0) { 6514 sbuf_printf(sb, "%36d", rc); 6515 rc = 0; 6516 } else { 6517 sbuf_printf(sb, " %08x %08x %08x %08x", 6518 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6519 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6520 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6521 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6522 } 6523 } else 6524 sbuf_printf(sb, "%36s", ""); 6525 6526 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 6527 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 6528 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 6529 } 6530 6531 if (rc) 6532 (void) sbuf_finish(sb); 6533 else 6534 rc = sbuf_finish(sb); 6535 sbuf_delete(sb); 6536 6537 return (rc); 6538} 6539 6540static int 6541sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) 6542{ 6543 struct adapter *sc = arg1; 6544 struct sbuf *sb; 6545 int rc, i; 6546 6547 MPASS(chip_id(sc) > CHELSIO_T5); 6548 6549 rc = sysctl_wire_old_buffer(req, 0); 6550 if (rc != 0) 6551 return (rc); 6552 6553 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6554 if (sb == NULL) 6555 return (ENOMEM); 6556 6557 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" 6558 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" 6559 " Replication" 6560 " P0 P1 P2 P3 ML\n"); 6561 6562 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6563 uint8_t dip_hit, vlan_vld, lookup_type, port_num; 6564 uint16_t ivlan; 6565 uint64_t tcamx, tcamy, val, mask; 6566 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; 6567 uint8_t addr[ETHER_ADDR_LEN]; 6568 6569 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); 6570 if (i < 256) 6571 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); 6572 else 6573 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); 6574 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6575 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6576 tcamy = G_DMACH(val) << 32; 6577 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6578 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6579 lookup_type = G_DATALKPTYPE(data2); 6580 port_num = G_DATAPORTNUM(data2); 6581 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6582 /* Inner header VNI */ 6583 vniy = ((data2 & F_DATAVIDH2) << 23) | 6584 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6585 dip_hit = data2 & F_DATADIPHIT; 6586 vlan_vld = 0; 6587 } else { 6588 vniy = 0; 6589 dip_hit = 0; 6590 vlan_vld = data2 & F_DATAVIDH2; 6591 ivlan = G_VIDL(val); 6592 } 6593 6594 ctl |= V_CTLXYBITSEL(1); 6595 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6596 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6597 tcamx = G_DMACH(val) << 32; 6598 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6599 data2 = t4_read_reg(sc, 
A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6600 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6601 /* Inner header VNI mask */ 6602 vnix = ((data2 & F_DATAVIDH2) << 23) | 6603 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6604 } else 6605 vnix = 0; 6606 6607 if (tcamx & tcamy) 6608 continue; 6609 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6610 6611 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6612 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6613 6614 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6615 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6616 "%012jx %06x %06x - - %3c" 6617 " 'I' %4x %3c %#x%4u%4d", i, addr[0], 6618 addr[1], addr[2], addr[3], addr[4], addr[5], 6619 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', 6620 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6621 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6622 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6623 } else { 6624 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6625 "%012jx - - ", i, addr[0], addr[1], 6626 addr[2], addr[3], addr[4], addr[5], 6627 (uintmax_t)mask); 6628 6629 if (vlan_vld) 6630 sbuf_printf(sb, "%4u Y ", ivlan); 6631 else 6632 sbuf_printf(sb, " - N "); 6633 6634 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", 6635 lookup_type ? 'I' : 'O', port_num, 6636 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6637 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6638 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6639 } 6640 6641 6642 if (cls_lo & F_T6_REPLICATE) { 6643 struct fw_ldst_cmd ldst_cmd; 6644 6645 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6646 ldst_cmd.op_to_addrspace = 6647 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6648 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6649 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6650 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6651 ldst_cmd.u.mps.rplc.fid_idx = 6652 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6653 V_FW_LDST_CMD_IDX(i)); 6654 6655 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6656 "t6mps"); 6657 if (rc) 6658 break; 6659 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6660 sizeof(ldst_cmd), &ldst_cmd); 6661 end_synchronized_op(sc, 0); 6662 6663 if (rc != 0) { 6664 sbuf_printf(sb, "%72d", rc); 6665 rc = 0; 6666 } else { 6667 sbuf_printf(sb, " %08x %08x %08x %08x" 6668 " %08x %08x %08x %08x", 6669 be32toh(ldst_cmd.u.mps.rplc.rplc255_224), 6670 be32toh(ldst_cmd.u.mps.rplc.rplc223_192), 6671 be32toh(ldst_cmd.u.mps.rplc.rplc191_160), 6672 be32toh(ldst_cmd.u.mps.rplc.rplc159_128), 6673 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6674 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6675 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6676 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6677 } 6678 } else 6679 sbuf_printf(sb, "%72s", ""); 6680 6681 sbuf_printf(sb, "%4u%3u%3u%3u %#x", 6682 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), 6683 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), 6684 (cls_lo >> S_T6_MULTILISTEN0) & 0xf); 6685 } 6686 6687 if (rc) 6688 (void) sbuf_finish(sb); 6689 else 6690 rc = sbuf_finish(sb); 6691 sbuf_delete(sb); 6692 6693 return (rc); 6694} 6695 6696static int 6697sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 6698{ 6699 struct adapter *sc = arg1; 6700 struct sbuf *sb; 6701 int rc; 6702 uint16_t mtus[NMTUS]; 6703 6704 rc = sysctl_wire_old_buffer(req, 0); 6705 if (rc != 0) 6706 return (rc); 6707 6708 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6709 if (sb == NULL) 6710 return (ENOMEM); 6711 6712 t4_read_mtu_tbl(sc, mtus, NULL); 6713 6714 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 6715 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 6716 
mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 6717 mtus[14], mtus[15]); 6718 6719 rc = sbuf_finish(sb); 6720 sbuf_delete(sb); 6721 6722 return (rc); 6723} 6724 6725static int 6726sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 6727{ 6728 struct adapter *sc = arg1; 6729 struct sbuf *sb; 6730 int rc, i; 6731 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; 6732 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; 6733 static const char *tx_stats[MAX_PM_NSTATS] = { 6734 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", 6735 "Tx FIFO wait", NULL, "Tx latency" 6736 }; 6737 static const char *rx_stats[MAX_PM_NSTATS] = { 6738 "Read:", "Write bypass:", "Write mem:", "Flush:", 6739 "Rx FIFO wait", NULL, "Rx latency" 6740 }; 6741 6742 rc = sysctl_wire_old_buffer(req, 0); 6743 if (rc != 0) 6744 return (rc); 6745 6746 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6747 if (sb == NULL) 6748 return (ENOMEM); 6749 6750 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); 6751 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); 6752 6753 sbuf_printf(sb, " Tx pcmds Tx bytes"); 6754 for (i = 0; i < 4; i++) { 6755 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6756 tx_cyc[i]); 6757 } 6758 6759 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 6760 for (i = 0; i < 4; i++) { 6761 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6762 rx_cyc[i]); 6763 } 6764 6765 if (chip_id(sc) > CHELSIO_T5) { 6766 sbuf_printf(sb, 6767 "\n Total wait Total occupancy"); 6768 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6769 tx_cyc[i]); 6770 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6771 rx_cyc[i]); 6772 6773 i += 2; 6774 MPASS(i < nitems(tx_stats)); 6775 6776 sbuf_printf(sb, 6777 "\n Reads Total wait"); 6778 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 6779 tx_cyc[i]); 6780 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 6781 rx_cyc[i]); 6782 } 6783 6784 rc = sbuf_finish(sb); 6785 sbuf_delete(sb); 6786 6787 return (rc); 6788} 6789 6790static int 6791sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 6792{ 6793 struct adapter *sc = arg1; 6794 struct sbuf *sb; 6795 int rc; 6796 struct tp_rdma_stats stats; 6797 6798 rc = sysctl_wire_old_buffer(req, 0); 6799 if (rc != 0) 6800 return (rc); 6801 6802 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6803 if (sb == NULL) 6804 return (ENOMEM); 6805 6806 mtx_lock(&sc->reg_lock); 6807 t4_tp_get_rdma_stats(sc, &stats); 6808 mtx_unlock(&sc->reg_lock); 6809 6810 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); 6811 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); 6812 6813 rc = sbuf_finish(sb); 6814 sbuf_delete(sb); 6815 6816 return (rc); 6817} 6818 6819static int 6820sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 6821{ 6822 struct adapter *sc = arg1; 6823 struct sbuf *sb; 6824 int rc; 6825 struct tp_tcp_stats v4, v6; 6826 6827 rc = sysctl_wire_old_buffer(req, 0); 6828 if (rc != 0) 6829 return (rc); 6830 6831 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6832 if (sb == NULL) 6833 return (ENOMEM); 6834 6835 mtx_lock(&sc->reg_lock); 6836 t4_tp_get_tcp_stats(sc, &v4, &v6); 6837 mtx_unlock(&sc->reg_lock); 6838 6839 sbuf_printf(sb, 6840 " IP IPv6\n"); 6841 sbuf_printf(sb, "OutRsts: %20u %20u\n", 6842 v4.tcp_out_rsts, v6.tcp_out_rsts); 6843 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 6844 v4.tcp_in_segs, v6.tcp_in_segs); 6845 sbuf_printf(sb, "OutSegs: %20ju %20ju\n", 6846 v4.tcp_out_segs, v6.tcp_out_segs); 6847 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 6848 v4.tcp_retrans_segs, v6.tcp_retrans_segs); 6849 6850 
rc = sbuf_finish(sb); 6851 sbuf_delete(sb); 6852 6853 return (rc); 6854} 6855 6856static int 6857sysctl_tids(SYSCTL_HANDLER_ARGS) 6858{ 6859 struct adapter *sc = arg1; 6860 struct sbuf *sb; 6861 int rc; 6862 struct tid_info *t = &sc->tids; 6863 6864 rc = sysctl_wire_old_buffer(req, 0); 6865 if (rc != 0) 6866 return (rc); 6867 6868 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6869 if (sb == NULL) 6870 return (ENOMEM); 6871 6872 if (t->natids) { 6873 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 6874 t->atids_in_use); 6875 } 6876 6877 if (t->ntids) { 6878 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6879 uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 6880 6881 if (b) { 6882 sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1, 6883 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6884 t->ntids - 1); 6885 } else { 6886 sbuf_printf(sb, "TID range: %u-%u", 6887 t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4, 6888 t->ntids - 1); 6889 } 6890 } else 6891 sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1); 6892 sbuf_printf(sb, ", in use: %u\n", 6893 atomic_load_acq_int(&t->tids_in_use)); 6894 } 6895 6896 if (t->nstids) { 6897 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 6898 t->stid_base + t->nstids - 1, t->stids_in_use); 6899 } 6900 6901 if (t->nftids) { 6902 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 6903 t->ftid_base + t->nftids - 1); 6904 } 6905 6906 if (t->netids) { 6907 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 6908 t->etid_base + t->netids - 1); 6909 } 6910 6911 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 6912 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 6913 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 6914 6915 rc = sbuf_finish(sb); 6916 sbuf_delete(sb); 6917 6918 return (rc); 6919} 6920 6921static int 6922sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 6923{ 6924 struct adapter *sc = arg1; 6925 struct sbuf *sb; 6926 int rc; 6927 struct tp_err_stats stats; 6928 6929 rc = sysctl_wire_old_buffer(req, 0); 6930 if (rc != 0) 6931 return (rc); 6932 6933 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6934 if (sb == NULL) 6935 return (ENOMEM); 6936 6937 mtx_lock(&sc->reg_lock); 6938 t4_tp_get_err_stats(sc, &stats); 6939 mtx_unlock(&sc->reg_lock); 6940 6941 if (sc->chip_params->nchan > 2) { 6942 sbuf_printf(sb, " channel 0 channel 1" 6943 " channel 2 channel 3\n"); 6944 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 6945 stats.mac_in_errs[0], stats.mac_in_errs[1], 6946 stats.mac_in_errs[2], stats.mac_in_errs[3]); 6947 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 6948 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 6949 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 6950 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 6951 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 6952 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 6953 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 6954 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 6955 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 6956 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 6957 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 6958 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 6959 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 6960 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 6961 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 6962 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 6963 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], 6964 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 6965 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 
6966 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 6967 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); 6968 } else { 6969 sbuf_printf(sb, " channel 0 channel 1\n"); 6970 sbuf_printf(sb, "macInErrs: %10u %10u\n", 6971 stats.mac_in_errs[0], stats.mac_in_errs[1]); 6972 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 6973 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 6974 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 6975 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 6976 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 6977 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 6978 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 6979 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 6980 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 6981 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 6982 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 6983 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 6984 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 6985 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 6986 } 6987 6988 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 6989 stats.ofld_no_neigh, stats.ofld_cong_defer); 6990 6991 rc = sbuf_finish(sb); 6992 sbuf_delete(sb); 6993 6994 return (rc); 6995} 6996 6997static int 6998sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) 6999{ 7000 struct adapter *sc = arg1; 7001 struct tp_params *tpp = &sc->params.tp; 7002 u_int mask; 7003 int rc; 7004 7005 mask = tpp->la_mask >> 16; 7006 rc = sysctl_handle_int(oidp, &mask, 0, req); 7007 if (rc != 0 || req->newptr == NULL) 7008 return (rc); 7009 if (mask > 0xffff) 7010 return (EINVAL); 7011 tpp->la_mask = mask << 16; 7012 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); 7013 7014 return (0); 7015} 7016 7017struct field_desc { 7018 const char *name; 7019 u_int start; 7020 u_int width; 7021}; 7022 7023static void 7024field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 7025{ 7026 char buf[32]; 7027 int line_size = 0; 7028 7029 while (f->name) { 7030 uint64_t mask = (1ULL << f->width) - 1; 7031 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 7032 ((uintmax_t)v >> f->start) & mask); 7033 7034 if (line_size + len >= 79) { 7035 line_size = 8; 7036 sbuf_printf(sb, "\n "); 7037 } 7038 sbuf_printf(sb, "%s ", buf); 7039 line_size += len + 1; 7040 f++; 7041 } 7042 sbuf_printf(sb, "\n"); 7043} 7044 7045static const struct field_desc tp_la0[] = { 7046 { "RcfOpCodeOut", 60, 4 }, 7047 { "State", 56, 4 }, 7048 { "WcfState", 52, 4 }, 7049 { "RcfOpcSrcOut", 50, 2 }, 7050 { "CRxError", 49, 1 }, 7051 { "ERxError", 48, 1 }, 7052 { "SanityFailed", 47, 1 }, 7053 { "SpuriousMsg", 46, 1 }, 7054 { "FlushInputMsg", 45, 1 }, 7055 { "FlushInputCpl", 44, 1 }, 7056 { "RssUpBit", 43, 1 }, 7057 { "RssFilterHit", 42, 1 }, 7058 { "Tid", 32, 10 }, 7059 { "InitTcb", 31, 1 }, 7060 { "LineNumber", 24, 7 }, 7061 { "Emsg", 23, 1 }, 7062 { "EdataOut", 22, 1 }, 7063 { "Cmsg", 21, 1 }, 7064 { "CdataOut", 20, 1 }, 7065 { "EreadPdu", 19, 1 }, 7066 { "CreadPdu", 18, 1 }, 7067 { "TunnelPkt", 17, 1 }, 7068 { "RcfPeerFin", 16, 1 }, 7069 { "RcfReasonOut", 12, 4 }, 7070 { "TxCchannel", 10, 2 }, 7071 { "RcfTxChannel", 8, 2 }, 7072 { "RxEchannel", 6, 2 }, 7073 { "RcfRxChannel", 5, 1 }, 7074 { "RcfDataOutSrdy", 4, 1 }, 7075 { "RxDvld", 3, 1 }, 7076 { "RxOoDvld", 2, 1 }, 7077 { "RxCongestion", 1, 1 }, 7078 { "TxCongestion", 0, 1 }, 7079 { NULL } 7080}; 7081 7082static const struct field_desc tp_la1[] = { 7083 { "CplCmdIn", 56, 8 }, 7084 { "CplCmdOut", 48, 8 }, 7085 { "ESynOut", 47, 1 }, 7086 { "EAckOut", 46, 1 }, 7087 { "EFinOut", 45, 1 }, 
7088 { "ERstOut", 44, 1 }, 7089 { "SynIn", 43, 1 }, 7090 { "AckIn", 42, 1 }, 7091 { "FinIn", 41, 1 }, 7092 { "RstIn", 40, 1 }, 7093 { "DataIn", 39, 1 }, 7094 { "DataInVld", 38, 1 }, 7095 { "PadIn", 37, 1 }, 7096 { "RxBufEmpty", 36, 1 }, 7097 { "RxDdp", 35, 1 }, 7098 { "RxFbCongestion", 34, 1 }, 7099 { "TxFbCongestion", 33, 1 }, 7100 { "TxPktSumSrdy", 32, 1 }, 7101 { "RcfUlpType", 28, 4 }, 7102 { "Eread", 27, 1 }, 7103 { "Ebypass", 26, 1 }, 7104 { "Esave", 25, 1 }, 7105 { "Static0", 24, 1 }, 7106 { "Cread", 23, 1 }, 7107 { "Cbypass", 22, 1 }, 7108 { "Csave", 21, 1 }, 7109 { "CPktOut", 20, 1 }, 7110 { "RxPagePoolFull", 18, 2 }, 7111 { "RxLpbkPkt", 17, 1 }, 7112 { "TxLpbkPkt", 16, 1 }, 7113 { "RxVfValid", 15, 1 }, 7114 { "SynLearned", 14, 1 }, 7115 { "SetDelEntry", 13, 1 }, 7116 { "SetInvEntry", 12, 1 }, 7117 { "CpcmdDvld", 11, 1 }, 7118 { "CpcmdSave", 10, 1 }, 7119 { "RxPstructsFull", 8, 2 }, 7120 { "EpcmdDvld", 7, 1 }, 7121 { "EpcmdFlush", 6, 1 }, 7122 { "EpcmdTrimPrefix", 5, 1 }, 7123 { "EpcmdTrimPostfix", 4, 1 }, 7124 { "ERssIp4Pkt", 3, 1 }, 7125 { "ERssIp6Pkt", 2, 1 }, 7126 { "ERssTcpUdpPkt", 1, 1 }, 7127 { "ERssFceFipPkt", 0, 1 }, 7128 { NULL } 7129}; 7130 7131static const struct field_desc tp_la2[] = { 7132 { "CplCmdIn", 56, 8 }, 7133 { "MpsVfVld", 55, 1 }, 7134 { "MpsPf", 52, 3 }, 7135 { "MpsVf", 44, 8 }, 7136 { "SynIn", 43, 1 }, 7137 { "AckIn", 42, 1 }, 7138 { "FinIn", 41, 1 }, 7139 { "RstIn", 40, 1 }, 7140 { "DataIn", 39, 1 }, 7141 { "DataInVld", 38, 1 }, 7142 { "PadIn", 37, 1 }, 7143 { "RxBufEmpty", 36, 1 }, 7144 { "RxDdp", 35, 1 }, 7145 { "RxFbCongestion", 34, 1 }, 7146 { "TxFbCongestion", 33, 1 }, 7147 { "TxPktSumSrdy", 32, 1 }, 7148 { "RcfUlpType", 28, 4 }, 7149 { "Eread", 27, 1 }, 7150 { "Ebypass", 26, 1 }, 7151 { "Esave", 25, 1 }, 7152 { "Static0", 24, 1 }, 7153 { "Cread", 23, 1 }, 7154 { "Cbypass", 22, 1 }, 7155 { "Csave", 21, 1 }, 7156 { "CPktOut", 20, 1 }, 7157 { "RxPagePoolFull", 18, 2 }, 7158 { "RxLpbkPkt", 17, 1 }, 7159 { "TxLpbkPkt", 16, 1 }, 7160 { "RxVfValid", 15, 1 }, 7161 { "SynLearned", 14, 1 }, 7162 { "SetDelEntry", 13, 1 }, 7163 { "SetInvEntry", 12, 1 }, 7164 { "CpcmdDvld", 11, 1 }, 7165 { "CpcmdSave", 10, 1 }, 7166 { "RxPstructsFull", 8, 2 }, 7167 { "EpcmdDvld", 7, 1 }, 7168 { "EpcmdFlush", 6, 1 }, 7169 { "EpcmdTrimPrefix", 5, 1 }, 7170 { "EpcmdTrimPostfix", 4, 1 }, 7171 { "ERssIp4Pkt", 3, 1 }, 7172 { "ERssIp6Pkt", 2, 1 }, 7173 { "ERssTcpUdpPkt", 1, 1 }, 7174 { "ERssFceFipPkt", 0, 1 }, 7175 { NULL } 7176}; 7177 7178static void 7179tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 7180{ 7181 7182 field_desc_show(sb, *p, tp_la0); 7183} 7184 7185static void 7186tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 7187{ 7188 7189 if (idx) 7190 sbuf_printf(sb, "\n"); 7191 field_desc_show(sb, p[0], tp_la0); 7192 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7193 field_desc_show(sb, p[1], tp_la0); 7194} 7195 7196static void 7197tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 7198{ 7199 7200 if (idx) 7201 sbuf_printf(sb, "\n"); 7202 field_desc_show(sb, p[0], tp_la0); 7203 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7204 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 7205} 7206 7207static int 7208sysctl_tp_la(SYSCTL_HANDLER_ARGS) 7209{ 7210 struct adapter *sc = arg1; 7211 struct sbuf *sb; 7212 uint64_t *buf, *p; 7213 int rc; 7214 u_int i, inc; 7215 void (*show_func)(struct sbuf *, uint64_t *, int); 7216 7217 rc = sysctl_wire_old_buffer(req, 0); 7218 if (rc != 0) 7219 return (rc); 7220 7221 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7222 if (sb == NULL) 7223 return (ENOMEM); 7224 7225 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 7226 7227 t4_tp_read_la(sc, buf, NULL); 7228 p = buf; 7229 7230 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 7231 case 2: 7232 inc = 2; 7233 show_func = tp_la_show2; 7234 break; 7235 case 3: 7236 inc = 2; 7237 show_func = tp_la_show3; 7238 break; 7239 default: 7240 inc = 1; 7241 show_func = tp_la_show; 7242 } 7243 7244 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 7245 (*show_func)(sb, p, i); 7246 7247 rc = sbuf_finish(sb); 7248 sbuf_delete(sb); 7249 free(buf, M_CXGBE); 7250 return (rc); 7251} 7252 7253static int 7254sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 7255{ 7256 struct adapter *sc = arg1; 7257 struct sbuf *sb; 7258 int rc; 7259 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 7260 7261 rc = sysctl_wire_old_buffer(req, 0); 7262 if (rc != 0) 7263 return (rc); 7264 7265 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7266 if (sb == NULL) 7267 return (ENOMEM); 7268 7269 t4_get_chan_txrate(sc, nrate, orate); 7270 7271 if (sc->chip_params->nchan > 2) { 7272 sbuf_printf(sb, " channel 0 channel 1" 7273 " channel 2 channel 3\n"); 7274 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 7275 nrate[0], nrate[1], nrate[2], nrate[3]); 7276 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 7277 orate[0], orate[1], orate[2], orate[3]); 7278 } else { 7279 sbuf_printf(sb, " channel 0 channel 1\n"); 7280 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 7281 nrate[0], nrate[1]); 7282 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 7283 orate[0], orate[1]); 7284 } 7285 7286 rc = sbuf_finish(sb); 7287 sbuf_delete(sb); 7288 7289 return (rc); 7290} 7291 7292static int 7293sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 7294{ 7295 struct adapter *sc = arg1; 7296 struct sbuf *sb; 7297 uint32_t *buf, *p; 7298 int rc, i; 7299 7300 rc = sysctl_wire_old_buffer(req, 0); 7301 if (rc != 0) 7302 return (rc); 7303 7304 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7305 if (sb == NULL) 7306 return (ENOMEM); 7307 7308 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 7309 M_ZERO | M_WAITOK); 7310 7311 t4_ulprx_read_la(sc, buf); 7312 p = buf; 7313 7314 sbuf_printf(sb, " Pcmd Type Message" 7315 " Data"); 7316 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 7317 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 7318 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 7319 } 7320 7321 rc = sbuf_finish(sb); 7322 sbuf_delete(sb); 7323 free(buf, M_CXGBE); 7324 return (rc); 7325} 7326 7327static int 7328sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 7329{ 7330 struct adapter *sc = arg1; 7331 struct sbuf *sb; 7332 int rc, v; 7333 7334 rc = sysctl_wire_old_buffer(req, 0); 7335 if (rc != 0) 7336 return (rc); 7337 7338 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7339 if (sb == NULL) 7340 return (ENOMEM); 7341 7342 v = t4_read_reg(sc, A_SGE_STAT_CFG); 7343 if (G_STATSOURCE_T5(v) == 7) { 7344 if (G_STATMODE(v) == 0) { 7345 sbuf_printf(sb, "total %d, incomplete %d", 7346 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7347 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7348 } else if (G_STATMODE(v) == 1) { 7349 sbuf_printf(sb, "total %d, data 
overflow %d", 7350 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7351 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7352 } 7353 } 7354 rc = sbuf_finish(sb); 7355 sbuf_delete(sb); 7356 7357 return (rc); 7358} 7359 7360static int 7361sysctl_tc_params(SYSCTL_HANDLER_ARGS) 7362{ 7363 struct adapter *sc = arg1; 7364 struct tx_sched_class *tc; 7365 struct t4_sched_class_params p; 7366 struct sbuf *sb; 7367 int i, rc, port_id, flags, mbps, gbps; 7368 7369 rc = sysctl_wire_old_buffer(req, 0); 7370 if (rc != 0) 7371 return (rc); 7372 7373 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7374 if (sb == NULL) 7375 return (ENOMEM); 7376 7377 port_id = arg2 >> 16; 7378 MPASS(port_id < sc->params.nports); 7379 MPASS(sc->port[port_id] != NULL); 7380 i = arg2 & 0xffff; 7381 MPASS(i < sc->chip_params->nsched_cls); 7382 tc = &sc->port[port_id]->tc[i]; 7383 7384 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7385 "t4tc_p"); 7386 if (rc) 7387 goto done; 7388 flags = tc->flags; 7389 p = tc->params; 7390 end_synchronized_op(sc, LOCK_HELD); 7391 7392 if ((flags & TX_SC_OK) == 0) { 7393 sbuf_printf(sb, "none"); 7394 goto done; 7395 } 7396 7397 if (p.level == SCHED_CLASS_LEVEL_CL_WRR) { 7398 sbuf_printf(sb, "cl-wrr weight %u", p.weight); 7399 goto done; 7400 } else if (p.level == SCHED_CLASS_LEVEL_CL_RL) 7401 sbuf_printf(sb, "cl-rl"); 7402 else if (p.level == SCHED_CLASS_LEVEL_CH_RL) 7403 sbuf_printf(sb, "ch-rl"); 7404 else { 7405 rc = ENXIO; 7406 goto done; 7407 } 7408 7409 if (p.ratemode == SCHED_CLASS_RATEMODE_REL) { 7410 /* XXX: top speed or actual link speed? */ 7411 gbps = port_top_speed(sc->port[port_id]); 7412 sbuf_printf(sb, " %u%% of %uGbps", p.maxrate, gbps); 7413 } 7414 else if (p.ratemode == SCHED_CLASS_RATEMODE_ABS) { 7415 switch (p.rateunit) { 7416 case SCHED_CLASS_RATEUNIT_BITS: 7417 mbps = p.maxrate / 1000; 7418 gbps = p.maxrate / 1000000; 7419 if (p.maxrate == gbps * 1000000) 7420 sbuf_printf(sb, " %uGbps", gbps); 7421 else if (p.maxrate == mbps * 1000) 7422 sbuf_printf(sb, " %uMbps", mbps); 7423 else 7424 sbuf_printf(sb, " %uKbps", p.maxrate); 7425 break; 7426 case SCHED_CLASS_RATEUNIT_PKTS: 7427 sbuf_printf(sb, " %upps", p.maxrate); 7428 break; 7429 default: 7430 rc = ENXIO; 7431 goto done; 7432 } 7433 } 7434 7435 switch (p.mode) { 7436 case SCHED_CLASS_MODE_CLASS: 7437 sbuf_printf(sb, " aggregate"); 7438 break; 7439 case SCHED_CLASS_MODE_FLOW: 7440 sbuf_printf(sb, " per-flow"); 7441 break; 7442 default: 7443 rc = ENXIO; 7444 goto done; 7445 } 7446 7447done: 7448 if (rc == 0) 7449 rc = sbuf_finish(sb); 7450 sbuf_delete(sb); 7451 7452 return (rc); 7453} 7454#endif 7455 7456#ifdef TCP_OFFLOAD 7457static void 7458unit_conv(char *buf, size_t len, u_int val, u_int factor) 7459{ 7460 u_int rem = val % factor; 7461 7462 if (rem == 0) 7463 snprintf(buf, len, "%u", val / factor); 7464 else { 7465 while (rem % 10 == 0) 7466 rem /= 10; 7467 snprintf(buf, len, "%u.%u", val / factor, rem); 7468 } 7469} 7470 7471static int 7472sysctl_tp_tick(SYSCTL_HANDLER_ARGS) 7473{ 7474 struct adapter *sc = arg1; 7475 char buf[16]; 7476 u_int res, re; 7477 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7478 7479 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 7480 switch (arg2) { 7481 case 0: 7482 /* timer_tick */ 7483 re = G_TIMERRESOLUTION(res); 7484 break; 7485 case 1: 7486 /* TCP timestamp tick */ 7487 re = G_TIMESTAMPRESOLUTION(res); 7488 break; 7489 case 2: 7490 /* DACK tick */ 7491 re = G_DELAYEDACKRESOLUTION(res); 7492 break; 7493 default: 7494 return (EDOOFUS); 7495 } 7496 7497 unit_conv(buf, sizeof(buf), 
(cclk_ps << re), 1000000); 7498 7499 return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); 7500} 7501 7502static int 7503sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) 7504{ 7505 struct adapter *sc = arg1; 7506 u_int res, dack_re, v; 7507 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7508 7509 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 7510 dack_re = G_DELAYEDACKRESOLUTION(res); 7511 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER); 7512 7513 return (sysctl_handle_int(oidp, &v, 0, req)); 7514} 7515 7516static int 7517sysctl_tp_timer(SYSCTL_HANDLER_ARGS) 7518{ 7519 struct adapter *sc = arg1; 7520 int reg = arg2; 7521 u_int tre; 7522 u_long tp_tick_us, v; 7523 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7524 7525 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || 7526 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || 7527 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || reg == A_TP_INIT_SRTT || 7528 reg == A_TP_FINWAIT2_TIMER); 7529 7530 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); 7531 tp_tick_us = (cclk_ps << tre) / 1000000; 7532 7533 if (reg == A_TP_INIT_SRTT) 7534 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); 7535 else 7536 v = tp_tick_us * t4_read_reg(sc, reg); 7537 7538 return (sysctl_handle_long(oidp, &v, 0, req)); 7539} 7540#endif 7541 7542static uint32_t 7543fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf) 7544{ 7545 uint32_t mode; 7546 7547 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 7548 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 7549 7550 if (fconf & F_FRAGMENTATION) 7551 mode |= T4_FILTER_IP_FRAGMENT; 7552 7553 if (fconf & F_MPSHITTYPE) 7554 mode |= T4_FILTER_MPS_HIT_TYPE; 7555 7556 if (fconf & F_MACMATCH) 7557 mode |= T4_FILTER_MAC_IDX; 7558 7559 if (fconf & F_ETHERTYPE) 7560 mode |= T4_FILTER_ETH_TYPE; 7561 7562 if (fconf & F_PROTOCOL) 7563 mode |= T4_FILTER_IP_PROTO; 7564 7565 if (fconf & F_TOS) 7566 mode |= T4_FILTER_IP_TOS; 7567 7568 if (fconf & F_VLAN) 7569 mode |= T4_FILTER_VLAN; 7570 7571 if (fconf & F_VNIC_ID) { 7572 mode |= T4_FILTER_VNIC; 7573 if (iconf & F_VNIC) 7574 mode |= T4_FILTER_IC_VNIC; 7575 } 7576 7577 if (fconf & F_PORT) 7578 mode |= T4_FILTER_PORT; 7579 7580 if (fconf & F_FCOE) 7581 mode |= T4_FILTER_FCoE; 7582 7583 return (mode); 7584} 7585 7586static uint32_t 7587mode_to_fconf(uint32_t mode) 7588{ 7589 uint32_t fconf = 0; 7590 7591 if (mode & T4_FILTER_IP_FRAGMENT) 7592 fconf |= F_FRAGMENTATION; 7593 7594 if (mode & T4_FILTER_MPS_HIT_TYPE) 7595 fconf |= F_MPSHITTYPE; 7596 7597 if (mode & T4_FILTER_MAC_IDX) 7598 fconf |= F_MACMATCH; 7599 7600 if (mode & T4_FILTER_ETH_TYPE) 7601 fconf |= F_ETHERTYPE; 7602 7603 if (mode & T4_FILTER_IP_PROTO) 7604 fconf |= F_PROTOCOL; 7605 7606 if (mode & T4_FILTER_IP_TOS) 7607 fconf |= F_TOS; 7608 7609 if (mode & T4_FILTER_VLAN) 7610 fconf |= F_VLAN; 7611 7612 if (mode & T4_FILTER_VNIC) 7613 fconf |= F_VNIC_ID; 7614 7615 if (mode & T4_FILTER_PORT) 7616 fconf |= F_PORT; 7617 7618 if (mode & T4_FILTER_FCoE) 7619 fconf |= F_FCOE; 7620 7621 return (fconf); 7622} 7623 7624static uint32_t 7625mode_to_iconf(uint32_t mode) 7626{ 7627 7628 if (mode & T4_FILTER_IC_VNIC) 7629 return (F_VNIC); 7630 return (0); 7631} 7632 7633static int check_fspec_against_fconf_iconf(struct adapter *sc, 7634 struct t4_filter_specification *fs) 7635{ 7636 struct tp_params *tpp = &sc->params.tp; 7637 uint32_t fconf = 0; 7638 7639 if (fs->val.frag || fs->mask.frag) 7640 fconf |= F_FRAGMENTATION; 7641 7642 if (fs->val.matchtype || fs->mask.matchtype) 7643 fconf
|= F_MPSHITTYPE; 7644 7645 if (fs->val.macidx || fs->mask.macidx) 7646 fconf |= F_MACMATCH; 7647 7648 if (fs->val.ethtype || fs->mask.ethtype) 7649 fconf |= F_ETHERTYPE; 7650 7651 if (fs->val.proto || fs->mask.proto) 7652 fconf |= F_PROTOCOL; 7653 7654 if (fs->val.tos || fs->mask.tos) 7655 fconf |= F_TOS; 7656 7657 if (fs->val.vlan_vld || fs->mask.vlan_vld) 7658 fconf |= F_VLAN; 7659 7660 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 7661 fconf |= F_VNIC_ID; 7662 if (tpp->ingress_config & F_VNIC) 7663 return (EINVAL); 7664 } 7665 7666 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 7667 fconf |= F_VNIC_ID; 7668 if ((tpp->ingress_config & F_VNIC) == 0) 7669 return (EINVAL); 7670 } 7671 7672 if (fs->val.iport || fs->mask.iport) 7673 fconf |= F_PORT; 7674 7675 if (fs->val.fcoe || fs->mask.fcoe) 7676 fconf |= F_FCOE; 7677 7678 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 7679 return (E2BIG); 7680 7681 return (0); 7682} 7683 7684static int 7685get_filter_mode(struct adapter *sc, uint32_t *mode) 7686{ 7687 struct tp_params *tpp = &sc->params.tp; 7688 7689 /* 7690 * We trust the cached values of the relevant TP registers. This means 7691 * things work reliably only if writes to those registers are always via 7692 * t4_set_filter_mode. 7693 */ 7694 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 7695 7696 return (0); 7697} 7698 7699static int 7700set_filter_mode(struct adapter *sc, uint32_t mode) 7701{ 7702 struct tp_params *tpp = &sc->params.tp; 7703 uint32_t fconf, iconf; 7704 int rc; 7705 7706 iconf = mode_to_iconf(mode); 7707 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 7708 /* 7709 * For now we just complain if A_TP_INGRESS_CONFIG is not 7710 * already set to the correct value for the requested filter 7711 * mode. It's not clear if it's safe to write to this register 7712 * on the fly. (And we trust the cached value of the register). 7713 */ 7714 return (EBUSY); 7715 } 7716 7717 fconf = mode_to_fconf(mode); 7718 7719 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7720 "t4setfm"); 7721 if (rc) 7722 return (rc); 7723 7724 if (sc->tids.ftids_in_use > 0) { 7725 rc = EBUSY; 7726 goto done; 7727 } 7728 7729#ifdef TCP_OFFLOAD 7730 if (uld_active(sc, ULD_TOM)) { 7731 rc = EBUSY; 7732 goto done; 7733 } 7734#endif 7735 7736 rc = -t4_set_filter_mode(sc, fconf); 7737done: 7738 end_synchronized_op(sc, LOCK_HELD); 7739 return (rc); 7740} 7741 7742static inline uint64_t 7743get_filter_hits(struct adapter *sc, uint32_t fid) 7744{ 7745 uint32_t tcb_addr; 7746 7747 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 7748 (fid + sc->tids.ftid_base) * TCB_SIZE; 7749 7750 if (is_t4(sc)) { 7751 uint64_t hits; 7752 7753 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); 7754 return (be64toh(hits)); 7755 } else { 7756 uint32_t hits; 7757 7758 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); 7759 return (be32toh(hits)); 7760 } 7761} 7762 7763static int 7764get_filter(struct adapter *sc, struct t4_filter *t) 7765{ 7766 int i, rc, nfilters = sc->tids.nftids; 7767 struct filter_entry *f; 7768 7769 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7770 "t4getf"); 7771 if (rc) 7772 return (rc); 7773 7774 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 7775 t->idx >= nfilters) { 7776 t->idx = 0xffffffff; 7777 goto done; 7778 } 7779 7780 f = &sc->tids.ftid_tab[t->idx]; 7781 for (i = t->idx; i < nfilters; i++, f++) { 7782 if (f->valid) { 7783 t->idx = i; 7784 t->l2tidx = f->l2t ? 
f->l2t->idx : 0; 7785 t->smtidx = f->smtidx; 7786 if (f->fs.hitcnts) 7787 t->hits = get_filter_hits(sc, t->idx); 7788 else 7789 t->hits = UINT64_MAX; 7790 t->fs = f->fs; 7791 7792 goto done; 7793 } 7794 } 7795 7796 t->idx = 0xffffffff; 7797done: 7798 end_synchronized_op(sc, LOCK_HELD); 7799 return (0); 7800} 7801 7802static int 7803set_filter(struct adapter *sc, struct t4_filter *t) 7804{ 7805 unsigned int nfilters, nports; 7806 struct filter_entry *f; 7807 int i, rc; 7808 7809 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 7810 if (rc) 7811 return (rc); 7812 7813 nfilters = sc->tids.nftids; 7814 nports = sc->params.nports; 7815 7816 if (nfilters == 0) { 7817 rc = ENOTSUP; 7818 goto done; 7819 } 7820 7821 if (t->idx >= nfilters) { 7822 rc = EINVAL; 7823 goto done; 7824 } 7825 7826 /* Validate against the global filter mode and ingress config */ 7827 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 7828 if (rc != 0) 7829 goto done; 7830 7831 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 7832 rc = EINVAL; 7833 goto done; 7834 } 7835 7836 if (t->fs.val.iport >= nports) { 7837 rc = EINVAL; 7838 goto done; 7839 } 7840 7841 /* Can't specify an iq if not steering to it */ 7842 if (!t->fs.dirsteer && t->fs.iq) { 7843 rc = EINVAL; 7844 goto done; 7845 } 7846 7847 /* IPv6 filter idx must be 4 aligned */ 7848 if (t->fs.type == 1 && 7849 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 7850 rc = EINVAL; 7851 goto done; 7852 } 7853 7854 if (!(sc->flags & FULL_INIT_DONE) && 7855 ((rc = adapter_full_init(sc)) != 0)) 7856 goto done; 7857 7858 if (sc->tids.ftid_tab == NULL) { 7859 KASSERT(sc->tids.ftids_in_use == 0, 7860 ("%s: no memory allocated but filters_in_use > 0", 7861 __func__)); 7862 7863 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 7864 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 7865 if (sc->tids.ftid_tab == NULL) { 7866 rc = ENOMEM; 7867 goto done; 7868 } 7869 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 7870 } 7871 7872 for (i = 0; i < 4; i++) { 7873 f = &sc->tids.ftid_tab[t->idx + i]; 7874 7875 if (f->pending || f->valid) { 7876 rc = EBUSY; 7877 goto done; 7878 } 7879 if (f->locked) { 7880 rc = EPERM; 7881 goto done; 7882 } 7883 7884 if (t->fs.type == 0) 7885 break; 7886 } 7887 7888 f = &sc->tids.ftid_tab[t->idx]; 7889 f->fs = t->fs; 7890 7891 rc = set_filter_wr(sc, t->idx); 7892done: 7893 end_synchronized_op(sc, 0); 7894 7895 if (rc == 0) { 7896 mtx_lock(&sc->tids.ftid_lock); 7897 for (;;) { 7898 if (f->pending == 0) { 7899 rc = f->valid ? 
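	/*
	 * Completion handshake: set_filter_wr() marked the entry pending and
	 * posted the work request, and t4_filter_rpl() clears pending (setting
	 * valid on success) when the firmware's reply arrives, waking this
	 * sleep on ftid_tab.  An interrupted sleep returns EINPROGRESS because
	 * the request has already been handed to the firmware.
	 */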
0 : EIO; 7900 break; 7901 } 7902 7903 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 7904 PCATCH, "t4setfw", 0)) { 7905 rc = EINPROGRESS; 7906 break; 7907 } 7908 } 7909 mtx_unlock(&sc->tids.ftid_lock); 7910 } 7911 return (rc); 7912} 7913 7914static int 7915del_filter(struct adapter *sc, struct t4_filter *t) 7916{ 7917 unsigned int nfilters; 7918 struct filter_entry *f; 7919 int rc; 7920 7921 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 7922 if (rc) 7923 return (rc); 7924 7925 nfilters = sc->tids.nftids; 7926 7927 if (nfilters == 0) { 7928 rc = ENOTSUP; 7929 goto done; 7930 } 7931 7932 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 7933 t->idx >= nfilters) { 7934 rc = EINVAL; 7935 goto done; 7936 } 7937 7938 if (!(sc->flags & FULL_INIT_DONE)) { 7939 rc = EAGAIN; 7940 goto done; 7941 } 7942 7943 f = &sc->tids.ftid_tab[t->idx]; 7944 7945 if (f->pending) { 7946 rc = EBUSY; 7947 goto done; 7948 } 7949 if (f->locked) { 7950 rc = EPERM; 7951 goto done; 7952 } 7953 7954 if (f->valid) { 7955 t->fs = f->fs; /* extra info for the caller */ 7956 rc = del_filter_wr(sc, t->idx); 7957 } 7958 7959done: 7960 end_synchronized_op(sc, 0); 7961 7962 if (rc == 0) { 7963 mtx_lock(&sc->tids.ftid_lock); 7964 for (;;) { 7965 if (f->pending == 0) { 7966 rc = f->valid ? EIO : 0; 7967 break; 7968 } 7969 7970 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 7971 PCATCH, "t4delfw", 0)) { 7972 rc = EINPROGRESS; 7973 break; 7974 } 7975 } 7976 mtx_unlock(&sc->tids.ftid_lock); 7977 } 7978 7979 return (rc); 7980} 7981 7982static void 7983clear_filter(struct filter_entry *f) 7984{ 7985 if (f->l2t) 7986 t4_l2t_release(f->l2t); 7987 7988 bzero(f, sizeof (*f)); 7989} 7990 7991static int 7992set_filter_wr(struct adapter *sc, int fidx) 7993{ 7994 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 7995 struct fw_filter_wr *fwr; 7996 unsigned int ftid, vnic_vld, vnic_vld_mask; 7997 struct wrq_cookie cookie; 7998 7999 ASSERT_SYNCHRONIZED_OP(sc); 8000 8001 if (f->fs.newdmac || f->fs.newvlan) { 8002 /* This filter needs an L2T entry; allocate one. 
*/ 8003 f->l2t = t4_l2t_alloc_switching(sc->l2t); 8004 if (f->l2t == NULL) 8005 return (EAGAIN); 8006 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 8007 f->fs.dmac)) { 8008 t4_l2t_release(f->l2t); 8009 f->l2t = NULL; 8010 return (ENOMEM); 8011 } 8012 } 8013 8014 /* Already validated against fconf, iconf */ 8015 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0); 8016 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0); 8017 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld) 8018 vnic_vld = 1; 8019 else 8020 vnic_vld = 0; 8021 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld) 8022 vnic_vld_mask = 1; 8023 else 8024 vnic_vld_mask = 0; 8025 8026 ftid = sc->tids.ftid_base + fidx; 8027 8028 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8029 if (fwr == NULL) 8030 return (ENOMEM); 8031 bzero(fwr, sizeof(*fwr)); 8032 8033 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 8034 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 8035 fwr->tid_to_iq = 8036 htobe32(V_FW_FILTER_WR_TID(ftid) | 8037 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 8038 V_FW_FILTER_WR_NOREPLY(0) | 8039 V_FW_FILTER_WR_IQ(f->fs.iq)); 8040 fwr->del_filter_to_l2tix = 8041 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 8042 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 8043 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 8044 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 8045 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 8046 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 8047 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 8048 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 8049 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 8050 f->fs.newvlan == VLAN_REWRITE) | 8051 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 8052 f->fs.newvlan == VLAN_REWRITE) | 8053 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 8054 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 8055 V_FW_FILTER_WR_PRIO(f->fs.prio) | 8056 V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); 8057 fwr->ethtype = htobe16(f->fs.val.ethtype); 8058 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 8059 fwr->frag_to_ovlan_vldm = 8060 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 8061 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 8062 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 8063 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | 8064 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 8065 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); 8066 fwr->smac_sel = 0; 8067 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 8068 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 8069 fwr->maci_to_matchtypem = 8070 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 8071 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 8072 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 8073 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 8074 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 8075 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 8076 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 8077 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 8078 fwr->ptcl = f->fs.val.proto; 8079 fwr->ptclm = f->fs.mask.proto; 8080 fwr->ttyp = f->fs.val.tos; 8081 fwr->ttypm = f->fs.mask.tos; 8082 fwr->ivlan = htobe16(f->fs.val.vlan); 8083 fwr->ivlanm = htobe16(f->fs.mask.vlan); 8084 fwr->ovlan = htobe16(f->fs.val.vnic); 8085 fwr->ovlanm = htobe16(f->fs.mask.vnic); 8086 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 8087 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 8088 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 8089 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 8090 fwr->lp = htobe16(f->fs.val.dport); 8091 fwr->lpm = htobe16(f->fs.mask.dport); 8092 fwr->fp = htobe16(f->fs.val.sport); 8093 fwr->fpm = htobe16(f->fs.mask.sport); 8094 if (f->fs.newsmac) 8095 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 8096 8097 f->pending = 1; 8098 sc->tids.ftids_in_use++; 8099 8100 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8101 return (0); 8102} 8103 8104static int 8105del_filter_wr(struct adapter *sc, int fidx) 8106{ 8107 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8108 struct fw_filter_wr *fwr; 8109 unsigned int ftid; 8110 struct wrq_cookie cookie; 8111 8112 ftid = sc->tids.ftid_base + fidx; 8113 8114 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8115 if (fwr == NULL) 8116 return (ENOMEM); 8117 bzero(fwr, sizeof (*fwr)); 8118 8119 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 8120 8121 f->pending = 1; 8122 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8123 return (0); 8124} 8125 8126int 8127t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8128{ 8129 struct adapter *sc = iq->adapter; 8130 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 8131 unsigned int idx = GET_TID(rpl); 8132 unsigned int rc; 8133 struct filter_entry *f; 8134 8135 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 8136 rss->opcode)); 8137 MPASS(iq == &sc->sge.fwq); 8138 MPASS(is_ftid(sc, idx)); 8139 8140 idx -= sc->tids.ftid_base; 8141 f = &sc->tids.ftid_tab[idx]; 8142 rc = G_COOKIE(rpl->cookie); 8143 8144 mtx_lock(&sc->tids.ftid_lock); 8145 if (rc == FW_FILTER_WR_FLT_ADDED) { 8146 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 8147 __func__, idx)); 8148 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 8149 f->pending = 0; /* asynchronous setup completed */ 8150 f->valid = 1; 8151 } else { 8152 if (rc != FW_FILTER_WR_FLT_DELETED) { 8153 /* Add or delete failed, display an error */ 8154 log(LOG_ERR, 8155 "filter %u setup failed with error %u\n", 8156 idx, rc); 8157 } 8158 8159 clear_filter(f); 
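		/*
		 * Both a successful delete and a failed add end up here:
		 * clear_filter() has released any switching L2T entry and
		 * zeroed the entry, and the ftids_in_use count taken in
		 * set_filter_wr() is dropped just below.
		 */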
8160 sc->tids.ftids_in_use--; 8161 } 8162 wakeup(&sc->tids.ftid_tab); 8163 mtx_unlock(&sc->tids.ftid_lock); 8164 8165 return (0); 8166} 8167 8168static int 8169set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8170{ 8171 8172 MPASS(iq->set_tcb_rpl != NULL); 8173 return (iq->set_tcb_rpl(iq, rss, m)); 8174} 8175 8176static int 8177l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8178{ 8179 8180 MPASS(iq->l2t_write_rpl != NULL); 8181 return (iq->l2t_write_rpl(iq, rss, m)); 8182} 8183 8184static int 8185get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 8186{ 8187 int rc; 8188 8189 if (cntxt->cid > M_CTXTQID) 8190 return (EINVAL); 8191 8192 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 8193 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 8194 return (EINVAL); 8195 8196 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 8197 if (rc) 8198 return (rc); 8199 8200 if (sc->flags & FW_OK) { 8201 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 8202 &cntxt->data[0]); 8203 if (rc == 0) 8204 goto done; 8205 } 8206 8207 /* 8208 * Read via firmware failed or wasn't even attempted. Read directly via 8209 * the backdoor. 8210 */ 8211 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 8212done: 8213 end_synchronized_op(sc, 0); 8214 return (rc); 8215} 8216 8217static int 8218load_fw(struct adapter *sc, struct t4_data *fw) 8219{ 8220 int rc; 8221 uint8_t *fw_data; 8222 8223 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 8224 if (rc) 8225 return (rc); 8226 8227 if (sc->flags & FULL_INIT_DONE) { 8228 rc = EBUSY; 8229 goto done; 8230 } 8231 8232 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 8233 if (fw_data == NULL) { 8234 rc = ENOMEM; 8235 goto done; 8236 } 8237 8238 rc = copyin(fw->data, fw_data, fw->len); 8239 if (rc == 0) 8240 rc = -t4_load_fw(sc, fw_data, fw->len); 8241 8242 free(fw_data, M_CXGBE); 8243done: 8244 end_synchronized_op(sc, 0); 8245 return (rc); 8246} 8247 8248#define MAX_READ_BUF_SIZE (128 * 1024) 8249static int 8250read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 8251{ 8252 uint32_t addr, remaining, n; 8253 uint32_t *buf; 8254 int rc; 8255 uint8_t *dst; 8256 8257 rc = validate_mem_range(sc, mr->addr, mr->len); 8258 if (rc != 0) 8259 return (rc); 8260 8261 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); 8262 addr = mr->addr; 8263 remaining = mr->len; 8264 dst = (void *)mr->data; 8265 8266 while (remaining) { 8267 n = min(remaining, MAX_READ_BUF_SIZE); 8268 read_via_memwin(sc, 2, addr, buf, n); 8269 8270 rc = copyout(buf, dst, n); 8271 if (rc != 0) 8272 break; 8273 8274 dst += n; 8275 remaining -= n; 8276 addr += n; 8277 } 8278 8279 free(buf, M_CXGBE); 8280 return (rc); 8281} 8282#undef MAX_READ_BUF_SIZE 8283 8284static int 8285read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 8286{ 8287 int rc; 8288 8289 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 8290 return (EINVAL); 8291 8292 if (i2cd->len > sizeof(i2cd->data)) 8293 return (EFBIG); 8294 8295 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 8296 if (rc) 8297 return (rc); 8298 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 8299 i2cd->offset, i2cd->len, &i2cd->data[0]); 8300 end_synchronized_op(sc, 0); 8301 8302 return (rc); 8303} 8304 8305static int 8306in_range(int val, int lo, int hi) 8307{ 8308 8309 return (val < 0 || (val <= hi && val >= lo)); 8310} 8311 8312static int 
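/*
 * Note on in_range() above: a negative value passes the check on purpose; the
 * scheduling-class ioctls use negative fields to mean "parameter not
 * supplied", and set_sched_class_params() below either substitutes a default
 * or rejects the request when that parameter is mandatory for the chosen
 * level.
 */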
8313set_sched_class_config(struct adapter *sc, int minmax) 8314{ 8315 int rc; 8316 8317 if (minmax < 0) 8318 return (EINVAL); 8319 8320 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc"); 8321 if (rc) 8322 return (rc); 8323 rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1); 8324 end_synchronized_op(sc, 0); 8325 8326 return (rc); 8327} 8328 8329static int 8330set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p, 8331 int sleep_ok) 8332{ 8333 int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode; 8334 struct port_info *pi; 8335 struct tx_sched_class *tc; 8336 8337 if (p->level == SCHED_CLASS_LEVEL_CL_RL) 8338 fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL; 8339 else if (p->level == SCHED_CLASS_LEVEL_CL_WRR) 8340 fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR; 8341 else if (p->level == SCHED_CLASS_LEVEL_CH_RL) 8342 fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL; 8343 else 8344 return (EINVAL); 8345 8346 if (p->mode == SCHED_CLASS_MODE_CLASS) 8347 fw_mode = FW_SCHED_PARAMS_MODE_CLASS; 8348 else if (p->mode == SCHED_CLASS_MODE_FLOW) 8349 fw_mode = FW_SCHED_PARAMS_MODE_FLOW; 8350 else 8351 return (EINVAL); 8352 8353 if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS) 8354 fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE; 8355 else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS) 8356 fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE; 8357 else 8358 return (EINVAL); 8359 8360 if (p->ratemode == SCHED_CLASS_RATEMODE_REL) 8361 fw_ratemode = FW_SCHED_PARAMS_RATE_REL; 8362 else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS) 8363 fw_ratemode = FW_SCHED_PARAMS_RATE_ABS; 8364 else 8365 return (EINVAL); 8366 8367 /* Vet our parameters ... */ 8368 if (!in_range(p->channel, 0, sc->chip_params->nchan - 1)) 8369 return (ERANGE); 8370 8371 pi = sc->port[sc->chan_map[p->channel]]; 8372 if (pi == NULL) 8373 return (ENXIO); 8374 MPASS(pi->tx_chan == p->channel); 8375 top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */ 8376 8377 if (!in_range(p->cl, 0, sc->chip_params->nsched_cls) || 8378 !in_range(p->minrate, 0, top_speed) || 8379 !in_range(p->maxrate, 0, top_speed) || 8380 !in_range(p->weight, 0, 100)) 8381 return (ERANGE); 8382 8383 /* 8384 * Translate any unset parameters into the firmware's 8385 * nomenclature and/or fail the call if the parameters 8386 * are required ... 8387 */ 8388 if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0) 8389 return (EINVAL); 8390 8391 if (p->minrate < 0) 8392 p->minrate = 0; 8393 if (p->maxrate < 0) { 8394 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 8395 p->level == SCHED_CLASS_LEVEL_CH_RL) 8396 return (EINVAL); 8397 else 8398 p->maxrate = 0; 8399 } 8400 if (p->weight < 0) { 8401 if (p->level == SCHED_CLASS_LEVEL_CL_WRR) 8402 return (EINVAL); 8403 else 8404 p->weight = 0; 8405 } 8406 if (p->pktsize < 0) { 8407 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 8408 p->level == SCHED_CLASS_LEVEL_CH_RL) 8409 return (EINVAL); 8410 else 8411 p->pktsize = 0; 8412 } 8413 8414 rc = begin_synchronized_op(sc, NULL, 8415 sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp"); 8416 if (rc) 8417 return (rc); 8418 tc = &pi->tc[p->cl]; 8419 tc->params = *p; 8420 rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode, 8421 fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate, 8422 p->weight, p->pktsize, sleep_ok); 8423 if (rc == 0) 8424 tc->flags |= TX_SC_OK; 8425 else { 8426 /* 8427 * Unknown state at this point, see tc->params for what was 8428 * attempted. 
8429 */ 8430 tc->flags &= ~TX_SC_OK; 8431 } 8432 end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD); 8433 8434 return (rc); 8435} 8436 8437int 8438t4_set_sched_class(struct adapter *sc, struct t4_sched_params *p) 8439{ 8440 8441 if (p->type != SCHED_CLASS_TYPE_PACKET) 8442 return (EINVAL); 8443 8444 if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG) 8445 return (set_sched_class_config(sc, p->u.config.minmax)); 8446 8447 if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS) 8448 return (set_sched_class_params(sc, &p->u.params, 1)); 8449 8450 return (EINVAL); 8451} 8452 8453int 8454t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p) 8455{ 8456 struct port_info *pi = NULL; 8457 struct vi_info *vi; 8458 struct sge_txq *txq; 8459 uint32_t fw_mnem, fw_queue, fw_class; 8460 int i, rc; 8461 8462 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq"); 8463 if (rc) 8464 return (rc); 8465 8466 if (p->port >= sc->params.nports) { 8467 rc = EINVAL; 8468 goto done; 8469 } 8470 8471 /* XXX: Only supported for the main VI. */ 8472 pi = sc->port[p->port]; 8473 vi = &pi->vi[0]; 8474 if (!(vi->flags & VI_INIT_DONE)) { 8475 /* tx queues not set up yet */ 8476 rc = EAGAIN; 8477 goto done; 8478 } 8479 8480 if (!in_range(p->queue, 0, vi->ntxq - 1) || 8481 !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) { 8482 rc = EINVAL; 8483 goto done; 8484 } 8485 8486 /* 8487 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX 8488 * Scheduling Class in this case). 8489 */ 8490 fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 8491 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH)); 8492 fw_class = p->cl < 0 ? 0xffffffff : p->cl; 8493 8494 /* 8495 * If op.queue is non-negative, then we're only changing the scheduling 8496 * on a single specified TX queue. 8497 */ 8498 if (p->queue >= 0) { 8499 txq = &sc->sge.txq[vi->first_txq + p->queue]; 8500 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 8501 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 8502 &fw_class); 8503 goto done; 8504 } 8505 8506 /* 8507 * Change the scheduling on all the TX queues for the 8508 * interface. 8509 */ 8510 for_each_txq(vi, i, txq) { 8511 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 8512 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 8513 &fw_class); 8514 if (rc) 8515 goto done; 8516 } 8517 8518 rc = 0; 8519done: 8520 end_synchronized_op(sc, 0); 8521 return (rc); 8522} 8523 8524int 8525t4_os_find_pci_capability(struct adapter *sc, int cap) 8526{ 8527 int i; 8528 8529 return (pci_find_cap(sc->dev, cap, &i) == 0 ? 
i : 0); 8530} 8531 8532int 8533t4_os_pci_save_state(struct adapter *sc) 8534{ 8535 device_t dev; 8536 struct pci_devinfo *dinfo; 8537 8538 dev = sc->dev; 8539 dinfo = device_get_ivars(dev); 8540 8541 pci_cfg_save(dev, dinfo, 0); 8542 return (0); 8543} 8544 8545int 8546t4_os_pci_restore_state(struct adapter *sc) 8547{ 8548 device_t dev; 8549 struct pci_devinfo *dinfo; 8550 8551 dev = sc->dev; 8552 dinfo = device_get_ivars(dev); 8553 8554 pci_cfg_restore(dev, dinfo); 8555 return (0); 8556} 8557 8558void 8559t4_os_portmod_changed(const struct adapter *sc, int idx) 8560{ 8561 struct port_info *pi = sc->port[idx]; 8562 struct vi_info *vi; 8563 struct ifnet *ifp; 8564 int v; 8565 static const char *mod_str[] = { 8566 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 8567 }; 8568 8569 for_each_vi(pi, v, vi) { 8570 build_medialist(pi, &vi->media); 8571 } 8572 8573 ifp = pi->vi[0].ifp; 8574 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 8575 if_printf(ifp, "transceiver unplugged.\n"); 8576 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 8577 if_printf(ifp, "unknown transceiver inserted.\n"); 8578 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 8579 if_printf(ifp, "unsupported transceiver inserted.\n"); 8580 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 8581 if_printf(ifp, "%s transceiver inserted.\n", 8582 mod_str[pi->mod_type]); 8583 } else { 8584 if_printf(ifp, "transceiver (type %d) inserted.\n", 8585 pi->mod_type); 8586 } 8587} 8588 8589void 8590t4_os_link_changed(struct adapter *sc, int idx, int link_stat, int reason) 8591{ 8592 struct port_info *pi = sc->port[idx]; 8593 struct vi_info *vi; 8594 struct ifnet *ifp; 8595 int v; 8596 8597 if (link_stat) 8598 pi->linkdnrc = -1; 8599 else { 8600 if (reason >= 0) 8601 pi->linkdnrc = reason; 8602 } 8603 for_each_vi(pi, v, vi) { 8604 ifp = vi->ifp; 8605 if (ifp == NULL) 8606 continue; 8607 8608 if (link_stat) { 8609 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed); 8610 if_link_state_change(ifp, LINK_STATE_UP); 8611 } else { 8612 if_link_state_change(ifp, LINK_STATE_DOWN); 8613 } 8614 } 8615} 8616 8617void 8618t4_iterate(void (*func)(struct adapter *, void *), void *arg) 8619{ 8620 struct adapter *sc; 8621 8622 sx_slock(&t4_list_lock); 8623 SLIST_FOREACH(sc, &t4_list, link) { 8624 /* 8625 * func should not make any assumptions about what state sc is 8626 * in - the only guarantee is that sc->sc_lock is a valid lock. 
8627 */ 8628 func(sc, arg); 8629 } 8630 sx_sunlock(&t4_list_lock); 8631} 8632 8633static int 8634t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 8635 struct thread *td) 8636{ 8637 int rc; 8638 struct adapter *sc = dev->si_drv1; 8639 8640 rc = priv_check(td, PRIV_DRIVER); 8641 if (rc != 0) 8642 return (rc); 8643 8644 switch (cmd) { 8645 case CHELSIO_T4_GETREG: { 8646 struct t4_reg *edata = (struct t4_reg *)data; 8647 8648 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 8649 return (EFAULT); 8650 8651 if (edata->size == 4) 8652 edata->val = t4_read_reg(sc, edata->addr); 8653 else if (edata->size == 8) 8654 edata->val = t4_read_reg64(sc, edata->addr); 8655 else 8656 return (EINVAL); 8657 8658 break; 8659 } 8660 case CHELSIO_T4_SETREG: { 8661 struct t4_reg *edata = (struct t4_reg *)data; 8662 8663 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 8664 return (EFAULT); 8665 8666 if (edata->size == 4) { 8667 if (edata->val & 0xffffffff00000000) 8668 return (EINVAL); 8669 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 8670 } else if (edata->size == 8) 8671 t4_write_reg64(sc, edata->addr, edata->val); 8672 else 8673 return (EINVAL); 8674 break; 8675 } 8676 case CHELSIO_T4_REGDUMP: { 8677 struct t4_regdump *regs = (struct t4_regdump *)data; 8678 int reglen = t4_get_regs_len(sc); 8679 uint8_t *buf; 8680 8681 if (regs->len < reglen) { 8682 regs->len = reglen; /* hint to the caller */ 8683 return (ENOBUFS); 8684 } 8685 8686 regs->len = reglen; 8687 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 8688 get_regs(sc, regs, buf); 8689 rc = copyout(buf, regs->data, reglen); 8690 free(buf, M_CXGBE); 8691 break; 8692 } 8693 case CHELSIO_T4_GET_FILTER_MODE: 8694 rc = get_filter_mode(sc, (uint32_t *)data); 8695 break; 8696 case CHELSIO_T4_SET_FILTER_MODE: 8697 rc = set_filter_mode(sc, *(uint32_t *)data); 8698 break; 8699 case CHELSIO_T4_GET_FILTER: 8700 rc = get_filter(sc, (struct t4_filter *)data); 8701 break; 8702 case CHELSIO_T4_SET_FILTER: 8703 rc = set_filter(sc, (struct t4_filter *)data); 8704 break; 8705 case CHELSIO_T4_DEL_FILTER: 8706 rc = del_filter(sc, (struct t4_filter *)data); 8707 break; 8708 case CHELSIO_T4_GET_SGE_CONTEXT: 8709 rc = get_sge_context(sc, (struct t4_sge_context *)data); 8710 break; 8711 case CHELSIO_T4_LOAD_FW: 8712 rc = load_fw(sc, (struct t4_data *)data); 8713 break; 8714 case CHELSIO_T4_GET_MEM: 8715 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 8716 break; 8717 case CHELSIO_T4_GET_I2C: 8718 rc = read_i2c(sc, (struct t4_i2c_data *)data); 8719 break; 8720 case CHELSIO_T4_CLEAR_STATS: { 8721 int i, v; 8722 u_int port_id = *(uint32_t *)data; 8723 struct port_info *pi; 8724 struct vi_info *vi; 8725 8726 if (port_id >= sc->params.nports) 8727 return (EINVAL); 8728 pi = sc->port[port_id]; 8729 8730 /* MAC stats */ 8731 t4_clr_port_stats(sc, pi->tx_chan); 8732 pi->tx_parse_error = 0; 8733 mtx_lock(&sc->reg_lock); 8734 for_each_vi(pi, v, vi) { 8735 if (vi->flags & VI_INIT_DONE) 8736 t4_clr_vi_stats(sc, vi->viid); 8737 } 8738 mtx_unlock(&sc->reg_lock); 8739 8740 /* 8741 * Since this command accepts a port, clear stats for 8742 * all VIs on this port. 
8743 */ 8744 for_each_vi(pi, v, vi) { 8745 if (vi->flags & VI_INIT_DONE) { 8746 struct sge_rxq *rxq; 8747 struct sge_txq *txq; 8748 struct sge_wrq *wrq; 8749 8750 for_each_rxq(vi, i, rxq) { 8751#if defined(INET) || defined(INET6) 8752 rxq->lro.lro_queued = 0; 8753 rxq->lro.lro_flushed = 0; 8754#endif 8755 rxq->rxcsum = 0; 8756 rxq->vlan_extraction = 0; 8757 } 8758 8759 for_each_txq(vi, i, txq) { 8760 txq->txcsum = 0; 8761 txq->tso_wrs = 0; 8762 txq->vlan_insertion = 0; 8763 txq->imm_wrs = 0; 8764 txq->sgl_wrs = 0; 8765 txq->txpkt_wrs = 0; 8766 txq->txpkts0_wrs = 0; 8767 txq->txpkts1_wrs = 0; 8768 txq->txpkts0_pkts = 0; 8769 txq->txpkts1_pkts = 0; 8770 mp_ring_reset_stats(txq->r); 8771 } 8772 8773#ifdef TCP_OFFLOAD 8774 /* nothing to clear for each ofld_rxq */ 8775 8776 for_each_ofld_txq(vi, i, wrq) { 8777 wrq->tx_wrs_direct = 0; 8778 wrq->tx_wrs_copied = 0; 8779 } 8780#endif 8781 8782 if (IS_MAIN_VI(vi)) { 8783 wrq = &sc->sge.ctrlq[pi->port_id]; 8784 wrq->tx_wrs_direct = 0; 8785 wrq->tx_wrs_copied = 0; 8786 } 8787 } 8788 } 8789 break; 8790 } 8791 case CHELSIO_T4_SCHED_CLASS: 8792 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); 8793 break; 8794 case CHELSIO_T4_SCHED_QUEUE: 8795 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); 8796 break; 8797 case CHELSIO_T4_GET_TRACER: 8798 rc = t4_get_tracer(sc, (struct t4_tracer *)data); 8799 break; 8800 case CHELSIO_T4_SET_TRACER: 8801 rc = t4_set_tracer(sc, (struct t4_tracer *)data); 8802 break; 8803 default: 8804 rc = ENOTTY; 8805 } 8806 8807 return (rc); 8808} 8809 8810void 8811t4_db_full(struct adapter *sc) 8812{ 8813 8814 CXGBE_UNIMPLEMENTED(__func__); 8815} 8816 8817void 8818t4_db_dropped(struct adapter *sc) 8819{ 8820 8821 CXGBE_UNIMPLEMENTED(__func__); 8822} 8823 8824#ifdef TCP_OFFLOAD 8825void 8826t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order) 8827{ 8828 8829 t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask); 8830 t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) | 8831 V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) | 8832 V_HPZ3(pgsz_order[3])); 8833} 8834 8835static int 8836toe_capability(struct vi_info *vi, int enable) 8837{ 8838 int rc; 8839 struct port_info *pi = vi->pi; 8840 struct adapter *sc = pi->adapter; 8841 8842 ASSERT_SYNCHRONIZED_OP(sc); 8843 8844 if (!is_offload(sc)) 8845 return (ENODEV); 8846 8847 if (enable) { 8848 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { 8849 /* TOE is already enabled. */ 8850 return (0); 8851 } 8852 8853 /* 8854 * We need the port's queues around so that we're able to send 8855 * and receive CPLs to/from the TOE even if the ifnet for this 8856 * port has never been UP'd administratively. 8857 */ 8858 if (!(vi->flags & VI_INIT_DONE)) { 8859 rc = vi_full_init(vi); 8860 if (rc) 8861 return (rc); 8862 } 8863 if (!(pi->vi[0].flags & VI_INIT_DONE)) { 8864 rc = vi_full_init(&pi->vi[0]); 8865 if (rc) 8866 return (rc); 8867 } 8868 8869 if (isset(&sc->offload_map, pi->port_id)) { 8870 /* TOE is enabled on another VI of this port. 
*/ 8871 pi->uld_vis++; 8872 return (0); 8873 } 8874 8875 if (!uld_active(sc, ULD_TOM)) { 8876 rc = t4_activate_uld(sc, ULD_TOM); 8877 if (rc == EAGAIN) { 8878 log(LOG_WARNING, 8879 "You must kldload t4_tom.ko before trying " 8880 "to enable TOE on a cxgbe interface.\n"); 8881 } 8882 if (rc != 0) 8883 return (rc); 8884 KASSERT(sc->tom_softc != NULL, 8885 ("%s: TOM activated but softc NULL", __func__)); 8886 KASSERT(uld_active(sc, ULD_TOM), 8887 ("%s: TOM activated but flag not set", __func__)); 8888 } 8889 8890 /* Activate iWARP and iSCSI too, if the modules are loaded. */ 8891 if (!uld_active(sc, ULD_IWARP)) 8892 (void) t4_activate_uld(sc, ULD_IWARP); 8893 if (!uld_active(sc, ULD_ISCSI)) 8894 (void) t4_activate_uld(sc, ULD_ISCSI); 8895 8896 pi->uld_vis++; 8897 setbit(&sc->offload_map, pi->port_id); 8898 } else { 8899 pi->uld_vis--; 8900 8901 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0) 8902 return (0); 8903 8904 KASSERT(uld_active(sc, ULD_TOM), 8905 ("%s: TOM never initialized?", __func__)); 8906 clrbit(&sc->offload_map, pi->port_id); 8907 } 8908 8909 return (0); 8910} 8911 8912/* 8913 * Add an upper layer driver to the global list. 8914 */ 8915int 8916t4_register_uld(struct uld_info *ui) 8917{ 8918 int rc = 0; 8919 struct uld_info *u; 8920 8921 sx_xlock(&t4_uld_list_lock); 8922 SLIST_FOREACH(u, &t4_uld_list, link) { 8923 if (u->uld_id == ui->uld_id) { 8924 rc = EEXIST; 8925 goto done; 8926 } 8927 } 8928 8929 SLIST_INSERT_HEAD(&t4_uld_list, ui, link); 8930 ui->refcount = 0; 8931done: 8932 sx_xunlock(&t4_uld_list_lock); 8933 return (rc); 8934} 8935 8936int 8937t4_unregister_uld(struct uld_info *ui) 8938{ 8939 int rc = EINVAL; 8940 struct uld_info *u; 8941 8942 sx_xlock(&t4_uld_list_lock); 8943 8944 SLIST_FOREACH(u, &t4_uld_list, link) { 8945 if (u == ui) { 8946 if (ui->refcount > 0) { 8947 rc = EBUSY; 8948 goto done; 8949 } 8950 8951 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); 8952 rc = 0; 8953 goto done; 8954 } 8955 } 8956done: 8957 sx_xunlock(&t4_uld_list_lock); 8958 return (rc); 8959} 8960 8961int 8962t4_activate_uld(struct adapter *sc, int id) 8963{ 8964 int rc; 8965 struct uld_info *ui; 8966 8967 ASSERT_SYNCHRONIZED_OP(sc); 8968 8969 if (id < 0 || id > ULD_MAX) 8970 return (EINVAL); 8971 rc = EAGAIN; /* kldoad the module with this ULD and try again. 
*/ 8972 8973 sx_slock(&t4_uld_list_lock); 8974 8975 SLIST_FOREACH(ui, &t4_uld_list, link) { 8976 if (ui->uld_id == id) { 8977 if (!(sc->flags & FULL_INIT_DONE)) { 8978 rc = adapter_full_init(sc); 8979 if (rc != 0) 8980 break; 8981 } 8982 8983 rc = ui->activate(sc); 8984 if (rc == 0) { 8985 setbit(&sc->active_ulds, id); 8986 ui->refcount++; 8987 } 8988 break; 8989 } 8990 } 8991 8992 sx_sunlock(&t4_uld_list_lock); 8993 8994 return (rc); 8995} 8996 8997int 8998t4_deactivate_uld(struct adapter *sc, int id) 8999{ 9000 int rc; 9001 struct uld_info *ui; 9002 9003 ASSERT_SYNCHRONIZED_OP(sc); 9004 9005 if (id < 0 || id > ULD_MAX) 9006 return (EINVAL); 9007 rc = ENXIO; 9008 9009 sx_slock(&t4_uld_list_lock); 9010 9011 SLIST_FOREACH(ui, &t4_uld_list, link) { 9012 if (ui->uld_id == id) { 9013 rc = ui->deactivate(sc); 9014 if (rc == 0) { 9015 clrbit(&sc->active_ulds, id); 9016 ui->refcount--; 9017 } 9018 break; 9019 } 9020 } 9021 9022 sx_sunlock(&t4_uld_list_lock); 9023 9024 return (rc); 9025} 9026 9027int 9028uld_active(struct adapter *sc, int uld_id) 9029{ 9030 9031 MPASS(uld_id >= 0 && uld_id <= ULD_MAX); 9032 9033 return (isset(&sc->active_ulds, uld_id)); 9034} 9035#endif 9036 9037/* 9038 * Come up with reasonable defaults for some of the tunables, provided they're 9039 * not set by the user (in which case we'll use the values as is). 9040 */ 9041static void 9042tweak_tunables(void) 9043{ 9044 int nc = mp_ncpus; /* our snapshot of the number of CPUs */ 9045 9046 if (t4_ntxq10g < 1) { 9047#ifdef RSS 9048 t4_ntxq10g = rss_getnumbuckets(); 9049#else 9050 t4_ntxq10g = min(nc, NTXQ_10G); 9051#endif 9052 } 9053 9054 if (t4_ntxq1g < 1) { 9055#ifdef RSS 9056 /* XXX: way too many for 1GbE? */ 9057 t4_ntxq1g = rss_getnumbuckets(); 9058#else 9059 t4_ntxq1g = min(nc, NTXQ_1G); 9060#endif 9061 } 9062 9063 if (t4_ntxq_vi < 1) 9064 t4_ntxq_vi = min(nc, NTXQ_VI); 9065 9066 if (t4_nrxq10g < 1) { 9067#ifdef RSS 9068 t4_nrxq10g = rss_getnumbuckets(); 9069#else 9070 t4_nrxq10g = min(nc, NRXQ_10G); 9071#endif 9072 } 9073 9074 if (t4_nrxq1g < 1) { 9075#ifdef RSS 9076 /* XXX: way too many for 1GbE? 
*/ 9077 t4_nrxq1g = rss_getnumbuckets(); 9078#else 9079 t4_nrxq1g = min(nc, NRXQ_1G); 9080#endif 9081 } 9082 9083 if (t4_nrxq_vi < 1) 9084 t4_nrxq_vi = min(nc, NRXQ_VI); 9085 9086#ifdef TCP_OFFLOAD 9087 if (t4_nofldtxq10g < 1) 9088 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G); 9089 9090 if (t4_nofldtxq1g < 1) 9091 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G); 9092 9093 if (t4_nofldtxq_vi < 1) 9094 t4_nofldtxq_vi = min(nc, NOFLDTXQ_VI); 9095 9096 if (t4_nofldrxq10g < 1) 9097 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G); 9098 9099 if (t4_nofldrxq1g < 1) 9100 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G); 9101 9102 if (t4_nofldrxq_vi < 1) 9103 t4_nofldrxq_vi = min(nc, NOFLDRXQ_VI); 9104 9105 if (t4_toecaps_allowed == -1) 9106 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; 9107 9108 if (t4_rdmacaps_allowed == -1) { 9109 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | 9110 FW_CAPS_CONFIG_RDMA_RDMAC; 9111 } 9112 9113 if (t4_iscsicaps_allowed == -1) { 9114 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | 9115 FW_CAPS_CONFIG_ISCSI_TARGET_PDU | 9116 FW_CAPS_CONFIG_ISCSI_T10DIF; 9117 } 9118#else 9119 if (t4_toecaps_allowed == -1) 9120 t4_toecaps_allowed = 0; 9121 9122 if (t4_rdmacaps_allowed == -1) 9123 t4_rdmacaps_allowed = 0; 9124 9125 if (t4_iscsicaps_allowed == -1) 9126 t4_iscsicaps_allowed = 0; 9127#endif 9128 9129#ifdef DEV_NETMAP 9130 if (t4_nnmtxq_vi < 1) 9131 t4_nnmtxq_vi = min(nc, NNMTXQ_VI); 9132 9133 if (t4_nnmrxq_vi < 1) 9134 t4_nnmrxq_vi = min(nc, NNMRXQ_VI); 9135#endif 9136 9137 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS) 9138 t4_tmr_idx_10g = TMR_IDX_10G; 9139 9140 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS) 9141 t4_pktc_idx_10g = PKTC_IDX_10G; 9142 9143 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS) 9144 t4_tmr_idx_1g = TMR_IDX_1G; 9145 9146 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS) 9147 t4_pktc_idx_1g = PKTC_IDX_1G; 9148 9149 if (t4_qsize_txq < 128) 9150 t4_qsize_txq = 128; 9151 9152 if (t4_qsize_rxq < 128) 9153 t4_qsize_rxq = 128; 9154 while (t4_qsize_rxq & 7) 9155 t4_qsize_rxq++; 9156 9157 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; 9158} 9159 9160#ifdef DDB 9161static void 9162t4_dump_tcb(struct adapter *sc, int tid) 9163{ 9164 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos; 9165 9166 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); 9167 save = t4_read_reg(sc, reg); 9168 base = sc->memwin[2].mw_base; 9169 9170 /* Dump TCB for the tid */ 9171 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 9172 tcb_addr += tid * TCB_SIZE; 9173 9174 if (is_t4(sc)) { 9175 pf = 0; 9176 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */ 9177 } else { 9178 pf = V_PFNUM(sc->pf); 9179 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */ 9180 } 9181 t4_write_reg(sc, reg, win_pos | pf); 9182 t4_read_reg(sc, reg); 9183 9184 off = tcb_addr - win_pos; 9185 for (i = 0; i < 4; i++) { 9186 uint32_t buf[8]; 9187 for (j = 0; j < 8; j++, off += 4) 9188 buf[j] = htonl(t4_read_reg(sc, base + off)); 9189 9190 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", 9191 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 9192 buf[7]); 9193 } 9194 9195 t4_write_reg(sc, reg, save); 9196 t4_read_reg(sc, reg); 9197} 9198 9199static void 9200t4_dump_devlog(struct adapter *sc) 9201{ 9202 struct devlog_params *dparams = &sc->params.devlog; 9203 struct fw_devlog_e e; 9204 int i, first, j, m, nentries, rc; 9205 uint64_t ftstamp = UINT64_MAX; 9206 9207 if (dparams->start == 0) { 9208 db_printf("devlog params not valid\n"); 9209 return; 9210 } 9211 
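	/*
	 * The firmware device log is a fixed-size circular array of
	 * fw_devlog_e entries in adapter memory.  The first loop below finds
	 * the oldest entry (smallest non-zero timestamp); the second prints
	 * forward from there, wrapping at nentries and stopping at an unused
	 * (zero-timestamp) slot or when the DDB pager is quit.
	 */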
9212 nentries = dparams->size / sizeof(struct fw_devlog_e); 9213 m = fwmtype_to_hwmtype(dparams->memtype); 9214 9215 /* Find the first entry. */ 9216 first = -1; 9217 for (i = 0; i < nentries && !db_pager_quit; i++) { 9218 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9219 sizeof(e), (void *)&e); 9220 if (rc != 0) 9221 break; 9222 9223 if (e.timestamp == 0) 9224 break; 9225 9226 e.timestamp = be64toh(e.timestamp); 9227 if (e.timestamp < ftstamp) { 9228 ftstamp = e.timestamp; 9229 first = i; 9230 } 9231 } 9232 9233 if (first == -1) 9234 return; 9235 9236 i = first; 9237 do { 9238 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9239 sizeof(e), (void *)&e); 9240 if (rc != 0) 9241 return; 9242 9243 if (e.timestamp == 0) 9244 return; 9245 9246 e.timestamp = be64toh(e.timestamp); 9247 e.seqno = be32toh(e.seqno); 9248 for (j = 0; j < 8; j++) 9249 e.params[j] = be32toh(e.params[j]); 9250 9251 db_printf("%10d %15ju %8s %8s ", 9252 e.seqno, e.timestamp, 9253 (e.level < nitems(devlog_level_strings) ? 9254 devlog_level_strings[e.level] : "UNKNOWN"), 9255 (e.facility < nitems(devlog_facility_strings) ? 9256 devlog_facility_strings[e.facility] : "UNKNOWN")); 9257 db_printf(e.fmt, e.params[0], e.params[1], e.params[2], 9258 e.params[3], e.params[4], e.params[5], e.params[6], 9259 e.params[7]); 9260 9261 if (++i == nentries) 9262 i = 0; 9263 } while (i != first && !db_pager_quit); 9264} 9265 9266static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table); 9267_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table); 9268 9269DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL) 9270{ 9271 device_t dev; 9272 int t; 9273 bool valid; 9274 9275 valid = false; 9276 t = db_read_token(); 9277 if (t == tIDENT) { 9278 dev = device_lookup_by_name(db_tok_string); 9279 valid = true; 9280 } 9281 db_skip_to_eol(); 9282 if (!valid) { 9283 db_printf("usage: show t4 devlog <nexus>\n"); 9284 return; 9285 } 9286 9287 if (dev == NULL) { 9288 db_printf("device not found\n"); 9289 return; 9290 } 9291 9292 t4_dump_devlog(device_get_softc(dev)); 9293} 9294 9295DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL) 9296{ 9297 device_t dev; 9298 int radix, tid, t; 9299 bool valid; 9300 9301 valid = false; 9302 radix = db_radix; 9303 db_radix = 10; 9304 t = db_read_token(); 9305 if (t == tIDENT) { 9306 dev = device_lookup_by_name(db_tok_string); 9307 t = db_read_token(); 9308 if (t == tNUMBER) { 9309 tid = db_tok_number; 9310 valid = true; 9311 } 9312 } 9313 db_radix = radix; 9314 db_skip_to_eol(); 9315 if (!valid) { 9316 db_printf("usage: show t4 tcb <nexus> <tid>\n"); 9317 return; 9318 } 9319 9320 if (dev == NULL) { 9321 db_printf("device not found\n"); 9322 return; 9323 } 9324 if (tid < 0) { 9325 db_printf("invalid tid\n"); 9326 return; 9327 } 9328 9329 t4_dump_tcb(device_get_softc(dev), tid); 9330} 9331#endif 9332 9333static struct sx mlu; /* mod load unload */ 9334SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); 9335 9336static int 9337mod_event(module_t mod, int cmd, void *arg) 9338{ 9339 int rc = 0; 9340 static int loaded = 0; 9341 9342 switch (cmd) { 9343 case MOD_LOAD: 9344 sx_xlock(&mlu); 9345 if (loaded++ == 0) { 9346 t4_sge_modload(); 9347 t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl); 9348 t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl); 9349 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); 9350 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); 9351 sx_init(&t4_list_lock, "T4/T5 adapters"); 9352 SLIST_INIT(&t4_list); 9353#ifdef TCP_OFFLOAD 
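			/*
			 * The list initialized here tracks the upper layer
			 * drivers (TOE, iWARP, iSCSI) that register through
			 * t4_register_uld(); it exists only when TCP_OFFLOAD
			 * is compiled in.
			 */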
9354 sx_init(&t4_uld_list_lock, "T4/T5 ULDs"); 9355 SLIST_INIT(&t4_uld_list); 9356#endif 9357 t4_tracer_modload(); 9358 tweak_tunables(); 9359 } 9360 sx_xunlock(&mlu); 9361 break; 9362 9363 case MOD_UNLOAD: 9364 sx_xlock(&mlu); 9365 if (--loaded == 0) { 9366 int tries; 9367 9368 sx_slock(&t4_list_lock); 9369 if (!SLIST_EMPTY(&t4_list)) { 9370 rc = EBUSY; 9371 sx_sunlock(&t4_list_lock); 9372 goto done_unload; 9373 } 9374#ifdef TCP_OFFLOAD 9375 sx_slock(&t4_uld_list_lock); 9376 if (!SLIST_EMPTY(&t4_uld_list)) { 9377 rc = EBUSY; 9378 sx_sunlock(&t4_uld_list_lock); 9379 sx_sunlock(&t4_list_lock); 9380 goto done_unload; 9381 } 9382#endif 9383 tries = 0; 9384 while (tries++ < 5 && t4_sge_extfree_refs() != 0) { 9385 uprintf("%ju clusters with custom free routine " 9386 "still in use.\n", t4_sge_extfree_refs()); 9387 pause("t4unload", 2 * hz); 9388 } 9389#ifdef TCP_OFFLOAD 9390 sx_sunlock(&t4_uld_list_lock); 9391#endif 9392 sx_sunlock(&t4_list_lock); 9393 9394 if (t4_sge_extfree_refs() == 0) { 9395 t4_tracer_modunload(); 9396#ifdef TCP_OFFLOAD 9397 sx_destroy(&t4_uld_list_lock); 9398#endif 9399 sx_destroy(&t4_list_lock); 9400 t4_sge_modunload(); 9401 loaded = 0; 9402 } else { 9403 rc = EBUSY; 9404 loaded++; /* undo earlier decrement */ 9405 } 9406 } 9407done_unload: 9408 sx_xunlock(&mlu); 9409 break; 9410 } 9411 9412 return (rc); 9413} 9414 9415static devclass_t t4_devclass, t5_devclass; 9416static devclass_t cxgbe_devclass, cxl_devclass; 9417static devclass_t vcxgbe_devclass, vcxl_devclass; 9418 9419DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0); 9420MODULE_VERSION(t4nex, 1); 9421MODULE_DEPEND(t4nex, firmware, 1, 1, 1); 9422 9423DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0); 9424MODULE_VERSION(t5nex, 1); 9425MODULE_DEPEND(t5nex, firmware, 1, 1, 1); 9426 9427DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 9428MODULE_VERSION(cxgbe, 1); 9429 9430DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0); 9431MODULE_VERSION(cxl, 1); 9432 9433DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0); 9434MODULE_VERSION(vcxgbe, 1); 9435 9436DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0); 9437MODULE_VERSION(vcxl, 1); 9438
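/*
 * Usage sketch (not part of the driver): a minimal userland program that
 * exercises the CHELSIO_T4_GETREG ioctl handled in t4_ioctl() above, the same
 * interface the cxgbetool utility drives.  The nexus node name (/dev/t4nex0),
 * the availability of t4_ioctl.h on the include path, and the register offset
 * are assumptions made for illustration; only the addr/size/val fields that
 * the ioctl handler above checks are relied on.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#include "t4_ioctl.h"		/* from sys/dev/cxgbe */

int
main(void)
{
	struct t4_reg reg;
	int fd;

	/* Assumed device node for the first T4 nexus; requires PRIV_DRIVER. */
	fd = open("/dev/t4nex0", O_RDWR);
	if (fd < 0)
		err(1, "open");

	reg.addr = 0x0;		/* example offset: 4-byte aligned, < mmio_len */
	reg.size = 4;		/* the handler accepts 4 or 8 */
	reg.val = 0;
	if (ioctl(fd, CHELSIO_T4_GETREG, &reg) != 0)
		err(1, "CHELSIO_T4_GETREG");
	printf("0x%08x: 0x%08jx\n", reg.addr, (uintmax_t)reg.val);

	close(fd);
	return (0);
}
#endif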