sys/dev/cxgbe/t4_main.c, revision 311261 (FreeBSD stable/10)
1/*- 2 * Copyright (c) 2011 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_main.c 311261 2017-01-04 06:58:35Z np $"); 30 31#include "opt_ddb.h" 32#include "opt_inet.h" 33#include "opt_inet6.h" 34 35#include <sys/param.h> 36#include <sys/conf.h> 37#include <sys/priv.h> 38#include <sys/kernel.h> 39#include <sys/bus.h> 40#include <sys/systm.h> 41#include <sys/counter.h> 42#include <sys/module.h> 43#include <sys/malloc.h> 44#include <sys/queue.h> 45#include <sys/taskqueue.h> 46#include <sys/pciio.h> 47#include <dev/pci/pcireg.h> 48#include <dev/pci/pcivar.h> 49#include <dev/pci/pci_private.h> 50#include <sys/firmware.h> 51#include <sys/sbuf.h> 52#include <sys/smp.h> 53#include <sys/socket.h> 54#include <sys/sockio.h> 55#include <sys/sysctl.h> 56#include <net/ethernet.h> 57#include <net/if.h> 58#include <net/if_types.h> 59#include <net/if_dl.h> 60#include <net/if_vlan_var.h> 61#ifdef RSS 62#include <net/rss_config.h> 63#endif 64#if defined(__i386__) || defined(__amd64__) 65#include <vm/vm.h> 66#include <vm/pmap.h> 67#endif 68#ifdef DDB 69#include <ddb/ddb.h> 70#include <ddb/db_lex.h> 71#endif 72 73#include "common/common.h" 74#include "common/t4_msg.h" 75#include "common/t4_regs.h" 76#include "common/t4_regs_values.h" 77#include "t4_ioctl.h" 78#include "t4_l2t.h" 79#include "t4_mp_ring.h" 80 81/* T4 bus driver interface */ 82static int t4_probe(device_t); 83static int t4_attach(device_t); 84static int t4_detach(device_t); 85static device_method_t t4_methods[] = { 86 DEVMETHOD(device_probe, t4_probe), 87 DEVMETHOD(device_attach, t4_attach), 88 DEVMETHOD(device_detach, t4_detach), 89 90 DEVMETHOD_END 91}; 92static driver_t t4_driver = { 93 "t4nex", 94 t4_methods, 95 sizeof(struct adapter) 96}; 97 98 99/* T4 port (cxgbe) interface */ 100static int cxgbe_probe(device_t); 101static int cxgbe_attach(device_t); 102static int cxgbe_detach(device_t); 103device_method_t cxgbe_methods[] = { 104 DEVMETHOD(device_probe, cxgbe_probe), 105 DEVMETHOD(device_attach, cxgbe_attach), 106 DEVMETHOD(device_detach, cxgbe_detach), 107 { 0, 0 } 108}; 109static driver_t cxgbe_driver = { 110 
"cxgbe", 111 cxgbe_methods, 112 sizeof(struct port_info) 113}; 114 115/* T4 VI (vcxgbe) interface */ 116static int vcxgbe_probe(device_t); 117static int vcxgbe_attach(device_t); 118static int vcxgbe_detach(device_t); 119static device_method_t vcxgbe_methods[] = { 120 DEVMETHOD(device_probe, vcxgbe_probe), 121 DEVMETHOD(device_attach, vcxgbe_attach), 122 DEVMETHOD(device_detach, vcxgbe_detach), 123 { 0, 0 } 124}; 125static driver_t vcxgbe_driver = { 126 "vcxgbe", 127 vcxgbe_methods, 128 sizeof(struct vi_info) 129}; 130 131static d_ioctl_t t4_ioctl; 132 133static struct cdevsw t4_cdevsw = { 134 .d_version = D_VERSION, 135 .d_ioctl = t4_ioctl, 136 .d_name = "t4nex", 137}; 138 139/* T5 bus driver interface */ 140static int t5_probe(device_t); 141static device_method_t t5_methods[] = { 142 DEVMETHOD(device_probe, t5_probe), 143 DEVMETHOD(device_attach, t4_attach), 144 DEVMETHOD(device_detach, t4_detach), 145 146 DEVMETHOD_END 147}; 148static driver_t t5_driver = { 149 "t5nex", 150 t5_methods, 151 sizeof(struct adapter) 152}; 153 154 155/* T5 port (cxl) interface */ 156static driver_t cxl_driver = { 157 "cxl", 158 cxgbe_methods, 159 sizeof(struct port_info) 160}; 161 162/* T5 VI (vcxl) interface */ 163static driver_t vcxl_driver = { 164 "vcxl", 165 vcxgbe_methods, 166 sizeof(struct vi_info) 167}; 168 169/* T6 bus driver interface */ 170static int t6_probe(device_t); 171static device_method_t t6_methods[] = { 172 DEVMETHOD(device_probe, t6_probe), 173 DEVMETHOD(device_attach, t4_attach), 174 DEVMETHOD(device_detach, t4_detach), 175 176 DEVMETHOD_END 177}; 178static driver_t t6_driver = { 179 "t6nex", 180 t6_methods, 181 sizeof(struct adapter) 182}; 183 184 185/* T6 port (cc) interface */ 186static driver_t cc_driver = { 187 "cc", 188 cxgbe_methods, 189 sizeof(struct port_info) 190}; 191 192/* T6 VI (vcc) interface */ 193static driver_t vcc_driver = { 194 "vcc", 195 vcxgbe_methods, 196 sizeof(struct vi_info) 197}; 198 199/* ifnet + media interface */ 200static void cxgbe_init(void *); 201static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t); 202static int cxgbe_transmit(struct ifnet *, struct mbuf *); 203static void cxgbe_qflush(struct ifnet *); 204static int cxgbe_media_change(struct ifnet *); 205static void cxgbe_media_status(struct ifnet *, struct ifmediareq *); 206 207MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services"); 208 209/* 210 * Correct lock order when you need to acquire multiple locks is t4_list_lock, 211 * then ADAPTER_LOCK, then t4_uld_list_lock. 212 */ 213static struct sx t4_list_lock; 214SLIST_HEAD(, adapter) t4_list; 215#ifdef TCP_OFFLOAD 216static struct sx t4_uld_list_lock; 217SLIST_HEAD(, uld_info) t4_uld_list; 218#endif 219 220/* 221 * Tunables. See tweak_tunables() too. 222 * 223 * Each tunable is set to a default value here if it's known at compile-time. 224 * Otherwise it is set to -1 as an indication to tweak_tunables() that it should 225 * provide a reasonable default when the driver is loaded. 226 * 227 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to 228 * T5 are under hw.cxl. 229 */ 230 231/* 232 * Number of queues for tx and rx, 10G and 1G, NIC and offload. 
233 */ 234#define NTXQ_10G 16 235int t4_ntxq10g = -1; 236TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g); 237 238#define NRXQ_10G 8 239int t4_nrxq10g = -1; 240TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g); 241 242#define NTXQ_1G 4 243int t4_ntxq1g = -1; 244TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g); 245 246#define NRXQ_1G 2 247int t4_nrxq1g = -1; 248TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g); 249 250#define NTXQ_VI 1 251static int t4_ntxq_vi = -1; 252TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi); 253 254#define NRXQ_VI 1 255static int t4_nrxq_vi = -1; 256TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi); 257 258static int t4_rsrv_noflowq = 0; 259TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq); 260 261#ifdef TCP_OFFLOAD 262#define NOFLDTXQ_10G 8 263static int t4_nofldtxq10g = -1; 264TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g); 265 266#define NOFLDRXQ_10G 2 267static int t4_nofldrxq10g = -1; 268TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g); 269 270#define NOFLDTXQ_1G 2 271static int t4_nofldtxq1g = -1; 272TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g); 273 274#define NOFLDRXQ_1G 1 275static int t4_nofldrxq1g = -1; 276TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g); 277 278#define NOFLDTXQ_VI 1 279static int t4_nofldtxq_vi = -1; 280TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi); 281 282#define NOFLDRXQ_VI 1 283static int t4_nofldrxq_vi = -1; 284TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi); 285#endif 286 287#ifdef DEV_NETMAP 288#define NNMTXQ_VI 2 289static int t4_nnmtxq_vi = -1; 290TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi); 291 292#define NNMRXQ_VI 2 293static int t4_nnmrxq_vi = -1; 294TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi); 295#endif 296 297/* 298 * Holdoff parameters for 10G and 1G ports. 299 */ 300#define TMR_IDX_10G 1 301int t4_tmr_idx_10g = TMR_IDX_10G; 302TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g); 303 304#define PKTC_IDX_10G (-1) 305int t4_pktc_idx_10g = PKTC_IDX_10G; 306TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g); 307 308#define TMR_IDX_1G 1 309int t4_tmr_idx_1g = TMR_IDX_1G; 310TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g); 311 312#define PKTC_IDX_1G (-1) 313int t4_pktc_idx_1g = PKTC_IDX_1G; 314TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g); 315 316/* 317 * Size (# of entries) of each tx and rx queue. 318 */ 319unsigned int t4_qsize_txq = TX_EQ_QSIZE; 320TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq); 321 322unsigned int t4_qsize_rxq = RX_IQ_QSIZE; 323TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq); 324 325/* 326 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively). 327 */ 328int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX; 329TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types); 330 331/* 332 * Configuration file. 333 */ 334#define DEFAULT_CF "default" 335#define FLASH_CF "flash" 336#define UWIRE_CF "uwire" 337#define FPGA_CF "fpga" 338static char t4_cfg_file[32] = DEFAULT_CF; 339TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file)); 340 341/* 342 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively). 343 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them. 344 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water 345 * mark or when signalled to do so, 0 to never emit PAUSE. 
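 * Illustrative encodings: 0 = ignore and never emit PAUSE, 1 = rx_pause
 * only, 2 = tx_pause only, 3 = both (the compiled-in default below,
 * PAUSE_TX | PAUSE_RX).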
346 */ 347static int t4_pause_settings = PAUSE_TX | PAUSE_RX; 348TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings); 349 350/* 351 * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS, 352 * FEC_RESERVED respectively). 353 * -1 to run with the firmware default. 354 * 0 to disable FEC. 355 */ 356static int t4_fec = -1; 357TUNABLE_INT("hw.cxgbe.fec", &t4_fec); 358 359/* 360 * Link autonegotiation. 361 * -1 to run with the firmware default. 362 * 0 to disable. 363 * 1 to enable. 364 */ 365static int t4_autoneg = -1; 366TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg); 367 368/* 369 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed, 370 * encouraged respectively). 371 */ 372static unsigned int t4_fw_install = 1; 373TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install); 374 375/* 376 * ASIC features that will be used. Disable the ones you don't want so that the 377 * chip resources aren't wasted on features that will not be used. 378 */ 379static int t4_nbmcaps_allowed = 0; 380TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed); 381 382static int t4_linkcaps_allowed = 0; /* No DCBX, PPP, etc. by default */ 383TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed); 384 385static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS | 386 FW_CAPS_CONFIG_SWITCH_EGRESS; 387TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed); 388 389static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC; 390TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed); 391 392static int t4_toecaps_allowed = -1; 393TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed); 394 395static int t4_rdmacaps_allowed = -1; 396TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed); 397 398static int t4_cryptocaps_allowed = 0; 399TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed); 400 401static int t4_iscsicaps_allowed = -1; 402TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed); 403 404static int t4_fcoecaps_allowed = 0; 405TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed); 406 407static int t5_write_combine = 0; 408TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine); 409 410static int t4_num_vis = 1; 411TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis); 412 413/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */ 414static int vi_mac_funcs[] = { 415 FW_VI_FUNC_OFLD, 416 FW_VI_FUNC_IWARP, 417 FW_VI_FUNC_OPENISCSI, 418 FW_VI_FUNC_OPENFCOE, 419 FW_VI_FUNC_FOISCSI, 420 FW_VI_FUNC_FOFCOE, 421}; 422 423struct intrs_and_queues { 424 uint16_t intr_type; /* INTx, MSI, or MSI-X */ 425 uint16_t nirq; /* Total # of vectors */ 426 uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */ 427 uint16_t intr_flags_1g; /* Interrupt flags for each 1G port */ 428 uint16_t ntxq10g; /* # of NIC txq's for each 10G port */ 429 uint16_t nrxq10g; /* # of NIC rxq's for each 10G port */ 430 uint16_t ntxq1g; /* # of NIC txq's for each 1G port */ 431 uint16_t nrxq1g; /* # of NIC rxq's for each 1G port */ 432 uint16_t rsrv_noflowq; /* Flag whether to reserve queue 0 */ 433 uint16_t nofldtxq10g; /* # of TOE txq's for each 10G port */ 434 uint16_t nofldrxq10g; /* # of TOE rxq's for each 10G port */ 435 uint16_t nofldtxq1g; /* # of TOE txq's for each 1G port */ 436 uint16_t nofldrxq1g; /* # of TOE rxq's for each 1G port */ 437 438 /* The vcxgbe/vcxl interfaces use these and not the ones above. 
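 * A single set of per-VI counts is used for every extra VI, regardless of
 * the parent port's speed (see the j == 0 ? ... : ..._vi selections in
 * t4_attach()).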
*/ 439 uint16_t ntxq_vi; /* # of NIC txq's */ 440 uint16_t nrxq_vi; /* # of NIC rxq's */ 441 uint16_t nofldtxq_vi; /* # of TOE txq's */ 442 uint16_t nofldrxq_vi; /* # of TOE rxq's */ 443 uint16_t nnmtxq_vi; /* # of netmap txq's */ 444 uint16_t nnmrxq_vi; /* # of netmap rxq's */ 445}; 446 447struct filter_entry { 448 uint32_t valid:1; /* filter allocated and valid */ 449 uint32_t locked:1; /* filter is administratively locked */ 450 uint32_t pending:1; /* filter action is pending firmware reply */ 451 uint32_t smtidx:8; /* Source MAC Table index for smac */ 452 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */ 453 454 struct t4_filter_specification fs; 455}; 456 457static void setup_memwin(struct adapter *); 458static void position_memwin(struct adapter *, int, uint32_t); 459static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int); 460static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *, 461 int); 462static inline int write_via_memwin(struct adapter *, int, uint32_t, 463 const uint32_t *, int); 464static int validate_mem_range(struct adapter *, uint32_t, int); 465static int fwmtype_to_hwmtype(int); 466static int validate_mt_off_len(struct adapter *, int, uint32_t, int, 467 uint32_t *); 468static int fixup_devlog_params(struct adapter *); 469static int cfg_itype_and_nqueues(struct adapter *, int, int, int, 470 struct intrs_and_queues *); 471static int prep_firmware(struct adapter *); 472static int partition_resources(struct adapter *, const struct firmware *, 473 const char *); 474static int get_params__pre_init(struct adapter *); 475static int get_params__post_init(struct adapter *); 476static int set_params__post_init(struct adapter *); 477static void t4_set_desc(struct adapter *); 478static void build_medialist(struct port_info *, struct ifmedia *); 479static int cxgbe_init_synchronized(struct vi_info *); 480static int cxgbe_uninit_synchronized(struct vi_info *); 481static void quiesce_txq(struct adapter *, struct sge_txq *); 482static void quiesce_wrq(struct adapter *, struct sge_wrq *); 483static void quiesce_iq(struct adapter *, struct sge_iq *); 484static void quiesce_fl(struct adapter *, struct sge_fl *); 485static int t4_alloc_irq(struct adapter *, struct irq *, int rid, 486 driver_intr_t *, void *, char *); 487static int t4_free_irq(struct adapter *, struct irq *); 488static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *); 489static void vi_refresh_stats(struct adapter *, struct vi_info *); 490static void cxgbe_refresh_stats(struct adapter *, struct port_info *); 491static void cxgbe_tick(void *); 492static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t); 493static void cxgbe_sysctls(struct port_info *); 494static int sysctl_int_array(SYSCTL_HANDLER_ARGS); 495static int sysctl_bitfield(SYSCTL_HANDLER_ARGS); 496static int sysctl_btphy(SYSCTL_HANDLER_ARGS); 497static int sysctl_noflowq(SYSCTL_HANDLER_ARGS); 498static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS); 499static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS); 500static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS); 501static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS); 502static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS); 503static int sysctl_fec(SYSCTL_HANDLER_ARGS); 504static int sysctl_autoneg(SYSCTL_HANDLER_ARGS); 505static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS); 506static int sysctl_temperature(SYSCTL_HANDLER_ARGS); 507#ifdef SBUF_DRAIN 508static int sysctl_cctrl(SYSCTL_HANDLER_ARGS); 509static int 
sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS); 510static int sysctl_cim_la(SYSCTL_HANDLER_ARGS); 511static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS); 512static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS); 513static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS); 514static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS); 515static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS); 516static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS); 517static int sysctl_devlog(SYSCTL_HANDLER_ARGS); 518static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS); 519static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS); 520static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS); 521static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS); 522static int sysctl_meminfo(SYSCTL_HANDLER_ARGS); 523static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS); 524static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS); 525static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS); 526static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS); 527static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS); 528static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS); 529static int sysctl_tids(SYSCTL_HANDLER_ARGS); 530static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS); 531static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS); 532static int sysctl_tp_la(SYSCTL_HANDLER_ARGS); 533static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS); 534static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS); 535static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS); 536static int sysctl_tc_params(SYSCTL_HANDLER_ARGS); 537#endif 538#ifdef TCP_OFFLOAD 539static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS); 540static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS); 541static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS); 542#endif 543static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t); 544static uint32_t mode_to_fconf(uint32_t); 545static uint32_t mode_to_iconf(uint32_t); 546static int check_fspec_against_fconf_iconf(struct adapter *, 547 struct t4_filter_specification *); 548static int get_filter_mode(struct adapter *, uint32_t *); 549static int set_filter_mode(struct adapter *, uint32_t); 550static inline uint64_t get_filter_hits(struct adapter *, uint32_t); 551static int get_filter(struct adapter *, struct t4_filter *); 552static int set_filter(struct adapter *, struct t4_filter *); 553static int del_filter(struct adapter *, struct t4_filter *); 554static void clear_filter(struct filter_entry *); 555static int set_filter_wr(struct adapter *, int); 556static int del_filter_wr(struct adapter *, int); 557static int set_tcb_rpl(struct sge_iq *, const struct rss_header *, 558 struct mbuf *); 559static int get_sge_context(struct adapter *, struct t4_sge_context *); 560static int load_fw(struct adapter *, struct t4_data *); 561static int load_cfg(struct adapter *, struct t4_data *); 562static int read_card_mem(struct adapter *, int, struct t4_mem_range *); 563static int read_i2c(struct adapter *, struct t4_i2c_data *); 564#ifdef TCP_OFFLOAD 565static int toe_capability(struct vi_info *, int); 566#endif 567static int mod_event(module_t, int, void *); 568 569struct { 570 uint16_t device; 571 char *desc; 572} t4_pciids[] = { 573 {0xa000, "Chelsio Terminator 4 FPGA"}, 574 {0x4400, "Chelsio T440-dbg"}, 575 {0x4401, "Chelsio T420-CR"}, 576 {0x4402, "Chelsio T422-CR"}, 577 {0x4403, "Chelsio T440-CR"}, 578 {0x4404, "Chelsio T420-BCH"}, 579 {0x4405, "Chelsio T440-BCH"}, 580 {0x4406, "Chelsio T440-CH"}, 581 {0x4407, "Chelsio T420-SO"}, 582 {0x4408, "Chelsio T420-CX"}, 583 {0x4409, "Chelsio T420-BT"}, 584 {0x440a, "Chelsio T404-BT"}, 585 {0x440e, "Chelsio T440-LP-CR"}, 586}, t5_pciids[] = { 587 
{0xb000, "Chelsio Terminator 5 FPGA"}, 588 {0x5400, "Chelsio T580-dbg"}, 589 {0x5401, "Chelsio T520-CR"}, /* 2 x 10G */ 590 {0x5402, "Chelsio T522-CR"}, /* 2 x 10G, 2 X 1G */ 591 {0x5403, "Chelsio T540-CR"}, /* 4 x 10G */ 592 {0x5407, "Chelsio T520-SO"}, /* 2 x 10G, nomem */ 593 {0x5409, "Chelsio T520-BT"}, /* 2 x 10GBaseT */ 594 {0x540a, "Chelsio T504-BT"}, /* 4 x 1G */ 595 {0x540d, "Chelsio T580-CR"}, /* 2 x 40G */ 596 {0x540e, "Chelsio T540-LP-CR"}, /* 4 x 10G */ 597 {0x5410, "Chelsio T580-LP-CR"}, /* 2 x 40G */ 598 {0x5411, "Chelsio T520-LL-CR"}, /* 2 x 10G */ 599 {0x5412, "Chelsio T560-CR"}, /* 1 x 40G, 2 x 10G */ 600 {0x5414, "Chelsio T580-LP-SO-CR"}, /* 2 x 40G, nomem */ 601 {0x5415, "Chelsio T502-BT"}, /* 2 x 1G */ 602#ifdef notyet 603 {0x5404, "Chelsio T520-BCH"}, 604 {0x5405, "Chelsio T540-BCH"}, 605 {0x5406, "Chelsio T540-CH"}, 606 {0x5408, "Chelsio T520-CX"}, 607 {0x540b, "Chelsio B520-SR"}, 608 {0x540c, "Chelsio B504-BT"}, 609 {0x540f, "Chelsio Amsterdam"}, 610 {0x5413, "Chelsio T580-CHR"}, 611#endif 612}, t6_pciids[] = { 613 {0xc006, "Chelsio Terminator 6 FPGA"}, /* T6 PE10K6 FPGA (PF0) */ 614 {0x6401, "Chelsio T6225-CR"}, /* 2 x 10/25G */ 615 {0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */ 616 {0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */ 617 {0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */ 618 {0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */ 619 {0x6410, "Chelsio T62100-DBG"}, /* 2 x 40/50/100G, debug */ 620}; 621 622#ifdef TCP_OFFLOAD 623/* 624 * service_iq() has an iq and needs the fl. Offset of fl from the iq should be 625 * exactly the same for both rxq and ofld_rxq. 626 */ 627CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq)); 628CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl)); 629#endif 630CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE); 631 632static int 633t4_probe(device_t dev) 634{ 635 int i; 636 uint16_t v = pci_get_vendor(dev); 637 uint16_t d = pci_get_device(dev); 638 uint8_t f = pci_get_function(dev); 639 640 if (v != PCI_VENDOR_ID_CHELSIO) 641 return (ENXIO); 642 643 /* Attach only to PF0 of the FPGA */ 644 if (d == 0xa000 && f != 0) 645 return (ENXIO); 646 647 for (i = 0; i < nitems(t4_pciids); i++) { 648 if (d == t4_pciids[i].device) { 649 device_set_desc(dev, t4_pciids[i].desc); 650 return (BUS_PROBE_DEFAULT); 651 } 652 } 653 654 return (ENXIO); 655} 656 657static int 658t5_probe(device_t dev) 659{ 660 int i; 661 uint16_t v = pci_get_vendor(dev); 662 uint16_t d = pci_get_device(dev); 663 uint8_t f = pci_get_function(dev); 664 665 if (v != PCI_VENDOR_ID_CHELSIO) 666 return (ENXIO); 667 668 /* Attach only to PF0 of the FPGA */ 669 if (d == 0xb000 && f != 0) 670 return (ENXIO); 671 672 for (i = 0; i < nitems(t5_pciids); i++) { 673 if (d == t5_pciids[i].device) { 674 device_set_desc(dev, t5_pciids[i].desc); 675 return (BUS_PROBE_DEFAULT); 676 } 677 } 678 679 return (ENXIO); 680} 681 682static int 683t6_probe(device_t dev) 684{ 685 int i; 686 uint16_t v = pci_get_vendor(dev); 687 uint16_t d = pci_get_device(dev); 688 689 if (v != PCI_VENDOR_ID_CHELSIO) 690 return (ENXIO); 691 692 for (i = 0; i < nitems(t6_pciids); i++) { 693 if (d == t6_pciids[i].device) { 694 device_set_desc(dev, t6_pciids[i].desc); 695 return (BUS_PROBE_DEFAULT); 696 } 697 } 698 699 return (ENXIO); 700} 701 702static void 703t5_attribute_workaround(device_t dev) 704{ 705 device_t root_port; 706 uint32_t v; 707 708 /* 709 * The T5 chips do not properly echo the No Snoop and Relaxed 710 * 
Ordering attributes when replying to a TLP from a Root 711 * Port. As a workaround, find the parent Root Port and 712 * disable No Snoop and Relaxed Ordering. Note that this 713 * affects all devices under this root port. 714 */ 715 root_port = pci_find_pcie_root_port(dev); 716 if (root_port == NULL) { 717 device_printf(dev, "Unable to find parent root port\n"); 718 return; 719 } 720 721 v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL, 722 PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2); 723 if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) != 724 0) 725 device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n", 726 device_get_nameunit(root_port)); 727} 728 729static const struct devnames devnames[] = { 730 { 731 .nexus_name = "t4nex", 732 .ifnet_name = "cxgbe", 733 .vi_ifnet_name = "vcxgbe", 734 .pf03_drv_name = "t4iov", 735 .vf_nexus_name = "t4vf", 736 .vf_ifnet_name = "cxgbev" 737 }, { 738 .nexus_name = "t5nex", 739 .ifnet_name = "cxl", 740 .vi_ifnet_name = "vcxl", 741 .pf03_drv_name = "t5iov", 742 .vf_nexus_name = "t5vf", 743 .vf_ifnet_name = "cxlv" 744 }, { 745 .nexus_name = "t6nex", 746 .ifnet_name = "cc", 747 .vi_ifnet_name = "vcc", 748 .pf03_drv_name = "t6iov", 749 .vf_nexus_name = "t6vf", 750 .vf_ifnet_name = "ccv" 751 } 752}; 753 754void 755t4_init_devnames(struct adapter *sc) 756{ 757 int id; 758 759 id = chip_id(sc); 760 if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames)) 761 sc->names = &devnames[id - CHELSIO_T4]; 762 else { 763 device_printf(sc->dev, "chip id %d is not supported.\n", id); 764 sc->names = NULL; 765 } 766} 767 768static int 769t4_attach(device_t dev) 770{ 771 struct adapter *sc; 772 int rc = 0, i, j, n10g, n1g, rqidx, tqidx; 773 struct make_dev_args mda; 774 struct intrs_and_queues iaq; 775 struct sge *s; 776 uint8_t *buf; 777#ifdef TCP_OFFLOAD 778 int ofld_rqidx, ofld_tqidx; 779#endif 780#ifdef DEV_NETMAP 781 int nm_rqidx, nm_tqidx; 782#endif 783 int num_vis; 784 785 sc = device_get_softc(dev); 786 sc->dev = dev; 787 TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags); 788 789 if ((pci_get_device(dev) & 0xff00) == 0x5400) 790 t5_attribute_workaround(dev); 791 pci_enable_busmaster(dev); 792 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { 793 uint32_t v; 794 795 pci_set_max_read_req(dev, 4096); 796 v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2); 797 v |= PCIEM_CTL_RELAXED_ORD_ENABLE; 798 pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2); 799 800 sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5); 801 } 802 803 sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS); 804 sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL); 805 sc->traceq = -1; 806 mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF); 807 snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer", 808 device_get_nameunit(dev)); 809 810 snprintf(sc->lockname, sizeof(sc->lockname), "%s", 811 device_get_nameunit(dev)); 812 mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF); 813 t4_add_adapter(sc); 814 815 mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF); 816 TAILQ_INIT(&sc->sfl); 817 callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0); 818 819 mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF); 820 821 rc = t4_map_bars_0_and_4(sc); 822 if (rc != 0) 823 goto done; /* error message displayed already */ 824 825 memset(sc->chan_map, 0xff, sizeof(sc->chan_map)); 826 827 /* Prepare the adapter for operation. 
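	 * t4_prep_adapter() is handed a page-sized scratch buffer.  Note the
	 * leading minus on the call below: the shared common/ code returns
	 * negative errnos on failure and this file flips them to positive,
	 * the same convention used for -t4_port_init(), -t4_link_l1cfg(),
	 * etc. further down.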
*/ 828 buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK); 829 rc = -t4_prep_adapter(sc, buf); 830 free(buf, M_CXGBE); 831 if (rc != 0) { 832 device_printf(dev, "failed to prepare adapter: %d.\n", rc); 833 goto done; 834 } 835 836 /* 837 * This is the real PF# to which we're attaching. Works from within PCI 838 * passthrough environments too, where pci_get_function() could return a 839 * different PF# depending on the passthrough configuration. We need to 840 * use the real PF# in all our communication with the firmware. 841 */ 842 j = t4_read_reg(sc, A_PL_WHOAMI); 843 sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j); 844 sc->mbox = sc->pf; 845 846 t4_init_devnames(sc); 847 if (sc->names == NULL) { 848 rc = ENOTSUP; 849 goto done; /* error message displayed already */ 850 } 851 852 /* 853 * Do this really early, with the memory windows set up even before the 854 * character device. The userland tool's register i/o and mem read 855 * will work even in "recovery mode". 856 */ 857 setup_memwin(sc); 858 if (t4_init_devlog_params(sc, 0) == 0) 859 fixup_devlog_params(sc); 860 make_dev_args_init(&mda); 861 mda.mda_devsw = &t4_cdevsw; 862 mda.mda_uid = UID_ROOT; 863 mda.mda_gid = GID_WHEEL; 864 mda.mda_mode = 0600; 865 mda.mda_si_drv1 = sc; 866 rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev)); 867 if (rc != 0) 868 device_printf(dev, "failed to create nexus char device: %d.\n", 869 rc); 870 871 /* Go no further if recovery mode has been requested. */ 872 if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) { 873 device_printf(dev, "recovery mode.\n"); 874 goto done; 875 } 876 877#if defined(__i386__) 878 if ((cpu_feature & CPUID_CX8) == 0) { 879 device_printf(dev, "64 bit atomics not available.\n"); 880 rc = ENOTSUP; 881 goto done; 882 } 883#endif 884 885 /* Prepare the firmware for operation */ 886 rc = prep_firmware(sc); 887 if (rc != 0) 888 goto done; /* error message displayed already */ 889 890 rc = get_params__post_init(sc); 891 if (rc != 0) 892 goto done; /* error message displayed already */ 893 894 rc = set_params__post_init(sc); 895 if (rc != 0) 896 goto done; /* error message displayed already */ 897 898 rc = t4_map_bar_2(sc); 899 if (rc != 0) 900 goto done; /* error message displayed already */ 901 902 rc = t4_create_dma_tag(sc); 903 if (rc != 0) 904 goto done; /* error message displayed already */ 905 906 /* 907 * Number of VIs to create per-port. The first VI is the "main" regular 908 * VI for the port. The rest are additional virtual interfaces on the 909 * same physical port. Note that the main VI does not have native 910 * netmap support but the extra VIs do. 911 * 912 * Limit the number of VIs per port to the number of available 913 * MAC addresses per port. 914 */ 915 if (t4_num_vis >= 1) 916 num_vis = t4_num_vis; 917 else 918 num_vis = 1; 919 if (num_vis > nitems(vi_mac_funcs)) { 920 num_vis = nitems(vi_mac_funcs); 921 device_printf(dev, "Number of VIs limited to %d\n", num_vis); 922 } 923 924 /* 925 * First pass over all the ports - allocate VIs and initialize some 926 * basic parameters like mac address, port type, etc. We also figure 927 * out whether a port is 10G or 1G and use that information when 928 * calculating how many interrupts to attempt to allocate. 
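	 * A port counts as 10G-class when port_top_speed(pi) >= 10;
	 * everything else is counted as 1G.  The resulting n10g/n1g totals
	 * are what cfg_itype_and_nqueues() uses to size the interrupt and
	 * queue allocation below.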
929 */ 930 n10g = n1g = 0; 931 for_each_port(sc, i) { 932 struct port_info *pi; 933 struct link_config *lc; 934 935 pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK); 936 sc->port[i] = pi; 937 938 /* These must be set before t4_port_init */ 939 pi->adapter = sc; 940 pi->port_id = i; 941 /* 942 * XXX: vi[0] is special so we can't delay this allocation until 943 * pi->nvi's final value is known. 944 */ 945 pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE, 946 M_ZERO | M_WAITOK); 947 948 /* 949 * Allocate the "main" VI and initialize parameters 950 * like mac addr. 951 */ 952 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i); 953 if (rc != 0) { 954 device_printf(dev, "unable to initialize port %d: %d\n", 955 i, rc); 956 free(pi->vi, M_CXGBE); 957 free(pi, M_CXGBE); 958 sc->port[i] = NULL; 959 goto done; 960 } 961 962 lc = &pi->link_cfg; 963 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 964 lc->requested_fc |= t4_pause_settings; 965 if (t4_fec != -1) { 966 lc->requested_fec = t4_fec & 967 G_FW_PORT_CAP_FEC(lc->supported); 968 } 969 if (lc->supported & FW_PORT_CAP_ANEG && t4_autoneg != -1) { 970 lc->autoneg = t4_autoneg ? AUTONEG_ENABLE : 971 AUTONEG_DISABLE; 972 } 973 974 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 975 if (rc != 0) { 976 device_printf(dev, "port %d l1cfg failed: %d\n", i, rc); 977 free(pi->vi, M_CXGBE); 978 free(pi, M_CXGBE); 979 sc->port[i] = NULL; 980 goto done; 981 } 982 983 snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d", 984 device_get_nameunit(dev), i); 985 mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF); 986 sc->chan_map[pi->tx_chan] = i; 987 988 pi->tc = malloc(sizeof(struct tx_sched_class) * 989 sc->chip_params->nsched_cls, M_CXGBE, M_ZERO | M_WAITOK); 990 991 if (port_top_speed(pi) >= 10) { 992 n10g++; 993 } else { 994 n1g++; 995 } 996 997 pi->dev = device_add_child(dev, sc->names->ifnet_name, -1); 998 if (pi->dev == NULL) { 999 device_printf(dev, 1000 "failed to add device for port %d.\n", i); 1001 rc = ENXIO; 1002 goto done; 1003 } 1004 pi->vi[0].dev = pi->dev; 1005 device_set_softc(pi->dev, pi); 1006 } 1007 1008 /* 1009 * Interrupt type, # of interrupts, # of rx/tx queues, etc. 
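	 * Worked example (hypothetical numbers, NIC queues only): a 2-port
	 * 10G adapter with num_vis = 1, iaq.nrxq10g = 8 and iaq.ntxq10g = 16
	 * ends up with s->nrxq = 16, s->ntxq = 32,
	 * s->neq = 32 + 16 + 2 + 1 = 51 (each rxq's free list is an eq, plus
	 * one ctrl queue per port and one mgmt queue), and
	 * s->niq = 16 + 1 = 17 (the extra iq is the firmware event queue).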
1010 */ 1011 rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq); 1012 if (rc != 0) 1013 goto done; /* error message displayed already */ 1014 if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0) 1015 num_vis = 1; 1016 1017 sc->intr_type = iaq.intr_type; 1018 sc->intr_count = iaq.nirq; 1019 1020 s = &sc->sge; 1021 s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g; 1022 s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g; 1023 if (num_vis > 1) { 1024 s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi; 1025 s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi; 1026 } 1027 s->neq = s->ntxq + s->nrxq; /* the free list in an rxq is an eq */ 1028 s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */ 1029 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ 1030#ifdef TCP_OFFLOAD 1031 if (is_offload(sc)) { 1032 s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g; 1033 s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g; 1034 if (num_vis > 1) { 1035 s->nofldrxq += (n10g + n1g) * (num_vis - 1) * 1036 iaq.nofldrxq_vi; 1037 s->nofldtxq += (n10g + n1g) * (num_vis - 1) * 1038 iaq.nofldtxq_vi; 1039 } 1040 s->neq += s->nofldtxq + s->nofldrxq; 1041 s->niq += s->nofldrxq; 1042 1043 s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq), 1044 M_CXGBE, M_ZERO | M_WAITOK); 1045 s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq), 1046 M_CXGBE, M_ZERO | M_WAITOK); 1047 } 1048#endif 1049#ifdef DEV_NETMAP 1050 if (num_vis > 1) { 1051 s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi; 1052 s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi; 1053 } 1054 s->neq += s->nnmtxq + s->nnmrxq; 1055 s->niq += s->nnmrxq; 1056 1057 s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq), 1058 M_CXGBE, M_ZERO | M_WAITOK); 1059 s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq), 1060 M_CXGBE, M_ZERO | M_WAITOK); 1061#endif 1062 1063 s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE, 1064 M_ZERO | M_WAITOK); 1065 s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE, 1066 M_ZERO | M_WAITOK); 1067 s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE, 1068 M_ZERO | M_WAITOK); 1069 s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE, 1070 M_ZERO | M_WAITOK); 1071 s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE, 1072 M_ZERO | M_WAITOK); 1073 1074 sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE, 1075 M_ZERO | M_WAITOK); 1076 1077 t4_init_l2t(sc, M_WAITOK); 1078 1079 /* 1080 * Second pass over the ports. This time we know the number of rx and 1081 * tx queues that each port should get. 1082 */ 1083 rqidx = tqidx = 0; 1084#ifdef TCP_OFFLOAD 1085 ofld_rqidx = ofld_tqidx = 0; 1086#endif 1087#ifdef DEV_NETMAP 1088 nm_rqidx = nm_tqidx = 0; 1089#endif 1090 for_each_port(sc, i) { 1091 struct port_info *pi = sc->port[i]; 1092 struct vi_info *vi; 1093 1094 if (pi == NULL) 1095 continue; 1096 1097 pi->nvi = num_vis; 1098 for_each_vi(pi, j, vi) { 1099 vi->pi = pi; 1100 vi->qsize_rxq = t4_qsize_rxq; 1101 vi->qsize_txq = t4_qsize_txq; 1102 1103 vi->first_rxq = rqidx; 1104 vi->first_txq = tqidx; 1105 if (port_top_speed(pi) >= 10) { 1106 vi->tmr_idx = t4_tmr_idx_10g; 1107 vi->pktc_idx = t4_pktc_idx_10g; 1108 vi->flags |= iaq.intr_flags_10g & INTR_RXQ; 1109 vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi; 1110 vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi; 1111 } else { 1112 vi->tmr_idx = t4_tmr_idx_1g; 1113 vi->pktc_idx = t4_pktc_idx_1g; 1114 vi->flags |= iaq.intr_flags_1g & INTR_RXQ; 1115 vi->nrxq = j == 0 ? 
iaq.nrxq1g : iaq.nrxq_vi; 1116 vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi; 1117 } 1118 rqidx += vi->nrxq; 1119 tqidx += vi->ntxq; 1120 1121 if (j == 0 && vi->ntxq > 1) 1122 vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0; 1123 else 1124 vi->rsrv_noflowq = 0; 1125 1126#ifdef TCP_OFFLOAD 1127 vi->first_ofld_rxq = ofld_rqidx; 1128 vi->first_ofld_txq = ofld_tqidx; 1129 if (port_top_speed(pi) >= 10) { 1130 vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ; 1131 vi->nofldrxq = j == 0 ? iaq.nofldrxq10g : 1132 iaq.nofldrxq_vi; 1133 vi->nofldtxq = j == 0 ? iaq.nofldtxq10g : 1134 iaq.nofldtxq_vi; 1135 } else { 1136 vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ; 1137 vi->nofldrxq = j == 0 ? iaq.nofldrxq1g : 1138 iaq.nofldrxq_vi; 1139 vi->nofldtxq = j == 0 ? iaq.nofldtxq1g : 1140 iaq.nofldtxq_vi; 1141 } 1142 ofld_rqidx += vi->nofldrxq; 1143 ofld_tqidx += vi->nofldtxq; 1144#endif 1145#ifdef DEV_NETMAP 1146 if (j > 0) { 1147 vi->first_nm_rxq = nm_rqidx; 1148 vi->first_nm_txq = nm_tqidx; 1149 vi->nnmrxq = iaq.nnmrxq_vi; 1150 vi->nnmtxq = iaq.nnmtxq_vi; 1151 nm_rqidx += vi->nnmrxq; 1152 nm_tqidx += vi->nnmtxq; 1153 } 1154#endif 1155 } 1156 } 1157 1158 rc = t4_setup_intr_handlers(sc); 1159 if (rc != 0) { 1160 device_printf(dev, 1161 "failed to setup interrupt handlers: %d\n", rc); 1162 goto done; 1163 } 1164 1165 rc = bus_generic_attach(dev); 1166 if (rc != 0) { 1167 device_printf(dev, 1168 "failed to attach all child ports: %d\n", rc); 1169 goto done; 1170 } 1171 1172 device_printf(dev, 1173 "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n", 1174 sc->params.pci.speed, sc->params.pci.width, sc->params.nports, 1175 sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" : 1176 (sc->intr_type == INTR_MSI ? "MSI" : "INTx"), 1177 sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq); 1178 1179 t4_set_desc(sc); 1180 1181done: 1182 if (rc != 0 && sc->cdev) { 1183 /* cdev was created and so cxgbetool works; recover that way. 
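	 * In recovery mode the nexus stays attached with just enough state
	 * for cxgbetool to reach the hardware through the /dev node created
	 * above (e.g. to load a known-good firmware); the hw.cxgbe.sos
	 * tunable checked earlier forces the same mode at attach time.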
*/ 1184 device_printf(dev, 1185 "error during attach, adapter is now in recovery mode.\n"); 1186 rc = 0; 1187 } 1188 1189 if (rc != 0) 1190 t4_detach_common(dev); 1191 else 1192 t4_sysctls(sc); 1193 1194 return (rc); 1195} 1196 1197/* 1198 * Idempotent 1199 */ 1200static int 1201t4_detach(device_t dev) 1202{ 1203 struct adapter *sc; 1204 1205 sc = device_get_softc(dev); 1206 1207 return (t4_detach_common(dev)); 1208} 1209 1210int 1211t4_detach_common(device_t dev) 1212{ 1213 struct adapter *sc; 1214 struct port_info *pi; 1215 int i, rc; 1216 1217 sc = device_get_softc(dev); 1218 1219 if (sc->flags & FULL_INIT_DONE) { 1220 if (!(sc->flags & IS_VF)) 1221 t4_intr_disable(sc); 1222 } 1223 1224 if (sc->cdev) { 1225 destroy_dev(sc->cdev); 1226 sc->cdev = NULL; 1227 } 1228 1229 if (device_is_attached(dev)) { 1230 rc = bus_generic_detach(dev); 1231 if (rc) { 1232 device_printf(dev, 1233 "failed to detach child devices: %d\n", rc); 1234 return (rc); 1235 } 1236 } 1237 1238 for (i = 0; i < sc->intr_count; i++) 1239 t4_free_irq(sc, &sc->irq[i]); 1240 1241 for (i = 0; i < MAX_NPORTS; i++) { 1242 pi = sc->port[i]; 1243 if (pi) { 1244 t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid); 1245 if (pi->dev) 1246 device_delete_child(dev, pi->dev); 1247 1248 mtx_destroy(&pi->pi_lock); 1249 free(pi->vi, M_CXGBE); 1250 free(pi->tc, M_CXGBE); 1251 free(pi, M_CXGBE); 1252 } 1253 } 1254 1255 if (sc->flags & FULL_INIT_DONE) 1256 adapter_full_uninit(sc); 1257 1258 if ((sc->flags & (IS_VF | FW_OK)) == FW_OK) 1259 t4_fw_bye(sc, sc->mbox); 1260 1261 if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX) 1262 pci_release_msi(dev); 1263 1264 if (sc->regs_res) 1265 bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid, 1266 sc->regs_res); 1267 1268 if (sc->udbs_res) 1269 bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid, 1270 sc->udbs_res); 1271 1272 if (sc->msix_res) 1273 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid, 1274 sc->msix_res); 1275 1276 if (sc->l2t) 1277 t4_free_l2t(sc->l2t); 1278 1279#ifdef TCP_OFFLOAD 1280 free(sc->sge.ofld_rxq, M_CXGBE); 1281 free(sc->sge.ofld_txq, M_CXGBE); 1282#endif 1283#ifdef DEV_NETMAP 1284 free(sc->sge.nm_rxq, M_CXGBE); 1285 free(sc->sge.nm_txq, M_CXGBE); 1286#endif 1287 free(sc->irq, M_CXGBE); 1288 free(sc->sge.rxq, M_CXGBE); 1289 free(sc->sge.txq, M_CXGBE); 1290 free(sc->sge.ctrlq, M_CXGBE); 1291 free(sc->sge.iqmap, M_CXGBE); 1292 free(sc->sge.eqmap, M_CXGBE); 1293 free(sc->tids.ftid_tab, M_CXGBE); 1294 t4_destroy_dma_tag(sc); 1295 if (mtx_initialized(&sc->sc_lock)) { 1296 sx_xlock(&t4_list_lock); 1297 SLIST_REMOVE(&t4_list, sc, adapter, link); 1298 sx_xunlock(&t4_list_lock); 1299 mtx_destroy(&sc->sc_lock); 1300 } 1301 1302 callout_drain(&sc->sfl_callout); 1303 if (mtx_initialized(&sc->tids.ftid_lock)) 1304 mtx_destroy(&sc->tids.ftid_lock); 1305 if (mtx_initialized(&sc->sfl_lock)) 1306 mtx_destroy(&sc->sfl_lock); 1307 if (mtx_initialized(&sc->ifp_lock)) 1308 mtx_destroy(&sc->ifp_lock); 1309 if (mtx_initialized(&sc->reg_lock)) 1310 mtx_destroy(&sc->reg_lock); 1311 1312 for (i = 0; i < NUM_MEMWIN; i++) { 1313 struct memwin *mw = &sc->memwin[i]; 1314 1315 if (rw_initialized(&mw->mw_lock)) 1316 rw_destroy(&mw->mw_lock); 1317 } 1318 1319 bzero(sc, sizeof(*sc)); 1320 1321 return (0); 1322} 1323 1324static int 1325cxgbe_probe(device_t dev) 1326{ 1327 char buf[128]; 1328 struct port_info *pi = device_get_softc(dev); 1329 1330 snprintf(buf, sizeof(buf), "port %d", pi->port_id); 1331 device_set_desc_copy(dev, buf); 1332 1333 return (BUS_PROBE_DEFAULT); 1334} 1335 
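/*
 * T4_CAP is the set of interface capabilities advertised on every
 * cxgbe/cxl/cc ifnet (IFCAP_TOE is added separately when offload queues
 * exist) and T4_CAP_ENABLE is what is switched on by default; individual
 * capabilities can be toggled later through SIOCSIFCAP in cxgbe_ioctl().
 */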
1336#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ 1337 IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ 1338 IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS) 1339#define T4_CAP_ENABLE (T4_CAP) 1340 1341static int 1342cxgbe_vi_attach(device_t dev, struct vi_info *vi) 1343{ 1344 struct ifnet *ifp; 1345 struct sbuf *sb; 1346 1347 vi->xact_addr_filt = -1; 1348 callout_init(&vi->tick, 1); 1349 1350 /* Allocate an ifnet and set it up */ 1351 ifp = if_alloc(IFT_ETHER); 1352 if (ifp == NULL) { 1353 device_printf(dev, "Cannot allocate ifnet\n"); 1354 return (ENOMEM); 1355 } 1356 vi->ifp = ifp; 1357 ifp->if_softc = vi; 1358 1359 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1360 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1361 1362 ifp->if_init = cxgbe_init; 1363 ifp->if_ioctl = cxgbe_ioctl; 1364 ifp->if_transmit = cxgbe_transmit; 1365 ifp->if_qflush = cxgbe_qflush; 1366 1367 ifp->if_capabilities = T4_CAP; 1368#ifdef TCP_OFFLOAD 1369 if (vi->nofldrxq != 0) 1370 ifp->if_capabilities |= IFCAP_TOE; 1371#endif 1372 ifp->if_capenable = T4_CAP_ENABLE; 1373 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO | 1374 CSUM_UDP_IPV6 | CSUM_TCP_IPV6; 1375 1376 ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 1377 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS; 1378 ifp->if_hw_tsomaxsegsize = 65536; 1379 1380 /* Initialize ifmedia for this VI */ 1381 ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change, 1382 cxgbe_media_status); 1383 build_medialist(vi->pi, &vi->media); 1384 1385 vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp, 1386 EVENTHANDLER_PRI_ANY); 1387 1388 ether_ifattach(ifp, vi->hw_addr); 1389#ifdef DEV_NETMAP 1390 if (vi->nnmrxq != 0) 1391 cxgbe_nm_attach(vi); 1392#endif 1393 sb = sbuf_new_auto(); 1394 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq); 1395#ifdef TCP_OFFLOAD 1396 if (ifp->if_capabilities & IFCAP_TOE) 1397 sbuf_printf(sb, "; %d txq, %d rxq (TOE)", 1398 vi->nofldtxq, vi->nofldrxq); 1399#endif 1400#ifdef DEV_NETMAP 1401 if (ifp->if_capabilities & IFCAP_NETMAP) 1402 sbuf_printf(sb, "; %d txq, %d rxq (netmap)", 1403 vi->nnmtxq, vi->nnmrxq); 1404#endif 1405 sbuf_finish(sb); 1406 device_printf(dev, "%s\n", sbuf_data(sb)); 1407 sbuf_delete(sb); 1408 1409 vi_sysctls(vi); 1410 1411 return (0); 1412} 1413 1414static int 1415cxgbe_attach(device_t dev) 1416{ 1417 struct port_info *pi = device_get_softc(dev); 1418 struct adapter *sc = pi->adapter; 1419 struct vi_info *vi; 1420 int i, rc; 1421 1422 callout_init_mtx(&pi->tick, &pi->pi_lock, 0); 1423 1424 rc = cxgbe_vi_attach(dev, &pi->vi[0]); 1425 if (rc) 1426 return (rc); 1427 1428 for_each_vi(pi, i, vi) { 1429 if (i == 0) 1430 continue; 1431 vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1); 1432 if (vi->dev == NULL) { 1433 device_printf(dev, "failed to add VI %d\n", i); 1434 continue; 1435 } 1436 device_set_softc(vi->dev, vi); 1437 } 1438 1439 cxgbe_sysctls(pi); 1440 1441 bus_generic_attach(dev); 1442 1443 return (0); 1444} 1445 1446static void 1447cxgbe_vi_detach(struct vi_info *vi) 1448{ 1449 struct ifnet *ifp = vi->ifp; 1450 1451 ether_ifdetach(ifp); 1452 1453 if (vi->vlan_c) 1454 EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c); 1455 1456 /* Let detach proceed even if these fail. 
*/ 1457#ifdef DEV_NETMAP 1458 if (ifp->if_capabilities & IFCAP_NETMAP) 1459 cxgbe_nm_detach(vi); 1460#endif 1461 cxgbe_uninit_synchronized(vi); 1462 callout_drain(&vi->tick); 1463 vi_full_uninit(vi); 1464 1465 ifmedia_removeall(&vi->media); 1466 if_free(vi->ifp); 1467 vi->ifp = NULL; 1468} 1469 1470static int 1471cxgbe_detach(device_t dev) 1472{ 1473 struct port_info *pi = device_get_softc(dev); 1474 struct adapter *sc = pi->adapter; 1475 int rc; 1476 1477 /* Detach the extra VIs first. */ 1478 rc = bus_generic_detach(dev); 1479 if (rc) 1480 return (rc); 1481 device_delete_children(dev); 1482 1483 doom_vi(sc, &pi->vi[0]); 1484 1485 if (pi->flags & HAS_TRACEQ) { 1486 sc->traceq = -1; /* cloner should not create ifnet */ 1487 t4_tracer_port_detach(sc); 1488 } 1489 1490 cxgbe_vi_detach(&pi->vi[0]); 1491 callout_drain(&pi->tick); 1492 1493 end_synchronized_op(sc, 0); 1494 1495 return (0); 1496} 1497 1498static void 1499cxgbe_init(void *arg) 1500{ 1501 struct vi_info *vi = arg; 1502 struct adapter *sc = vi->pi->adapter; 1503 1504 if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0) 1505 return; 1506 cxgbe_init_synchronized(vi); 1507 end_synchronized_op(sc, 0); 1508} 1509 1510static int 1511cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data) 1512{ 1513 int rc = 0, mtu, flags, can_sleep; 1514 struct vi_info *vi = ifp->if_softc; 1515 struct adapter *sc = vi->pi->adapter; 1516 struct ifreq *ifr = (struct ifreq *)data; 1517 uint32_t mask; 1518 1519 switch (cmd) { 1520 case SIOCSIFMTU: 1521 mtu = ifr->ifr_mtu; 1522 if (mtu < ETHERMIN || mtu > MAX_MTU) 1523 return (EINVAL); 1524 1525 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu"); 1526 if (rc) 1527 return (rc); 1528 ifp->if_mtu = mtu; 1529 if (vi->flags & VI_INIT_DONE) { 1530 t4_update_fl_bufsize(ifp); 1531 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1532 rc = update_mac_settings(ifp, XGMAC_MTU); 1533 } 1534 end_synchronized_op(sc, 0); 1535 break; 1536 1537 case SIOCSIFFLAGS: 1538 can_sleep = 0; 1539redo_sifflags: 1540 rc = begin_synchronized_op(sc, vi, 1541 can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg"); 1542 if (rc) 1543 return (rc); 1544 1545 if (ifp->if_flags & IFF_UP) { 1546 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1547 flags = vi->if_flags; 1548 if ((ifp->if_flags ^ flags) & 1549 (IFF_PROMISC | IFF_ALLMULTI)) { 1550 if (can_sleep == 1) { 1551 end_synchronized_op(sc, 0); 1552 can_sleep = 0; 1553 goto redo_sifflags; 1554 } 1555 rc = update_mac_settings(ifp, 1556 XGMAC_PROMISC | XGMAC_ALLMULTI); 1557 } 1558 } else { 1559 if (can_sleep == 0) { 1560 end_synchronized_op(sc, LOCK_HELD); 1561 can_sleep = 1; 1562 goto redo_sifflags; 1563 } 1564 rc = cxgbe_init_synchronized(vi); 1565 } 1566 vi->if_flags = ifp->if_flags; 1567 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1568 if (can_sleep == 0) { 1569 end_synchronized_op(sc, LOCK_HELD); 1570 can_sleep = 1; 1571 goto redo_sifflags; 1572 } 1573 rc = cxgbe_uninit_synchronized(vi); 1574 } 1575 end_synchronized_op(sc, can_sleep ? 
0 : LOCK_HELD); 1576 break; 1577 1578 case SIOCADDMULTI: 1579 case SIOCDELMULTI: /* these two are called with a mutex held :-( */ 1580 rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi"); 1581 if (rc) 1582 return (rc); 1583 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1584 rc = update_mac_settings(ifp, XGMAC_MCADDRS); 1585 end_synchronized_op(sc, LOCK_HELD); 1586 break; 1587 1588 case SIOCSIFCAP: 1589 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap"); 1590 if (rc) 1591 return (rc); 1592 1593 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1594 if (mask & IFCAP_TXCSUM) { 1595 ifp->if_capenable ^= IFCAP_TXCSUM; 1596 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 1597 1598 if (IFCAP_TSO4 & ifp->if_capenable && 1599 !(IFCAP_TXCSUM & ifp->if_capenable)) { 1600 ifp->if_capenable &= ~IFCAP_TSO4; 1601 if_printf(ifp, 1602 "tso4 disabled due to -txcsum.\n"); 1603 } 1604 } 1605 if (mask & IFCAP_TXCSUM_IPV6) { 1606 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 1607 ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 1608 1609 if (IFCAP_TSO6 & ifp->if_capenable && 1610 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 1611 ifp->if_capenable &= ~IFCAP_TSO6; 1612 if_printf(ifp, 1613 "tso6 disabled due to -txcsum6.\n"); 1614 } 1615 } 1616 if (mask & IFCAP_RXCSUM) 1617 ifp->if_capenable ^= IFCAP_RXCSUM; 1618 if (mask & IFCAP_RXCSUM_IPV6) 1619 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 1620 1621 /* 1622 * Note that we leave CSUM_TSO alone (it is always set). The 1623 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before 1624 * sending a TSO request our way, so it's sufficient to toggle 1625 * IFCAP_TSOx only. 1626 */ 1627 if (mask & IFCAP_TSO4) { 1628 if (!(IFCAP_TSO4 & ifp->if_capenable) && 1629 !(IFCAP_TXCSUM & ifp->if_capenable)) { 1630 if_printf(ifp, "enable txcsum first.\n"); 1631 rc = EAGAIN; 1632 goto fail; 1633 } 1634 ifp->if_capenable ^= IFCAP_TSO4; 1635 } 1636 if (mask & IFCAP_TSO6) { 1637 if (!(IFCAP_TSO6 & ifp->if_capenable) && 1638 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 1639 if_printf(ifp, "enable txcsum6 first.\n"); 1640 rc = EAGAIN; 1641 goto fail; 1642 } 1643 ifp->if_capenable ^= IFCAP_TSO6; 1644 } 1645 if (mask & IFCAP_LRO) { 1646#if defined(INET) || defined(INET6) 1647 int i; 1648 struct sge_rxq *rxq; 1649 1650 ifp->if_capenable ^= IFCAP_LRO; 1651 for_each_rxq(vi, i, rxq) { 1652 if (ifp->if_capenable & IFCAP_LRO) 1653 rxq->iq.flags |= IQ_LRO_ENABLED; 1654 else 1655 rxq->iq.flags &= ~IQ_LRO_ENABLED; 1656 } 1657#endif 1658 } 1659#ifdef TCP_OFFLOAD 1660 if (mask & IFCAP_TOE) { 1661 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE; 1662 1663 rc = toe_capability(vi, enable); 1664 if (rc != 0) 1665 goto fail; 1666 1667 ifp->if_capenable ^= mask; 1668 } 1669#endif 1670 if (mask & IFCAP_VLAN_HWTAGGING) { 1671 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1672 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1673 rc = update_mac_settings(ifp, XGMAC_VLANEX); 1674 } 1675 if (mask & IFCAP_VLAN_MTU) { 1676 ifp->if_capenable ^= IFCAP_VLAN_MTU; 1677 1678 /* Need to find out how to disable auto-mtu-inflation */ 1679 } 1680 if (mask & IFCAP_VLAN_HWTSO) 1681 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1682 if (mask & IFCAP_VLAN_HWCSUM) 1683 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1684 1685#ifdef VLAN_CAPABILITIES 1686 VLAN_CAPABILITIES(ifp); 1687#endif 1688fail: 1689 end_synchronized_op(sc, 0); 1690 break; 1691 1692 case SIOCSIFMEDIA: 1693 case SIOCGIFMEDIA: 1694 case SIOCGIFXMEDIA: 1695 ifmedia_ioctl(ifp, ifr, &vi->media, cmd); 1696 break; 1697 1698 case SIOCGI2C: { 1699 struct ifi2creq i2c; 1700 1701 
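		/*
		 * Read a transceiver module's i2c EEPROM, handled by
		 * t4_i2c_rd() below.  Only the conventional SFP/QSFP
		 * addresses 0xA0 and 0xA2 are allowed, and at most
		 * sizeof(i2c.data) bytes per request.
		 */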
rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 1702 if (rc != 0) 1703 break; 1704 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 1705 rc = EPERM; 1706 break; 1707 } 1708 if (i2c.len > sizeof(i2c.data)) { 1709 rc = EINVAL; 1710 break; 1711 } 1712 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c"); 1713 if (rc) 1714 return (rc); 1715 rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr, 1716 i2c.offset, i2c.len, &i2c.data[0]); 1717 end_synchronized_op(sc, 0); 1718 if (rc == 0) 1719 rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 1720 break; 1721 } 1722 1723 default: 1724 rc = ether_ioctl(ifp, cmd, data); 1725 } 1726 1727 return (rc); 1728} 1729 1730static int 1731cxgbe_transmit(struct ifnet *ifp, struct mbuf *m) 1732{ 1733 struct vi_info *vi = ifp->if_softc; 1734 struct port_info *pi = vi->pi; 1735 struct adapter *sc = pi->adapter; 1736 struct sge_txq *txq; 1737 void *items[1]; 1738 int rc; 1739 1740 M_ASSERTPKTHDR(m); 1741 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */ 1742 1743 if (__predict_false(pi->link_cfg.link_ok == 0)) { 1744 m_freem(m); 1745 return (ENETDOWN); 1746 } 1747 1748 rc = parse_pkt(sc, &m); 1749 if (__predict_false(rc != 0)) { 1750 MPASS(m == NULL); /* was freed already */ 1751 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */ 1752 return (rc); 1753 } 1754 1755 /* Select a txq. */ 1756 txq = &sc->sge.txq[vi->first_txq]; 1757 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 1758 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + 1759 vi->rsrv_noflowq); 1760 1761 items[0] = m; 1762 rc = mp_ring_enqueue(txq->r, items, 1, 4096); 1763 if (__predict_false(rc != 0)) 1764 m_freem(m); 1765 1766 return (rc); 1767} 1768 1769static void 1770cxgbe_qflush(struct ifnet *ifp) 1771{ 1772 struct vi_info *vi = ifp->if_softc; 1773 struct sge_txq *txq; 1774 int i; 1775 1776 /* queues do not exist if !VI_INIT_DONE. */ 1777 if (vi->flags & VI_INIT_DONE) { 1778 for_each_txq(vi, i, txq) { 1779 TXQ_LOCK(txq); 1780 txq->eq.flags &= ~EQ_ENABLED; 1781 TXQ_UNLOCK(txq); 1782 while (!mp_ring_is_idle(txq->r)) { 1783 mp_ring_check_drainage(txq->r, 0); 1784 pause("qflush", 1); 1785 } 1786 } 1787 } 1788 if_qflush(ifp); 1789} 1790 1791static int 1792cxgbe_media_change(struct ifnet *ifp) 1793{ 1794 struct vi_info *vi = ifp->if_softc; 1795 1796 device_printf(vi->dev, "%s unimplemented.\n", __func__); 1797 1798 return (EOPNOTSUPP); 1799} 1800 1801static void 1802cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1803{ 1804 struct vi_info *vi = ifp->if_softc; 1805 struct port_info *pi = vi->pi; 1806 struct ifmedia_entry *cur; 1807 int speed = pi->link_cfg.speed; 1808 1809 cur = vi->media.ifm_cur; 1810 1811 ifmr->ifm_status = IFM_AVALID; 1812 if (!pi->link_cfg.link_ok) 1813 return; 1814 1815 ifmr->ifm_status |= IFM_ACTIVE; 1816 1817 /* active and current will differ iff current media is autoselect. 
*/ 1818 if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) 1819 return; 1820 1821 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 1822 if (speed == 10000) 1823 ifmr->ifm_active |= IFM_10G_T; 1824 else if (speed == 1000) 1825 ifmr->ifm_active |= IFM_1000_T; 1826 else if (speed == 100) 1827 ifmr->ifm_active |= IFM_100_TX; 1828 else if (speed == 10) 1829 ifmr->ifm_active |= IFM_10_T; 1830 else 1831 KASSERT(0, ("%s: link up but speed unknown (%u)", __func__, 1832 speed)); 1833} 1834 1835static int 1836vcxgbe_probe(device_t dev) 1837{ 1838 char buf[128]; 1839 struct vi_info *vi = device_get_softc(dev); 1840 1841 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id, 1842 vi - vi->pi->vi); 1843 device_set_desc_copy(dev, buf); 1844 1845 return (BUS_PROBE_DEFAULT); 1846} 1847 1848static int 1849vcxgbe_attach(device_t dev) 1850{ 1851 struct vi_info *vi; 1852 struct port_info *pi; 1853 struct adapter *sc; 1854 int func, index, rc; 1855 u32 param, val; 1856 1857 vi = device_get_softc(dev); 1858 pi = vi->pi; 1859 sc = pi->adapter; 1860 1861 index = vi - pi->vi; 1862 KASSERT(index < nitems(vi_mac_funcs), 1863 ("%s: VI %s doesn't have a MAC func", __func__, 1864 device_get_nameunit(dev))); 1865 func = vi_mac_funcs[index]; 1866 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, 1867 vi->hw_addr, &vi->rss_size, func, 0); 1868 if (rc < 0) { 1869 device_printf(dev, "Failed to allocate virtual interface " 1870 "for port %d: %d\n", pi->port_id, -rc); 1871 return (-rc); 1872 } 1873 vi->viid = rc; 1874 if (chip_id(sc) <= CHELSIO_T5) 1875 vi->smt_idx = (rc & 0x7f) << 1; 1876 else 1877 vi->smt_idx = (rc & 0x7f); 1878 1879 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 1880 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | 1881 V_FW_PARAMS_PARAM_YZ(vi->viid); 1882 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 1883 if (rc) 1884 vi->rss_base = 0xffff; 1885 else { 1886 /* MPASS((val >> 16) == rss_size); */ 1887 vi->rss_base = val & 0xffff; 1888 } 1889 1890 rc = cxgbe_vi_attach(dev, vi); 1891 if (rc) { 1892 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 1893 return (rc); 1894 } 1895 return (0); 1896} 1897 1898static int 1899vcxgbe_detach(device_t dev) 1900{ 1901 struct vi_info *vi; 1902 struct adapter *sc; 1903 1904 vi = device_get_softc(dev); 1905 sc = vi->pi->adapter; 1906 1907 doom_vi(sc, vi); 1908 1909 cxgbe_vi_detach(vi); 1910 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 1911 1912 end_synchronized_op(sc, 0); 1913 1914 return (0); 1915} 1916 1917void 1918t4_fatal_err(struct adapter *sc) 1919{ 1920 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 1921 t4_intr_disable(sc); 1922 log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n", 1923 device_get_nameunit(sc->dev)); 1924} 1925 1926void 1927t4_add_adapter(struct adapter *sc) 1928{ 1929 sx_xlock(&t4_list_lock); 1930 SLIST_INSERT_HEAD(&t4_list, sc, link); 1931 sx_xunlock(&t4_list_lock); 1932} 1933 1934int 1935t4_map_bars_0_and_4(struct adapter *sc) 1936{ 1937 sc->regs_rid = PCIR_BAR(0); 1938 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1939 &sc->regs_rid, RF_ACTIVE); 1940 if (sc->regs_res == NULL) { 1941 device_printf(sc->dev, "cannot map registers.\n"); 1942 return (ENXIO); 1943 } 1944 sc->bt = rman_get_bustag(sc->regs_res); 1945 sc->bh = rman_get_bushandle(sc->regs_res); 1946 sc->mmio_len = rman_get_size(sc->regs_res); 1947 setbit(&sc->doorbells, DOORBELL_KDB); 1948 1949 sc->msix_rid = PCIR_BAR(4); 1950 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1951 &sc->msix_rid, RF_ACTIVE); 1952 if 
(sc->msix_res == NULL) { 1953 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 1954 return (ENXIO); 1955 } 1956 1957 return (0); 1958} 1959 1960int 1961t4_map_bar_2(struct adapter *sc) 1962{ 1963 1964 /* 1965 * T4: only iWARP driver uses the userspace doorbells. There is no need 1966 * to map it if RDMA is disabled. 1967 */ 1968 if (is_t4(sc) && sc->rdmacaps == 0) 1969 return (0); 1970 1971 sc->udbs_rid = PCIR_BAR(2); 1972 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1973 &sc->udbs_rid, RF_ACTIVE); 1974 if (sc->udbs_res == NULL) { 1975 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 1976 return (ENXIO); 1977 } 1978 sc->udbs_base = rman_get_virtual(sc->udbs_res); 1979 1980 if (chip_id(sc) >= CHELSIO_T5) { 1981 setbit(&sc->doorbells, DOORBELL_UDB); 1982#if defined(__i386__) || defined(__amd64__) 1983 if (t5_write_combine) { 1984 int rc, mode; 1985 1986 /* 1987 * Enable write combining on BAR2. This is the 1988 * userspace doorbell BAR and is split into 128B 1989 * (UDBS_SEG_SIZE) doorbell regions, each associated 1990 * with an egress queue. The first 64B has the doorbell 1991 * and the second 64B can be used to submit a tx work 1992 * request with an implicit doorbell. 1993 */ 1994 1995 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 1996 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 1997 if (rc == 0) { 1998 clrbit(&sc->doorbells, DOORBELL_UDB); 1999 setbit(&sc->doorbells, DOORBELL_WCWR); 2000 setbit(&sc->doorbells, DOORBELL_UDBWC); 2001 } else { 2002 device_printf(sc->dev, 2003 "couldn't enable write combining: %d\n", 2004 rc); 2005 } 2006 2007 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0); 2008 t4_write_reg(sc, A_SGE_STAT_CFG, 2009 V_STATSOURCE_T5(7) | mode); 2010 } 2011#endif 2012 } 2013 2014 return (0); 2015} 2016 2017struct memwin_init { 2018 uint32_t base; 2019 uint32_t aperture; 2020}; 2021 2022static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 2023 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2024 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2025 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 2026}; 2027 2028static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 2029 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2030 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2031 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 2032}; 2033 2034static void 2035setup_memwin(struct adapter *sc) 2036{ 2037 const struct memwin_init *mw_init; 2038 struct memwin *mw; 2039 int i; 2040 uint32_t bar0; 2041 2042 if (is_t4(sc)) { 2043 /* 2044 * Read low 32b of bar0 indirectly via the hardware backdoor 2045 * mechanism. Works from within PCI passthrough environments 2046 * too, where rman_get_start() can return a different value. We 2047 * need to program the T4 memory window decoders with the actual 2048 * addresses that will be coming across the PCIe link. 
2049 */ 2050 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 2051 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 2052 2053 mw_init = &t4_memwin[0]; 2054 } else { 2055 /* T5+ use the relative offset inside the PCIe BAR */ 2056 bar0 = 0; 2057 2058 mw_init = &t5_memwin[0]; 2059 } 2060 2061 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 2062 rw_init(&mw->mw_lock, "memory window access"); 2063 mw->mw_base = mw_init->base; 2064 mw->mw_aperture = mw_init->aperture; 2065 mw->mw_curpos = 0; 2066 t4_write_reg(sc, 2067 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2068 (mw->mw_base + bar0) | V_BIR(0) | 2069 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 2070 rw_wlock(&mw->mw_lock); 2071 position_memwin(sc, i, 0); 2072 rw_wunlock(&mw->mw_lock); 2073 } 2074 2075 /* flush */ 2076 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2077} 2078 2079/* 2080 * Positions the memory window at the given address in the card's address space. 2081 * There are some alignment requirements and the actual position may be at an 2082 * address prior to the requested address. mw->mw_curpos always has the actual 2083 * position of the window. 2084 */ 2085static void 2086position_memwin(struct adapter *sc, int idx, uint32_t addr) 2087{ 2088 struct memwin *mw; 2089 uint32_t pf; 2090 uint32_t reg; 2091 2092 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2093 mw = &sc->memwin[idx]; 2094 rw_assert(&mw->mw_lock, RA_WLOCKED); 2095 2096 if (is_t4(sc)) { 2097 pf = 0; 2098 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 2099 } else { 2100 pf = V_PFNUM(sc->pf); 2101 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 2102 } 2103 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 2104 t4_write_reg(sc, reg, mw->mw_curpos | pf); 2105 t4_read_reg(sc, reg); /* flush */ 2106} 2107 2108static int 2109rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2110 int len, int rw) 2111{ 2112 struct memwin *mw; 2113 uint32_t mw_end, v; 2114 2115 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2116 2117 /* Memory can only be accessed in naturally aligned 4 byte units */ 2118 if (addr & 3 || len & 3 || len <= 0) 2119 return (EINVAL); 2120 2121 mw = &sc->memwin[idx]; 2122 while (len > 0) { 2123 rw_rlock(&mw->mw_lock); 2124 mw_end = mw->mw_curpos + mw->mw_aperture; 2125 if (addr >= mw_end || addr < mw->mw_curpos) { 2126 /* Will need to reposition the window */ 2127 if (!rw_try_upgrade(&mw->mw_lock)) { 2128 rw_runlock(&mw->mw_lock); 2129 rw_wlock(&mw->mw_lock); 2130 } 2131 rw_assert(&mw->mw_lock, RA_WLOCKED); 2132 position_memwin(sc, idx, addr); 2133 rw_downgrade(&mw->mw_lock); 2134 mw_end = mw->mw_curpos + mw->mw_aperture; 2135 } 2136 rw_assert(&mw->mw_lock, RA_RLOCKED); 2137 while (addr < mw_end && len > 0) { 2138 if (rw == 0) { 2139 v = t4_read_reg(sc, mw->mw_base + addr - 2140 mw->mw_curpos); 2141 *val++ = le32toh(v); 2142 } else { 2143 v = *val++; 2144 t4_write_reg(sc, mw->mw_base + addr - 2145 mw->mw_curpos, htole32(v));; 2146 } 2147 addr += 4; 2148 len -= 4; 2149 } 2150 rw_runlock(&mw->mw_lock); 2151 } 2152 2153 return (0); 2154} 2155 2156static inline int 2157read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2158 int len) 2159{ 2160 2161 return (rw_via_memwin(sc, idx, addr, val, len, 0)); 2162} 2163 2164static inline int 2165write_via_memwin(struct adapter *sc, int idx, uint32_t addr, 2166 const uint32_t *val, int len) 2167{ 2168 2169 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); 2170} 2171 2172static int 2173t4_range_cmp(const void *a, 
const void *b) 2174{ 2175 return ((const struct t4_range *)a)->start - 2176 ((const struct t4_range *)b)->start; 2177} 2178 2179/* 2180 * Verify that the memory range specified by the addr/len pair is valid within 2181 * the card's address space. 2182 */ 2183static int 2184validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2185{ 2186 struct t4_range mem_ranges[4], *r, *next; 2187 uint32_t em, addr_len; 2188 int i, n, remaining; 2189 2190 /* Memory can only be accessed in naturally aligned 4 byte units */ 2191 if (addr & 3 || len & 3 || len <= 0) 2192 return (EINVAL); 2193 2194 /* Enabled memories */ 2195 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2196 2197 r = &mem_ranges[0]; 2198 n = 0; 2199 bzero(r, sizeof(mem_ranges)); 2200 if (em & F_EDRAM0_ENABLE) { 2201 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2202 r->size = G_EDRAM0_SIZE(addr_len) << 20; 2203 if (r->size > 0) { 2204 r->start = G_EDRAM0_BASE(addr_len) << 20; 2205 if (addr >= r->start && 2206 addr + len <= r->start + r->size) 2207 return (0); 2208 r++; 2209 n++; 2210 } 2211 } 2212 if (em & F_EDRAM1_ENABLE) { 2213 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2214 r->size = G_EDRAM1_SIZE(addr_len) << 20; 2215 if (r->size > 0) { 2216 r->start = G_EDRAM1_BASE(addr_len) << 20; 2217 if (addr >= r->start && 2218 addr + len <= r->start + r->size) 2219 return (0); 2220 r++; 2221 n++; 2222 } 2223 } 2224 if (em & F_EXT_MEM_ENABLE) { 2225 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2226 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 2227 if (r->size > 0) { 2228 r->start = G_EXT_MEM_BASE(addr_len) << 20; 2229 if (addr >= r->start && 2230 addr + len <= r->start + r->size) 2231 return (0); 2232 r++; 2233 n++; 2234 } 2235 } 2236 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 2237 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2238 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 2239 if (r->size > 0) { 2240 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 2241 if (addr >= r->start && 2242 addr + len <= r->start + r->size) 2243 return (0); 2244 r++; 2245 n++; 2246 } 2247 } 2248 MPASS(n <= nitems(mem_ranges)); 2249 2250 if (n > 1) { 2251 /* Sort and merge the ranges. */ 2252 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 2253 2254 /* Start from index 0 and examine the next n - 1 entries. */ 2255 r = &mem_ranges[0]; 2256 for (remaining = n - 1; remaining > 0; remaining--, r++) { 2257 2258 MPASS(r->size > 0); /* r is a valid entry. */ 2259 next = r + 1; 2260 MPASS(next->size > 0); /* and so is the next one. */ 2261 2262 while (r->start + r->size >= next->start) { 2263 /* Merge the next one into the current entry. */ 2264 r->size = max(r->start + r->size, 2265 next->start + next->size) - r->start; 2266 n--; /* One fewer entry in total. */ 2267 if (--remaining == 0) 2268 goto done; /* short circuit */ 2269 next++; 2270 } 2271 if (next != r + 1) { 2272 /* 2273 * Some entries were merged into r and next 2274 * points to the first valid entry that couldn't 2275 * be merged. 2276 */ 2277 MPASS(next->size > 0); /* must be valid */ 2278 memcpy(r + 1, next, remaining * sizeof(*r)); 2279#ifdef INVARIANTS 2280 /* 2281 * This so that the foo->size assertion in the 2282 * next iteration of the loop do the right 2283 * thing for entries that were pulled up and are 2284 * no longer valid. 2285 */ 2286 MPASS(n < nitems(mem_ranges)); 2287 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 2288 sizeof(struct t4_range)); 2289#endif 2290 } 2291 } 2292done: 2293 /* Done merging the ranges. 
*/ 2294 MPASS(n > 0); 2295 r = &mem_ranges[0]; 2296 for (i = 0; i < n; i++, r++) { 2297 if (addr >= r->start && 2298 addr + len <= r->start + r->size) 2299 return (0); 2300 } 2301 } 2302 2303 return (EFAULT); 2304} 2305 2306static int 2307fwmtype_to_hwmtype(int mtype) 2308{ 2309 2310 switch (mtype) { 2311 case FW_MEMTYPE_EDC0: 2312 return (MEM_EDC0); 2313 case FW_MEMTYPE_EDC1: 2314 return (MEM_EDC1); 2315 case FW_MEMTYPE_EXTMEM: 2316 return (MEM_MC0); 2317 case FW_MEMTYPE_EXTMEM1: 2318 return (MEM_MC1); 2319 default: 2320 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2321 } 2322} 2323 2324/* 2325 * Verify that the memory range specified by the memtype/offset/len pair is 2326 * valid and lies entirely within the memtype specified. The global address of 2327 * the start of the range is returned in addr. 2328 */ 2329static int 2330validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2331 uint32_t *addr) 2332{ 2333 uint32_t em, addr_len, maddr; 2334 2335 /* Memory can only be accessed in naturally aligned 4 byte units */ 2336 if (off & 3 || len & 3 || len == 0) 2337 return (EINVAL); 2338 2339 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2340 switch (fwmtype_to_hwmtype(mtype)) { 2341 case MEM_EDC0: 2342 if (!(em & F_EDRAM0_ENABLE)) 2343 return (EINVAL); 2344 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2345 maddr = G_EDRAM0_BASE(addr_len) << 20; 2346 break; 2347 case MEM_EDC1: 2348 if (!(em & F_EDRAM1_ENABLE)) 2349 return (EINVAL); 2350 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2351 maddr = G_EDRAM1_BASE(addr_len) << 20; 2352 break; 2353 case MEM_MC: 2354 if (!(em & F_EXT_MEM_ENABLE)) 2355 return (EINVAL); 2356 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2357 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2358 break; 2359 case MEM_MC1: 2360 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 2361 return (EINVAL); 2362 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2363 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2364 break; 2365 default: 2366 return (EINVAL); 2367 } 2368 2369 *addr = maddr + off; /* global address */ 2370 return (validate_mem_range(sc, *addr, len)); 2371} 2372 2373static int 2374fixup_devlog_params(struct adapter *sc) 2375{ 2376 struct devlog_params *dparams = &sc->params.devlog; 2377 int rc; 2378 2379 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 2380 dparams->size, &dparams->addr); 2381 2382 return (rc); 2383} 2384 2385static int 2386cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis, 2387 struct intrs_and_queues *iaq) 2388{ 2389 int rc, itype, navail, nrxq10g, nrxq1g, n; 2390 int nofldrxq10g = 0, nofldrxq1g = 0; 2391 2392 bzero(iaq, sizeof(*iaq)); 2393 2394 iaq->ntxq10g = t4_ntxq10g; 2395 iaq->ntxq1g = t4_ntxq1g; 2396 iaq->ntxq_vi = t4_ntxq_vi; 2397 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 2398 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 2399 iaq->nrxq_vi = t4_nrxq_vi; 2400 iaq->rsrv_noflowq = t4_rsrv_noflowq; 2401#ifdef TCP_OFFLOAD 2402 if (is_offload(sc)) { 2403 iaq->nofldtxq10g = t4_nofldtxq10g; 2404 iaq->nofldtxq1g = t4_nofldtxq1g; 2405 iaq->nofldtxq_vi = t4_nofldtxq_vi; 2406 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 2407 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 2408 iaq->nofldrxq_vi = t4_nofldrxq_vi; 2409 } 2410#endif 2411#ifdef DEV_NETMAP 2412 iaq->nnmtxq_vi = t4_nnmtxq_vi; 2413 iaq->nnmrxq_vi = t4_nnmrxq_vi; 2414#endif 2415 2416 for (itype = INTR_MSIX; itype; itype >>= 1) { 2417 2418 if ((itype & t4_intr_types) == 0) 2419 continue; /* not allowed */ 2420 2421 if (itype == INTR_MSIX) 2422 navail = 
pci_msix_count(sc->dev); 2423 else if (itype == INTR_MSI) 2424 navail = pci_msi_count(sc->dev); 2425 else 2426 navail = 1; 2427restart: 2428 if (navail == 0) 2429 continue; 2430 2431 iaq->intr_type = itype; 2432 iaq->intr_flags_10g = 0; 2433 iaq->intr_flags_1g = 0; 2434 2435 /* 2436 * Best option: an interrupt vector for errors, one for the 2437 * firmware event queue, and one for every rxq (NIC and TOE) of 2438 * every VI. The VIs that support netmap use the same 2439 * interrupts for the NIC rx queues and the netmap rx queues 2440 * because only one set of queues is active at a time. 2441 */ 2442 iaq->nirq = T4_EXTRA_INTR; 2443 iaq->nirq += n10g * (nrxq10g + nofldrxq10g); 2444 iaq->nirq += n1g * (nrxq1g + nofldrxq1g); 2445 iaq->nirq += (n10g + n1g) * (num_vis - 1) * 2446 max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */ 2447 iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi; 2448 if (iaq->nirq <= navail && 2449 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2450 iaq->intr_flags_10g = INTR_ALL; 2451 iaq->intr_flags_1g = INTR_ALL; 2452 goto allocate; 2453 } 2454 2455 /* Disable the VIs (and netmap) if there aren't enough intrs */ 2456 if (num_vis > 1) { 2457 device_printf(sc->dev, "virtual interfaces disabled " 2458 "because num_vis=%u with current settings " 2459 "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, " 2460 "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, " 2461 "nnmrxq_vi=%u) would need %u interrupts but " 2462 "only %u are available.\n", num_vis, nrxq10g, 2463 nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi, 2464 iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq, 2465 navail); 2466 num_vis = 1; 2467 iaq->ntxq_vi = iaq->nrxq_vi = 0; 2468 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; 2469 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; 2470 goto restart; 2471 } 2472 2473 /* 2474 * Second best option: a vector for errors, one for the firmware 2475 * event queue, and vectors for either all the NIC rx queues or 2476 * all the TOE rx queues. The queues that don't get vectors 2477 * will forward their interrupts to those that do. 2478 */ 2479 iaq->nirq = T4_EXTRA_INTR; 2480 if (nrxq10g >= nofldrxq10g) { 2481 iaq->intr_flags_10g = INTR_RXQ; 2482 iaq->nirq += n10g * nrxq10g; 2483 } else { 2484 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2485 iaq->nirq += n10g * nofldrxq10g; 2486 } 2487 if (nrxq1g >= nofldrxq1g) { 2488 iaq->intr_flags_1g = INTR_RXQ; 2489 iaq->nirq += n1g * nrxq1g; 2490 } else { 2491 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2492 iaq->nirq += n1g * nofldrxq1g; 2493 } 2494 if (iaq->nirq <= navail && 2495 (itype != INTR_MSI || powerof2(iaq->nirq))) 2496 goto allocate; 2497 2498 /* 2499 * Next best option: an interrupt vector for errors, one for the 2500 * firmware event queue, and at least one per main-VI. At this 2501 * point we know we'll have to downsize nrxq and/or nofldrxq to 2502 * fit what's available to us. 2503 */ 2504 iaq->nirq = T4_EXTRA_INTR; 2505 iaq->nirq += n10g + n1g; 2506 if (iaq->nirq <= navail) { 2507 int leftover = navail - iaq->nirq; 2508 2509 if (n10g > 0) { 2510 int target = max(nrxq10g, nofldrxq10g); 2511 2512 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 2513 INTR_RXQ : INTR_OFLD_RXQ; 2514 2515 n = 1; 2516 while (n < target && leftover >= n10g) { 2517 leftover -= n10g; 2518 iaq->nirq += n10g; 2519 n++; 2520 } 2521 iaq->nrxq10g = min(n, nrxq10g); 2522#ifdef TCP_OFFLOAD 2523 iaq->nofldrxq10g = min(n, nofldrxq10g); 2524#endif 2525 } 2526 2527 if (n1g > 0) { 2528 int target = max(nrxq1g, nofldrxq1g); 2529 2530 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 
2531 INTR_RXQ : INTR_OFLD_RXQ; 2532 2533 n = 1; 2534 while (n < target && leftover >= n1g) { 2535 leftover -= n1g; 2536 iaq->nirq += n1g; 2537 n++; 2538 } 2539 iaq->nrxq1g = min(n, nrxq1g); 2540#ifdef TCP_OFFLOAD 2541 iaq->nofldrxq1g = min(n, nofldrxq1g); 2542#endif 2543 } 2544 2545 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2546 goto allocate; 2547 } 2548 2549 /* 2550 * Least desirable option: one interrupt vector for everything. 2551 */ 2552 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2553 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2554#ifdef TCP_OFFLOAD 2555 if (is_offload(sc)) 2556 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2557#endif 2558allocate: 2559 navail = iaq->nirq; 2560 rc = 0; 2561 if (itype == INTR_MSIX) 2562 rc = pci_alloc_msix(sc->dev, &navail); 2563 else if (itype == INTR_MSI) 2564 rc = pci_alloc_msi(sc->dev, &navail); 2565 2566 if (rc == 0) { 2567 if (navail == iaq->nirq) 2568 return (0); 2569 2570 /* 2571 * Didn't get the number requested. Use whatever number 2572 * the kernel is willing to allocate (it's in navail). 2573 */ 2574 device_printf(sc->dev, "fewer vectors than requested, " 2575 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2576 itype, iaq->nirq, navail); 2577 pci_release_msi(sc->dev); 2578 goto restart; 2579 } 2580 2581 device_printf(sc->dev, 2582 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 2583 itype, rc, iaq->nirq, navail); 2584 } 2585 2586 device_printf(sc->dev, 2587 "failed to find a usable interrupt type. " 2588 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 2589 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2590 2591 return (ENXIO); 2592} 2593 2594#define FW_VERSION(chip) ( \ 2595 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2596 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2597 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2598 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2599#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2600 2601struct fw_info { 2602 uint8_t chip; 2603 char *kld_name; 2604 char *fw_mod_name; 2605 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2606} fw_info[] = { 2607 { 2608 .chip = CHELSIO_T4, 2609 .kld_name = "t4fw_cfg", 2610 .fw_mod_name = "t4fw", 2611 .fw_hdr = { 2612 .chip = FW_HDR_CHIP_T4, 2613 .fw_ver = htobe32_const(FW_VERSION(T4)), 2614 .intfver_nic = FW_INTFVER(T4, NIC), 2615 .intfver_vnic = FW_INTFVER(T4, VNIC), 2616 .intfver_ofld = FW_INTFVER(T4, OFLD), 2617 .intfver_ri = FW_INTFVER(T4, RI), 2618 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2619 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2620 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2621 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2622 }, 2623 }, { 2624 .chip = CHELSIO_T5, 2625 .kld_name = "t5fw_cfg", 2626 .fw_mod_name = "t5fw", 2627 .fw_hdr = { 2628 .chip = FW_HDR_CHIP_T5, 2629 .fw_ver = htobe32_const(FW_VERSION(T5)), 2630 .intfver_nic = FW_INTFVER(T5, NIC), 2631 .intfver_vnic = FW_INTFVER(T5, VNIC), 2632 .intfver_ofld = FW_INTFVER(T5, OFLD), 2633 .intfver_ri = FW_INTFVER(T5, RI), 2634 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2635 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2636 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2637 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2638 }, 2639 }, { 2640 .chip = CHELSIO_T6, 2641 .kld_name = "t6fw_cfg", 2642 .fw_mod_name = "t6fw", 2643 .fw_hdr = { 2644 .chip = FW_HDR_CHIP_T6, 2645 .fw_ver = htobe32_const(FW_VERSION(T6)), 2646 .intfver_nic = FW_INTFVER(T6, NIC), 2647 .intfver_vnic = FW_INTFVER(T6, VNIC), 2648 .intfver_ofld = FW_INTFVER(T6, OFLD), 2649 
.intfver_ri = FW_INTFVER(T6, RI), 2650 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), 2651 .intfver_iscsi = FW_INTFVER(T6, ISCSI), 2652 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), 2653 .intfver_fcoe = FW_INTFVER(T6, FCOE), 2654 }, 2655 } 2656}; 2657 2658static struct fw_info * 2659find_fw_info(int chip) 2660{ 2661 int i; 2662 2663 for (i = 0; i < nitems(fw_info); i++) { 2664 if (fw_info[i].chip == chip) 2665 return (&fw_info[i]); 2666 } 2667 return (NULL); 2668} 2669 2670/* 2671 * Is the given firmware API compatible with the one the driver was compiled 2672 * with? 2673 */ 2674static int 2675fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2676{ 2677 2678 /* short circuit if it's the exact same firmware version */ 2679 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2680 return (1); 2681 2682 /* 2683 * XXX: Is this too conservative? Perhaps I should limit this to the 2684 * features that are supported in the driver. 2685 */ 2686#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2687 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2688 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2689 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2690 return (1); 2691#undef SAME_INTF 2692 2693 return (0); 2694} 2695 2696/* 2697 * The firmware in the KLD is usable, but should it be installed? This routine 2698 * explains itself in detail if it indicates the KLD firmware should be 2699 * installed. 2700 */ 2701static int 2702should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2703{ 2704 const char *reason; 2705 2706 if (!card_fw_usable) { 2707 reason = "incompatible or unusable"; 2708 goto install; 2709 } 2710 2711 if (k > c) { 2712 reason = "older than the version bundled with this driver"; 2713 goto install; 2714 } 2715 2716 if (t4_fw_install == 2 && k != c) { 2717 reason = "different than the version bundled with this driver"; 2718 goto install; 2719 } 2720 2721 return (0); 2722 2723install: 2724 if (t4_fw_install == 0) { 2725 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2726 "but the driver is prohibited from installing a different " 2727 "firmware on the card.\n", 2728 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2729 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2730 2731 return (0); 2732 } 2733 2734 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2735 "installing firmware %u.%u.%u.%u on card.\n", 2736 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2737 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2738 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2739 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2740 2741 return (1); 2742} 2743/* 2744 * Establish contact with the firmware and determine if we are the master driver 2745 * or not, and whether we are responsible for chip initialization. 2746 */ 2747static int 2748prep_firmware(struct adapter *sc) 2749{ 2750 const struct firmware *fw = NULL, *default_cfg; 2751 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2752 enum dev_state state; 2753 struct fw_info *fw_info; 2754 struct fw_hdr *card_fw; /* fw on the card */ 2755 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2756 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2757 against */ 2758 2759 /* Contact firmware. 
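 * The hello exchange identifies the master PF and reports whether the device has already been initialized.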
*/ 2760 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2761 if (rc < 0 || state == DEV_STATE_ERR) { 2762 rc = -rc; 2763 device_printf(sc->dev, 2764 "failed to connect to the firmware: %d, %d.\n", rc, state); 2765 return (rc); 2766 } 2767 pf = rc; 2768 if (pf == sc->mbox) 2769 sc->flags |= MASTER_PF; 2770 else if (state == DEV_STATE_UNINIT) { 2771 /* 2772 * We didn't get to be the master so we definitely won't be 2773 * configuring the chip. It's a bug if someone else hasn't 2774 * configured it already. 2775 */ 2776 device_printf(sc->dev, "couldn't be master(%d), " 2777 "device not already initialized either(%d).\n", rc, state); 2778 return (EDOOFUS); 2779 } 2780 2781 /* This is the firmware whose headers the driver was compiled against */ 2782 fw_info = find_fw_info(chip_id(sc)); 2783 if (fw_info == NULL) { 2784 device_printf(sc->dev, 2785 "unable to look up firmware information for chip %d.\n", 2786 chip_id(sc)); 2787 return (EINVAL); 2788 } 2789 drv_fw = &fw_info->fw_hdr; 2790 2791 /* 2792 * The firmware KLD contains many modules. The KLD name is also the 2793 * name of the module that contains the default config file. 2794 */ 2795 default_cfg = firmware_get(fw_info->kld_name); 2796 2797 /* Read the header of the firmware on the card */ 2798 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 2799 rc = -t4_read_flash(sc, FLASH_FW_START, 2800 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 2801 if (rc == 0) 2802 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 2803 else { 2804 device_printf(sc->dev, 2805 "Unable to read card's firmware header: %d\n", rc); 2806 card_fw_usable = 0; 2807 } 2808 2809 /* This is the firmware in the KLD */ 2810 fw = firmware_get(fw_info->fw_mod_name); 2811 if (fw != NULL) { 2812 kld_fw = (const void *)fw->data; 2813 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 2814 } else { 2815 kld_fw = NULL; 2816 kld_fw_usable = 0; 2817 } 2818 2819 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2820 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 2821 /* 2822 * Common case: the firmware on the card is an exact match and 2823 * the KLD is an exact match too, or the KLD is 2824 * absent/incompatible. Note that t4_fw_install = 2 is ignored 2825 * here -- use cxgbetool loadfw if you want to reinstall the 2826 * same firmware as the one on the card. 2827 */ 2828 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 2829 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 2830 be32toh(card_fw->fw_ver))) { 2831 2832 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 2833 if (rc != 0) { 2834 device_printf(sc->dev, 2835 "failed to install firmware: %d\n", rc); 2836 goto done; 2837 } 2838 2839 /* Installed successfully, update the cached header too. */ 2840 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 2841 card_fw_usable = 1; 2842 need_fw_reset = 0; /* already reset as part of load_fw */ 2843 } 2844 2845 if (!card_fw_usable) { 2846 uint32_t d, c, k; 2847 2848 d = ntohl(drv_fw->fw_ver); 2849 c = ntohl(card_fw->fw_ver); 2850 k = kld_fw ? 
ntohl(kld_fw->fw_ver) : 0; 2851 2852 device_printf(sc->dev, "Cannot find a usable firmware: " 2853 "fw_install %d, chip state %d, " 2854 "driver compiled with %d.%d.%d.%d, " 2855 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 2856 t4_fw_install, state, 2857 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 2858 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 2859 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2860 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 2861 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2862 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2863 rc = EINVAL; 2864 goto done; 2865 } 2866 2867 /* Reset device */ 2868 if (need_fw_reset && 2869 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 2870 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 2871 if (rc != ETIMEDOUT && rc != EIO) 2872 t4_fw_bye(sc, sc->mbox); 2873 goto done; 2874 } 2875 sc->flags |= FW_OK; 2876 2877 rc = get_params__pre_init(sc); 2878 if (rc != 0) 2879 goto done; /* error message displayed already */ 2880 2881 /* Partition adapter resources as specified in the config file. */ 2882 if (state == DEV_STATE_UNINIT) { 2883 2884 KASSERT(sc->flags & MASTER_PF, 2885 ("%s: trying to change chip settings when not master.", 2886 __func__)); 2887 2888 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 2889 if (rc != 0) 2890 goto done; /* error message displayed already */ 2891 2892 t4_tweak_chip_settings(sc); 2893 2894 /* get basic stuff going */ 2895 rc = -t4_fw_initialize(sc, sc->mbox); 2896 if (rc != 0) { 2897 device_printf(sc->dev, "fw init failed: %d.\n", rc); 2898 goto done; 2899 } 2900 } else { 2901 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 2902 sc->cfcsum = 0; 2903 } 2904 2905done: 2906 free(card_fw, M_CXGBE); 2907 if (fw != NULL) 2908 firmware_put(fw, FIRMWARE_UNLOAD); 2909 if (default_cfg != NULL) 2910 firmware_put(default_cfg, FIRMWARE_UNLOAD); 2911 2912 return (rc); 2913} 2914 2915#define FW_PARAM_DEV(param) \ 2916 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 2917 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 2918#define FW_PARAM_PFVF(param) \ 2919 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 2920 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 2921 2922/* 2923 * Partition chip resources for use between various PFs, VFs, etc. 2924 */ 2925static int 2926partition_resources(struct adapter *sc, const struct firmware *default_cfg, 2927 const char *name_prefix) 2928{ 2929 const struct firmware *cfg = NULL; 2930 int rc = 0; 2931 struct fw_caps_config_cmd caps; 2932 uint32_t mtype, moff, finicsum, cfcsum; 2933 2934 /* 2935 * Figure out what configuration file to use. Pick the default config 2936 * file for the card if the user hasn't specified one explicitly. 2937 */ 2938 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 2939 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 2940 /* Card specific overrides go here. */ 2941 if (pci_get_device(sc->dev) == 0x440a) 2942 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 2943 if (is_fpga(sc)) 2944 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 2945 } 2946 2947 /* 2948 * We need to load another module if the profile is anything except 2949 * "default" or "flash". 
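 * The module is named <kld_name>_<profile>; for example, a "t5fw_cfg_uwire" module would provide the "uwire" profile on a T5 card.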
2950 */ 2951 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 && 2952 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2953 char s[32]; 2954 2955 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 2956 cfg = firmware_get(s); 2957 if (cfg == NULL) { 2958 if (default_cfg != NULL) { 2959 device_printf(sc->dev, 2960 "unable to load module \"%s\" for " 2961 "configuration profile \"%s\", will use " 2962 "the default config file instead.\n", 2963 s, sc->cfg_file); 2964 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2965 "%s", DEFAULT_CF); 2966 } else { 2967 device_printf(sc->dev, 2968 "unable to load module \"%s\" for " 2969 "configuration profile \"%s\", will use " 2970 "the config file on the card's flash " 2971 "instead.\n", s, sc->cfg_file); 2972 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2973 "%s", FLASH_CF); 2974 } 2975 } 2976 } 2977 2978 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 2979 default_cfg == NULL) { 2980 device_printf(sc->dev, 2981 "default config file not available, will use the config " 2982 "file on the card's flash instead.\n"); 2983 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 2984 } 2985 2986 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2987 u_int cflen; 2988 const uint32_t *cfdata; 2989 uint32_t param, val, addr; 2990 2991 KASSERT(cfg != NULL || default_cfg != NULL, 2992 ("%s: no config to upload", __func__)); 2993 2994 /* 2995 * Ask the firmware where it wants us to upload the config file. 2996 */ 2997 param = FW_PARAM_DEV(CF); 2998 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 2999 if (rc != 0) { 3000 /* No support for config file? Shouldn't happen. */ 3001 device_printf(sc->dev, 3002 "failed to query config file location: %d.\n", rc); 3003 goto done; 3004 } 3005 mtype = G_FW_PARAMS_PARAM_Y(val); 3006 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 3007 3008 /* 3009 * XXX: sheer laziness. We deliberately added 4 bytes of 3010 * useless stuffing/comments at the end of the config file so 3011 * it's ok to simply throw away the last remaining bytes when 3012 * the config file is not an exact multiple of 4. This also 3013 * helps with the validate_mt_off_len check. 3014 */ 3015 if (cfg != NULL) { 3016 cflen = cfg->datasize & ~3; 3017 cfdata = cfg->data; 3018 } else { 3019 cflen = default_cfg->datasize & ~3; 3020 cfdata = default_cfg->data; 3021 } 3022 3023 if (cflen > FLASH_CFG_MAX_SIZE) { 3024 device_printf(sc->dev, 3025 "config file too long (%d, max allowed is %d). " 3026 "Will try to use the config on the card, if any.\n", 3027 cflen, FLASH_CFG_MAX_SIZE); 3028 goto use_config_on_flash; 3029 } 3030 3031 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 3032 if (rc != 0) { 3033 device_printf(sc->dev, 3034 "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 3035 "Will try to use the config on the card, if any.\n", 3036 __func__, mtype, moff, cflen, rc); 3037 goto use_config_on_flash; 3038 } 3039 write_via_memwin(sc, 2, addr, cfdata, cflen); 3040 } else { 3041use_config_on_flash: 3042 mtype = FW_MEMTYPE_FLASH; 3043 moff = t4_flash_cfg_addr(sc); 3044 } 3045 3046 bzero(&caps, sizeof(caps)); 3047 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3048 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3049 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 3050 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 3051 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 3052 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3053 if (rc != 0) { 3054 device_printf(sc->dev, 3055 "failed to pre-process config file: %d " 3056 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 3057 goto done; 3058 } 3059 3060 finicsum = be32toh(caps.finicsum); 3061 cfcsum = be32toh(caps.cfcsum); 3062 if (finicsum != cfcsum) { 3063 device_printf(sc->dev, 3064 "WARNING: config file checksum mismatch: %08x %08x\n", 3065 finicsum, cfcsum); 3066 } 3067 sc->cfcsum = cfcsum; 3068 3069#define LIMIT_CAPS(x) do { \ 3070 caps.x &= htobe16(t4_##x##_allowed); \ 3071} while (0) 3072 3073 /* 3074 * Let the firmware know what features will (not) be used so it can tune 3075 * things accordingly. 3076 */ 3077 LIMIT_CAPS(nbmcaps); 3078 LIMIT_CAPS(linkcaps); 3079 LIMIT_CAPS(switchcaps); 3080 LIMIT_CAPS(niccaps); 3081 LIMIT_CAPS(toecaps); 3082 LIMIT_CAPS(rdmacaps); 3083 LIMIT_CAPS(cryptocaps); 3084 LIMIT_CAPS(iscsicaps); 3085 LIMIT_CAPS(fcoecaps); 3086#undef LIMIT_CAPS 3087 3088 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3089 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 3090 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3091 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 3092 if (rc != 0) { 3093 device_printf(sc->dev, 3094 "failed to process config file: %d.\n", rc); 3095 } 3096done: 3097 if (cfg != NULL) 3098 firmware_put(cfg, FIRMWARE_UNLOAD); 3099 return (rc); 3100} 3101 3102/* 3103 * Retrieve parameters that are needed (or nice to have) very early. 
3104 */ 3105static int 3106get_params__pre_init(struct adapter *sc) 3107{ 3108 int rc; 3109 uint32_t param[2], val[2]; 3110 3111 t4_get_version_info(sc); 3112 3113 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 3114 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 3115 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 3116 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 3117 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 3118 3119 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", 3120 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), 3121 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), 3122 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), 3123 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); 3124 3125 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", 3126 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 3127 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 3128 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 3129 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 3130 3131 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", 3132 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), 3133 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), 3134 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), 3135 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); 3136 3137 param[0] = FW_PARAM_DEV(PORTVEC); 3138 param[1] = FW_PARAM_DEV(CCLK); 3139 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3140 if (rc != 0) { 3141 device_printf(sc->dev, 3142 "failed to query parameters (pre_init): %d.\n", rc); 3143 return (rc); 3144 } 3145 3146 sc->params.portvec = val[0]; 3147 sc->params.nports = bitcount32(val[0]); 3148 sc->params.vpd.cclk = val[1]; 3149 3150 /* Read device log parameters. */ 3151 rc = -t4_init_devlog_params(sc, 1); 3152 if (rc == 0) 3153 fixup_devlog_params(sc); 3154 else { 3155 device_printf(sc->dev, 3156 "failed to get devlog parameters: %d.\n", rc); 3157 rc = 0; /* devlog isn't critical for device operation */ 3158 } 3159 3160 return (rc); 3161} 3162 3163/* 3164 * Retrieve various parameters that are of interest to the driver. The device 3165 * has been initialized by the firmware at this point. 
3166 */ 3167static int 3168get_params__post_init(struct adapter *sc) 3169{ 3170 int rc; 3171 uint32_t param[7], val[7]; 3172 struct fw_caps_config_cmd caps; 3173 3174 param[0] = FW_PARAM_PFVF(IQFLINT_START); 3175 param[1] = FW_PARAM_PFVF(EQ_START); 3176 param[2] = FW_PARAM_PFVF(FILTER_START); 3177 param[3] = FW_PARAM_PFVF(FILTER_END); 3178 param[4] = FW_PARAM_PFVF(L2T_START); 3179 param[5] = FW_PARAM_PFVF(L2T_END); 3180 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3181 if (rc != 0) { 3182 device_printf(sc->dev, 3183 "failed to query parameters (post_init): %d.\n", rc); 3184 return (rc); 3185 } 3186 3187 sc->sge.iq_start = val[0]; 3188 sc->sge.eq_start = val[1]; 3189 sc->tids.ftid_base = val[2]; 3190 sc->tids.nftids = val[3] - val[2] + 1; 3191 sc->params.ftid_min = val[2]; 3192 sc->params.ftid_max = val[3]; 3193 sc->vres.l2t.start = val[4]; 3194 sc->vres.l2t.size = val[5] - val[4] + 1; 3195 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 3196 ("%s: L2 table size (%u) larger than expected (%u)", 3197 __func__, sc->vres.l2t.size, L2T_SIZE)); 3198 3199 /* get capabilites */ 3200 bzero(&caps, sizeof(caps)); 3201 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3202 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3203 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3204 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3205 if (rc != 0) { 3206 device_printf(sc->dev, 3207 "failed to get card capabilities: %d.\n", rc); 3208 return (rc); 3209 } 3210 3211#define READ_CAPS(x) do { \ 3212 sc->x = htobe16(caps.x); \ 3213} while (0) 3214 READ_CAPS(nbmcaps); 3215 READ_CAPS(linkcaps); 3216 READ_CAPS(switchcaps); 3217 READ_CAPS(niccaps); 3218 READ_CAPS(toecaps); 3219 READ_CAPS(rdmacaps); 3220 READ_CAPS(cryptocaps); 3221 READ_CAPS(iscsicaps); 3222 READ_CAPS(fcoecaps); 3223 3224 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3225 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3226 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3227 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3228 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3229 if (rc != 0) { 3230 device_printf(sc->dev, 3231 "failed to query NIC parameters: %d.\n", rc); 3232 return (rc); 3233 } 3234 sc->tids.etid_base = val[0]; 3235 sc->params.etid_min = val[0]; 3236 sc->tids.netids = val[1] - val[0] + 1; 3237 sc->params.netids = sc->tids.netids; 3238 sc->params.eo_wr_cred = val[2]; 3239 sc->params.ethoffload = 1; 3240 } 3241 3242 if (sc->toecaps) { 3243 /* query offload-related parameters */ 3244 param[0] = FW_PARAM_DEV(NTID); 3245 param[1] = FW_PARAM_PFVF(SERVER_START); 3246 param[2] = FW_PARAM_PFVF(SERVER_END); 3247 param[3] = FW_PARAM_PFVF(TDDP_START); 3248 param[4] = FW_PARAM_PFVF(TDDP_END); 3249 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3250 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3251 if (rc != 0) { 3252 device_printf(sc->dev, 3253 "failed to query TOE parameters: %d.\n", rc); 3254 return (rc); 3255 } 3256 sc->tids.ntids = val[0]; 3257 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3258 sc->tids.stid_base = val[1]; 3259 sc->tids.nstids = val[2] - val[1] + 1; 3260 sc->vres.ddp.start = val[3]; 3261 sc->vres.ddp.size = val[4] - val[3] + 1; 3262 sc->params.ofldq_wr_cred = val[5]; 3263 sc->params.offload = 1; 3264 } 3265 if (sc->rdmacaps) { 3266 param[0] = FW_PARAM_PFVF(STAG_START); 3267 param[1] = FW_PARAM_PFVF(STAG_END); 3268 param[2] = FW_PARAM_PFVF(RQ_START); 3269 param[3] = FW_PARAM_PFVF(RQ_END); 3270 param[4] = FW_PARAM_PFVF(PBL_START); 3271 param[5] = FW_PARAM_PFVF(PBL_END); 3272 rc = 
-t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3273 if (rc != 0) { 3274 device_printf(sc->dev, 3275 "failed to query RDMA parameters(1): %d.\n", rc); 3276 return (rc); 3277 } 3278 sc->vres.stag.start = val[0]; 3279 sc->vres.stag.size = val[1] - val[0] + 1; 3280 sc->vres.rq.start = val[2]; 3281 sc->vres.rq.size = val[3] - val[2] + 1; 3282 sc->vres.pbl.start = val[4]; 3283 sc->vres.pbl.size = val[5] - val[4] + 1; 3284 3285 param[0] = FW_PARAM_PFVF(SQRQ_START); 3286 param[1] = FW_PARAM_PFVF(SQRQ_END); 3287 param[2] = FW_PARAM_PFVF(CQ_START); 3288 param[3] = FW_PARAM_PFVF(CQ_END); 3289 param[4] = FW_PARAM_PFVF(OCQ_START); 3290 param[5] = FW_PARAM_PFVF(OCQ_END); 3291 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3292 if (rc != 0) { 3293 device_printf(sc->dev, 3294 "failed to query RDMA parameters(2): %d.\n", rc); 3295 return (rc); 3296 } 3297 sc->vres.qp.start = val[0]; 3298 sc->vres.qp.size = val[1] - val[0] + 1; 3299 sc->vres.cq.start = val[2]; 3300 sc->vres.cq.size = val[3] - val[2] + 1; 3301 sc->vres.ocq.start = val[4]; 3302 sc->vres.ocq.size = val[5] - val[4] + 1; 3303 } 3304 if (sc->iscsicaps) { 3305 param[0] = FW_PARAM_PFVF(ISCSI_START); 3306 param[1] = FW_PARAM_PFVF(ISCSI_END); 3307 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3308 if (rc != 0) { 3309 device_printf(sc->dev, 3310 "failed to query iSCSI parameters: %d.\n", rc); 3311 return (rc); 3312 } 3313 sc->vres.iscsi.start = val[0]; 3314 sc->vres.iscsi.size = val[1] - val[0] + 1; 3315 } 3316 3317 t4_init_sge_params(sc); 3318 3319 /* 3320 * We've got the params we wanted to query via the firmware. Now grab 3321 * some others directly from the chip. 3322 */ 3323 rc = t4_read_chip_settings(sc); 3324 3325 return (rc); 3326} 3327 3328static int 3329set_params__post_init(struct adapter *sc) 3330{ 3331 uint32_t param, val; 3332 3333 /* ask for encapsulated CPLs */ 3334 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3335 val = 1; 3336 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3337 3338 return (0); 3339} 3340 3341#undef FW_PARAM_PFVF 3342#undef FW_PARAM_DEV 3343 3344static void 3345t4_set_desc(struct adapter *sc) 3346{ 3347 char buf[128]; 3348 struct adapter_params *p = &sc->params; 3349 3350 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id); 3351 3352 device_set_desc_copy(sc->dev, buf); 3353} 3354 3355static void 3356build_medialist(struct port_info *pi, struct ifmedia *media) 3357{ 3358 int m; 3359 3360 PORT_LOCK(pi); 3361 3362 ifmedia_removeall(media); 3363 3364 m = IFM_ETHER | IFM_FDX; 3365 3366 switch(pi->port_type) { 3367 case FW_PORT_TYPE_BT_XFI: 3368 case FW_PORT_TYPE_BT_XAUI: 3369 ifmedia_add(media, m | IFM_10G_T, 0, NULL); 3370 /* fall through */ 3371 3372 case FW_PORT_TYPE_BT_SGMII: 3373 ifmedia_add(media, m | IFM_1000_T, 0, NULL); 3374 ifmedia_add(media, m | IFM_100_TX, 0, NULL); 3375 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 3376 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 3377 break; 3378 3379 case FW_PORT_TYPE_CX4: 3380 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL); 3381 ifmedia_set(media, m | IFM_10G_CX4); 3382 break; 3383 3384 case FW_PORT_TYPE_QSFP_10G: 3385 case FW_PORT_TYPE_SFP: 3386 case FW_PORT_TYPE_FIBER_XFI: 3387 case FW_PORT_TYPE_FIBER_XAUI: 3388 switch (pi->mod_type) { 3389 3390 case FW_PORT_MOD_TYPE_LR: 3391 ifmedia_add(media, m | IFM_10G_LR, 0, NULL); 3392 ifmedia_set(media, m | IFM_10G_LR); 3393 break; 3394 3395 case FW_PORT_MOD_TYPE_SR: 3396 ifmedia_add(media, m | IFM_10G_SR, 0, NULL); 3397 ifmedia_set(media, m | IFM_10G_SR); 3398 break; 3399 3400 
case FW_PORT_MOD_TYPE_LRM: 3401 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3402 ifmedia_set(media, m | IFM_10G_LRM); 3403 break; 3404 3405 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3406 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3407 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3408 ifmedia_set(media, m | IFM_10G_TWINAX); 3409 break; 3410 3411 case FW_PORT_MOD_TYPE_NONE: 3412 m &= ~IFM_FDX; 3413 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3414 ifmedia_set(media, m | IFM_NONE); 3415 break; 3416 3417 case FW_PORT_MOD_TYPE_NA: 3418 case FW_PORT_MOD_TYPE_ER: 3419 default: 3420 device_printf(pi->dev, 3421 "unknown port_type (%d), mod_type (%d)\n", 3422 pi->port_type, pi->mod_type); 3423 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3424 ifmedia_set(media, m | IFM_UNKNOWN); 3425 break; 3426 } 3427 break; 3428 3429 case FW_PORT_TYPE_CR_QSFP: 3430 case FW_PORT_TYPE_SFP28: 3431 case FW_PORT_TYPE_KR_SFP28: 3432 switch (pi->mod_type) { 3433 3434 case FW_PORT_MOD_TYPE_SR: 3435 ifmedia_add(media, m | IFM_25G_SR, 0, NULL); 3436 ifmedia_set(media, m | IFM_25G_SR); 3437 break; 3438 3439 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3440 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3441 ifmedia_add(media, m | IFM_25G_CR, 0, NULL); 3442 ifmedia_set(media, m | IFM_25G_CR); 3443 break; 3444 3445 case FW_PORT_MOD_TYPE_NONE: 3446 m &= ~IFM_FDX; 3447 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3448 ifmedia_set(media, m | IFM_NONE); 3449 break; 3450 3451 default: 3452 device_printf(pi->dev, 3453 "unknown port_type (%d), mod_type (%d)\n", 3454 pi->port_type, pi->mod_type); 3455 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3456 ifmedia_set(media, m | IFM_UNKNOWN); 3457 break; 3458 } 3459 break; 3460 3461 case FW_PORT_TYPE_QSFP: 3462 switch (pi->mod_type) { 3463 3464 case FW_PORT_MOD_TYPE_LR: 3465 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3466 ifmedia_set(media, m | IFM_40G_LR4); 3467 break; 3468 3469 case FW_PORT_MOD_TYPE_SR: 3470 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3471 ifmedia_set(media, m | IFM_40G_SR4); 3472 break; 3473 3474 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3475 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3476 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3477 ifmedia_set(media, m | IFM_40G_CR4); 3478 break; 3479 3480 case FW_PORT_MOD_TYPE_NONE: 3481 m &= ~IFM_FDX; 3482 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3483 ifmedia_set(media, m | IFM_NONE); 3484 break; 3485 3486 default: 3487 device_printf(pi->dev, 3488 "unknown port_type (%d), mod_type (%d)\n", 3489 pi->port_type, pi->mod_type); 3490 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3491 ifmedia_set(media, m | IFM_UNKNOWN); 3492 break; 3493 } 3494 break; 3495 3496 case FW_PORT_TYPE_KR4_100G: 3497 case FW_PORT_TYPE_CR4_QSFP: 3498 switch (pi->mod_type) { 3499 3500 case FW_PORT_MOD_TYPE_LR: 3501 ifmedia_add(media, m | IFM_100G_LR4, 0, NULL); 3502 ifmedia_set(media, m | IFM_100G_LR4); 3503 break; 3504 3505 case FW_PORT_MOD_TYPE_SR: 3506 ifmedia_add(media, m | IFM_100G_SR4, 0, NULL); 3507 ifmedia_set(media, m | IFM_100G_SR4); 3508 break; 3509 3510 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3511 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3512 ifmedia_add(media, m | IFM_100G_CR4, 0, NULL); 3513 ifmedia_set(media, m | IFM_100G_CR4); 3514 break; 3515 3516 case FW_PORT_MOD_TYPE_NONE: 3517 m &= ~IFM_FDX; 3518 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3519 ifmedia_set(media, m | IFM_NONE); 3520 break; 3521 3522 default: 3523 device_printf(pi->dev, 3524 "unknown port_type (%d), mod_type (%d)\n", 3525 pi->port_type, pi->mod_type); 3526 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3527 
ifmedia_set(media, m | IFM_UNKNOWN); 3528 break; 3529 } 3530 break; 3531 3532 default: 3533 device_printf(pi->dev, 3534 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3535 pi->mod_type); 3536 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3537 ifmedia_set(media, m | IFM_UNKNOWN); 3538 break; 3539 } 3540 3541 PORT_UNLOCK(pi); 3542} 3543 3544#define FW_MAC_EXACT_CHUNK 7 3545 3546/* 3547 * Program the port's XGMAC based on parameters in ifnet. The caller also 3548 * indicates which parameters should be programmed (the rest are left alone). 3549 */ 3550int 3551update_mac_settings(struct ifnet *ifp, int flags) 3552{ 3553 int rc = 0; 3554 struct vi_info *vi = ifp->if_softc; 3555 struct port_info *pi = vi->pi; 3556 struct adapter *sc = pi->adapter; 3557 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3558 3559 ASSERT_SYNCHRONIZED_OP(sc); 3560 KASSERT(flags, ("%s: not told what to update.", __func__)); 3561 3562 if (flags & XGMAC_MTU) 3563 mtu = ifp->if_mtu; 3564 3565 if (flags & XGMAC_PROMISC) 3566 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 3567 3568 if (flags & XGMAC_ALLMULTI) 3569 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 3570 3571 if (flags & XGMAC_VLANEX) 3572 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0; 3573 3574 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 3575 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 3576 allmulti, 1, vlanex, false); 3577 if (rc) { 3578 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 3579 rc); 3580 return (rc); 3581 } 3582 } 3583 3584 if (flags & XGMAC_UCADDR) { 3585 uint8_t ucaddr[ETHER_ADDR_LEN]; 3586 3587 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 3588 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 3589 ucaddr, true, true); 3590 if (rc < 0) { 3591 rc = -rc; 3592 if_printf(ifp, "change_mac failed: %d\n", rc); 3593 return (rc); 3594 } else { 3595 vi->xact_addr_filt = rc; 3596 rc = 0; 3597 } 3598 } 3599 3600 if (flags & XGMAC_MCADDRS) { 3601 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 3602 int del = 1; 3603 uint64_t hash = 0; 3604 struct ifmultiaddr *ifma; 3605 int i = 0, j; 3606 3607 if_maddr_rlock(ifp); 3608 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3609 if (ifma->ifma_addr->sa_family != AF_LINK) 3610 continue; 3611 mcaddr[i] = 3612 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 3613 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 3614 i++; 3615 3616 if (i == FW_MAC_EXACT_CHUNK) { 3617 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 3618 del, i, mcaddr, NULL, &hash, 0); 3619 if (rc < 0) { 3620 rc = -rc; 3621 for (j = 0; j < i; j++) { 3622 if_printf(ifp, 3623 "failed to add mc address" 3624 " %02x:%02x:%02x:" 3625 "%02x:%02x:%02x rc=%d\n", 3626 mcaddr[j][0], mcaddr[j][1], 3627 mcaddr[j][2], mcaddr[j][3], 3628 mcaddr[j][4], mcaddr[j][5], 3629 rc); 3630 } 3631 goto mcfail; 3632 } 3633 del = 0; 3634 i = 0; 3635 } 3636 } 3637 if (i > 0) { 3638 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 3639 mcaddr, NULL, &hash, 0); 3640 if (rc < 0) { 3641 rc = -rc; 3642 for (j = 0; j < i; j++) { 3643 if_printf(ifp, 3644 "failed to add mc address" 3645 " %02x:%02x:%02x:" 3646 "%02x:%02x:%02x rc=%d\n", 3647 mcaddr[j][0], mcaddr[j][1], 3648 mcaddr[j][2], mcaddr[j][3], 3649 mcaddr[j][4], mcaddr[j][5], 3650 rc); 3651 } 3652 goto mcfail; 3653 } 3654 } 3655 3656 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 3657 if (rc != 0) 3658 if_printf(ifp, "failed to set mc address hash: %d", rc); 3659mcfail: 3660 if_maddr_runlock(ifp); 3661 } 3662 3663 return (rc); 3664} 3665 
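/*
 * Editorial sketch (not part of the original source): a caller typically
 * brackets access to the adapter with begin_synchronized_op() and
 * end_synchronized_op(), the way the SIOCGI2C handler earlier in this file
 * does.  "t4xmpl" is a made-up wmesg used purely for illustration:
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xmpl");
 *	if (rc)
 *		return (rc);
 *	...read or modify adapter/VI state...
 *	end_synchronized_op(sc, 0);
 */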
3666/* 3667 * {begin|end}_synchronized_op must be called from the same thread. 3668 */ 3669int 3670begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 3671 char *wmesg) 3672{ 3673 int rc, pri; 3674 3675#ifdef WITNESS 3676 /* the caller thinks it's ok to sleep, but is it really? */ 3677 if (flags & SLEEP_OK) 3678 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3679 "begin_synchronized_op"); 3680#endif 3681 3682 if (flags & INTR_OK) 3683 pri = PCATCH; 3684 else 3685 pri = 0; 3686 3687 ADAPTER_LOCK(sc); 3688 for (;;) { 3689 3690 if (vi && IS_DOOMED(vi)) { 3691 rc = ENXIO; 3692 goto done; 3693 } 3694 3695 if (!IS_BUSY(sc)) { 3696 rc = 0; 3697 break; 3698 } 3699 3700 if (!(flags & SLEEP_OK)) { 3701 rc = EBUSY; 3702 goto done; 3703 } 3704 3705 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 3706 rc = EINTR; 3707 goto done; 3708 } 3709 } 3710 3711 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 3712 SET_BUSY(sc); 3713#ifdef INVARIANTS 3714 sc->last_op = wmesg; 3715 sc->last_op_thr = curthread; 3716 sc->last_op_flags = flags; 3717#endif 3718 3719done: 3720 if (!(flags & HOLD_LOCK) || rc) 3721 ADAPTER_UNLOCK(sc); 3722 3723 return (rc); 3724} 3725 3726/* 3727 * Tell if_ioctl and if_init that the VI is going away. This is a 3728 * special variant of begin_synchronized_op and must be paired with a 3729 * call to end_synchronized_op. 3730 */ 3731void 3732doom_vi(struct adapter *sc, struct vi_info *vi) 3733{ 3734 3735 ADAPTER_LOCK(sc); 3736 SET_DOOMED(vi); 3737 wakeup(&sc->flags); 3738 while (IS_BUSY(sc)) 3739 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 3740 SET_BUSY(sc); 3741#ifdef INVARIANTS 3742 sc->last_op = "t4detach"; 3743 sc->last_op_thr = curthread; 3744 sc->last_op_flags = 0; 3745#endif 3746 ADAPTER_UNLOCK(sc); 3747} 3748 3749/* 3750 * {begin|end}_synchronized_op must be called from the same thread. 3751 */ 3752void 3753end_synchronized_op(struct adapter *sc, int flags) 3754{ 3755 3756 if (flags & LOCK_HELD) 3757 ADAPTER_LOCK_ASSERT_OWNED(sc); 3758 else 3759 ADAPTER_LOCK(sc); 3760 3761 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 3762 CLR_BUSY(sc); 3763 wakeup(&sc->flags); 3764 ADAPTER_UNLOCK(sc); 3765} 3766 3767static int 3768cxgbe_init_synchronized(struct vi_info *vi) 3769{ 3770 struct port_info *pi = vi->pi; 3771 struct adapter *sc = pi->adapter; 3772 struct ifnet *ifp = vi->ifp; 3773 int rc = 0, i; 3774 struct sge_txq *txq; 3775 3776 ASSERT_SYNCHRONIZED_OP(sc); 3777 3778 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3779 return (0); /* already running */ 3780 3781 if (!(sc->flags & FULL_INIT_DONE) && 3782 ((rc = adapter_full_init(sc)) != 0)) 3783 return (rc); /* error message displayed already */ 3784 3785 if (!(vi->flags & VI_INIT_DONE) && 3786 ((rc = vi_full_init(vi)) != 0)) 3787 return (rc); /* error message displayed already */ 3788 3789 rc = update_mac_settings(ifp, XGMAC_ALL); 3790 if (rc) 3791 goto done; /* error message displayed already */ 3792 3793 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 3794 if (rc != 0) { 3795 if_printf(ifp, "enable_vi failed: %d\n", rc); 3796 goto done; 3797 } 3798 3799 /* 3800 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 3801 * if this changes. 3802 */ 3803 3804 for_each_txq(vi, i, txq) { 3805 TXQ_LOCK(txq); 3806 txq->eq.flags |= EQ_ENABLED; 3807 TXQ_UNLOCK(txq); 3808 } 3809 3810 /* 3811 * The first iq of the first port to come up is used for tracing. 
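 * Its absolute queue id is written to the MPS trace RSS control register below so traced frames are steered to that queue.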
3812 */ 3813 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 3814 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 3815 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : 3816 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 3817 V_QUEUENUMBER(sc->traceq)); 3818 pi->flags |= HAS_TRACEQ; 3819 } 3820 3821 /* all ok */ 3822 PORT_LOCK(pi); 3823 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3824 pi->up_vis++; 3825 3826 if (pi->nvi > 1 || sc->flags & IS_VF) 3827 callout_reset(&vi->tick, hz, vi_tick, vi); 3828 else 3829 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 3830 PORT_UNLOCK(pi); 3831done: 3832 if (rc != 0) 3833 cxgbe_uninit_synchronized(vi); 3834 3835 return (rc); 3836} 3837 3838/* 3839 * Idempotent. 3840 */ 3841static int 3842cxgbe_uninit_synchronized(struct vi_info *vi) 3843{ 3844 struct port_info *pi = vi->pi; 3845 struct adapter *sc = pi->adapter; 3846 struct ifnet *ifp = vi->ifp; 3847 int rc, i; 3848 struct sge_txq *txq; 3849 3850 ASSERT_SYNCHRONIZED_OP(sc); 3851 3852 if (!(vi->flags & VI_INIT_DONE)) { 3853 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 3854 ("uninited VI is running")); 3855 return (0); 3856 } 3857 3858 /* 3859 * Disable the VI so that all its data in either direction is discarded 3860 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 3861 * tick) intact as the TP can deliver negative advice or data that it's 3862 * holding in its RAM (for an offloaded connection) even after the VI is 3863 * disabled. 3864 */ 3865 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 3866 if (rc) { 3867 if_printf(ifp, "disable_vi failed: %d\n", rc); 3868 return (rc); 3869 } 3870 3871 for_each_txq(vi, i, txq) { 3872 TXQ_LOCK(txq); 3873 txq->eq.flags &= ~EQ_ENABLED; 3874 TXQ_UNLOCK(txq); 3875 } 3876 3877 PORT_LOCK(pi); 3878 if (pi->nvi > 1 || sc->flags & IS_VF) 3879 callout_stop(&vi->tick); 3880 else 3881 callout_stop(&pi->tick); 3882 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3883 PORT_UNLOCK(pi); 3884 return (0); 3885 } 3886 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3887 pi->up_vis--; 3888 if (pi->up_vis > 0) { 3889 PORT_UNLOCK(pi); 3890 return (0); 3891 } 3892 PORT_UNLOCK(pi); 3893 3894 pi->link_cfg.link_ok = 0; 3895 pi->link_cfg.speed = 0; 3896 pi->link_cfg.link_down_rc = 255; 3897 t4_os_link_changed(sc, pi->port_id, 0); 3898 3899 return (0); 3900} 3901 3902/* 3903 * It is ok for this function to fail midway and return right away. t4_detach 3904 * will walk the entire sc->irq list and clean up whatever is valid. 3905 */ 3906int 3907t4_setup_intr_handlers(struct adapter *sc) 3908{ 3909 int rc, rid, p, q, v; 3910 char s[8]; 3911 struct irq *irq; 3912 struct port_info *pi; 3913 struct vi_info *vi; 3914 struct sge *sge = &sc->sge; 3915 struct sge_rxq *rxq; 3916#ifdef TCP_OFFLOAD 3917 struct sge_ofld_rxq *ofld_rxq; 3918#endif 3919#ifdef DEV_NETMAP 3920 struct sge_nm_rxq *nm_rxq; 3921#endif 3922 3923 /* 3924 * Setup interrupts. 3925 */ 3926 irq = &sc->irq[0]; 3927 rid = sc->intr_type == INTR_INTX ? 0 : 1; 3928 if (sc->intr_count == 1) 3929 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 3930 3931 /* Multiple interrupts. 
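 * Vector layout: an error interrupt first (PFs only), then the firmware event queue, then one vector per rx queue (NIC, netmap, or TOE) of every VI.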
*/ 3932 if (sc->flags & IS_VF) 3933 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, 3934 ("%s: too few intr.", __func__)); 3935 else 3936 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 3937 ("%s: too few intr.", __func__)); 3938 3939 /* The first one is always error intr on PFs */ 3940 if (!(sc->flags & IS_VF)) { 3941 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 3942 if (rc != 0) 3943 return (rc); 3944 irq++; 3945 rid++; 3946 } 3947 3948 /* The second one is always the firmware event queue (first on VFs) */ 3949 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); 3950 if (rc != 0) 3951 return (rc); 3952 irq++; 3953 rid++; 3954 3955 for_each_port(sc, p) { 3956 pi = sc->port[p]; 3957 for_each_vi(pi, v, vi) { 3958 vi->first_intr = rid - 1; 3959 3960 if (vi->nnmrxq > 0) { 3961 int n = max(vi->nrxq, vi->nnmrxq); 3962 3963 MPASS(vi->flags & INTR_RXQ); 3964 3965 rxq = &sge->rxq[vi->first_rxq]; 3966#ifdef DEV_NETMAP 3967 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; 3968#endif 3969 for (q = 0; q < n; q++) { 3970 snprintf(s, sizeof(s), "%x%c%x", p, 3971 'a' + v, q); 3972 if (q < vi->nrxq) 3973 irq->rxq = rxq++; 3974#ifdef DEV_NETMAP 3975 if (q < vi->nnmrxq) 3976 irq->nm_rxq = nm_rxq++; 3977#endif 3978 rc = t4_alloc_irq(sc, irq, rid, 3979 t4_vi_intr, irq, s); 3980 if (rc != 0) 3981 return (rc); 3982 irq++; 3983 rid++; 3984 vi->nintr++; 3985 } 3986 } else if (vi->flags & INTR_RXQ) { 3987 for_each_rxq(vi, q, rxq) { 3988 snprintf(s, sizeof(s), "%x%c%x", p, 3989 'a' + v, q); 3990 rc = t4_alloc_irq(sc, irq, rid, 3991 t4_intr, rxq, s); 3992 if (rc != 0) 3993 return (rc); 3994 irq++; 3995 rid++; 3996 vi->nintr++; 3997 } 3998 } 3999#ifdef TCP_OFFLOAD 4000 if (vi->flags & INTR_OFLD_RXQ) { 4001 for_each_ofld_rxq(vi, q, ofld_rxq) { 4002 snprintf(s, sizeof(s), "%x%c%x", p, 4003 'A' + v, q); 4004 rc = t4_alloc_irq(sc, irq, rid, 4005 t4_intr, ofld_rxq, s); 4006 if (rc != 0) 4007 return (rc); 4008 irq++; 4009 rid++; 4010 vi->nintr++; 4011 } 4012 } 4013#endif 4014 } 4015 } 4016 MPASS(irq == &sc->irq[sc->intr_count]); 4017 4018 return (0); 4019} 4020 4021int 4022adapter_full_init(struct adapter *sc) 4023{ 4024 int rc, i; 4025 4026 ASSERT_SYNCHRONIZED_OP(sc); 4027 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4028 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 4029 ("%s: FULL_INIT_DONE already", __func__)); 4030 4031 /* 4032 * queues that belong to the adapter (not any particular port). 
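 * These are, e.g., the firmware event queue and the control queue(s); the
 * per-VI rx/tx/fl queues are created later, by t4_setup_vi_queues() from
 * vi_full_init().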
4033 */ 4034 rc = t4_setup_adapter_queues(sc); 4035 if (rc != 0) 4036 goto done; 4037 4038 for (i = 0; i < nitems(sc->tq); i++) { 4039 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 4040 taskqueue_thread_enqueue, &sc->tq[i]); 4041 if (sc->tq[i] == NULL) { 4042 device_printf(sc->dev, 4043 "failed to allocate task queue %d\n", i); 4044 rc = ENOMEM; 4045 goto done; 4046 } 4047 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 4048 device_get_nameunit(sc->dev), i); 4049 } 4050 4051 if (!(sc->flags & IS_VF)) 4052 t4_intr_enable(sc); 4053 sc->flags |= FULL_INIT_DONE; 4054done: 4055 if (rc != 0) 4056 adapter_full_uninit(sc); 4057 4058 return (rc); 4059} 4060 4061int 4062adapter_full_uninit(struct adapter *sc) 4063{ 4064 int i; 4065 4066 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4067 4068 t4_teardown_adapter_queues(sc); 4069 4070 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 4071 taskqueue_free(sc->tq[i]); 4072 sc->tq[i] = NULL; 4073 } 4074 4075 sc->flags &= ~FULL_INIT_DONE; 4076 4077 return (0); 4078} 4079 4080#ifdef RSS 4081#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 4082 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 4083 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 4084 RSS_HASHTYPE_RSS_UDP_IPV6) 4085 4086/* Translates kernel hash types to hardware. */ 4087static int 4088hashconfig_to_hashen(int hashconfig) 4089{ 4090 int hashen = 0; 4091 4092 if (hashconfig & RSS_HASHTYPE_RSS_IPV4) 4093 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 4094 if (hashconfig & RSS_HASHTYPE_RSS_IPV6) 4095 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 4096 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { 4097 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4098 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4099 } 4100 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { 4101 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4102 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4103 } 4104 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) 4105 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4106 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) 4107 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4108 4109 return (hashen); 4110} 4111 4112/* Translates hardware hash types to kernel. */ 4113static int 4114hashen_to_hashconfig(int hashen) 4115{ 4116 int hashconfig = 0; 4117 4118 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { 4119 /* 4120 * If UDP hashing was enabled it must have been enabled for 4121 * either IPv4 or IPv6 (inclusive or). Enabling UDP without 4122 * enabling any 4-tuple hash is nonsense configuration. 
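 * (UDPEN is never set on its own by hashconfig_to_hashen(); it only widens
 * an existing 4-tuple hash to cover UDP, hence the assertion below.)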
4123 */ 4124 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4125 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); 4126 4127 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4128 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; 4129 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4130 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; 4131 } 4132 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4133 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; 4134 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4135 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; 4136 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 4137 hashconfig |= RSS_HASHTYPE_RSS_IPV4; 4138 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 4139 hashconfig |= RSS_HASHTYPE_RSS_IPV6; 4140 4141 return (hashconfig); 4142} 4143#endif 4144 4145int 4146vi_full_init(struct vi_info *vi) 4147{ 4148 struct adapter *sc = vi->pi->adapter; 4149 struct ifnet *ifp = vi->ifp; 4150 uint16_t *rss; 4151 struct sge_rxq *rxq; 4152 int rc, i, j, hashen; 4153#ifdef RSS 4154 int nbuckets = rss_getnumbuckets(); 4155 int hashconfig = rss_gethashconfig(); 4156 int extra; 4157 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4158 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4159#endif 4160 4161 ASSERT_SYNCHRONIZED_OP(sc); 4162 KASSERT((vi->flags & VI_INIT_DONE) == 0, 4163 ("%s: VI_INIT_DONE already", __func__)); 4164 4165 sysctl_ctx_init(&vi->ctx); 4166 vi->flags |= VI_SYSCTL_CTX; 4167 4168 /* 4169 * Allocate tx/rx/fl queues for this VI. 4170 */ 4171 rc = t4_setup_vi_queues(vi); 4172 if (rc != 0) 4173 goto done; /* error message displayed already */ 4174 4175 /* 4176 * Setup RSS for this VI. Save a copy of the RSS table for later use. 4177 */ 4178 if (vi->nrxq > vi->rss_size) { 4179 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " 4180 "some queues will never receive traffic.\n", vi->nrxq, 4181 vi->rss_size); 4182 } else if (vi->rss_size % vi->nrxq) { 4183 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); " 4184 "expect uneven traffic distribution.\n", vi->nrxq, 4185 vi->rss_size); 4186 } 4187#ifdef RSS 4188 MPASS(RSS_KEYSIZE == 40); 4189 if (vi->nrxq != nbuckets) { 4190 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);" 4191 "performance will be impacted.\n", vi->nrxq, nbuckets); 4192 } 4193 4194 rss_getkey((void *)&raw_rss_key[0]); 4195 for (i = 0; i < nitems(rss_key); i++) { 4196 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); 4197 } 4198 t4_write_rss_key(sc, &rss_key[0], -1); 4199#endif 4200 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 4201 for (i = 0; i < vi->rss_size;) { 4202#ifdef RSS 4203 j = rss_get_indirection_to_bucket(i); 4204 j %= vi->nrxq; 4205 rxq = &sc->sge.rxq[vi->first_rxq + j]; 4206 rss[i++] = rxq->iq.abs_id; 4207#else 4208 for_each_rxq(vi, j, rxq) { 4209 rss[i++] = rxq->iq.abs_id; 4210 if (i == vi->rss_size) 4211 break; 4212 } 4213#endif 4214 } 4215 4216 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 4217 vi->rss_size); 4218 if (rc != 0) { 4219 if_printf(ifp, "rss_config failed: %d\n", rc); 4220 goto done; 4221 } 4222 4223#ifdef RSS 4224 hashen = hashconfig_to_hashen(hashconfig); 4225 4226 /* 4227 * We may have had to enable some hashes even though the global config 4228 * wants them disabled. This is a potential problem that must be 4229 * reported to the user. 4230 */ 4231 extra = hashen_to_hashconfig(hashen) ^ hashconfig; 4232 4233 /* 4234 * If we consider only the supported hash types, then the enabled hashes 4235 * are a superset of the requested hashes. 
In other words, there cannot 4236 * be any supported hash that was requested but not enabled, but there 4237 * can be hashes that were not requested but had to be enabled. 4238 */ 4239 extra &= SUPPORTED_RSS_HASHTYPES; 4240 MPASS((extra & hashconfig) == 0); 4241 4242 if (extra) { 4243 if_printf(ifp, 4244 "global RSS config (0x%x) cannot be accomodated.\n", 4245 hashconfig); 4246 } 4247 if (extra & RSS_HASHTYPE_RSS_IPV4) 4248 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 4249 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 4250 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 4251 if (extra & RSS_HASHTYPE_RSS_IPV6) 4252 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 4253 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 4254 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 4255 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 4256 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 4257 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 4258 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 4259#else 4260 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 4261 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 4262 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4263 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 4264#endif 4265 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0); 4266 if (rc != 0) { 4267 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 4268 goto done; 4269 } 4270 4271 vi->rss = rss; 4272 vi->flags |= VI_INIT_DONE; 4273done: 4274 if (rc != 0) 4275 vi_full_uninit(vi); 4276 4277 return (rc); 4278} 4279 4280/* 4281 * Idempotent. 4282 */ 4283int 4284vi_full_uninit(struct vi_info *vi) 4285{ 4286 struct port_info *pi = vi->pi; 4287 struct adapter *sc = pi->adapter; 4288 int i; 4289 struct sge_rxq *rxq; 4290 struct sge_txq *txq; 4291#ifdef TCP_OFFLOAD 4292 struct sge_ofld_rxq *ofld_rxq; 4293 struct sge_wrq *ofld_txq; 4294#endif 4295 4296 if (vi->flags & VI_INIT_DONE) { 4297 4298 /* Need to quiesce queues. */ 4299 4300 /* XXX: Only for the first VI? */ 4301 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) 4302 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 4303 4304 for_each_txq(vi, i, txq) { 4305 quiesce_txq(sc, txq); 4306 } 4307 4308#ifdef TCP_OFFLOAD 4309 for_each_ofld_txq(vi, i, ofld_txq) { 4310 quiesce_wrq(sc, ofld_txq); 4311 } 4312#endif 4313 4314 for_each_rxq(vi, i, rxq) { 4315 quiesce_iq(sc, &rxq->iq); 4316 quiesce_fl(sc, &rxq->fl); 4317 } 4318 4319#ifdef TCP_OFFLOAD 4320 for_each_ofld_rxq(vi, i, ofld_rxq) { 4321 quiesce_iq(sc, &ofld_rxq->iq); 4322 quiesce_fl(sc, &ofld_rxq->fl); 4323 } 4324#endif 4325 free(vi->rss, M_CXGBE); 4326 free(vi->nm_rss, M_CXGBE); 4327 } 4328 4329 t4_teardown_vi_queues(vi); 4330 vi->flags &= ~VI_INIT_DONE; 4331 4332 return (0); 4333} 4334 4335static void 4336quiesce_txq(struct adapter *sc, struct sge_txq *txq) 4337{ 4338 struct sge_eq *eq = &txq->eq; 4339 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4340 4341 (void) sc; /* unused */ 4342 4343#ifdef INVARIANTS 4344 TXQ_LOCK(txq); 4345 MPASS((eq->flags & EQ_ENABLED) == 0); 4346 TXQ_UNLOCK(txq); 4347#endif 4348 4349 /* Wait for the mp_ring to empty. */ 4350 while (!mp_ring_is_idle(txq->r)) { 4351 mp_ring_check_drainage(txq->r, 0); 4352 pause("rquiesce", 1); 4353 } 4354 4355 /* Then wait for the hardware to finish. */ 4356 while (spg->cidx != htobe16(eq->pidx)) 4357 pause("equiesce", 1); 4358 4359 /* Finally, wait for the driver to reclaim all descriptors. 
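 * (This loop only polls; eq->cidx is presumably advanced by the tx reclaim
 * path as the hardware returns credits, until it catches up with eq->pidx.)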
*/ 4360 while (eq->cidx != eq->pidx) 4361 pause("dquiesce", 1); 4362} 4363 4364static void 4365quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4366{ 4367 4368 /* XXXTX */ 4369} 4370 4371static void 4372quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4373{ 4374 (void) sc; /* unused */ 4375 4376 /* Synchronize with the interrupt handler */ 4377 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4378 pause("iqfree", 1); 4379} 4380 4381static void 4382quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4383{ 4384 mtx_lock(&sc->sfl_lock); 4385 FL_LOCK(fl); 4386 fl->flags |= FL_DOOMED; 4387 FL_UNLOCK(fl); 4388 callout_stop(&sc->sfl_callout); 4389 mtx_unlock(&sc->sfl_lock); 4390 4391 KASSERT((fl->flags & FL_STARVING) == 0, 4392 ("%s: still starving", __func__)); 4393} 4394 4395static int 4396t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4397 driver_intr_t *handler, void *arg, char *name) 4398{ 4399 int rc; 4400 4401 irq->rid = rid; 4402 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4403 RF_SHAREABLE | RF_ACTIVE); 4404 if (irq->res == NULL) { 4405 device_printf(sc->dev, 4406 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4407 return (ENOMEM); 4408 } 4409 4410 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4411 NULL, handler, arg, &irq->tag); 4412 if (rc != 0) { 4413 device_printf(sc->dev, 4414 "failed to setup interrupt for rid %d, name %s: %d\n", 4415 rid, name, rc); 4416 } else if (name) 4417 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 4418 4419 return (rc); 4420} 4421 4422static int 4423t4_free_irq(struct adapter *sc, struct irq *irq) 4424{ 4425 if (irq->tag) 4426 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4427 if (irq->res) 4428 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4429 4430 bzero(irq, sizeof(*irq)); 4431 4432 return (0); 4433} 4434 4435static void 4436get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4437{ 4438 4439 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4440 t4_get_regs(sc, buf, regs->len); 4441} 4442 4443#define A_PL_INDIR_CMD 0x1f8 4444 4445#define S_PL_AUTOINC 31 4446#define M_PL_AUTOINC 0x1U 4447#define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4448#define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4449 4450#define S_PL_VFID 20 4451#define M_PL_VFID 0xffU 4452#define V_PL_VFID(x) ((x) << S_PL_VFID) 4453#define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4454 4455#define S_PL_ADDR 0 4456#define M_PL_ADDR 0xfffffU 4457#define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4458#define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4459 4460#define A_PL_INDIR_DATA 0x1fc 4461 4462static uint64_t 4463read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4464{ 4465 u32 stats[2]; 4466 4467 mtx_assert(&sc->reg_lock, MA_OWNED); 4468 if (sc->flags & IS_VF) { 4469 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); 4470 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); 4471 } else { 4472 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4473 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4474 V_PL_ADDR(VF_MPS_REG(reg))); 4475 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4476 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4477 } 4478 return (((uint64_t)stats[1]) << 32 | stats[0]); 4479} 4480 4481static void 4482t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4483 struct fw_vi_stats_vf *stats) 4484{ 4485 4486#define GET_STAT(name) \ 4487 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4488 4489 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4490 
stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4491 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4492 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 4493 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4494 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4495 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4496 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4497 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4498 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4499 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4500 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4501 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4502 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4503 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4504 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4505 4506#undef GET_STAT 4507} 4508 4509static void 4510t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 4511{ 4512 int reg; 4513 4514 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4515 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4516 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 4517 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 4518 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 4519 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 4520} 4521 4522static void 4523vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 4524{ 4525 struct ifnet *ifp = vi->ifp; 4526 struct sge_txq *txq; 4527 int i, drops; 4528 struct fw_vi_stats_vf *s = &vi->stats; 4529 struct timeval tv; 4530 const struct timeval interval = {0, 250000}; /* 250ms */ 4531 4532 if (!(vi->flags & VI_INIT_DONE)) 4533 return; 4534 4535 getmicrotime(&tv); 4536 timevalsub(&tv, &interval); 4537 if (timevalcmp(&tv, &vi->last_refreshed, <)) 4538 return; 4539 4540 mtx_lock(&sc->reg_lock); 4541 t4_get_vi_stats(sc, vi->viid, &vi->stats); 4542 4543 ifp->if_ipackets = s->rx_bcast_frames + s->rx_mcast_frames + 4544 s->rx_ucast_frames; 4545 ifp->if_ierrors = s->rx_err_frames; 4546 ifp->if_opackets = s->tx_bcast_frames + s->tx_mcast_frames + 4547 s->tx_ucast_frames + s->tx_offload_frames; 4548 ifp->if_oerrors = s->tx_drop_frames; 4549 ifp->if_ibytes = s->rx_bcast_bytes + s->rx_mcast_bytes + 4550 s->rx_ucast_bytes; 4551 ifp->if_obytes = s->tx_bcast_bytes + s->tx_mcast_bytes + 4552 s->tx_ucast_bytes + s->tx_offload_bytes; 4553 ifp->if_imcasts = s->rx_mcast_frames; 4554 ifp->if_omcasts = s->tx_mcast_frames; 4555 4556 drops = 0; 4557 for_each_txq(vi, i, txq) 4558 drops += counter_u64_fetch(txq->r->drops); 4559 ifp->if_snd.ifq_drops = drops; 4560 4561 getmicrotime(&vi->last_refreshed); 4562 mtx_unlock(&sc->reg_lock); 4563} 4564 4565static void 4566cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4567{ 4568 struct vi_info *vi = &pi->vi[0]; 4569 struct ifnet *ifp = vi->ifp; 4570 struct sge_txq *txq; 4571 int i, drops; 4572 struct port_stats *s = &pi->stats; 4573 struct timeval tv; 4574 const struct timeval interval = {0, 250000}; /* 250ms */ 4575 4576 getmicrotime(&tv); 4577 timevalsub(&tv, &interval); 4578 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4579 return; 4580 4581 t4_get_port_stats(sc, pi->tx_chan, s); 4582 4583 ifp->if_opackets = s->tx_frames; 4584 ifp->if_ipackets = s->rx_frames; 4585 ifp->if_obytes = s->tx_octets; 4586 ifp->if_ibytes = s->rx_octets; 4587 ifp->if_omcasts = s->tx_mcast_frames; 4588 ifp->if_imcasts = s->rx_mcast_frames; 4589 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + 4590 s->rx_ovflow3 + s->rx_trunc0 + 
s->rx_trunc1 + s->rx_trunc2 + 4591 s->rx_trunc3; 4592 for (i = 0; i < sc->chip_params->nchan; i++) { 4593 if (pi->rx_chan_map & (1 << i)) { 4594 uint32_t v; 4595 4596 mtx_lock(&sc->reg_lock); 4597 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4598 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4599 mtx_unlock(&sc->reg_lock); 4600 ifp->if_iqdrops += v; 4601 } 4602 } 4603 4604 drops = s->tx_drop; 4605 for_each_txq(vi, i, txq) 4606 drops += counter_u64_fetch(txq->r->drops); 4607 ifp->if_snd.ifq_drops = drops; 4608 4609 ifp->if_oerrors = s->tx_error_frames; 4610 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long + 4611 s->rx_fcs_err + s->rx_len_err; 4612 4613 getmicrotime(&pi->last_refreshed); 4614} 4615 4616static void 4617cxgbe_tick(void *arg) 4618{ 4619 struct port_info *pi = arg; 4620 struct adapter *sc = pi->adapter; 4621 4622 PORT_LOCK_ASSERT_OWNED(pi); 4623 cxgbe_refresh_stats(sc, pi); 4624 4625 callout_schedule(&pi->tick, hz); 4626} 4627 4628void 4629vi_tick(void *arg) 4630{ 4631 struct vi_info *vi = arg; 4632 struct adapter *sc = vi->pi->adapter; 4633 4634 vi_refresh_stats(sc, vi); 4635 4636 callout_schedule(&vi->tick, hz); 4637} 4638 4639static void 4640cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4641{ 4642 struct ifnet *vlan; 4643 4644 if (arg != ifp || ifp->if_type != IFT_ETHER) 4645 return; 4646 4647 vlan = VLAN_DEVAT(ifp, vid); 4648 VLAN_SETCOOKIE(vlan, ifp); 4649} 4650 4651/* 4652 * Should match fw_caps_config_<foo> enums in t4fw_interface.h 4653 */ 4654static char *caps_decoder[] = { 4655 "\20\001IPMI\002NCSI", /* 0: NBM */ 4656 "\20\001PPP\002QFC\003DCBX", /* 1: link */ 4657 "\20\001INGRESS\002EGRESS", /* 2: switch */ 4658 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ 4659 "\006HASHFILTER\007ETHOFLD", 4660 "\20\001TOE", /* 4: TOE */ 4661 "\20\001RDDP\002RDMAC", /* 5: RDMA */ 4662 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ 4663 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" 4664 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" 4665 "\007T10DIF" 4666 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", 4667 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */ 4668 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ 4669 "\004PO_INITIATOR\005PO_TARGET", 4670}; 4671 4672void 4673t4_sysctls(struct adapter *sc) 4674{ 4675 struct sysctl_ctx_list *ctx; 4676 struct sysctl_oid *oid; 4677 struct sysctl_oid_list *children, *c0; 4678 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 4679 4680 ctx = device_get_sysctl_ctx(sc->dev); 4681 4682 /* 4683 * dev.t4nex.X. 
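 * These show up as, e.g., dev.t4nex.0.nports or dev.t4nex.0.core_clock for
 * unit 0 (the unit number and the values reported are system dependent).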
4684 */ 4685 oid = device_get_sysctl_tree(sc->dev); 4686 c0 = children = SYSCTL_CHILDREN(oid); 4687 4688 sc->sc_do_rxcopy = 1; 4689 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 4690 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 4691 4692 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 4693 sc->params.nports, "# of ports"); 4694 4695 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 4696 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 4697 sysctl_bitfield, "A", "available doorbells"); 4698 4699 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 4700 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 4701 4702 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 4703 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 4704 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 4705 "interrupt holdoff timer values (us)"); 4706 4707 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 4708 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 4709 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 4710 "interrupt holdoff packet counter values"); 4711 4712 t4_sge_sysctls(sc, ctx, children); 4713 4714 sc->lro_timeout = 100; 4715 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 4716 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 4717 4718 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, 4719 &sc->debug_flags, 0, "flags to enable runtime debugging"); 4720 4721 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", 4722 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); 4723 4724 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 4725 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 4726 4727 if (sc->flags & IS_VF) 4728 return; 4729 4730 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 4731 NULL, chip_rev(sc), "chip hardware revision"); 4732 4733 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", 4734 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); 4735 4736 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", 4737 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); 4738 4739 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", 4740 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); 4741 4742 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", 4743 CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); 4744 4745 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, 4746 sc->er_version, 0, "expansion ROM version"); 4747 4748 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD, 4749 sc->bs_version, 0, "bootstrap firmware version"); 4750 4751 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, 4752 NULL, sc->params.scfg_vers, "serial config version"); 4753 4754 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, 4755 NULL, sc->params.vpd_vers, "VPD version"); 4756 4757 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 4758 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 4759 4760 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 4761 sc->cfcsum, "config file checksum"); 4762 4763#define SYSCTL_CAP(name, n, text) \ 4764 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ 4765 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \ 4766 sysctl_bitfield, "A", "available " text " capabilities") 4767 4768 SYSCTL_CAP(nbmcaps, 0, "NBM"); 4769 SYSCTL_CAP(linkcaps, 1, "link"); 4770 SYSCTL_CAP(switchcaps, 2, "switch"); 4771 
SYSCTL_CAP(niccaps, 3, "NIC"); 4772 SYSCTL_CAP(toecaps, 4, "TCP offload"); 4773 SYSCTL_CAP(rdmacaps, 5, "RDMA"); 4774 SYSCTL_CAP(iscsicaps, 6, "iSCSI"); 4775 SYSCTL_CAP(cryptocaps, 7, "crypto"); 4776 SYSCTL_CAP(fcoecaps, 8, "FCoE"); 4777#undef SYSCTL_CAP 4778 4779 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 4780 NULL, sc->tids.nftids, "number of filters"); 4781 4782 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 4783 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 4784 "chip temperature (in Celsius)"); 4785 4786#ifdef SBUF_DRAIN 4787 /* 4788 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 4789 */ 4790 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4791 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4792 "logs and miscellaneous information"); 4793 children = SYSCTL_CHILDREN(oid); 4794 4795 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4796 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4797 sysctl_cctrl, "A", "congestion control"); 4798 4799 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4800 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4801 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4802 4803 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4804 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4805 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4806 4807 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4808 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4809 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4810 4811 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4812 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4813 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4814 4815 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 4816 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 4817 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 4818 4819 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 4820 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 4821 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 4822 4823 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 4824 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4825 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_cim_la : sysctl_cim_la_t6, 4826 "A", "CIM logic analyzer"); 4827 4828 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 4829 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4830 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 4831 4832 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 4833 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 4834 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 4835 4836 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 4837 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 4838 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 4839 4840 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 4841 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 4842 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 4843 4844 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 4845 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 4846 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 4847 4848 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 4849 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 4850 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 4851 4852 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 4853 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 4854 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 4855 4856 if (chip_id(sc) > CHELSIO_T4) { 4857 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 4858 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 4859 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 4860 4861 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 4862 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 4863 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 4864 } 4865 4866 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 4867 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4868 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 4869 4870 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 4871 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4872 sysctl_cim_qcfg, "A", "CIM queue configuration"); 4873 4874 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 4875 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4876 sysctl_cpl_stats, "A", "CPL statistics"); 4877 4878 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 4879 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4880 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 4881 4882 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 4883 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4884 sysctl_devlog, "A", "firmware's device log"); 4885 4886 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 4887 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4888 sysctl_fcoe_stats, "A", "FCoE statistics"); 4889 4890 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 4891 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4892 sysctl_hw_sched, "A", "hardware scheduler "); 4893 4894 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 4895 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4896 sysctl_l2t, "A", "hardware L2 table"); 4897 4898 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 4899 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4900 sysctl_lb_stats, "A", "loopback statistics"); 4901 4902 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 4903 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4904 sysctl_meminfo, "A", "memory regions"); 4905 4906 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 4907 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4908 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, 4909 "A", "MPS TCAM entries"); 4910 4911 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 4912 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4913 sysctl_path_mtus, "A", "path MTUs"); 4914 4915 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 4916 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4917 sysctl_pm_stats, "A", "PM statistics"); 4918 4919 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 4920 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4921 sysctl_rdma_stats, "A", "RDMA statistics"); 4922 4923 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 4924 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4925 sysctl_tcp_stats, "A", "TCP statistics"); 4926 4927 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 4928 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4929 sysctl_tids, "A", "TID information"); 4930 4931 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 4932 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4933 sysctl_tp_err_stats, "A", "TP error statistics"); 4934 4935 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", 4936 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", 4937 "TP logic analyzer event capture mask"); 4938 4939 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 4940 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4941 sysctl_tp_la, "A", "TP logic analyzer"); 4942 4943 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 4944 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4945 sysctl_tx_rate, "A", "Tx rate"); 4946 4947 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 4948 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4949 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 4950 4951 if (chip_id(sc) >= CHELSIO_T5) { 4952 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 4953 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4954 sysctl_wcwr_stats, "A", "write combined work requests"); 4955 } 4956#endif 4957 4958#ifdef TCP_OFFLOAD 4959 if (is_offload(sc)) { 4960 /* 4961 * dev.t4nex.X.toe. 
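 * TOE tunables, e.g. dev.t4nex.0.toe.ddp; this subtree exists only when
 * the kernel is built with TCP_OFFLOAD and the adapter's configuration
 * allows offload (is_offload()).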
4962 */ 4963 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 4964 NULL, "TOE parameters"); 4965 children = SYSCTL_CHILDREN(oid); 4966 4967 sc->tt.sndbuf = 256 * 1024; 4968 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 4969 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 4970 4971 sc->tt.ddp = 0; 4972 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 4973 &sc->tt.ddp, 0, "DDP allowed"); 4974 4975 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5)); 4976 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW, 4977 &sc->tt.indsz, 0, "DDP max indicate size allowed"); 4978 4979 sc->tt.ddp_thres = 4980 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)); 4981 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW, 4982 &sc->tt.ddp_thres, 0, "DDP threshold"); 4983 4984 sc->tt.rx_coalesce = 1; 4985 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 4986 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 4987 4988 sc->tt.tx_align = 1; 4989 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 4990 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 4991 4992 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", 4993 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A", 4994 "TP timer tick (us)"); 4995 4996 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", 4997 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A", 4998 "TCP timestamp tick (us)"); 4999 5000 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", 5001 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A", 5002 "DACK tick (us)"); 5003 5004 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", 5005 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer, 5006 "IU", "DACK timer (us)"); 5007 5008 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", 5009 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN, 5010 sysctl_tp_timer, "LU", "Retransmit min (us)"); 5011 5012 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", 5013 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX, 5014 sysctl_tp_timer, "LU", "Retransmit max (us)"); 5015 5016 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", 5017 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN, 5018 sysctl_tp_timer, "LU", "Persist timer min (us)"); 5019 5020 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", 5021 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX, 5022 sysctl_tp_timer, "LU", "Persist timer max (us)"); 5023 5024 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", 5025 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE, 5026 sysctl_tp_timer, "LU", "Keepidle idle timer (us)"); 5027 5028 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_intvl", 5029 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL, 5030 sysctl_tp_timer, "LU", "Keepidle interval (us)"); 5031 5032 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", 5033 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT, 5034 sysctl_tp_timer, "LU", "Initial SRTT (us)"); 5035 5036 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", 5037 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER, 5038 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); 5039 } 5040#endif 5041} 5042 5043void 5044vi_sysctls(struct vi_info *vi) 5045{ 5046 struct sysctl_ctx_list *ctx; 5047 struct sysctl_oid *oid; 5048 struct sysctl_oid_list *children; 5049 5050 ctx = device_get_sysctl_ctx(vi->dev); 5051 5052 /* 5053 * dev.v?(cxgbe|cxl).X. 
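 * i.e. dev.cxgbe.X or dev.cxl.X for a port's main VI, with the v-prefixed
 * device names used for its additional VIs.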
5054 */ 5055 oid = device_get_sysctl_tree(vi->dev); 5056 children = SYSCTL_CHILDREN(oid); 5057 5058 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, 5059 vi->viid, "VI identifer"); 5060 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 5061 &vi->nrxq, 0, "# of rx queues"); 5062 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 5063 &vi->ntxq, 0, "# of tx queues"); 5064 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 5065 &vi->first_rxq, 0, "index of first rx queue"); 5066 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 5067 &vi->first_txq, 0, "index of first tx queue"); 5068 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL, 5069 vi->rss_size, "size of RSS indirection table"); 5070 5071 if (IS_MAIN_VI(vi)) { 5072 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", 5073 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", 5074 "Reserve queue 0 for non-flowid packets"); 5075 } 5076 5077#ifdef TCP_OFFLOAD 5078 if (vi->nofldrxq != 0) { 5079 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 5080 &vi->nofldrxq, 0, 5081 "# of rx queues for offloaded TCP connections"); 5082 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 5083 &vi->nofldtxq, 0, 5084 "# of tx queues for offloaded TCP connections"); 5085 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 5086 CTLFLAG_RD, &vi->first_ofld_rxq, 0, 5087 "index of first TOE rx queue"); 5088 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 5089 CTLFLAG_RD, &vi->first_ofld_txq, 0, 5090 "index of first TOE tx queue"); 5091 } 5092#endif 5093#ifdef DEV_NETMAP 5094 if (vi->nnmrxq != 0) { 5095 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 5096 &vi->nnmrxq, 0, "# of netmap rx queues"); 5097 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 5098 &vi->nnmtxq, 0, "# of netmap tx queues"); 5099 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 5100 CTLFLAG_RD, &vi->first_nm_rxq, 0, 5101 "index of first netmap rx queue"); 5102 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 5103 CTLFLAG_RD, &vi->first_nm_txq, 0, 5104 "index of first netmap tx queue"); 5105 } 5106#endif 5107 5108 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 5109 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 5110 "holdoff timer index"); 5111 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 5112 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 5113 "holdoff packet counter index"); 5114 5115 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 5116 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 5117 "rx queue size"); 5118 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 5119 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 5120 "tx queue size"); 5121} 5122 5123static void 5124cxgbe_sysctls(struct port_info *pi) 5125{ 5126 struct sysctl_ctx_list *ctx; 5127 struct sysctl_oid *oid; 5128 struct sysctl_oid_list *children, *children2; 5129 struct adapter *sc = pi->adapter; 5130 int i; 5131 char name[16]; 5132 5133 ctx = device_get_sysctl_ctx(pi->dev); 5134 5135 /* 5136 * dev.cxgbe.X. 
5137 */ 5138 oid = device_get_sysctl_tree(pi->dev); 5139 children = SYSCTL_CHILDREN(oid); 5140 5141 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 5142 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 5143 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 5144 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 5145 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 5146 "PHY temperature (in Celsius)"); 5147 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 5148 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 5149 "PHY firmware version"); 5150 } 5151 5152 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 5153 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A", 5154 "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 5155 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec", 5156 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A", 5157 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)"); 5158 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg", 5159 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I", 5160 "autonegotiation (-1 = not supported)"); 5161 5162 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 5163 port_top_speed(pi), "max speed (in Gbps)"); 5164 5165 if (sc->flags & IS_VF) 5166 return; 5167 5168 /* 5169 * dev.(cxgbe|cxl).X.tc. 5170 */ 5171 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL, 5172 "Tx scheduler traffic classes"); 5173 for (i = 0; i < sc->chip_params->nsched_cls; i++) { 5174 struct tx_sched_class *tc = &pi->tc[i]; 5175 5176 snprintf(name, sizeof(name), "%d", i); 5177 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, 5178 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, 5179 "traffic class")); 5180 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD, 5181 &tc->flags, 0, "flags"); 5182 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", 5183 CTLFLAG_RD, &tc->refcount, 0, "references to this class"); 5184#ifdef SBUF_DRAIN 5185 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", 5186 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i, 5187 sysctl_tc_params, "A", "traffic class parameters"); 5188#endif 5189 } 5190 5191 /* 5192 * dev.cxgbe.X.stats. 
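 * Most of these read the MPS port statistics registers on each access (via
 * sysctl_handle_t4_reg64); the SYSCTL_ADD_T4_PORTSTAT entries at the end
 * come from the cached port_stats and may lag by up to a second.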
5193 */ 5194 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 5195 NULL, "port statistics"); 5196 children = SYSCTL_CHILDREN(oid); 5197 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 5198 &pi->tx_parse_error, 0, 5199 "# of tx packets with invalid length or # of segments"); 5200 5201#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 5202 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 5203 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 5204 sysctl_handle_t4_reg64, "QU", desc) 5205 5206 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 5207 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 5208 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 5209 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 5210 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 5211 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 5212 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 5213 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 5214 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 5215 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 5216 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 5217 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 5218 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 5219 "# of tx frames in this range", 5220 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 5221 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 5222 "# of tx frames in this range", 5223 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 5224 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 5225 "# of tx frames in this range", 5226 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 5227 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 5228 "# of tx frames in this range", 5229 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 5230 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 5231 "# of tx frames in this range", 5232 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 5233 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 5234 "# of tx frames in this range", 5235 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 5236 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 5237 "# of tx frames in this range", 5238 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 5239 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 5240 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 5241 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 5242 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 5243 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 5244 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 5245 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 5246 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 5247 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 5248 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 5249 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 5250 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 5251 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 5252 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 5253 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 5254 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 5255 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames 
transmitted", 5256 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 5257 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 5258 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 5259 5260 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 5261 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 5262 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 5263 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 5264 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 5265 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 5266 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 5267 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 5268 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 5269 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 5270 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 5271 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 5272 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5273 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5274 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5275 "# of frames received with bad FCS", 5276 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5277 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5278 "# of frames received with length error", 5279 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5280 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5281 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5282 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5283 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5284 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5285 "# of rx frames in this range", 5286 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5287 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5288 "# of rx frames in this range", 5289 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5290 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5291 "# of rx frames in this range", 5292 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5293 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5294 "# of rx frames in this range", 5295 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5296 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5297 "# of rx frames in this range", 5298 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5299 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5300 "# of rx frames in this range", 5301 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5302 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5303 "# of rx frames in this range", 5304 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5305 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5306 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5307 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5308 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5309 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5310 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5311 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5312 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5313 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5314 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5315 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5316 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 5317 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 5318 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 5319 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 5320 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 5321 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 5322 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 5323 5324#undef SYSCTL_ADD_T4_REG64 5325 5326#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 5327 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 5328 &pi->stats.name, desc) 5329 5330 /* We get these from port_stats and they may be stale by upto 1s */ 5331 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 5332 "# drops due to buffer-group 0 overflows"); 5333 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 5334 "# drops due to buffer-group 1 overflows"); 5335 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 5336 "# drops due to buffer-group 2 overflows"); 5337 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 5338 "# drops due to buffer-group 3 overflows"); 5339 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 5340 "# of buffer-group 0 truncated packets"); 5341 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 5342 "# of buffer-group 1 truncated packets"); 5343 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 5344 "# of buffer-group 2 truncated packets"); 5345 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 5346 "# of buffer-group 3 truncated packets"); 5347 5348#undef SYSCTL_ADD_T4_PORTSTAT 5349} 5350 5351static int 5352sysctl_int_array(SYSCTL_HANDLER_ARGS) 5353{ 5354 int rc, *i, space = 0; 5355 struct sbuf sb; 5356 5357 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 5358 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { 5359 if (space) 5360 sbuf_printf(&sb, " "); 5361 sbuf_printf(&sb, "%d", *i); 5362 space = 1; 5363 } 5364 sbuf_finish(&sb); 5365 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 5366 sbuf_delete(&sb); 5367 return (rc); 5368} 5369 5370static int 5371sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5372{ 5373 int rc; 5374 struct sbuf *sb; 5375 5376 rc = sysctl_wire_old_buffer(req, 0); 5377 if (rc != 0) 5378 return(rc); 5379 5380 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5381 if (sb == NULL) 5382 return (ENOMEM); 5383 5384 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5385 rc = sbuf_finish(sb); 5386 sbuf_delete(sb); 5387 5388 return (rc); 5389} 5390 5391static int 5392sysctl_btphy(SYSCTL_HANDLER_ARGS) 5393{ 5394 struct port_info *pi = arg1; 5395 int op = arg2; 5396 struct adapter *sc = pi->adapter; 5397 u_int v; 5398 int rc; 5399 5400 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); 5401 if (rc) 5402 return (rc); 5403 /* XXX: magic numbers */ 5404 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 
0x20 : 0xc820, 5405 &v); 5406 end_synchronized_op(sc, 0); 5407 if (rc) 5408 return (rc); 5409 if (op == 0) 5410 v /= 256; 5411 5412 rc = sysctl_handle_int(oidp, &v, 0, req); 5413 return (rc); 5414} 5415 5416static int 5417sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5418{ 5419 struct vi_info *vi = arg1; 5420 int rc, val; 5421 5422 val = vi->rsrv_noflowq; 5423 rc = sysctl_handle_int(oidp, &val, 0, req); 5424 if (rc != 0 || req->newptr == NULL) 5425 return (rc); 5426 5427 if ((val >= 1) && (vi->ntxq > 1)) 5428 vi->rsrv_noflowq = 1; 5429 else 5430 vi->rsrv_noflowq = 0; 5431 5432 return (rc); 5433} 5434 5435static int 5436sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5437{ 5438 struct vi_info *vi = arg1; 5439 struct adapter *sc = vi->pi->adapter; 5440 int idx, rc, i; 5441 struct sge_rxq *rxq; 5442#ifdef TCP_OFFLOAD 5443 struct sge_ofld_rxq *ofld_rxq; 5444#endif 5445 uint8_t v; 5446 5447 idx = vi->tmr_idx; 5448 5449 rc = sysctl_handle_int(oidp, &idx, 0, req); 5450 if (rc != 0 || req->newptr == NULL) 5451 return (rc); 5452 5453 if (idx < 0 || idx >= SGE_NTIMERS) 5454 return (EINVAL); 5455 5456 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5457 "t4tmr"); 5458 if (rc) 5459 return (rc); 5460 5461 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 5462 for_each_rxq(vi, i, rxq) { 5463#ifdef atomic_store_rel_8 5464 atomic_store_rel_8(&rxq->iq.intr_params, v); 5465#else 5466 rxq->iq.intr_params = v; 5467#endif 5468 } 5469#ifdef TCP_OFFLOAD 5470 for_each_ofld_rxq(vi, i, ofld_rxq) { 5471#ifdef atomic_store_rel_8 5472 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5473#else 5474 ofld_rxq->iq.intr_params = v; 5475#endif 5476 } 5477#endif 5478 vi->tmr_idx = idx; 5479 5480 end_synchronized_op(sc, LOCK_HELD); 5481 return (0); 5482} 5483 5484static int 5485sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5486{ 5487 struct vi_info *vi = arg1; 5488 struct adapter *sc = vi->pi->adapter; 5489 int idx, rc; 5490 5491 idx = vi->pktc_idx; 5492 5493 rc = sysctl_handle_int(oidp, &idx, 0, req); 5494 if (rc != 0 || req->newptr == NULL) 5495 return (rc); 5496 5497 if (idx < -1 || idx >= SGE_NCOUNTERS) 5498 return (EINVAL); 5499 5500 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5501 "t4pktc"); 5502 if (rc) 5503 return (rc); 5504 5505 if (vi->flags & VI_INIT_DONE) 5506 rc = EBUSY; /* cannot be changed once the queues are created */ 5507 else 5508 vi->pktc_idx = idx; 5509 5510 end_synchronized_op(sc, LOCK_HELD); 5511 return (rc); 5512} 5513 5514static int 5515sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5516{ 5517 struct vi_info *vi = arg1; 5518 struct adapter *sc = vi->pi->adapter; 5519 int qsize, rc; 5520 5521 qsize = vi->qsize_rxq; 5522 5523 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5524 if (rc != 0 || req->newptr == NULL) 5525 return (rc); 5526 5527 if (qsize < 128 || (qsize & 7)) 5528 return (EINVAL); 5529 5530 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5531 "t4rxqs"); 5532 if (rc) 5533 return (rc); 5534 5535 if (vi->flags & VI_INIT_DONE) 5536 rc = EBUSY; /* cannot be changed once the queues are created */ 5537 else 5538 vi->qsize_rxq = qsize; 5539 5540 end_synchronized_op(sc, LOCK_HELD); 5541 return (rc); 5542} 5543 5544static int 5545sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5546{ 5547 struct vi_info *vi = arg1; 5548 struct adapter *sc = vi->pi->adapter; 5549 int qsize, rc; 5550 5551 qsize = vi->qsize_txq; 5552 5553 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5554 if (rc != 0 || req->newptr == NULL) 5555 return (rc); 5556 5557 if (qsize < 128 || qsize > 
65536) 5558 return (EINVAL); 5559 5560 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5561 "t4txqs"); 5562 if (rc) 5563 return (rc); 5564 5565 if (vi->flags & VI_INIT_DONE) 5566 rc = EBUSY; /* cannot be changed once the queues are created */ 5567 else 5568 vi->qsize_txq = qsize; 5569 5570 end_synchronized_op(sc, LOCK_HELD); 5571 return (rc); 5572} 5573 5574static int 5575sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5576{ 5577 struct port_info *pi = arg1; 5578 struct adapter *sc = pi->adapter; 5579 struct link_config *lc = &pi->link_cfg; 5580 int rc; 5581 5582 if (req->newptr == NULL) { 5583 struct sbuf *sb; 5584 static char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5585 5586 rc = sysctl_wire_old_buffer(req, 0); 5587 if (rc != 0) 5588 return(rc); 5589 5590 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5591 if (sb == NULL) 5592 return (ENOMEM); 5593 5594 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5595 rc = sbuf_finish(sb); 5596 sbuf_delete(sb); 5597 } else { 5598 char s[2]; 5599 int n; 5600 5601 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5602 s[1] = 0; 5603 5604 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5605 if (rc != 0) 5606 return(rc); 5607 5608 if (s[1] != 0) 5609 return (EINVAL); 5610 if (s[0] < '0' || s[0] > '9') 5611 return (EINVAL); /* not a number */ 5612 n = s[0] - '0'; 5613 if (n & ~(PAUSE_TX | PAUSE_RX)) 5614 return (EINVAL); /* some other bit is set too */ 5615 5616 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5617 "t4PAUSE"); 5618 if (rc) 5619 return (rc); 5620 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5621 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5622 lc->requested_fc |= n; 5623 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5624 } 5625 end_synchronized_op(sc, 0); 5626 } 5627 5628 return (rc); 5629} 5630 5631static int 5632sysctl_fec(SYSCTL_HANDLER_ARGS) 5633{ 5634 struct port_info *pi = arg1; 5635 struct adapter *sc = pi->adapter; 5636 struct link_config *lc = &pi->link_cfg; 5637 int rc; 5638 5639 if (req->newptr == NULL) { 5640 struct sbuf *sb; 5641 static char *bits = "\20\1RS\2BASER_RS\3RESERVED"; 5642 5643 rc = sysctl_wire_old_buffer(req, 0); 5644 if (rc != 0) 5645 return(rc); 5646 5647 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5648 if (sb == NULL) 5649 return (ENOMEM); 5650 5651 sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits); 5652 rc = sbuf_finish(sb); 5653 sbuf_delete(sb); 5654 } else { 5655 char s[2]; 5656 int n; 5657 5658 s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC); 5659 s[1] = 0; 5660 5661 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5662 if (rc != 0) 5663 return(rc); 5664 5665 if (s[1] != 0) 5666 return (EINVAL); 5667 if (s[0] < '0' || s[0] > '9') 5668 return (EINVAL); /* not a number */ 5669 n = s[0] - '0'; 5670 if (n & ~M_FW_PORT_CAP_FEC) 5671 return (EINVAL); /* some other bit is set too */ 5672 5673 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5674 "t4fec"); 5675 if (rc) 5676 return (rc); 5677 if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) { 5678 lc->requested_fec = n & 5679 G_FW_PORT_CAP_FEC(lc->supported); 5680 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5681 } 5682 end_synchronized_op(sc, 0); 5683 } 5684 5685 return (rc); 5686} 5687 5688static int 5689sysctl_autoneg(SYSCTL_HANDLER_ARGS) 5690{ 5691 struct port_info *pi = arg1; 5692 struct adapter *sc = pi->adapter; 5693 struct link_config *lc = &pi->link_cfg; 5694 int rc, val, old; 5695 5696 if (lc->supported & FW_PORT_CAP_ANEG) 5697 val = lc->autoneg == 
AUTONEG_ENABLE ? 1 : 0; 5698 else 5699 val = -1; 5700 rc = sysctl_handle_int(oidp, &val, 0, req); 5701 if (rc != 0 || req->newptr == NULL) 5702 return (rc); 5703 if ((lc->supported & FW_PORT_CAP_ANEG) == 0) 5704 return (ENOTSUP); 5705 5706 val = val ? AUTONEG_ENABLE : AUTONEG_DISABLE; 5707 if (lc->autoneg == val) 5708 return (0); /* no change */ 5709 5710 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5711 "t4aneg"); 5712 if (rc) 5713 return (rc); 5714 old = lc->autoneg; 5715 lc->autoneg = val; 5716 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5717 if (rc != 0) 5718 lc->autoneg = old; 5719 return (rc); 5720} 5721 5722static int 5723sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 5724{ 5725 struct adapter *sc = arg1; 5726 int reg = arg2; 5727 uint64_t val; 5728 5729 val = t4_read_reg64(sc, reg); 5730 5731 return (sysctl_handle_64(oidp, &val, 0, req)); 5732} 5733 5734static int 5735sysctl_temperature(SYSCTL_HANDLER_ARGS) 5736{ 5737 struct adapter *sc = arg1; 5738 int rc, t; 5739 uint32_t param, val; 5740 5741 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 5742 if (rc) 5743 return (rc); 5744 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5745 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 5746 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 5747 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 5748 end_synchronized_op(sc, 0); 5749 if (rc) 5750 return (rc); 5751 5752 /* unknown is returned as 0 but we display -1 in that case */ 5753 t = val == 0 ? -1 : val; 5754 5755 rc = sysctl_handle_int(oidp, &t, 0, req); 5756 return (rc); 5757} 5758 5759#ifdef SBUF_DRAIN 5760static int 5761sysctl_cctrl(SYSCTL_HANDLER_ARGS) 5762{ 5763 struct adapter *sc = arg1; 5764 struct sbuf *sb; 5765 int rc, i; 5766 uint16_t incr[NMTUS][NCCTRL_WIN]; 5767 static const char *dec_fac[] = { 5768 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 5769 "0.9375" 5770 }; 5771 5772 rc = sysctl_wire_old_buffer(req, 0); 5773 if (rc != 0) 5774 return (rc); 5775 5776 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5777 if (sb == NULL) 5778 return (ENOMEM); 5779 5780 t4_read_cong_tbl(sc, incr); 5781 5782 for (i = 0; i < NCCTRL_WIN; ++i) { 5783 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 5784 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 5785 incr[5][i], incr[6][i], incr[7][i]); 5786 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 5787 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 5788 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 5789 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 5790 } 5791 5792 rc = sbuf_finish(sb); 5793 sbuf_delete(sb); 5794 5795 return (rc); 5796} 5797 5798static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 5799 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 5800 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 5801 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 5802}; 5803 5804static int 5805sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 5806{ 5807 struct adapter *sc = arg1; 5808 struct sbuf *sb; 5809 int rc, i, n, qid = arg2; 5810 uint32_t *buf, *p; 5811 char *qtype; 5812 u_int cim_num_obq = sc->chip_params->cim_num_obq; 5813 5814 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 5815 ("%s: bad qid %d\n", __func__, qid)); 5816 5817 if (qid < CIM_NUM_IBQ) { 5818 /* inbound queue */ 5819 qtype = "IBQ"; 5820 n = 4 * CIM_IBQ_SIZE; 5821 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5822 rc = t4_read_cim_ibq(sc, qid, buf, n); 5823 } else { 5824 
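		/*
		 * Outbound queues share this handler with the inbound ones:
		 * qid is rebased past the IBQs and the scratch buffer is sized
		 * for the largest read an OBQ can return.  t4_read_cim_obq()
		 * reports the number of 32-bit words actually read.
		 */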
/* outbound queue */ 5825 qtype = "OBQ"; 5826 qid -= CIM_NUM_IBQ; 5827 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 5828 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5829 rc = t4_read_cim_obq(sc, qid, buf, n); 5830 } 5831 5832 if (rc < 0) { 5833 rc = -rc; 5834 goto done; 5835 } 5836 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 5837 5838 rc = sysctl_wire_old_buffer(req, 0); 5839 if (rc != 0) 5840 goto done; 5841 5842 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5843 if (sb == NULL) { 5844 rc = ENOMEM; 5845 goto done; 5846 } 5847 5848 sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]); 5849 for (i = 0, p = buf; i < n; i += 16, p += 4) 5850 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 5851 p[2], p[3]); 5852 5853 rc = sbuf_finish(sb); 5854 sbuf_delete(sb); 5855done: 5856 free(buf, M_CXGBE); 5857 return (rc); 5858} 5859 5860static int 5861sysctl_cim_la(SYSCTL_HANDLER_ARGS) 5862{ 5863 struct adapter *sc = arg1; 5864 u_int cfg; 5865 struct sbuf *sb; 5866 uint32_t *buf, *p; 5867 int rc; 5868 5869 MPASS(chip_id(sc) <= CHELSIO_T5); 5870 5871 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5872 if (rc != 0) 5873 return (rc); 5874 5875 rc = sysctl_wire_old_buffer(req, 0); 5876 if (rc != 0) 5877 return (rc); 5878 5879 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5880 if (sb == NULL) 5881 return (ENOMEM); 5882 5883 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5884 M_ZERO | M_WAITOK); 5885 5886 rc = -t4_cim_read_la(sc, buf, NULL); 5887 if (rc != 0) 5888 goto done; 5889 5890 sbuf_printf(sb, "Status Data PC%s", 5891 cfg & F_UPDBGLACAPTPCONLY ? "" : 5892 " LS0Stat LS0Addr LS0Data"); 5893 5894 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { 5895 if (cfg & F_UPDBGLACAPTPCONLY) { 5896 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 5897 p[6], p[7]); 5898 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 5899 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 5900 p[4] & 0xff, p[5] >> 8); 5901 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 5902 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5903 p[1] & 0xf, p[2] >> 4); 5904 } else { 5905 sbuf_printf(sb, 5906 "\n %02x %x%07x %x%07x %08x %08x " 5907 "%08x%08x%08x%08x", 5908 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5909 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 5910 p[6], p[7]); 5911 } 5912 } 5913 5914 rc = sbuf_finish(sb); 5915 sbuf_delete(sb); 5916done: 5917 free(buf, M_CXGBE); 5918 return (rc); 5919} 5920 5921static int 5922sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS) 5923{ 5924 struct adapter *sc = arg1; 5925 u_int cfg; 5926 struct sbuf *sb; 5927 uint32_t *buf, *p; 5928 int rc; 5929 5930 MPASS(chip_id(sc) > CHELSIO_T5); 5931 5932 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5933 if (rc != 0) 5934 return (rc); 5935 5936 rc = sysctl_wire_old_buffer(req, 0); 5937 if (rc != 0) 5938 return (rc); 5939 5940 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5941 if (sb == NULL) 5942 return (ENOMEM); 5943 5944 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5945 M_ZERO | M_WAITOK); 5946 5947 rc = -t4_cim_read_la(sc, buf, NULL); 5948 if (rc != 0) 5949 goto done; 5950 5951 sbuf_printf(sb, "Status Inst Data PC%s", 5952 cfg & F_UPDBGLACAPTPCONLY ? 
"" : 5953 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); 5954 5955 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { 5956 if (cfg & F_UPDBGLACAPTPCONLY) { 5957 sbuf_printf(sb, "\n %02x %08x %08x %08x", 5958 p[3] & 0xff, p[2], p[1], p[0]); 5959 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", 5960 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, 5961 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); 5962 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", 5963 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, 5964 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, 5965 p[6] >> 16); 5966 } else { 5967 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " 5968 "%08x %08x %08x %08x %08x %08x", 5969 (p[9] >> 16) & 0xff, 5970 p[9] & 0xffff, p[8] >> 16, 5971 p[8] & 0xffff, p[7] >> 16, 5972 p[7] & 0xffff, p[6] >> 16, 5973 p[2], p[1], p[0], p[5], p[4], p[3]); 5974 } 5975 } 5976 5977 rc = sbuf_finish(sb); 5978 sbuf_delete(sb); 5979done: 5980 free(buf, M_CXGBE); 5981 return (rc); 5982} 5983 5984static int 5985sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 5986{ 5987 struct adapter *sc = arg1; 5988 u_int i; 5989 struct sbuf *sb; 5990 uint32_t *buf, *p; 5991 int rc; 5992 5993 rc = sysctl_wire_old_buffer(req, 0); 5994 if (rc != 0) 5995 return (rc); 5996 5997 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5998 if (sb == NULL) 5999 return (ENOMEM); 6000 6001 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 6002 M_ZERO | M_WAITOK); 6003 6004 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 6005 p = buf; 6006 6007 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 6008 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 6009 p[1], p[0]); 6010 } 6011 6012 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 6013 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 6014 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", 6015 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 6016 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 6017 (p[1] >> 2) | ((p[2] & 3) << 30), 6018 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 6019 p[0] & 1); 6020 } 6021 6022 rc = sbuf_finish(sb); 6023 sbuf_delete(sb); 6024 free(buf, M_CXGBE); 6025 return (rc); 6026} 6027 6028static int 6029sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 6030{ 6031 struct adapter *sc = arg1; 6032 u_int i; 6033 struct sbuf *sb; 6034 uint32_t *buf, *p; 6035 int rc; 6036 6037 rc = sysctl_wire_old_buffer(req, 0); 6038 if (rc != 0) 6039 return (rc); 6040 6041 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6042 if (sb == NULL) 6043 return (ENOMEM); 6044 6045 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 6046 M_ZERO | M_WAITOK); 6047 6048 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 6049 p = buf; 6050 6051 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 6052 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6053 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 6054 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 6055 p[4], p[3], p[2], p[1], p[0]); 6056 } 6057 6058 sbuf_printf(sb, "\n\nCntl ID Data"); 6059 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6060 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 6061 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 6062 } 6063 6064 rc = sbuf_finish(sb); 6065 sbuf_delete(sb); 6066 free(buf, M_CXGBE); 6067 return (rc); 6068} 6069 6070static int 6071sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 6072{ 6073 struct adapter *sc = arg1; 6074 struct sbuf *sb; 6075 int rc, i; 6076 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6077 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6078 
uint16_t thres[CIM_NUM_IBQ]; 6079 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 6080 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 6081 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 6082 6083 cim_num_obq = sc->chip_params->cim_num_obq; 6084 if (is_t4(sc)) { 6085 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 6086 obq_rdaddr = A_UP_OBQ_0_REALADDR; 6087 } else { 6088 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 6089 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 6090 } 6091 nq = CIM_NUM_IBQ + cim_num_obq; 6092 6093 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 6094 if (rc == 0) 6095 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 6096 if (rc != 0) 6097 return (rc); 6098 6099 t4_read_cimq_cfg(sc, base, size, thres); 6100 6101 rc = sysctl_wire_old_buffer(req, 0); 6102 if (rc != 0) 6103 return (rc); 6104 6105 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 6106 if (sb == NULL) 6107 return (ENOMEM); 6108 6109 sbuf_printf(sb, 6110 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 6111 6112 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 6113 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 6114 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 6115 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6116 G_QUEREMFLITS(p[2]) * 16); 6117 for ( ; i < nq; i++, p += 4, wr += 2) 6118 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 6119 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 6120 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6121 G_QUEREMFLITS(p[2]) * 16); 6122 6123 rc = sbuf_finish(sb); 6124 sbuf_delete(sb); 6125 6126 return (rc); 6127} 6128 6129static int 6130sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 6131{ 6132 struct adapter *sc = arg1; 6133 struct sbuf *sb; 6134 int rc; 6135 struct tp_cpl_stats stats; 6136 6137 rc = sysctl_wire_old_buffer(req, 0); 6138 if (rc != 0) 6139 return (rc); 6140 6141 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6142 if (sb == NULL) 6143 return (ENOMEM); 6144 6145 mtx_lock(&sc->reg_lock); 6146 t4_tp_get_cpl_stats(sc, &stats); 6147 mtx_unlock(&sc->reg_lock); 6148 6149 if (sc->chip_params->nchan > 2) { 6150 sbuf_printf(sb, " channel 0 channel 1" 6151 " channel 2 channel 3"); 6152 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", 6153 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 6154 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", 6155 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 6156 } else { 6157 sbuf_printf(sb, " channel 0 channel 1"); 6158 sbuf_printf(sb, "\nCPL requests: %10u %10u", 6159 stats.req[0], stats.req[1]); 6160 sbuf_printf(sb, "\nCPL responses: %10u %10u", 6161 stats.rsp[0], stats.rsp[1]); 6162 } 6163 6164 rc = sbuf_finish(sb); 6165 sbuf_delete(sb); 6166 6167 return (rc); 6168} 6169 6170static int 6171sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 6172{ 6173 struct adapter *sc = arg1; 6174 struct sbuf *sb; 6175 int rc; 6176 struct tp_usm_stats stats; 6177 6178 rc = sysctl_wire_old_buffer(req, 0); 6179 if (rc != 0) 6180 return(rc); 6181 6182 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6183 if (sb == NULL) 6184 return (ENOMEM); 6185 6186 t4_get_usm_stats(sc, &stats); 6187 6188 sbuf_printf(sb, "Frames: %u\n", stats.frames); 6189 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 6190 sbuf_printf(sb, "Drops: %u", stats.drops); 6191 6192 rc = sbuf_finish(sb); 6193 sbuf_delete(sb); 6194 6195 return (rc); 6196} 6197 6198static const char * const devlog_level_strings[] = { 6199 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 6200 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 6201 [FW_DEVLOG_LEVEL_ERR] = 
"ERR", 6202 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 6203 [FW_DEVLOG_LEVEL_INFO] = "INFO", 6204 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 6205}; 6206 6207static const char * const devlog_facility_strings[] = { 6208 [FW_DEVLOG_FACILITY_CORE] = "CORE", 6209 [FW_DEVLOG_FACILITY_CF] = "CF", 6210 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 6211 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 6212 [FW_DEVLOG_FACILITY_RES] = "RES", 6213 [FW_DEVLOG_FACILITY_HW] = "HW", 6214 [FW_DEVLOG_FACILITY_FLR] = "FLR", 6215 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 6216 [FW_DEVLOG_FACILITY_PHY] = "PHY", 6217 [FW_DEVLOG_FACILITY_MAC] = "MAC", 6218 [FW_DEVLOG_FACILITY_PORT] = "PORT", 6219 [FW_DEVLOG_FACILITY_VI] = "VI", 6220 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 6221 [FW_DEVLOG_FACILITY_ACL] = "ACL", 6222 [FW_DEVLOG_FACILITY_TM] = "TM", 6223 [FW_DEVLOG_FACILITY_QFC] = "QFC", 6224 [FW_DEVLOG_FACILITY_DCB] = "DCB", 6225 [FW_DEVLOG_FACILITY_ETH] = "ETH", 6226 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 6227 [FW_DEVLOG_FACILITY_RI] = "RI", 6228 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 6229 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 6230 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 6231 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", 6232 [FW_DEVLOG_FACILITY_CHNET] = "CHNET", 6233}; 6234 6235static int 6236sysctl_devlog(SYSCTL_HANDLER_ARGS) 6237{ 6238 struct adapter *sc = arg1; 6239 struct devlog_params *dparams = &sc->params.devlog; 6240 struct fw_devlog_e *buf, *e; 6241 int i, j, rc, nentries, first = 0; 6242 struct sbuf *sb; 6243 uint64_t ftstamp = UINT64_MAX; 6244 6245 if (dparams->addr == 0) 6246 return (ENXIO); 6247 6248 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 6249 if (buf == NULL) 6250 return (ENOMEM); 6251 6252 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); 6253 if (rc != 0) 6254 goto done; 6255 6256 nentries = dparams->size / sizeof(struct fw_devlog_e); 6257 for (i = 0; i < nentries; i++) { 6258 e = &buf[i]; 6259 6260 if (e->timestamp == 0) 6261 break; /* end */ 6262 6263 e->timestamp = be64toh(e->timestamp); 6264 e->seqno = be32toh(e->seqno); 6265 for (j = 0; j < 8; j++) 6266 e->params[j] = be32toh(e->params[j]); 6267 6268 if (e->timestamp < ftstamp) { 6269 ftstamp = e->timestamp; 6270 first = i; 6271 } 6272 } 6273 6274 if (buf[first].timestamp == 0) 6275 goto done; /* nothing in the log */ 6276 6277 rc = sysctl_wire_old_buffer(req, 0); 6278 if (rc != 0) 6279 goto done; 6280 6281 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6282 if (sb == NULL) { 6283 rc = ENOMEM; 6284 goto done; 6285 } 6286 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 6287 "Seq#", "Tstamp", "Level", "Facility", "Message"); 6288 6289 i = first; 6290 do { 6291 e = &buf[i]; 6292 if (e->timestamp == 0) 6293 break; /* end */ 6294 6295 sbuf_printf(sb, "%10d %15ju %8s %8s ", 6296 e->seqno, e->timestamp, 6297 (e->level < nitems(devlog_level_strings) ? 6298 devlog_level_strings[e->level] : "UNKNOWN"), 6299 (e->facility < nitems(devlog_facility_strings) ? 
6300 devlog_facility_strings[e->facility] : "UNKNOWN")); 6301 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 6302 e->params[2], e->params[3], e->params[4], 6303 e->params[5], e->params[6], e->params[7]); 6304 6305 if (++i == nentries) 6306 i = 0; 6307 } while (i != first); 6308 6309 rc = sbuf_finish(sb); 6310 sbuf_delete(sb); 6311done: 6312 free(buf, M_CXGBE); 6313 return (rc); 6314} 6315 6316static int 6317sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 6318{ 6319 struct adapter *sc = arg1; 6320 struct sbuf *sb; 6321 int rc; 6322 struct tp_fcoe_stats stats[MAX_NCHAN]; 6323 int i, nchan = sc->chip_params->nchan; 6324 6325 rc = sysctl_wire_old_buffer(req, 0); 6326 if (rc != 0) 6327 return (rc); 6328 6329 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6330 if (sb == NULL) 6331 return (ENOMEM); 6332 6333 for (i = 0; i < nchan; i++) 6334 t4_get_fcoe_stats(sc, i, &stats[i]); 6335 6336 if (nchan > 2) { 6337 sbuf_printf(sb, " channel 0 channel 1" 6338 " channel 2 channel 3"); 6339 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", 6340 stats[0].octets_ddp, stats[1].octets_ddp, 6341 stats[2].octets_ddp, stats[3].octets_ddp); 6342 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", 6343 stats[0].frames_ddp, stats[1].frames_ddp, 6344 stats[2].frames_ddp, stats[3].frames_ddp); 6345 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", 6346 stats[0].frames_drop, stats[1].frames_drop, 6347 stats[2].frames_drop, stats[3].frames_drop); 6348 } else { 6349 sbuf_printf(sb, " channel 0 channel 1"); 6350 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", 6351 stats[0].octets_ddp, stats[1].octets_ddp); 6352 sbuf_printf(sb, "\nframesDDP: %16u %16u", 6353 stats[0].frames_ddp, stats[1].frames_ddp); 6354 sbuf_printf(sb, "\nframesDrop: %16u %16u", 6355 stats[0].frames_drop, stats[1].frames_drop); 6356 } 6357 6358 rc = sbuf_finish(sb); 6359 sbuf_delete(sb); 6360 6361 return (rc); 6362} 6363 6364static int 6365sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 6366{ 6367 struct adapter *sc = arg1; 6368 struct sbuf *sb; 6369 int rc, i; 6370 unsigned int map, kbps, ipg, mode; 6371 unsigned int pace_tab[NTX_SCHED]; 6372 6373 rc = sysctl_wire_old_buffer(req, 0); 6374 if (rc != 0) 6375 return (rc); 6376 6377 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6378 if (sb == NULL) 6379 return (ENOMEM); 6380 6381 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 6382 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 6383 t4_read_pace_tbl(sc, pace_tab); 6384 6385 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 6386 "Class IPG (0.1 ns) Flow IPG (us)"); 6387 6388 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 6389 t4_get_tx_sched(sc, i, &kbps, &ipg); 6390 sbuf_printf(sb, "\n %u %-5s %u ", i, 6391 (mode & (1 << i)) ? 
"flow" : "class", map & 3); 6392 if (kbps) 6393 sbuf_printf(sb, "%9u ", kbps); 6394 else 6395 sbuf_printf(sb, " disabled "); 6396 6397 if (ipg) 6398 sbuf_printf(sb, "%13u ", ipg); 6399 else 6400 sbuf_printf(sb, " disabled "); 6401 6402 if (pace_tab[i]) 6403 sbuf_printf(sb, "%10u", pace_tab[i]); 6404 else 6405 sbuf_printf(sb, " disabled"); 6406 } 6407 6408 rc = sbuf_finish(sb); 6409 sbuf_delete(sb); 6410 6411 return (rc); 6412} 6413 6414static int 6415sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 6416{ 6417 struct adapter *sc = arg1; 6418 struct sbuf *sb; 6419 int rc, i, j; 6420 uint64_t *p0, *p1; 6421 struct lb_port_stats s[2]; 6422 static const char *stat_name[] = { 6423 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 6424 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 6425 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 6426 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 6427 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 6428 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 6429 "BG2FramesTrunc:", "BG3FramesTrunc:" 6430 }; 6431 6432 rc = sysctl_wire_old_buffer(req, 0); 6433 if (rc != 0) 6434 return (rc); 6435 6436 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6437 if (sb == NULL) 6438 return (ENOMEM); 6439 6440 memset(s, 0, sizeof(s)); 6441 6442 for (i = 0; i < sc->chip_params->nchan; i += 2) { 6443 t4_get_lb_stats(sc, i, &s[0]); 6444 t4_get_lb_stats(sc, i + 1, &s[1]); 6445 6446 p0 = &s[0].octets; 6447 p1 = &s[1].octets; 6448 sbuf_printf(sb, "%s Loopback %u" 6449 " Loopback %u", i == 0 ? "" : "\n", i, i + 1); 6450 6451 for (j = 0; j < nitems(stat_name); j++) 6452 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6453 *p0++, *p1++); 6454 } 6455 6456 rc = sbuf_finish(sb); 6457 sbuf_delete(sb); 6458 6459 return (rc); 6460} 6461 6462static int 6463sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6464{ 6465 int rc = 0; 6466 struct port_info *pi = arg1; 6467 struct link_config *lc = &pi->link_cfg; 6468 struct sbuf *sb; 6469 6470 rc = sysctl_wire_old_buffer(req, 0); 6471 if (rc != 0) 6472 return(rc); 6473 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6474 if (sb == NULL) 6475 return (ENOMEM); 6476 6477 if (lc->link_ok || lc->link_down_rc == 255) 6478 sbuf_printf(sb, "n/a"); 6479 else 6480 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); 6481 6482 rc = sbuf_finish(sb); 6483 sbuf_delete(sb); 6484 6485 return (rc); 6486} 6487 6488struct mem_desc { 6489 unsigned int base; 6490 unsigned int limit; 6491 unsigned int idx; 6492}; 6493 6494static int 6495mem_desc_cmp(const void *a, const void *b) 6496{ 6497 return ((const struct mem_desc *)a)->base - 6498 ((const struct mem_desc *)b)->base; 6499} 6500 6501static void 6502mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6503 unsigned int to) 6504{ 6505 unsigned int size; 6506 6507 if (from == to) 6508 return; 6509 6510 size = to - from + 1; 6511 if (size == 0) 6512 return; 6513 6514 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6515 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6516} 6517 6518static int 6519sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6520{ 6521 struct adapter *sc = arg1; 6522 struct sbuf *sb; 6523 int rc, i, n; 6524 uint32_t lo, hi, used, alloc; 6525 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6526 static const char *region[] = { 6527 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6528 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6529 "Tx payload:", "Rx 
payload:", "LE hash:", "iSCSI region:", 6530 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6531 "RQUDP region:", "PBL region:", "TXPBL region:", 6532 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6533 "On-chip queues:" 6534 }; 6535 struct mem_desc avail[4]; 6536 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6537 struct mem_desc *md = mem; 6538 6539 rc = sysctl_wire_old_buffer(req, 0); 6540 if (rc != 0) 6541 return (rc); 6542 6543 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6544 if (sb == NULL) 6545 return (ENOMEM); 6546 6547 for (i = 0; i < nitems(mem); i++) { 6548 mem[i].limit = 0; 6549 mem[i].idx = i; 6550 } 6551 6552 /* Find and sort the populated memory ranges */ 6553 i = 0; 6554 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6555 if (lo & F_EDRAM0_ENABLE) { 6556 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6557 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6558 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6559 avail[i].idx = 0; 6560 i++; 6561 } 6562 if (lo & F_EDRAM1_ENABLE) { 6563 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 6564 avail[i].base = G_EDRAM1_BASE(hi) << 20; 6565 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 6566 avail[i].idx = 1; 6567 i++; 6568 } 6569 if (lo & F_EXT_MEM_ENABLE) { 6570 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 6571 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 6572 avail[i].limit = avail[i].base + 6573 (G_EXT_MEM_SIZE(hi) << 20); 6574 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */ 6575 i++; 6576 } 6577 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { 6578 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 6579 avail[i].base = G_EXT_MEM1_BASE(hi) << 20; 6580 avail[i].limit = avail[i].base + 6581 (G_EXT_MEM1_SIZE(hi) << 20); 6582 avail[i].idx = 4; 6583 i++; 6584 } 6585 if (!i) /* no memory available */ 6586 return 0; 6587 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); 6588 6589 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); 6590 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); 6591 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); 6592 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 6593 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); 6594 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); 6595 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); 6596 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); 6597 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); 6598 6599 /* the next few have explicit upper bounds */ 6600 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); 6601 md->limit = md->base - 1 + 6602 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * 6603 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); 6604 md++; 6605 6606 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); 6607 md->limit = md->base - 1 + 6608 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * 6609 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); 6610 md++; 6611 6612 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6613 if (chip_id(sc) <= CHELSIO_T5) 6614 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 6615 else 6616 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); 6617 md->limit = 0; 6618 } else { 6619 md->base = 0; 6620 md->idx = nitems(region); /* hide it */ 6621 } 6622 md++; 6623 6624#define ulp_region(reg) \ 6625 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ 6626 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) 6627 6628 ulp_region(RX_ISCSI); 6629 ulp_region(RX_TDDP); 6630 ulp_region(TX_TPT); 6631 ulp_region(RX_STAG); 6632 ulp_region(RX_RQ); 6633 ulp_region(RX_RQUDP); 6634 
ulp_region(RX_PBL); 6635 ulp_region(TX_PBL); 6636#undef ulp_region 6637 6638 md->base = 0; 6639 md->idx = nitems(region); 6640 if (!is_t4(sc)) { 6641 uint32_t size = 0; 6642 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); 6643 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); 6644 6645 if (is_t5(sc)) { 6646 if (sge_ctrl & F_VFIFO_ENABLE) 6647 size = G_DBVFIFO_SIZE(fifo_size); 6648 } else 6649 size = G_T6_DBVFIFO_SIZE(fifo_size); 6650 6651 if (size) { 6652 md->base = G_BASEADDR(t4_read_reg(sc, 6653 A_SGE_DBVFIFO_BADDR)); 6654 md->limit = md->base + (size << 2) - 1; 6655 } 6656 } 6657 md++; 6658 6659 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 6660 md->limit = 0; 6661 md++; 6662 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 6663 md->limit = 0; 6664 md++; 6665 6666 md->base = sc->vres.ocq.start; 6667 if (sc->vres.ocq.size) 6668 md->limit = md->base + sc->vres.ocq.size - 1; 6669 else 6670 md->idx = nitems(region); /* hide it */ 6671 md++; 6672 6673 /* add any address-space holes, there can be up to 3 */ 6674 for (n = 0; n < i - 1; n++) 6675 if (avail[n].limit < avail[n + 1].base) 6676 (md++)->base = avail[n].limit; 6677 if (avail[n].limit) 6678 (md++)->base = avail[n].limit; 6679 6680 n = md - mem; 6681 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 6682 6683 for (lo = 0; lo < i; lo++) 6684 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 6685 avail[lo].limit - 1); 6686 6687 sbuf_printf(sb, "\n"); 6688 for (i = 0; i < n; i++) { 6689 if (mem[i].idx >= nitems(region)) 6690 continue; /* skip holes */ 6691 if (!mem[i].limit) 6692 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 6693 mem_region_show(sb, region[mem[i].idx], mem[i].base, 6694 mem[i].limit); 6695 } 6696 6697 sbuf_printf(sb, "\n"); 6698 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 6699 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 6700 mem_region_show(sb, "uP RAM:", lo, hi); 6701 6702 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 6703 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 6704 mem_region_show(sb, "uP Extmem2:", lo, hi); 6705 6706 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 6707 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 6708 G_PMRXMAXPAGE(lo), 6709 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 6710 (lo & F_PMRXNUMCHN) ? 2 : 1); 6711 6712 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 6713 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 6714 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 6715 G_PMTXMAXPAGE(lo), 6716 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 6717 hi >= (1 << 20) ? 
'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 6718 sbuf_printf(sb, "%u p-structs\n", 6719 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 6720 6721 for (i = 0; i < 4; i++) { 6722 if (chip_id(sc) > CHELSIO_T5) 6723 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); 6724 else 6725 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 6726 if (is_t5(sc)) { 6727 used = G_T5_USED(lo); 6728 alloc = G_T5_ALLOC(lo); 6729 } else { 6730 used = G_USED(lo); 6731 alloc = G_ALLOC(lo); 6732 } 6733 /* For T6 these are MAC buffer groups */ 6734 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 6735 i, used, alloc); 6736 } 6737 for (i = 0; i < sc->chip_params->nchan; i++) { 6738 if (chip_id(sc) > CHELSIO_T5) 6739 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); 6740 else 6741 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 6742 if (is_t5(sc)) { 6743 used = G_T5_USED(lo); 6744 alloc = G_T5_ALLOC(lo); 6745 } else { 6746 used = G_USED(lo); 6747 alloc = G_ALLOC(lo); 6748 } 6749 /* For T6 these are MAC buffer groups */ 6750 sbuf_printf(sb, 6751 "\nLoopback %d using %u pages out of %u allocated", 6752 i, used, alloc); 6753 } 6754 6755 rc = sbuf_finish(sb); 6756 sbuf_delete(sb); 6757 6758 return (rc); 6759} 6760 6761static inline void 6762tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 6763{ 6764 *mask = x | y; 6765 y = htobe64(y); 6766 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 6767} 6768 6769static int 6770sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 6771{ 6772 struct adapter *sc = arg1; 6773 struct sbuf *sb; 6774 int rc, i; 6775 6776 MPASS(chip_id(sc) <= CHELSIO_T5); 6777 6778 rc = sysctl_wire_old_buffer(req, 0); 6779 if (rc != 0) 6780 return (rc); 6781 6782 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6783 if (sb == NULL) 6784 return (ENOMEM); 6785 6786 sbuf_printf(sb, 6787 "Idx Ethernet address Mask Vld Ports PF" 6788 " VF Replication P0 P1 P2 P3 ML"); 6789 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6790 uint64_t tcamx, tcamy, mask; 6791 uint32_t cls_lo, cls_hi; 6792 uint8_t addr[ETHER_ADDR_LEN]; 6793 6794 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 6795 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 6796 if (tcamx & tcamy) 6797 continue; 6798 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6799 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6800 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6801 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 6802 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], 6803 addr[3], addr[4], addr[5], (uintmax_t)mask, 6804 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 6805 G_PORTMAP(cls_hi), G_PF(cls_lo), 6806 (cls_lo & F_VF_VALID) ? 
G_VF(cls_lo) : -1); 6807 6808 if (cls_lo & F_REPLICATE) { 6809 struct fw_ldst_cmd ldst_cmd; 6810 6811 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6812 ldst_cmd.op_to_addrspace = 6813 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6814 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6815 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6816 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6817 ldst_cmd.u.mps.rplc.fid_idx = 6818 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6819 V_FW_LDST_CMD_IDX(i)); 6820 6821 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6822 "t4mps"); 6823 if (rc) 6824 break; 6825 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6826 sizeof(ldst_cmd), &ldst_cmd); 6827 end_synchronized_op(sc, 0); 6828 6829 if (rc != 0) { 6830 sbuf_printf(sb, "%36d", rc); 6831 rc = 0; 6832 } else { 6833 sbuf_printf(sb, " %08x %08x %08x %08x", 6834 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6835 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6836 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6837 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6838 } 6839 } else 6840 sbuf_printf(sb, "%36s", ""); 6841 6842 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 6843 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 6844 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 6845 } 6846 6847 if (rc) 6848 (void) sbuf_finish(sb); 6849 else 6850 rc = sbuf_finish(sb); 6851 sbuf_delete(sb); 6852 6853 return (rc); 6854} 6855 6856static int 6857sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) 6858{ 6859 struct adapter *sc = arg1; 6860 struct sbuf *sb; 6861 int rc, i; 6862 6863 MPASS(chip_id(sc) > CHELSIO_T5); 6864 6865 rc = sysctl_wire_old_buffer(req, 0); 6866 if (rc != 0) 6867 return (rc); 6868 6869 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6870 if (sb == NULL) 6871 return (ENOMEM); 6872 6873 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" 6874 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" 6875 " Replication" 6876 " P0 P1 P2 P3 ML\n"); 6877 6878 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6879 uint8_t dip_hit, vlan_vld, lookup_type, port_num; 6880 uint16_t ivlan; 6881 uint64_t tcamx, tcamy, val, mask; 6882 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; 6883 uint8_t addr[ETHER_ADDR_LEN]; 6884 6885 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); 6886 if (i < 256) 6887 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); 6888 else 6889 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); 6890 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6891 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6892 tcamy = G_DMACH(val) << 32; 6893 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6894 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6895 lookup_type = G_DATALKPTYPE(data2); 6896 port_num = G_DATAPORTNUM(data2); 6897 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6898 /* Inner header VNI */ 6899 vniy = ((data2 & F_DATAVIDH2) << 23) | 6900 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6901 dip_hit = data2 & F_DATADIPHIT; 6902 vlan_vld = 0; 6903 } else { 6904 vniy = 0; 6905 dip_hit = 0; 6906 vlan_vld = data2 & F_DATAVIDH2; 6907 ivlan = G_VIDL(val); 6908 } 6909 6910 ctl |= V_CTLXYBITSEL(1); 6911 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6912 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6913 tcamx = G_DMACH(val) << 32; 6914 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6915 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6916 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6917 /* Inner header VNI mask */ 6918 vnix = ((data2 & F_DATAVIDH2) << 23) | 6919 (G_DATAVIDH1(data2) << 
16) | G_VIDL(val); 6920 } else 6921 vnix = 0; 6922 6923 if (tcamx & tcamy) 6924 continue; 6925 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6926 6927 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6928 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6929 6930 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6931 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6932 "%012jx %06x %06x - - %3c" 6933 " 'I' %4x %3c %#x%4u%4d", i, addr[0], 6934 addr[1], addr[2], addr[3], addr[4], addr[5], 6935 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', 6936 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6937 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6938 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6939 } else { 6940 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6941 "%012jx - - ", i, addr[0], addr[1], 6942 addr[2], addr[3], addr[4], addr[5], 6943 (uintmax_t)mask); 6944 6945 if (vlan_vld) 6946 sbuf_printf(sb, "%4u Y ", ivlan); 6947 else 6948 sbuf_printf(sb, " - N "); 6949 6950 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", 6951 lookup_type ? 'I' : 'O', port_num, 6952 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6953 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6954 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6955 } 6956 6957 6958 if (cls_lo & F_T6_REPLICATE) { 6959 struct fw_ldst_cmd ldst_cmd; 6960 6961 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6962 ldst_cmd.op_to_addrspace = 6963 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6964 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6965 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6966 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6967 ldst_cmd.u.mps.rplc.fid_idx = 6968 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6969 V_FW_LDST_CMD_IDX(i)); 6970 6971 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6972 "t6mps"); 6973 if (rc) 6974 break; 6975 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6976 sizeof(ldst_cmd), &ldst_cmd); 6977 end_synchronized_op(sc, 0); 6978 6979 if (rc != 0) { 6980 sbuf_printf(sb, "%72d", rc); 6981 rc = 0; 6982 } else { 6983 sbuf_printf(sb, " %08x %08x %08x %08x" 6984 " %08x %08x %08x %08x", 6985 be32toh(ldst_cmd.u.mps.rplc.rplc255_224), 6986 be32toh(ldst_cmd.u.mps.rplc.rplc223_192), 6987 be32toh(ldst_cmd.u.mps.rplc.rplc191_160), 6988 be32toh(ldst_cmd.u.mps.rplc.rplc159_128), 6989 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6990 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6991 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6992 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6993 } 6994 } else 6995 sbuf_printf(sb, "%72s", ""); 6996 6997 sbuf_printf(sb, "%4u%3u%3u%3u %#x", 6998 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), 6999 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), 7000 (cls_lo >> S_T6_MULTILISTEN0) & 0xf); 7001 } 7002 7003 if (rc) 7004 (void) sbuf_finish(sb); 7005 else 7006 rc = sbuf_finish(sb); 7007 sbuf_delete(sb); 7008 7009 return (rc); 7010} 7011 7012static int 7013sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 7014{ 7015 struct adapter *sc = arg1; 7016 struct sbuf *sb; 7017 int rc; 7018 uint16_t mtus[NMTUS]; 7019 7020 rc = sysctl_wire_old_buffer(req, 0); 7021 if (rc != 0) 7022 return (rc); 7023 7024 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7025 if (sb == NULL) 7026 return (ENOMEM); 7027 7028 t4_read_mtu_tbl(sc, mtus, NULL); 7029 7030 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 7031 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 7032 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 7033 mtus[14], mtus[15]); 7034 7035 rc = sbuf_finish(sb); 7036 sbuf_delete(sb); 7037 7038 return (rc); 7039} 7040 7041static int 
7042sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 7043{ 7044 struct adapter *sc = arg1; 7045 struct sbuf *sb; 7046 int rc, i; 7047 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; 7048 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; 7049 static const char *tx_stats[MAX_PM_NSTATS] = { 7050 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", 7051 "Tx FIFO wait", NULL, "Tx latency" 7052 }; 7053 static const char *rx_stats[MAX_PM_NSTATS] = { 7054 "Read:", "Write bypass:", "Write mem:", "Flush:", 7055 "Rx FIFO wait", NULL, "Rx latency" 7056 }; 7057 7058 rc = sysctl_wire_old_buffer(req, 0); 7059 if (rc != 0) 7060 return (rc); 7061 7062 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7063 if (sb == NULL) 7064 return (ENOMEM); 7065 7066 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); 7067 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); 7068 7069 sbuf_printf(sb, " Tx pcmds Tx bytes"); 7070 for (i = 0; i < 4; i++) { 7071 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7072 tx_cyc[i]); 7073 } 7074 7075 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 7076 for (i = 0; i < 4; i++) { 7077 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7078 rx_cyc[i]); 7079 } 7080 7081 if (chip_id(sc) > CHELSIO_T5) { 7082 sbuf_printf(sb, 7083 "\n Total wait Total occupancy"); 7084 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7085 tx_cyc[i]); 7086 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7087 rx_cyc[i]); 7088 7089 i += 2; 7090 MPASS(i < nitems(tx_stats)); 7091 7092 sbuf_printf(sb, 7093 "\n Reads Total wait"); 7094 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7095 tx_cyc[i]); 7096 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7097 rx_cyc[i]); 7098 } 7099 7100 rc = sbuf_finish(sb); 7101 sbuf_delete(sb); 7102 7103 return (rc); 7104} 7105 7106static int 7107sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 7108{ 7109 struct adapter *sc = arg1; 7110 struct sbuf *sb; 7111 int rc; 7112 struct tp_rdma_stats stats; 7113 7114 rc = sysctl_wire_old_buffer(req, 0); 7115 if (rc != 0) 7116 return (rc); 7117 7118 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7119 if (sb == NULL) 7120 return (ENOMEM); 7121 7122 mtx_lock(&sc->reg_lock); 7123 t4_tp_get_rdma_stats(sc, &stats); 7124 mtx_unlock(&sc->reg_lock); 7125 7126 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); 7127 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); 7128 7129 rc = sbuf_finish(sb); 7130 sbuf_delete(sb); 7131 7132 return (rc); 7133} 7134 7135static int 7136sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 7137{ 7138 struct adapter *sc = arg1; 7139 struct sbuf *sb; 7140 int rc; 7141 struct tp_tcp_stats v4, v6; 7142 7143 rc = sysctl_wire_old_buffer(req, 0); 7144 if (rc != 0) 7145 return (rc); 7146 7147 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7148 if (sb == NULL) 7149 return (ENOMEM); 7150 7151 mtx_lock(&sc->reg_lock); 7152 t4_tp_get_tcp_stats(sc, &v4, &v6); 7153 mtx_unlock(&sc->reg_lock); 7154 7155 sbuf_printf(sb, 7156 " IP IPv6\n"); 7157 sbuf_printf(sb, "OutRsts: %20u %20u\n", 7158 v4.tcp_out_rsts, v6.tcp_out_rsts); 7159 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 7160 v4.tcp_in_segs, v6.tcp_in_segs); 7161 sbuf_printf(sb, "OutSegs: %20ju %20ju\n", 7162 v4.tcp_out_segs, v6.tcp_out_segs); 7163 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 7164 v4.tcp_retrans_segs, v6.tcp_retrans_segs); 7165 7166 rc = sbuf_finish(sb); 7167 sbuf_delete(sb); 7168 7169 return (rc); 7170} 7171 7172static int 7173sysctl_tids(SYSCTL_HANDLER_ARGS) 7174{ 7175 struct adapter *sc = arg1; 7176 struct sbuf *sb; 7177 
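	/*
	 * Summarizes the adapter's TID namespace: ATID, TID (TCAM and hash),
	 * STID, FTID and ETID ranges with their current usage, plus the
	 * hardware counts of active IPv4/IPv6 connections.  The handler is
	 * hooked up elsewhere in the driver, typically as a read-only "tids"
	 * string node in the adapter's sysctl tree.
	 */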
int rc; 7178 struct tid_info *t = &sc->tids; 7179 7180 rc = sysctl_wire_old_buffer(req, 0); 7181 if (rc != 0) 7182 return (rc); 7183 7184 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7185 if (sb == NULL) 7186 return (ENOMEM); 7187 7188 if (t->natids) { 7189 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 7190 t->atids_in_use); 7191 } 7192 7193 if (t->ntids) { 7194 sbuf_printf(sb, "TID range: "); 7195 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 7196 uint32_t b, hb; 7197 7198 if (chip_id(sc) <= CHELSIO_T5) { 7199 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 7200 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; 7201 } else { 7202 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); 7203 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE); 7204 } 7205 7206 if (b) 7207 sbuf_printf(sb, "0-%u, ", b - 1); 7208 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1); 7209 } else 7210 sbuf_printf(sb, "0-%u", t->ntids - 1); 7211 sbuf_printf(sb, ", in use: %u\n", 7212 atomic_load_acq_int(&t->tids_in_use)); 7213 } 7214 7215 if (t->nstids) { 7216 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 7217 t->stid_base + t->nstids - 1, t->stids_in_use); 7218 } 7219 7220 if (t->nftids) { 7221 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 7222 t->ftid_base + t->nftids - 1); 7223 } 7224 7225 if (t->netids) { 7226 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 7227 t->etid_base + t->netids - 1); 7228 } 7229 7230 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 7231 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 7232 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 7233 7234 rc = sbuf_finish(sb); 7235 sbuf_delete(sb); 7236 7237 return (rc); 7238} 7239 7240static int 7241sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 7242{ 7243 struct adapter *sc = arg1; 7244 struct sbuf *sb; 7245 int rc; 7246 struct tp_err_stats stats; 7247 7248 rc = sysctl_wire_old_buffer(req, 0); 7249 if (rc != 0) 7250 return (rc); 7251 7252 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7253 if (sb == NULL) 7254 return (ENOMEM); 7255 7256 mtx_lock(&sc->reg_lock); 7257 t4_tp_get_err_stats(sc, &stats); 7258 mtx_unlock(&sc->reg_lock); 7259 7260 if (sc->chip_params->nchan > 2) { 7261 sbuf_printf(sb, " channel 0 channel 1" 7262 " channel 2 channel 3\n"); 7263 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 7264 stats.mac_in_errs[0], stats.mac_in_errs[1], 7265 stats.mac_in_errs[2], stats.mac_in_errs[3]); 7266 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 7267 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 7268 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 7269 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 7270 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 7271 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 7272 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 7273 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 7274 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 7275 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 7276 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 7277 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 7278 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 7279 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 7280 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 7281 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 7282 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], 7283 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 7284 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 7285 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 7286 stats.ofld_chan_drops[2], 
stats.ofld_chan_drops[3]); 7287 } else { 7288 sbuf_printf(sb, " channel 0 channel 1\n"); 7289 sbuf_printf(sb, "macInErrs: %10u %10u\n", 7290 stats.mac_in_errs[0], stats.mac_in_errs[1]); 7291 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 7292 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 7293 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 7294 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 7295 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 7296 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 7297 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 7298 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 7299 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 7300 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 7301 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 7302 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 7303 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 7304 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 7305 } 7306 7307 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 7308 stats.ofld_no_neigh, stats.ofld_cong_defer); 7309 7310 rc = sbuf_finish(sb); 7311 sbuf_delete(sb); 7312 7313 return (rc); 7314} 7315 7316static int 7317sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) 7318{ 7319 struct adapter *sc = arg1; 7320 struct tp_params *tpp = &sc->params.tp; 7321 u_int mask; 7322 int rc; 7323 7324 mask = tpp->la_mask >> 16; 7325 rc = sysctl_handle_int(oidp, &mask, 0, req); 7326 if (rc != 0 || req->newptr == NULL) 7327 return (rc); 7328 if (mask > 0xffff) 7329 return (EINVAL); 7330 tpp->la_mask = mask << 16; 7331 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); 7332 7333 return (0); 7334} 7335 7336struct field_desc { 7337 const char *name; 7338 u_int start; 7339 u_int width; 7340}; 7341 7342static void 7343field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 7344{ 7345 char buf[32]; 7346 int line_size = 0; 7347 7348 while (f->name) { 7349 uint64_t mask = (1ULL << f->width) - 1; 7350 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 7351 ((uintmax_t)v >> f->start) & mask); 7352 7353 if (line_size + len >= 79) { 7354 line_size = 8; 7355 sbuf_printf(sb, "\n "); 7356 } 7357 sbuf_printf(sb, "%s ", buf); 7358 line_size += len + 1; 7359 f++; 7360 } 7361 sbuf_printf(sb, "\n"); 7362} 7363 7364static const struct field_desc tp_la0[] = { 7365 { "RcfOpCodeOut", 60, 4 }, 7366 { "State", 56, 4 }, 7367 { "WcfState", 52, 4 }, 7368 { "RcfOpcSrcOut", 50, 2 }, 7369 { "CRxError", 49, 1 }, 7370 { "ERxError", 48, 1 }, 7371 { "SanityFailed", 47, 1 }, 7372 { "SpuriousMsg", 46, 1 }, 7373 { "FlushInputMsg", 45, 1 }, 7374 { "FlushInputCpl", 44, 1 }, 7375 { "RssUpBit", 43, 1 }, 7376 { "RssFilterHit", 42, 1 }, 7377 { "Tid", 32, 10 }, 7378 { "InitTcb", 31, 1 }, 7379 { "LineNumber", 24, 7 }, 7380 { "Emsg", 23, 1 }, 7381 { "EdataOut", 22, 1 }, 7382 { "Cmsg", 21, 1 }, 7383 { "CdataOut", 20, 1 }, 7384 { "EreadPdu", 19, 1 }, 7385 { "CreadPdu", 18, 1 }, 7386 { "TunnelPkt", 17, 1 }, 7387 { "RcfPeerFin", 16, 1 }, 7388 { "RcfReasonOut", 12, 4 }, 7389 { "TxCchannel", 10, 2 }, 7390 { "RcfTxChannel", 8, 2 }, 7391 { "RxEchannel", 6, 2 }, 7392 { "RcfRxChannel", 5, 1 }, 7393 { "RcfDataOutSrdy", 4, 1 }, 7394 { "RxDvld", 3, 1 }, 7395 { "RxOoDvld", 2, 1 }, 7396 { "RxCongestion", 1, 1 }, 7397 { "TxCongestion", 0, 1 }, 7398 { NULL } 7399}; 7400 7401static const struct field_desc tp_la1[] = { 7402 { "CplCmdIn", 56, 8 }, 7403 { "CplCmdOut", 48, 8 }, 7404 { "ESynOut", 47, 1 }, 7405 { "EAckOut", 46, 1 }, 7406 { "EFinOut", 45, 1 }, 7407 { "ERstOut", 44, 1 }, 7408 { "SynIn", 43, 1 }, 7409 { "AckIn", 42, 1 }, 7410 { 
"FinIn", 41, 1 }, 7411 { "RstIn", 40, 1 }, 7412 { "DataIn", 39, 1 }, 7413 { "DataInVld", 38, 1 }, 7414 { "PadIn", 37, 1 }, 7415 { "RxBufEmpty", 36, 1 }, 7416 { "RxDdp", 35, 1 }, 7417 { "RxFbCongestion", 34, 1 }, 7418 { "TxFbCongestion", 33, 1 }, 7419 { "TxPktSumSrdy", 32, 1 }, 7420 { "RcfUlpType", 28, 4 }, 7421 { "Eread", 27, 1 }, 7422 { "Ebypass", 26, 1 }, 7423 { "Esave", 25, 1 }, 7424 { "Static0", 24, 1 }, 7425 { "Cread", 23, 1 }, 7426 { "Cbypass", 22, 1 }, 7427 { "Csave", 21, 1 }, 7428 { "CPktOut", 20, 1 }, 7429 { "RxPagePoolFull", 18, 2 }, 7430 { "RxLpbkPkt", 17, 1 }, 7431 { "TxLpbkPkt", 16, 1 }, 7432 { "RxVfValid", 15, 1 }, 7433 { "SynLearned", 14, 1 }, 7434 { "SetDelEntry", 13, 1 }, 7435 { "SetInvEntry", 12, 1 }, 7436 { "CpcmdDvld", 11, 1 }, 7437 { "CpcmdSave", 10, 1 }, 7438 { "RxPstructsFull", 8, 2 }, 7439 { "EpcmdDvld", 7, 1 }, 7440 { "EpcmdFlush", 6, 1 }, 7441 { "EpcmdTrimPrefix", 5, 1 }, 7442 { "EpcmdTrimPostfix", 4, 1 }, 7443 { "ERssIp4Pkt", 3, 1 }, 7444 { "ERssIp6Pkt", 2, 1 }, 7445 { "ERssTcpUdpPkt", 1, 1 }, 7446 { "ERssFceFipPkt", 0, 1 }, 7447 { NULL } 7448}; 7449 7450static const struct field_desc tp_la2[] = { 7451 { "CplCmdIn", 56, 8 }, 7452 { "MpsVfVld", 55, 1 }, 7453 { "MpsPf", 52, 3 }, 7454 { "MpsVf", 44, 8 }, 7455 { "SynIn", 43, 1 }, 7456 { "AckIn", 42, 1 }, 7457 { "FinIn", 41, 1 }, 7458 { "RstIn", 40, 1 }, 7459 { "DataIn", 39, 1 }, 7460 { "DataInVld", 38, 1 }, 7461 { "PadIn", 37, 1 }, 7462 { "RxBufEmpty", 36, 1 }, 7463 { "RxDdp", 35, 1 }, 7464 { "RxFbCongestion", 34, 1 }, 7465 { "TxFbCongestion", 33, 1 }, 7466 { "TxPktSumSrdy", 32, 1 }, 7467 { "RcfUlpType", 28, 4 }, 7468 { "Eread", 27, 1 }, 7469 { "Ebypass", 26, 1 }, 7470 { "Esave", 25, 1 }, 7471 { "Static0", 24, 1 }, 7472 { "Cread", 23, 1 }, 7473 { "Cbypass", 22, 1 }, 7474 { "Csave", 21, 1 }, 7475 { "CPktOut", 20, 1 }, 7476 { "RxPagePoolFull", 18, 2 }, 7477 { "RxLpbkPkt", 17, 1 }, 7478 { "TxLpbkPkt", 16, 1 }, 7479 { "RxVfValid", 15, 1 }, 7480 { "SynLearned", 14, 1 }, 7481 { "SetDelEntry", 13, 1 }, 7482 { "SetInvEntry", 12, 1 }, 7483 { "CpcmdDvld", 11, 1 }, 7484 { "CpcmdSave", 10, 1 }, 7485 { "RxPstructsFull", 8, 2 }, 7486 { "EpcmdDvld", 7, 1 }, 7487 { "EpcmdFlush", 6, 1 }, 7488 { "EpcmdTrimPrefix", 5, 1 }, 7489 { "EpcmdTrimPostfix", 4, 1 }, 7490 { "ERssIp4Pkt", 3, 1 }, 7491 { "ERssIp6Pkt", 2, 1 }, 7492 { "ERssTcpUdpPkt", 1, 1 }, 7493 { "ERssFceFipPkt", 0, 1 }, 7494 { NULL } 7495}; 7496 7497static void 7498tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 7499{ 7500 7501 field_desc_show(sb, *p, tp_la0); 7502} 7503 7504static void 7505tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 7506{ 7507 7508 if (idx) 7509 sbuf_printf(sb, "\n"); 7510 field_desc_show(sb, p[0], tp_la0); 7511 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7512 field_desc_show(sb, p[1], tp_la0); 7513} 7514 7515static void 7516tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 7517{ 7518 7519 if (idx) 7520 sbuf_printf(sb, "\n"); 7521 field_desc_show(sb, p[0], tp_la0); 7522 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7523 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 7524} 7525 7526static int 7527sysctl_tp_la(SYSCTL_HANDLER_ARGS) 7528{ 7529 struct adapter *sc = arg1; 7530 struct sbuf *sb; 7531 uint64_t *buf, *p; 7532 int rc; 7533 u_int i, inc; 7534 void (*show_func)(struct sbuf *, uint64_t *, int); 7535 7536 rc = sysctl_wire_old_buffer(req, 0); 7537 if (rc != 0) 7538 return (rc); 7539 7540 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7541 if (sb == NULL) 7542 return (ENOMEM); 7543 7544 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 7545 7546 t4_tp_read_la(sc, buf, NULL); 7547 p = buf; 7548 7549 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 7550 case 2: 7551 inc = 2; 7552 show_func = tp_la_show2; 7553 break; 7554 case 3: 7555 inc = 2; 7556 show_func = tp_la_show3; 7557 break; 7558 default: 7559 inc = 1; 7560 show_func = tp_la_show; 7561 } 7562 7563 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 7564 (*show_func)(sb, p, i); 7565 7566 rc = sbuf_finish(sb); 7567 sbuf_delete(sb); 7568 free(buf, M_CXGBE); 7569 return (rc); 7570} 7571 7572static int 7573sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 7574{ 7575 struct adapter *sc = arg1; 7576 struct sbuf *sb; 7577 int rc; 7578 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 7579 7580 rc = sysctl_wire_old_buffer(req, 0); 7581 if (rc != 0) 7582 return (rc); 7583 7584 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7585 if (sb == NULL) 7586 return (ENOMEM); 7587 7588 t4_get_chan_txrate(sc, nrate, orate); 7589 7590 if (sc->chip_params->nchan > 2) { 7591 sbuf_printf(sb, " channel 0 channel 1" 7592 " channel 2 channel 3\n"); 7593 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 7594 nrate[0], nrate[1], nrate[2], nrate[3]); 7595 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 7596 orate[0], orate[1], orate[2], orate[3]); 7597 } else { 7598 sbuf_printf(sb, " channel 0 channel 1\n"); 7599 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 7600 nrate[0], nrate[1]); 7601 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 7602 orate[0], orate[1]); 7603 } 7604 7605 rc = sbuf_finish(sb); 7606 sbuf_delete(sb); 7607 7608 return (rc); 7609} 7610 7611static int 7612sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 7613{ 7614 struct adapter *sc = arg1; 7615 struct sbuf *sb; 7616 uint32_t *buf, *p; 7617 int rc, i; 7618 7619 rc = sysctl_wire_old_buffer(req, 0); 7620 if (rc != 0) 7621 return (rc); 7622 7623 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7624 if (sb == NULL) 7625 return (ENOMEM); 7626 7627 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 7628 M_ZERO | M_WAITOK); 7629 7630 t4_ulprx_read_la(sc, buf); 7631 p = buf; 7632 7633 sbuf_printf(sb, " Pcmd Type Message" 7634 " Data"); 7635 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 7636 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 7637 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 7638 } 7639 7640 rc = sbuf_finish(sb); 7641 sbuf_delete(sb); 7642 free(buf, M_CXGBE); 7643 return (rc); 7644} 7645 7646static int 7647sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 7648{ 7649 struct adapter *sc = arg1; 7650 struct sbuf *sb; 7651 int rc, v; 7652 7653 MPASS(chip_id(sc) >= CHELSIO_T5); 7654 7655 rc = sysctl_wire_old_buffer(req, 0); 7656 if (rc != 0) 7657 return (rc); 7658 7659 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7660 if (sb == NULL) 7661 return (ENOMEM); 7662 7663 v = t4_read_reg(sc, A_SGE_STAT_CFG); 7664 if (G_STATSOURCE_T5(v) == 7) { 7665 int mode; 7666 7667 mode = is_t5(sc) ? 
G_STATMODE(v) : G_T6_STATMODE(v); 7668 if (mode == 0) { 7669 sbuf_printf(sb, "total %d, incomplete %d", 7670 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7671 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7672 } else if (mode == 1) { 7673 sbuf_printf(sb, "total %d, data overflow %d", 7674 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7675 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7676 } else { 7677 sbuf_printf(sb, "unknown mode %d", mode); 7678 } 7679 } 7680 rc = sbuf_finish(sb); 7681 sbuf_delete(sb); 7682 7683 return (rc); 7684} 7685 7686static int 7687sysctl_tc_params(SYSCTL_HANDLER_ARGS) 7688{ 7689 struct adapter *sc = arg1; 7690 struct tx_sched_class *tc; 7691 struct t4_sched_class_params p; 7692 struct sbuf *sb; 7693 int i, rc, port_id, flags, mbps, gbps; 7694 7695 rc = sysctl_wire_old_buffer(req, 0); 7696 if (rc != 0) 7697 return (rc); 7698 7699 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7700 if (sb == NULL) 7701 return (ENOMEM); 7702 7703 port_id = arg2 >> 16; 7704 MPASS(port_id < sc->params.nports); 7705 MPASS(sc->port[port_id] != NULL); 7706 i = arg2 & 0xffff; 7707 MPASS(i < sc->chip_params->nsched_cls); 7708 tc = &sc->port[port_id]->tc[i]; 7709 7710 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7711 "t4tc_p"); 7712 if (rc) 7713 goto done; 7714 flags = tc->flags; 7715 p = tc->params; 7716 end_synchronized_op(sc, LOCK_HELD); 7717 7718 if ((flags & TX_SC_OK) == 0) { 7719 sbuf_printf(sb, "none"); 7720 goto done; 7721 } 7722 7723 if (p.level == SCHED_CLASS_LEVEL_CL_WRR) { 7724 sbuf_printf(sb, "cl-wrr weight %u", p.weight); 7725 goto done; 7726 } else if (p.level == SCHED_CLASS_LEVEL_CL_RL) 7727 sbuf_printf(sb, "cl-rl"); 7728 else if (p.level == SCHED_CLASS_LEVEL_CH_RL) 7729 sbuf_printf(sb, "ch-rl"); 7730 else { 7731 rc = ENXIO; 7732 goto done; 7733 } 7734 7735 if (p.ratemode == SCHED_CLASS_RATEMODE_REL) { 7736 /* XXX: top speed or actual link speed? 
*/ 7737 gbps = port_top_speed(sc->port[port_id]); 7738 sbuf_printf(sb, " %u%% of %uGbps", p.maxrate, gbps); 7739 } 7740 else if (p.ratemode == SCHED_CLASS_RATEMODE_ABS) { 7741 switch (p.rateunit) { 7742 case SCHED_CLASS_RATEUNIT_BITS: 7743 mbps = p.maxrate / 1000; 7744 gbps = p.maxrate / 1000000; 7745 if (p.maxrate == gbps * 1000000) 7746 sbuf_printf(sb, " %uGbps", gbps); 7747 else if (p.maxrate == mbps * 1000) 7748 sbuf_printf(sb, " %uMbps", mbps); 7749 else 7750 sbuf_printf(sb, " %uKbps", p.maxrate); 7751 break; 7752 case SCHED_CLASS_RATEUNIT_PKTS: 7753 sbuf_printf(sb, " %upps", p.maxrate); 7754 break; 7755 default: 7756 rc = ENXIO; 7757 goto done; 7758 } 7759 } 7760 7761 switch (p.mode) { 7762 case SCHED_CLASS_MODE_CLASS: 7763 sbuf_printf(sb, " aggregate"); 7764 break; 7765 case SCHED_CLASS_MODE_FLOW: 7766 sbuf_printf(sb, " per-flow"); 7767 break; 7768 default: 7769 rc = ENXIO; 7770 goto done; 7771 } 7772 7773done: 7774 if (rc == 0) 7775 rc = sbuf_finish(sb); 7776 sbuf_delete(sb); 7777 7778 return (rc); 7779} 7780#endif 7781 7782#ifdef TCP_OFFLOAD 7783static void 7784unit_conv(char *buf, size_t len, u_int val, u_int factor) 7785{ 7786 u_int rem = val % factor; 7787 7788 if (rem == 0) 7789 snprintf(buf, len, "%u", val / factor); 7790 else { 7791 while (rem % 10 == 0) 7792 rem /= 10; 7793 snprintf(buf, len, "%u.%u", val / factor, rem); 7794 } 7795} 7796 7797static int 7798sysctl_tp_tick(SYSCTL_HANDLER_ARGS) 7799{ 7800 struct adapter *sc = arg1; 7801 char buf[16]; 7802 u_int res, re; 7803 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7804 7805 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 7806 switch (arg2) { 7807 case 0: 7808 /* timer_tick */ 7809 re = G_TIMERRESOLUTION(res); 7810 break; 7811 case 1: 7812 /* TCP timestamp tick */ 7813 re = G_TIMESTAMPRESOLUTION(res); 7814 break; 7815 case 2: 7816 /* DACK tick */ 7817 re = G_DELAYEDACKRESOLUTION(res); 7818 break; 7819 default: 7820 return (EDOOFUS); 7821 } 7822 7823 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000); 7824 7825 return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); 7826} 7827 7828static int 7829sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) 7830{ 7831 struct adapter *sc = arg1; 7832 u_int res, dack_re, v; 7833 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7834 7835 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 7836 dack_re = G_DELAYEDACKRESOLUTION(res); 7837 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER); 7838 7839 return (sysctl_handle_int(oidp, &v, 0, req)); 7840} 7841 7842static int 7843sysctl_tp_timer(SYSCTL_HANDLER_ARGS) 7844{ 7845 struct adapter *sc = arg1; 7846 int reg = arg2; 7847 u_int tre; 7848 u_long tp_tick_us, v; 7849 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 7850 7851 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || 7852 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || 7853 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || reg == A_TP_INIT_SRTT || 7854 reg == A_TP_FINWAIT2_TIMER); 7855 7856 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); 7857 tp_tick_us = (cclk_ps << tre) / 1000000; 7858 7859 if (reg == A_TP_INIT_SRTT) 7860 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); 7861 else 7862 v = tp_tick_us * t4_read_reg(sc, reg); 7863 7864 return (sysctl_handle_long(oidp, &v, 0, req)); 7865} 7866#endif 7867 7868static uint32_t 7869fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf) 7870{ 7871 uint32_t mode; 7872 7873 mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR | 7874 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT; 7875 7876 if
(fconf & F_FRAGMENTATION) 7877 mode |= T4_FILTER_IP_FRAGMENT; 7878 7879 if (fconf & F_MPSHITTYPE) 7880 mode |= T4_FILTER_MPS_HIT_TYPE; 7881 7882 if (fconf & F_MACMATCH) 7883 mode |= T4_FILTER_MAC_IDX; 7884 7885 if (fconf & F_ETHERTYPE) 7886 mode |= T4_FILTER_ETH_TYPE; 7887 7888 if (fconf & F_PROTOCOL) 7889 mode |= T4_FILTER_IP_PROTO; 7890 7891 if (fconf & F_TOS) 7892 mode |= T4_FILTER_IP_TOS; 7893 7894 if (fconf & F_VLAN) 7895 mode |= T4_FILTER_VLAN; 7896 7897 if (fconf & F_VNIC_ID) { 7898 mode |= T4_FILTER_VNIC; 7899 if (iconf & F_VNIC) 7900 mode |= T4_FILTER_IC_VNIC; 7901 } 7902 7903 if (fconf & F_PORT) 7904 mode |= T4_FILTER_PORT; 7905 7906 if (fconf & F_FCOE) 7907 mode |= T4_FILTER_FCoE; 7908 7909 return (mode); 7910} 7911 7912static uint32_t 7913mode_to_fconf(uint32_t mode) 7914{ 7915 uint32_t fconf = 0; 7916 7917 if (mode & T4_FILTER_IP_FRAGMENT) 7918 fconf |= F_FRAGMENTATION; 7919 7920 if (mode & T4_FILTER_MPS_HIT_TYPE) 7921 fconf |= F_MPSHITTYPE; 7922 7923 if (mode & T4_FILTER_MAC_IDX) 7924 fconf |= F_MACMATCH; 7925 7926 if (mode & T4_FILTER_ETH_TYPE) 7927 fconf |= F_ETHERTYPE; 7928 7929 if (mode & T4_FILTER_IP_PROTO) 7930 fconf |= F_PROTOCOL; 7931 7932 if (mode & T4_FILTER_IP_TOS) 7933 fconf |= F_TOS; 7934 7935 if (mode & T4_FILTER_VLAN) 7936 fconf |= F_VLAN; 7937 7938 if (mode & T4_FILTER_VNIC) 7939 fconf |= F_VNIC_ID; 7940 7941 if (mode & T4_FILTER_PORT) 7942 fconf |= F_PORT; 7943 7944 if (mode & T4_FILTER_FCoE) 7945 fconf |= F_FCOE; 7946 7947 return (fconf); 7948} 7949 7950static uint32_t 7951mode_to_iconf(uint32_t mode) 7952{ 7953 7954 if (mode & T4_FILTER_IC_VNIC) 7955 return (F_VNIC); 7956 return (0); 7957} 7958 7959static int check_fspec_against_fconf_iconf(struct adapter *sc, 7960 struct t4_filter_specification *fs) 7961{ 7962 struct tp_params *tpp = &sc->params.tp; 7963 uint32_t fconf = 0; 7964 7965 if (fs->val.frag || fs->mask.frag) 7966 fconf |= F_FRAGMENTATION; 7967 7968 if (fs->val.matchtype || fs->mask.matchtype) 7969 fconf |= F_MPSHITTYPE; 7970 7971 if (fs->val.macidx || fs->mask.macidx) 7972 fconf |= F_MACMATCH; 7973 7974 if (fs->val.ethtype || fs->mask.ethtype) 7975 fconf |= F_ETHERTYPE; 7976 7977 if (fs->val.proto || fs->mask.proto) 7978 fconf |= F_PROTOCOL; 7979 7980 if (fs->val.tos || fs->mask.tos) 7981 fconf |= F_TOS; 7982 7983 if (fs->val.vlan_vld || fs->mask.vlan_vld) 7984 fconf |= F_VLAN; 7985 7986 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 7987 fconf |= F_VNIC_ID; 7988 if (tpp->ingress_config & F_VNIC) 7989 return (EINVAL); 7990 } 7991 7992 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 7993 fconf |= F_VNIC_ID; 7994 if ((tpp->ingress_config & F_VNIC) == 0) 7995 return (EINVAL); 7996 } 7997 7998 if (fs->val.iport || fs->mask.iport) 7999 fconf |= F_PORT; 8000 8001 if (fs->val.fcoe || fs->mask.fcoe) 8002 fconf |= F_FCOE; 8003 8004 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 8005 return (E2BIG); 8006 8007 return (0); 8008} 8009 8010static int 8011get_filter_mode(struct adapter *sc, uint32_t *mode) 8012{ 8013 struct tp_params *tpp = &sc->params.tp; 8014 8015 /* 8016 * We trust the cached values of the relevant TP registers. This means 8017 * things work reliably only if writes to those registers are always via 8018 * t4_set_filter_mode. 
8019 */ 8020 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 8021 8022 return (0); 8023} 8024 8025static int 8026set_filter_mode(struct adapter *sc, uint32_t mode) 8027{ 8028 struct tp_params *tpp = &sc->params.tp; 8029 uint32_t fconf, iconf; 8030 int rc; 8031 8032 iconf = mode_to_iconf(mode); 8033 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 8034 /* 8035 * For now we just complain if A_TP_INGRESS_CONFIG is not 8036 * already set to the correct value for the requested filter 8037 * mode. It's not clear if it's safe to write to this register 8038 * on the fly. (And we trust the cached value of the register). 8039 */ 8040 return (EBUSY); 8041 } 8042 8043 fconf = mode_to_fconf(mode); 8044 8045 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8046 "t4setfm"); 8047 if (rc) 8048 return (rc); 8049 8050 if (sc->tids.ftids_in_use > 0) { 8051 rc = EBUSY; 8052 goto done; 8053 } 8054 8055#ifdef TCP_OFFLOAD 8056 if (uld_active(sc, ULD_TOM)) { 8057 rc = EBUSY; 8058 goto done; 8059 } 8060#endif 8061 8062 rc = -t4_set_filter_mode(sc, fconf); 8063done: 8064 end_synchronized_op(sc, LOCK_HELD); 8065 return (rc); 8066} 8067 8068static inline uint64_t 8069get_filter_hits(struct adapter *sc, uint32_t fid) 8070{ 8071 uint32_t tcb_addr; 8072 8073 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 8074 (fid + sc->tids.ftid_base) * TCB_SIZE; 8075 8076 if (is_t4(sc)) { 8077 uint64_t hits; 8078 8079 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); 8080 return (be64toh(hits)); 8081 } else { 8082 uint32_t hits; 8083 8084 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); 8085 return (be32toh(hits)); 8086 } 8087} 8088 8089static int 8090get_filter(struct adapter *sc, struct t4_filter *t) 8091{ 8092 int i, rc, nfilters = sc->tids.nftids; 8093 struct filter_entry *f; 8094 8095 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8096 "t4getf"); 8097 if (rc) 8098 return (rc); 8099 8100 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 8101 t->idx >= nfilters) { 8102 t->idx = 0xffffffff; 8103 goto done; 8104 } 8105 8106 f = &sc->tids.ftid_tab[t->idx]; 8107 for (i = t->idx; i < nfilters; i++, f++) { 8108 if (f->valid) { 8109 t->idx = i; 8110 t->l2tidx = f->l2t ? 
f->l2t->idx : 0; 8111 t->smtidx = f->smtidx; 8112 if (f->fs.hitcnts) 8113 t->hits = get_filter_hits(sc, t->idx); 8114 else 8115 t->hits = UINT64_MAX; 8116 t->fs = f->fs; 8117 8118 goto done; 8119 } 8120 } 8121 8122 t->idx = 0xffffffff; 8123done: 8124 end_synchronized_op(sc, LOCK_HELD); 8125 return (0); 8126} 8127 8128static int 8129set_filter(struct adapter *sc, struct t4_filter *t) 8130{ 8131 unsigned int nfilters, nports; 8132 struct filter_entry *f; 8133 int i, rc; 8134 8135 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 8136 if (rc) 8137 return (rc); 8138 8139 nfilters = sc->tids.nftids; 8140 nports = sc->params.nports; 8141 8142 if (nfilters == 0) { 8143 rc = ENOTSUP; 8144 goto done; 8145 } 8146 8147 if (t->idx >= nfilters) { 8148 rc = EINVAL; 8149 goto done; 8150 } 8151 8152 /* Validate against the global filter mode and ingress config */ 8153 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 8154 if (rc != 0) 8155 goto done; 8156 8157 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 8158 rc = EINVAL; 8159 goto done; 8160 } 8161 8162 if (t->fs.val.iport >= nports) { 8163 rc = EINVAL; 8164 goto done; 8165 } 8166 8167 /* Can't specify an iq if not steering to it */ 8168 if (!t->fs.dirsteer && t->fs.iq) { 8169 rc = EINVAL; 8170 goto done; 8171 } 8172 8173 /* IPv6 filter idx must be 4 aligned */ 8174 if (t->fs.type == 1 && 8175 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 8176 rc = EINVAL; 8177 goto done; 8178 } 8179 8180 if (!(sc->flags & FULL_INIT_DONE) && 8181 ((rc = adapter_full_init(sc)) != 0)) 8182 goto done; 8183 8184 if (sc->tids.ftid_tab == NULL) { 8185 KASSERT(sc->tids.ftids_in_use == 0, 8186 ("%s: no memory allocated but filters_in_use > 0", 8187 __func__)); 8188 8189 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 8190 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 8191 if (sc->tids.ftid_tab == NULL) { 8192 rc = ENOMEM; 8193 goto done; 8194 } 8195 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 8196 } 8197 8198 for (i = 0; i < 4; i++) { 8199 f = &sc->tids.ftid_tab[t->idx + i]; 8200 8201 if (f->pending || f->valid) { 8202 rc = EBUSY; 8203 goto done; 8204 } 8205 if (f->locked) { 8206 rc = EPERM; 8207 goto done; 8208 } 8209 8210 if (t->fs.type == 0) 8211 break; 8212 } 8213 8214 f = &sc->tids.ftid_tab[t->idx]; 8215 f->fs = t->fs; 8216 8217 rc = set_filter_wr(sc, t->idx); 8218done: 8219 end_synchronized_op(sc, 0); 8220 8221 if (rc == 0) { 8222 mtx_lock(&sc->tids.ftid_lock); 8223 for (;;) { 8224 if (f->pending == 0) { 8225 rc = f->valid ? 
0 : EIO; 8226 break; 8227 } 8228 8229 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8230 PCATCH, "t4setfw", 0)) { 8231 rc = EINPROGRESS; 8232 break; 8233 } 8234 } 8235 mtx_unlock(&sc->tids.ftid_lock); 8236 } 8237 return (rc); 8238} 8239 8240static int 8241del_filter(struct adapter *sc, struct t4_filter *t) 8242{ 8243 unsigned int nfilters; 8244 struct filter_entry *f; 8245 int rc; 8246 8247 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 8248 if (rc) 8249 return (rc); 8250 8251 nfilters = sc->tids.nftids; 8252 8253 if (nfilters == 0) { 8254 rc = ENOTSUP; 8255 goto done; 8256 } 8257 8258 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 8259 t->idx >= nfilters) { 8260 rc = EINVAL; 8261 goto done; 8262 } 8263 8264 if (!(sc->flags & FULL_INIT_DONE)) { 8265 rc = EAGAIN; 8266 goto done; 8267 } 8268 8269 f = &sc->tids.ftid_tab[t->idx]; 8270 8271 if (f->pending) { 8272 rc = EBUSY; 8273 goto done; 8274 } 8275 if (f->locked) { 8276 rc = EPERM; 8277 goto done; 8278 } 8279 8280 if (f->valid) { 8281 t->fs = f->fs; /* extra info for the caller */ 8282 rc = del_filter_wr(sc, t->idx); 8283 } 8284 8285done: 8286 end_synchronized_op(sc, 0); 8287 8288 if (rc == 0) { 8289 mtx_lock(&sc->tids.ftid_lock); 8290 for (;;) { 8291 if (f->pending == 0) { 8292 rc = f->valid ? EIO : 0; 8293 break; 8294 } 8295 8296 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8297 PCATCH, "t4delfw", 0)) { 8298 rc = EINPROGRESS; 8299 break; 8300 } 8301 } 8302 mtx_unlock(&sc->tids.ftid_lock); 8303 } 8304 8305 return (rc); 8306} 8307 8308static void 8309clear_filter(struct filter_entry *f) 8310{ 8311 if (f->l2t) 8312 t4_l2t_release(f->l2t); 8313 8314 bzero(f, sizeof (*f)); 8315} 8316 8317static int 8318set_filter_wr(struct adapter *sc, int fidx) 8319{ 8320 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8321 struct fw_filter_wr *fwr; 8322 unsigned int ftid, vnic_vld, vnic_vld_mask; 8323 struct wrq_cookie cookie; 8324 8325 ASSERT_SYNCHRONIZED_OP(sc); 8326 8327 if (f->fs.newdmac || f->fs.newvlan) { 8328 /* This filter needs an L2T entry; allocate one. 
*/ 8329 f->l2t = t4_l2t_alloc_switching(sc->l2t); 8330 if (f->l2t == NULL) 8331 return (EAGAIN); 8332 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 8333 f->fs.dmac)) { 8334 t4_l2t_release(f->l2t); 8335 f->l2t = NULL; 8336 return (ENOMEM); 8337 } 8338 } 8339 8340 /* Already validated against fconf, iconf */ 8341 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0); 8342 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0); 8343 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld) 8344 vnic_vld = 1; 8345 else 8346 vnic_vld = 0; 8347 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld) 8348 vnic_vld_mask = 1; 8349 else 8350 vnic_vld_mask = 0; 8351 8352 ftid = sc->tids.ftid_base + fidx; 8353 8354 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8355 if (fwr == NULL) 8356 return (ENOMEM); 8357 bzero(fwr, sizeof(*fwr)); 8358 8359 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 8360 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 8361 fwr->tid_to_iq = 8362 htobe32(V_FW_FILTER_WR_TID(ftid) | 8363 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 8364 V_FW_FILTER_WR_NOREPLY(0) | 8365 V_FW_FILTER_WR_IQ(f->fs.iq)); 8366 fwr->del_filter_to_l2tix = 8367 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 8368 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 8369 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 8370 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 8371 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 8372 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 8373 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 8374 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 8375 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 8376 f->fs.newvlan == VLAN_REWRITE) | 8377 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 8378 f->fs.newvlan == VLAN_REWRITE) | 8379 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 8380 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 8381 V_FW_FILTER_WR_PRIO(f->fs.prio) | 8382 V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); 8383 fwr->ethtype = htobe16(f->fs.val.ethtype); 8384 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 8385 fwr->frag_to_ovlan_vldm = 8386 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 8387 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 8388 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 8389 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | 8390 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 8391 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); 8392 fwr->smac_sel = 0; 8393 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 8394 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 8395 fwr->maci_to_matchtypem = 8396 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 8397 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 8398 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 8399 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 8400 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 8401 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 8402 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 8403 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 8404 fwr->ptcl = f->fs.val.proto; 8405 fwr->ptclm = f->fs.mask.proto; 8406 fwr->ttyp = f->fs.val.tos; 8407 fwr->ttypm = f->fs.mask.tos; 8408 fwr->ivlan = htobe16(f->fs.val.vlan); 8409 fwr->ivlanm = htobe16(f->fs.mask.vlan); 8410 fwr->ovlan = htobe16(f->fs.val.vnic); 8411 fwr->ovlanm = htobe16(f->fs.mask.vnic); 8412 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 8413 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 8414 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 8415 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 8416 fwr->lp = htobe16(f->fs.val.dport); 8417 fwr->lpm = htobe16(f->fs.mask.dport); 8418 fwr->fp = htobe16(f->fs.val.sport); 8419 fwr->fpm = htobe16(f->fs.mask.sport); 8420 if (f->fs.newsmac) 8421 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 8422 8423 f->pending = 1; 8424 sc->tids.ftids_in_use++; 8425 8426 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8427 return (0); 8428} 8429 8430static int 8431del_filter_wr(struct adapter *sc, int fidx) 8432{ 8433 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8434 struct fw_filter_wr *fwr; 8435 unsigned int ftid; 8436 struct wrq_cookie cookie; 8437 8438 ftid = sc->tids.ftid_base + fidx; 8439 8440 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8441 if (fwr == NULL) 8442 return (ENOMEM); 8443 bzero(fwr, sizeof (*fwr)); 8444 8445 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 8446 8447 f->pending = 1; 8448 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8449 return (0); 8450} 8451 8452int 8453t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8454{ 8455 struct adapter *sc = iq->adapter; 8456 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 8457 unsigned int idx = GET_TID(rpl); 8458 unsigned int rc; 8459 struct filter_entry *f; 8460 8461 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 8462 rss->opcode)); 8463 MPASS(iq == &sc->sge.fwq); 8464 MPASS(is_ftid(sc, idx)); 8465 8466 idx -= sc->tids.ftid_base; 8467 f = &sc->tids.ftid_tab[idx]; 8468 rc = G_COOKIE(rpl->cookie); 8469 8470 mtx_lock(&sc->tids.ftid_lock); 8471 if (rc == FW_FILTER_WR_FLT_ADDED) { 8472 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 8473 __func__, idx)); 8474 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 8475 f->pending = 0; /* asynchronous setup completed */ 8476 f->valid = 1; 8477 } else { 8478 if (rc != FW_FILTER_WR_FLT_DELETED) { 8479 /* Add or delete failed, display an error */ 8480 log(LOG_ERR, 8481 "filter %u setup failed with error %u\n", 8482 idx, rc); 8483 } 8484 8485 clear_filter(f); 
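		/* Add failed or delete completed: release the slot that set_filter_wr() accounted for. */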
8486 sc->tids.ftids_in_use--; 8487 } 8488 wakeup(&sc->tids.ftid_tab); 8489 mtx_unlock(&sc->tids.ftid_lock); 8490 8491 return (0); 8492} 8493 8494static int 8495set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8496{ 8497 8498 MPASS(iq->set_tcb_rpl != NULL); 8499 return (iq->set_tcb_rpl(iq, rss, m)); 8500} 8501 8502static int 8503l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8504{ 8505 8506 MPASS(iq->l2t_write_rpl != NULL); 8507 return (iq->l2t_write_rpl(iq, rss, m)); 8508} 8509 8510static int 8511get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 8512{ 8513 int rc; 8514 8515 if (cntxt->cid > M_CTXTQID) 8516 return (EINVAL); 8517 8518 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 8519 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 8520 return (EINVAL); 8521 8522 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 8523 if (rc) 8524 return (rc); 8525 8526 if (sc->flags & FW_OK) { 8527 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 8528 &cntxt->data[0]); 8529 if (rc == 0) 8530 goto done; 8531 } 8532 8533 /* 8534 * Read via firmware failed or wasn't even attempted. Read directly via 8535 * the backdoor. 8536 */ 8537 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 8538done: 8539 end_synchronized_op(sc, 0); 8540 return (rc); 8541} 8542 8543static int 8544load_fw(struct adapter *sc, struct t4_data *fw) 8545{ 8546 int rc; 8547 uint8_t *fw_data; 8548 8549 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 8550 if (rc) 8551 return (rc); 8552 8553 if (sc->flags & FULL_INIT_DONE) { 8554 rc = EBUSY; 8555 goto done; 8556 } 8557 8558 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 8559 if (fw_data == NULL) { 8560 rc = ENOMEM; 8561 goto done; 8562 } 8563 8564 rc = copyin(fw->data, fw_data, fw->len); 8565 if (rc == 0) 8566 rc = -t4_load_fw(sc, fw_data, fw->len); 8567 8568 free(fw_data, M_CXGBE); 8569done: 8570 end_synchronized_op(sc, 0); 8571 return (rc); 8572} 8573 8574static int 8575load_cfg(struct adapter *sc, struct t4_data *cfg) 8576{ 8577 int rc; 8578 uint8_t *cfg_data = NULL; 8579 8580 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); 8581 if (rc) 8582 return (rc); 8583 8584 if (cfg->len == 0) { 8585 /* clear */ 8586 rc = -t4_load_cfg(sc, NULL, 0); 8587 goto done; 8588 } 8589 8590 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); 8591 if (cfg_data == NULL) { 8592 rc = ENOMEM; 8593 goto done; 8594 } 8595 8596 rc = copyin(cfg->data, cfg_data, cfg->len); 8597 if (rc == 0) 8598 rc = -t4_load_cfg(sc, cfg_data, cfg->len); 8599 8600 free(cfg_data, M_CXGBE); 8601done: 8602 end_synchronized_op(sc, 0); 8603 return (rc); 8604} 8605 8606#define MAX_READ_BUF_SIZE (128 * 1024) 8607static int 8608read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 8609{ 8610 uint32_t addr, remaining, n; 8611 uint32_t *buf; 8612 int rc; 8613 uint8_t *dst; 8614 8615 rc = validate_mem_range(sc, mr->addr, mr->len); 8616 if (rc != 0) 8617 return (rc); 8618 8619 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); 8620 addr = mr->addr; 8621 remaining = mr->len; 8622 dst = (void *)mr->data; 8623 8624 while (remaining) { 8625 n = min(remaining, MAX_READ_BUF_SIZE); 8626 read_via_memwin(sc, 2, addr, buf, n); 8627 8628 rc = copyout(buf, dst, n); 8629 if (rc != 0) 8630 break; 8631 8632 dst += n; 8633 remaining -= n; 8634 addr += n; 8635 } 8636 8637 free(buf, M_CXGBE); 8638 return (rc); 8639} 8640#undef MAX_READ_BUF_SIZE 8641 
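/* Services the CHELSIO_T4_GET_I2C ioctl: read from a port module's i2c device via the firmware; the requested length must fit in the request's embedded data buffer. */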
8642static int 8643read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 8644{ 8645 int rc; 8646 8647 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 8648 return (EINVAL); 8649 8650 if (i2cd->len > sizeof(i2cd->data)) 8651 return (EFBIG); 8652 8653 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 8654 if (rc) 8655 return (rc); 8656 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 8657 i2cd->offset, i2cd->len, &i2cd->data[0]); 8658 end_synchronized_op(sc, 0); 8659 8660 return (rc); 8661} 8662 8663static int 8664in_range(int val, int lo, int hi) 8665{ 8666 8667 return (val < 0 || (val <= hi && val >= lo)); 8668} 8669 8670static int 8671set_sched_class_config(struct adapter *sc, int minmax) 8672{ 8673 int rc; 8674 8675 if (minmax < 0) 8676 return (EINVAL); 8677 8678 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc"); 8679 if (rc) 8680 return (rc); 8681 rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1); 8682 end_synchronized_op(sc, 0); 8683 8684 return (rc); 8685} 8686 8687static int 8688set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p, 8689 int sleep_ok) 8690{ 8691 int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode; 8692 struct port_info *pi; 8693 struct tx_sched_class *tc; 8694 8695 if (p->level == SCHED_CLASS_LEVEL_CL_RL) 8696 fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL; 8697 else if (p->level == SCHED_CLASS_LEVEL_CL_WRR) 8698 fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR; 8699 else if (p->level == SCHED_CLASS_LEVEL_CH_RL) 8700 fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL; 8701 else 8702 return (EINVAL); 8703 8704 if (p->mode == SCHED_CLASS_MODE_CLASS) 8705 fw_mode = FW_SCHED_PARAMS_MODE_CLASS; 8706 else if (p->mode == SCHED_CLASS_MODE_FLOW) 8707 fw_mode = FW_SCHED_PARAMS_MODE_FLOW; 8708 else 8709 return (EINVAL); 8710 8711 if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS) 8712 fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE; 8713 else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS) 8714 fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE; 8715 else 8716 return (EINVAL); 8717 8718 if (p->ratemode == SCHED_CLASS_RATEMODE_REL) 8719 fw_ratemode = FW_SCHED_PARAMS_RATE_REL; 8720 else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS) 8721 fw_ratemode = FW_SCHED_PARAMS_RATE_ABS; 8722 else 8723 return (EINVAL); 8724 8725 /* Vet our parameters ... */ 8726 if (!in_range(p->channel, 0, sc->chip_params->nchan - 1)) 8727 return (ERANGE); 8728 8729 pi = sc->port[sc->chan_map[p->channel]]; 8730 if (pi == NULL) 8731 return (ENXIO); 8732 MPASS(pi->tx_chan == p->channel); 8733 top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */ 8734 8735 if (!in_range(p->cl, 0, sc->chip_params->nsched_cls) || 8736 !in_range(p->minrate, 0, top_speed) || 8737 !in_range(p->maxrate, 0, top_speed) || 8738 !in_range(p->weight, 0, 100)) 8739 return (ERANGE); 8740 8741 /* 8742 * Translate any unset parameters into the firmware's 8743 * nomenclature and/or fail the call if the parameters 8744 * are required ... 
8745 */ 8746 if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0) 8747 return (EINVAL); 8748 8749 if (p->minrate < 0) 8750 p->minrate = 0; 8751 if (p->maxrate < 0) { 8752 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 8753 p->level == SCHED_CLASS_LEVEL_CH_RL) 8754 return (EINVAL); 8755 else 8756 p->maxrate = 0; 8757 } 8758 if (p->weight < 0) { 8759 if (p->level == SCHED_CLASS_LEVEL_CL_WRR) 8760 return (EINVAL); 8761 else 8762 p->weight = 0; 8763 } 8764 if (p->pktsize < 0) { 8765 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 8766 p->level == SCHED_CLASS_LEVEL_CH_RL) 8767 return (EINVAL); 8768 else 8769 p->pktsize = 0; 8770 } 8771 8772 rc = begin_synchronized_op(sc, NULL, 8773 sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp"); 8774 if (rc) 8775 return (rc); 8776 tc = &pi->tc[p->cl]; 8777 tc->params = *p; 8778 rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode, 8779 fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate, 8780 p->weight, p->pktsize, sleep_ok); 8781 if (rc == 0) 8782 tc->flags |= TX_SC_OK; 8783 else { 8784 /* 8785 * Unknown state at this point, see tc->params for what was 8786 * attempted. 8787 */ 8788 tc->flags &= ~TX_SC_OK; 8789 } 8790 end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD); 8791 8792 return (rc); 8793} 8794 8795int 8796t4_set_sched_class(struct adapter *sc, struct t4_sched_params *p) 8797{ 8798 8799 if (p->type != SCHED_CLASS_TYPE_PACKET) 8800 return (EINVAL); 8801 8802 if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG) 8803 return (set_sched_class_config(sc, p->u.config.minmax)); 8804 8805 if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS) 8806 return (set_sched_class_params(sc, &p->u.params, 1)); 8807 8808 return (EINVAL); 8809} 8810 8811int 8812t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p) 8813{ 8814 struct port_info *pi = NULL; 8815 struct vi_info *vi; 8816 struct sge_txq *txq; 8817 uint32_t fw_mnem, fw_queue, fw_class; 8818 int i, rc; 8819 8820 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq"); 8821 if (rc) 8822 return (rc); 8823 8824 if (p->port >= sc->params.nports) { 8825 rc = EINVAL; 8826 goto done; 8827 } 8828 8829 /* XXX: Only supported for the main VI. */ 8830 pi = sc->port[p->port]; 8831 vi = &pi->vi[0]; 8832 if (!(vi->flags & VI_INIT_DONE)) { 8833 /* tx queues not set up yet */ 8834 rc = EAGAIN; 8835 goto done; 8836 } 8837 8838 if (!in_range(p->queue, 0, vi->ntxq - 1) || 8839 !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) { 8840 rc = EINVAL; 8841 goto done; 8842 } 8843 8844 /* 8845 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX 8846 * Scheduling Class in this case). 8847 */ 8848 fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 8849 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH)); 8850 fw_class = p->cl < 0 ? 0xffffffff : p->cl; 8851 8852 /* 8853 * If op.queue is non-negative, then we're only changing the scheduling 8854 * on a single specified TX queue. 8855 */ 8856 if (p->queue >= 0) { 8857 txq = &sc->sge.txq[vi->first_txq + p->queue]; 8858 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 8859 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 8860 &fw_class); 8861 goto done; 8862 } 8863 8864 /* 8865 * Change the scheduling on all the TX queues for the 8866 * interface. 
8867 */ 8868 for_each_txq(vi, i, txq) { 8869 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 8870 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 8871 &fw_class); 8872 if (rc) 8873 goto done; 8874 } 8875 8876 rc = 0; 8877done: 8878 end_synchronized_op(sc, 0); 8879 return (rc); 8880} 8881 8882int 8883t4_os_find_pci_capability(struct adapter *sc, int cap) 8884{ 8885 int i; 8886 8887 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0); 8888} 8889 8890int 8891t4_os_pci_save_state(struct adapter *sc) 8892{ 8893 device_t dev; 8894 struct pci_devinfo *dinfo; 8895 8896 dev = sc->dev; 8897 dinfo = device_get_ivars(dev); 8898 8899 pci_cfg_save(dev, dinfo, 0); 8900 return (0); 8901} 8902 8903int 8904t4_os_pci_restore_state(struct adapter *sc) 8905{ 8906 device_t dev; 8907 struct pci_devinfo *dinfo; 8908 8909 dev = sc->dev; 8910 dinfo = device_get_ivars(dev); 8911 8912 pci_cfg_restore(dev, dinfo); 8913 return (0); 8914} 8915 8916void 8917t4_os_portmod_changed(const struct adapter *sc, int idx) 8918{ 8919 struct port_info *pi = sc->port[idx]; 8920 struct vi_info *vi; 8921 struct ifnet *ifp; 8922 int v; 8923 static const char *mod_str[] = { 8924 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 8925 }; 8926 8927 for_each_vi(pi, v, vi) { 8928 build_medialist(pi, &vi->media); 8929 } 8930 8931 ifp = pi->vi[0].ifp; 8932 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 8933 if_printf(ifp, "transceiver unplugged.\n"); 8934 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 8935 if_printf(ifp, "unknown transceiver inserted.\n"); 8936 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 8937 if_printf(ifp, "unsupported transceiver inserted.\n"); 8938 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 8939 if_printf(ifp, "%s transceiver inserted.\n", 8940 mod_str[pi->mod_type]); 8941 } else { 8942 if_printf(ifp, "transceiver (type %d) inserted.\n", 8943 pi->mod_type); 8944 } 8945} 8946 8947void 8948t4_os_link_changed(struct adapter *sc, int idx, int link_stat) 8949{ 8950 struct port_info *pi = sc->port[idx]; 8951 struct vi_info *vi; 8952 struct ifnet *ifp; 8953 int v; 8954 8955 for_each_vi(pi, v, vi) { 8956 ifp = vi->ifp; 8957 if (ifp == NULL) 8958 continue; 8959 8960 if (link_stat) { 8961 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed); 8962 if_link_state_change(ifp, LINK_STATE_UP); 8963 } else { 8964 if_link_state_change(ifp, LINK_STATE_DOWN); 8965 } 8966 } 8967} 8968 8969void 8970t4_iterate(void (*func)(struct adapter *, void *), void *arg) 8971{ 8972 struct adapter *sc; 8973 8974 sx_slock(&t4_list_lock); 8975 SLIST_FOREACH(sc, &t4_list, link) { 8976 /* 8977 * func should not make any assumptions about what state sc is 8978 * in - the only guarantee is that sc->sc_lock is a valid lock. 
8979 */ 8980 func(sc, arg); 8981 } 8982 sx_sunlock(&t4_list_lock); 8983} 8984 8985static int 8986t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 8987 struct thread *td) 8988{ 8989 int rc; 8990 struct adapter *sc = dev->si_drv1; 8991 8992 rc = priv_check(td, PRIV_DRIVER); 8993 if (rc != 0) 8994 return (rc); 8995 8996 switch (cmd) { 8997 case CHELSIO_T4_GETREG: { 8998 struct t4_reg *edata = (struct t4_reg *)data; 8999 9000 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9001 return (EFAULT); 9002 9003 if (edata->size == 4) 9004 edata->val = t4_read_reg(sc, edata->addr); 9005 else if (edata->size == 8) 9006 edata->val = t4_read_reg64(sc, edata->addr); 9007 else 9008 return (EINVAL); 9009 9010 break; 9011 } 9012 case CHELSIO_T4_SETREG: { 9013 struct t4_reg *edata = (struct t4_reg *)data; 9014 9015 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9016 return (EFAULT); 9017 9018 if (edata->size == 4) { 9019 if (edata->val & 0xffffffff00000000) 9020 return (EINVAL); 9021 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 9022 } else if (edata->size == 8) 9023 t4_write_reg64(sc, edata->addr, edata->val); 9024 else 9025 return (EINVAL); 9026 break; 9027 } 9028 case CHELSIO_T4_REGDUMP: { 9029 struct t4_regdump *regs = (struct t4_regdump *)data; 9030 int reglen = t4_get_regs_len(sc); 9031 uint8_t *buf; 9032 9033 if (regs->len < reglen) { 9034 regs->len = reglen; /* hint to the caller */ 9035 return (ENOBUFS); 9036 } 9037 9038 regs->len = reglen; 9039 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 9040 get_regs(sc, regs, buf); 9041 rc = copyout(buf, regs->data, reglen); 9042 free(buf, M_CXGBE); 9043 break; 9044 } 9045 case CHELSIO_T4_GET_FILTER_MODE: 9046 rc = get_filter_mode(sc, (uint32_t *)data); 9047 break; 9048 case CHELSIO_T4_SET_FILTER_MODE: 9049 rc = set_filter_mode(sc, *(uint32_t *)data); 9050 break; 9051 case CHELSIO_T4_GET_FILTER: 9052 rc = get_filter(sc, (struct t4_filter *)data); 9053 break; 9054 case CHELSIO_T4_SET_FILTER: 9055 rc = set_filter(sc, (struct t4_filter *)data); 9056 break; 9057 case CHELSIO_T4_DEL_FILTER: 9058 rc = del_filter(sc, (struct t4_filter *)data); 9059 break; 9060 case CHELSIO_T4_GET_SGE_CONTEXT: 9061 rc = get_sge_context(sc, (struct t4_sge_context *)data); 9062 break; 9063 case CHELSIO_T4_LOAD_FW: 9064 rc = load_fw(sc, (struct t4_data *)data); 9065 break; 9066 case CHELSIO_T4_GET_MEM: 9067 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 9068 break; 9069 case CHELSIO_T4_GET_I2C: 9070 rc = read_i2c(sc, (struct t4_i2c_data *)data); 9071 break; 9072 case CHELSIO_T4_CLEAR_STATS: { 9073 int i, v; 9074 u_int port_id = *(uint32_t *)data; 9075 struct port_info *pi; 9076 struct vi_info *vi; 9077 9078 if (port_id >= sc->params.nports) 9079 return (EINVAL); 9080 pi = sc->port[port_id]; 9081 if (pi == NULL) 9082 return (EIO); 9083 9084 /* MAC stats */ 9085 t4_clr_port_stats(sc, pi->tx_chan); 9086 pi->tx_parse_error = 0; 9087 mtx_lock(&sc->reg_lock); 9088 for_each_vi(pi, v, vi) { 9089 if (vi->flags & VI_INIT_DONE) 9090 t4_clr_vi_stats(sc, vi->viid); 9091 } 9092 mtx_unlock(&sc->reg_lock); 9093 9094 /* 9095 * Since this command accepts a port, clear stats for 9096 * all VIs on this port. 
9097 */ 9098 for_each_vi(pi, v, vi) { 9099 if (vi->flags & VI_INIT_DONE) { 9100 struct sge_rxq *rxq; 9101 struct sge_txq *txq; 9102 struct sge_wrq *wrq; 9103 9104 for_each_rxq(vi, i, rxq) { 9105#if defined(INET) || defined(INET6) 9106 rxq->lro.lro_queued = 0; 9107 rxq->lro.lro_flushed = 0; 9108#endif 9109 rxq->rxcsum = 0; 9110 rxq->vlan_extraction = 0; 9111 } 9112 9113 for_each_txq(vi, i, txq) { 9114 txq->txcsum = 0; 9115 txq->tso_wrs = 0; 9116 txq->vlan_insertion = 0; 9117 txq->imm_wrs = 0; 9118 txq->sgl_wrs = 0; 9119 txq->txpkt_wrs = 0; 9120 txq->txpkts0_wrs = 0; 9121 txq->txpkts1_wrs = 0; 9122 txq->txpkts0_pkts = 0; 9123 txq->txpkts1_pkts = 0; 9124 mp_ring_reset_stats(txq->r); 9125 } 9126 9127#ifdef TCP_OFFLOAD 9128 /* nothing to clear for each ofld_rxq */ 9129 9130 for_each_ofld_txq(vi, i, wrq) { 9131 wrq->tx_wrs_direct = 0; 9132 wrq->tx_wrs_copied = 0; 9133 } 9134#endif 9135 9136 if (IS_MAIN_VI(vi)) { 9137 wrq = &sc->sge.ctrlq[pi->port_id]; 9138 wrq->tx_wrs_direct = 0; 9139 wrq->tx_wrs_copied = 0; 9140 } 9141 } 9142 } 9143 break; 9144 } 9145 case CHELSIO_T4_SCHED_CLASS: 9146 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); 9147 break; 9148 case CHELSIO_T4_SCHED_QUEUE: 9149 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); 9150 break; 9151 case CHELSIO_T4_GET_TRACER: 9152 rc = t4_get_tracer(sc, (struct t4_tracer *)data); 9153 break; 9154 case CHELSIO_T4_SET_TRACER: 9155 rc = t4_set_tracer(sc, (struct t4_tracer *)data); 9156 break; 9157 case CHELSIO_T4_LOAD_CFG: 9158 rc = load_cfg(sc, (struct t4_data *)data); 9159 break; 9160 default: 9161 rc = ENOTTY; 9162 } 9163 9164 return (rc); 9165} 9166 9167void 9168t4_db_full(struct adapter *sc) 9169{ 9170 9171 CXGBE_UNIMPLEMENTED(__func__); 9172} 9173 9174void 9175t4_db_dropped(struct adapter *sc) 9176{ 9177 9178 CXGBE_UNIMPLEMENTED(__func__); 9179} 9180 9181#ifdef TCP_OFFLOAD 9182void 9183t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order) 9184{ 9185 9186 t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask); 9187 t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) | 9188 V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) | 9189 V_HPZ3(pgsz_order[3])); 9190} 9191 9192static int 9193toe_capability(struct vi_info *vi, int enable) 9194{ 9195 int rc; 9196 struct port_info *pi = vi->pi; 9197 struct adapter *sc = pi->adapter; 9198 9199 ASSERT_SYNCHRONIZED_OP(sc); 9200 9201 if (!is_offload(sc)) 9202 return (ENODEV); 9203 9204 if (enable) { 9205 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { 9206 /* TOE is already enabled. */ 9207 return (0); 9208 } 9209 9210 /* 9211 * We need the port's queues around so that we're able to send 9212 * and receive CPLs to/from the TOE even if the ifnet for this 9213 * port has never been UP'd administratively. 9214 */ 9215 if (!(vi->flags & VI_INIT_DONE)) { 9216 rc = vi_full_init(vi); 9217 if (rc) 9218 return (rc); 9219 } 9220 if (!(pi->vi[0].flags & VI_INIT_DONE)) { 9221 rc = vi_full_init(&pi->vi[0]); 9222 if (rc) 9223 return (rc); 9224 } 9225 9226 if (isset(&sc->offload_map, pi->port_id)) { 9227 /* TOE is enabled on another VI of this port. 
*/ 9228 pi->uld_vis++; 9229 return (0); 9230 } 9231 9232 if (!uld_active(sc, ULD_TOM)) { 9233 rc = t4_activate_uld(sc, ULD_TOM); 9234 if (rc == EAGAIN) { 9235 log(LOG_WARNING, 9236 "You must kldload t4_tom.ko before trying " 9237 "to enable TOE on a cxgbe interface.\n"); 9238 } 9239 if (rc != 0) 9240 return (rc); 9241 KASSERT(sc->tom_softc != NULL, 9242 ("%s: TOM activated but softc NULL", __func__)); 9243 KASSERT(uld_active(sc, ULD_TOM), 9244 ("%s: TOM activated but flag not set", __func__)); 9245 } 9246 9247 /* Activate iWARP and iSCSI too, if the modules are loaded. */ 9248 if (!uld_active(sc, ULD_IWARP)) 9249 (void) t4_activate_uld(sc, ULD_IWARP); 9250 if (!uld_active(sc, ULD_ISCSI)) 9251 (void) t4_activate_uld(sc, ULD_ISCSI); 9252 9253 pi->uld_vis++; 9254 setbit(&sc->offload_map, pi->port_id); 9255 } else { 9256 pi->uld_vis--; 9257 9258 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0) 9259 return (0); 9260 9261 KASSERT(uld_active(sc, ULD_TOM), 9262 ("%s: TOM never initialized?", __func__)); 9263 clrbit(&sc->offload_map, pi->port_id); 9264 } 9265 9266 return (0); 9267} 9268 9269/* 9270 * Add an upper layer driver to the global list. 9271 */ 9272int 9273t4_register_uld(struct uld_info *ui) 9274{ 9275 int rc = 0; 9276 struct uld_info *u; 9277 9278 sx_xlock(&t4_uld_list_lock); 9279 SLIST_FOREACH(u, &t4_uld_list, link) { 9280 if (u->uld_id == ui->uld_id) { 9281 rc = EEXIST; 9282 goto done; 9283 } 9284 } 9285 9286 SLIST_INSERT_HEAD(&t4_uld_list, ui, link); 9287 ui->refcount = 0; 9288done: 9289 sx_xunlock(&t4_uld_list_lock); 9290 return (rc); 9291} 9292 9293int 9294t4_unregister_uld(struct uld_info *ui) 9295{ 9296 int rc = EINVAL; 9297 struct uld_info *u; 9298 9299 sx_xlock(&t4_uld_list_lock); 9300 9301 SLIST_FOREACH(u, &t4_uld_list, link) { 9302 if (u == ui) { 9303 if (ui->refcount > 0) { 9304 rc = EBUSY; 9305 goto done; 9306 } 9307 9308 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); 9309 rc = 0; 9310 goto done; 9311 } 9312 } 9313done: 9314 sx_xunlock(&t4_uld_list_lock); 9315 return (rc); 9316} 9317 9318int 9319t4_activate_uld(struct adapter *sc, int id) 9320{ 9321 int rc; 9322 struct uld_info *ui; 9323 9324 ASSERT_SYNCHRONIZED_OP(sc); 9325 9326 if (id < 0 || id > ULD_MAX) 9327 return (EINVAL); 9328 rc = EAGAIN; /* kldload the module with this ULD and try again.
*/ 9329 9330 sx_slock(&t4_uld_list_lock); 9331 9332 SLIST_FOREACH(ui, &t4_uld_list, link) { 9333 if (ui->uld_id == id) { 9334 if (!(sc->flags & FULL_INIT_DONE)) { 9335 rc = adapter_full_init(sc); 9336 if (rc != 0) 9337 break; 9338 } 9339 9340 rc = ui->activate(sc); 9341 if (rc == 0) { 9342 setbit(&sc->active_ulds, id); 9343 ui->refcount++; 9344 } 9345 break; 9346 } 9347 } 9348 9349 sx_sunlock(&t4_uld_list_lock); 9350 9351 return (rc); 9352} 9353 9354int 9355t4_deactivate_uld(struct adapter *sc, int id) 9356{ 9357 int rc; 9358 struct uld_info *ui; 9359 9360 ASSERT_SYNCHRONIZED_OP(sc); 9361 9362 if (id < 0 || id > ULD_MAX) 9363 return (EINVAL); 9364 rc = ENXIO; 9365 9366 sx_slock(&t4_uld_list_lock); 9367 9368 SLIST_FOREACH(ui, &t4_uld_list, link) { 9369 if (ui->uld_id == id) { 9370 rc = ui->deactivate(sc); 9371 if (rc == 0) { 9372 clrbit(&sc->active_ulds, id); 9373 ui->refcount--; 9374 } 9375 break; 9376 } 9377 } 9378 9379 sx_sunlock(&t4_uld_list_lock); 9380 9381 return (rc); 9382} 9383 9384int 9385uld_active(struct adapter *sc, int uld_id) 9386{ 9387 9388 MPASS(uld_id >= 0 && uld_id <= ULD_MAX); 9389 9390 return (isset(&sc->active_ulds, uld_id)); 9391} 9392#endif 9393 9394/* 9395 * Come up with reasonable defaults for some of the tunables, provided they're 9396 * not set by the user (in which case we'll use the values as is). 9397 */ 9398static void 9399tweak_tunables(void) 9400{ 9401 int nc = mp_ncpus; /* our snapshot of the number of CPUs */ 9402 9403 if (t4_ntxq10g < 1) { 9404#ifdef RSS 9405 t4_ntxq10g = rss_getnumbuckets(); 9406#else 9407 t4_ntxq10g = min(nc, NTXQ_10G); 9408#endif 9409 } 9410 9411 if (t4_ntxq1g < 1) { 9412#ifdef RSS 9413 /* XXX: way too many for 1GbE? */ 9414 t4_ntxq1g = rss_getnumbuckets(); 9415#else 9416 t4_ntxq1g = min(nc, NTXQ_1G); 9417#endif 9418 } 9419 9420 if (t4_ntxq_vi < 1) 9421 t4_ntxq_vi = min(nc, NTXQ_VI); 9422 9423 if (t4_nrxq10g < 1) { 9424#ifdef RSS 9425 t4_nrxq10g = rss_getnumbuckets(); 9426#else 9427 t4_nrxq10g = min(nc, NRXQ_10G); 9428#endif 9429 } 9430 9431 if (t4_nrxq1g < 1) { 9432#ifdef RSS 9433 /* XXX: way too many for 1GbE? 
*/ 9434 t4_nrxq1g = rss_getnumbuckets(); 9435#else 9436 t4_nrxq1g = min(nc, NRXQ_1G); 9437#endif 9438 } 9439 9440 if (t4_nrxq_vi < 1) 9441 t4_nrxq_vi = min(nc, NRXQ_VI); 9442 9443#ifdef TCP_OFFLOAD 9444 if (t4_nofldtxq10g < 1) 9445 t4_nofldtxq10g = min(nc, NOFLDTXQ_10G); 9446 9447 if (t4_nofldtxq1g < 1) 9448 t4_nofldtxq1g = min(nc, NOFLDTXQ_1G); 9449 9450 if (t4_nofldtxq_vi < 1) 9451 t4_nofldtxq_vi = min(nc, NOFLDTXQ_VI); 9452 9453 if (t4_nofldrxq10g < 1) 9454 t4_nofldrxq10g = min(nc, NOFLDRXQ_10G); 9455 9456 if (t4_nofldrxq1g < 1) 9457 t4_nofldrxq1g = min(nc, NOFLDRXQ_1G); 9458 9459 if (t4_nofldrxq_vi < 1) 9460 t4_nofldrxq_vi = min(nc, NOFLDRXQ_VI); 9461 9462 if (t4_toecaps_allowed == -1) 9463 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; 9464 9465 if (t4_rdmacaps_allowed == -1) { 9466 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | 9467 FW_CAPS_CONFIG_RDMA_RDMAC; 9468 } 9469 9470 if (t4_iscsicaps_allowed == -1) { 9471 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | 9472 FW_CAPS_CONFIG_ISCSI_TARGET_PDU | 9473 FW_CAPS_CONFIG_ISCSI_T10DIF; 9474 } 9475#else 9476 if (t4_toecaps_allowed == -1) 9477 t4_toecaps_allowed = 0; 9478 9479 if (t4_rdmacaps_allowed == -1) 9480 t4_rdmacaps_allowed = 0; 9481 9482 if (t4_iscsicaps_allowed == -1) 9483 t4_iscsicaps_allowed = 0; 9484#endif 9485 9486#ifdef DEV_NETMAP 9487 if (t4_nnmtxq_vi < 1) 9488 t4_nnmtxq_vi = min(nc, NNMTXQ_VI); 9489 9490 if (t4_nnmrxq_vi < 1) 9491 t4_nnmrxq_vi = min(nc, NNMRXQ_VI); 9492#endif 9493 9494 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS) 9495 t4_tmr_idx_10g = TMR_IDX_10G; 9496 9497 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS) 9498 t4_pktc_idx_10g = PKTC_IDX_10G; 9499 9500 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS) 9501 t4_tmr_idx_1g = TMR_IDX_1G; 9502 9503 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS) 9504 t4_pktc_idx_1g = PKTC_IDX_1G; 9505 9506 if (t4_qsize_txq < 128) 9507 t4_qsize_txq = 128; 9508 9509 if (t4_qsize_rxq < 128) 9510 t4_qsize_rxq = 128; 9511 while (t4_qsize_rxq & 7) 9512 t4_qsize_rxq++; 9513 9514 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; 9515} 9516 9517#ifdef DDB 9518static void 9519t4_dump_tcb(struct adapter *sc, int tid) 9520{ 9521 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos; 9522 9523 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); 9524 save = t4_read_reg(sc, reg); 9525 base = sc->memwin[2].mw_base; 9526 9527 /* Dump TCB for the tid */ 9528 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 9529 tcb_addr += tid * TCB_SIZE; 9530 9531 if (is_t4(sc)) { 9532 pf = 0; 9533 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */ 9534 } else { 9535 pf = V_PFNUM(sc->pf); 9536 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */ 9537 } 9538 t4_write_reg(sc, reg, win_pos | pf); 9539 t4_read_reg(sc, reg); 9540 9541 off = tcb_addr - win_pos; 9542 for (i = 0; i < 4; i++) { 9543 uint32_t buf[8]; 9544 for (j = 0; j < 8; j++, off += 4) 9545 buf[j] = htonl(t4_read_reg(sc, base + off)); 9546 9547 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", 9548 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 9549 buf[7]); 9550 } 9551 9552 t4_write_reg(sc, reg, save); 9553 t4_read_reg(sc, reg); 9554} 9555 9556static void 9557t4_dump_devlog(struct adapter *sc) 9558{ 9559 struct devlog_params *dparams = &sc->params.devlog; 9560 struct fw_devlog_e e; 9561 int i, first, j, m, nentries, rc; 9562 uint64_t ftstamp = UINT64_MAX; 9563 9564 if (dparams->start == 0) { 9565 db_printf("devlog params not valid\n"); 9566 return; 9567 } 9568 
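	/* The devlog is a circular buffer in adapter memory; locate the oldest entry (smallest timestamp) and print forward from there, wrapping at the end. */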
9569 nentries = dparams->size / sizeof(struct fw_devlog_e); 9570 m = fwmtype_to_hwmtype(dparams->memtype); 9571 9572 /* Find the first entry. */ 9573 first = -1; 9574 for (i = 0; i < nentries && !db_pager_quit; i++) { 9575 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9576 sizeof(e), (void *)&e); 9577 if (rc != 0) 9578 break; 9579 9580 if (e.timestamp == 0) 9581 break; 9582 9583 e.timestamp = be64toh(e.timestamp); 9584 if (e.timestamp < ftstamp) { 9585 ftstamp = e.timestamp; 9586 first = i; 9587 } 9588 } 9589 9590 if (first == -1) 9591 return; 9592 9593 i = first; 9594 do { 9595 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9596 sizeof(e), (void *)&e); 9597 if (rc != 0) 9598 return; 9599 9600 if (e.timestamp == 0) 9601 return; 9602 9603 e.timestamp = be64toh(e.timestamp); 9604 e.seqno = be32toh(e.seqno); 9605 for (j = 0; j < 8; j++) 9606 e.params[j] = be32toh(e.params[j]); 9607 9608 db_printf("%10d %15ju %8s %8s ", 9609 e.seqno, e.timestamp, 9610 (e.level < nitems(devlog_level_strings) ? 9611 devlog_level_strings[e.level] : "UNKNOWN"), 9612 (e.facility < nitems(devlog_facility_strings) ? 9613 devlog_facility_strings[e.facility] : "UNKNOWN")); 9614 db_printf(e.fmt, e.params[0], e.params[1], e.params[2], 9615 e.params[3], e.params[4], e.params[5], e.params[6], 9616 e.params[7]); 9617 9618 if (++i == nentries) 9619 i = 0; 9620 } while (i != first && !db_pager_quit); 9621} 9622 9623static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table); 9624_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table); 9625 9626DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL) 9627{ 9628 device_t dev; 9629 int t; 9630 bool valid; 9631 9632 valid = false; 9633 t = db_read_token(); 9634 if (t == tIDENT) { 9635 dev = device_lookup_by_name(db_tok_string); 9636 valid = true; 9637 } 9638 db_skip_to_eol(); 9639 if (!valid) { 9640 db_printf("usage: show t4 devlog <nexus>\n"); 9641 return; 9642 } 9643 9644 if (dev == NULL) { 9645 db_printf("device not found\n"); 9646 return; 9647 } 9648 9649 t4_dump_devlog(device_get_softc(dev)); 9650} 9651 9652DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL) 9653{ 9654 device_t dev; 9655 int radix, tid, t; 9656 bool valid; 9657 9658 valid = false; 9659 radix = db_radix; 9660 db_radix = 10; 9661 t = db_read_token(); 9662 if (t == tIDENT) { 9663 dev = device_lookup_by_name(db_tok_string); 9664 t = db_read_token(); 9665 if (t == tNUMBER) { 9666 tid = db_tok_number; 9667 valid = true; 9668 } 9669 } 9670 db_radix = radix; 9671 db_skip_to_eol(); 9672 if (!valid) { 9673 db_printf("usage: show t4 tcb <nexus> <tid>\n"); 9674 return; 9675 } 9676 9677 if (dev == NULL) { 9678 db_printf("device not found\n"); 9679 return; 9680 } 9681 if (tid < 0) { 9682 db_printf("invalid tid\n"); 9683 return; 9684 } 9685 9686 t4_dump_tcb(device_get_softc(dev), tid); 9687} 9688#endif 9689 9690static struct sx mlu; /* mod load unload */ 9691SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); 9692 9693static int 9694mod_event(module_t mod, int cmd, void *arg) 9695{ 9696 int rc = 0; 9697 static int loaded = 0; 9698 9699 switch (cmd) { 9700 case MOD_LOAD: 9701 sx_xlock(&mlu); 9702 if (loaded++ == 0) { 9703 t4_sge_modload(); 9704 t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl); 9705 t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl); 9706 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); 9707 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); 9708 sx_init(&t4_list_lock, "T4/T5 adapters"); 9709 SLIST_INIT(&t4_list); 9710#ifdef TCP_OFFLOAD 
9711 sx_init(&t4_uld_list_lock, "T4/T5 ULDs"); 9712 SLIST_INIT(&t4_uld_list); 9713#endif 9714 t4_tracer_modload(); 9715 tweak_tunables(); 9716 } 9717 sx_xunlock(&mlu); 9718 break; 9719 9720 case MOD_UNLOAD: 9721 sx_xlock(&mlu); 9722 if (--loaded == 0) { 9723 int tries; 9724 9725 sx_slock(&t4_list_lock); 9726 if (!SLIST_EMPTY(&t4_list)) { 9727 rc = EBUSY; 9728 sx_sunlock(&t4_list_lock); 9729 goto done_unload; 9730 } 9731#ifdef TCP_OFFLOAD 9732 sx_slock(&t4_uld_list_lock); 9733 if (!SLIST_EMPTY(&t4_uld_list)) { 9734 rc = EBUSY; 9735 sx_sunlock(&t4_uld_list_lock); 9736 sx_sunlock(&t4_list_lock); 9737 goto done_unload; 9738 } 9739#endif 9740 tries = 0; 9741 while (tries++ < 5 && t4_sge_extfree_refs() != 0) { 9742 uprintf("%ju clusters with custom free routine " 9743 "still in use.\n", t4_sge_extfree_refs()); 9744 pause("t4unload", 2 * hz); 9745 } 9746#ifdef TCP_OFFLOAD 9747 sx_sunlock(&t4_uld_list_lock); 9748#endif 9749 sx_sunlock(&t4_list_lock); 9750 9751 if (t4_sge_extfree_refs() == 0) { 9752 t4_tracer_modunload(); 9753#ifdef TCP_OFFLOAD 9754 sx_destroy(&t4_uld_list_lock); 9755#endif 9756 sx_destroy(&t4_list_lock); 9757 t4_sge_modunload(); 9758 loaded = 0; 9759 } else { 9760 rc = EBUSY; 9761 loaded++; /* undo earlier decrement */ 9762 } 9763 } 9764done_unload: 9765 sx_xunlock(&mlu); 9766 break; 9767 } 9768 9769 return (rc); 9770} 9771 9772static devclass_t t4_devclass, t5_devclass, t6_devclass; 9773static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass; 9774static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass; 9775 9776DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0); 9777MODULE_VERSION(t4nex, 1); 9778MODULE_DEPEND(t4nex, firmware, 1, 1, 1); 9779 9780DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0); 9781MODULE_VERSION(t5nex, 1); 9782MODULE_DEPEND(t5nex, firmware, 1, 1, 1); 9783 9784DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0); 9785MODULE_VERSION(t6nex, 1); 9786MODULE_DEPEND(t6nex, firmware, 1, 1, 1); 9787#ifdef DEV_NETMAP 9788MODULE_DEPEND(t6nex, netmap, 1, 1, 1); 9789#endif /* DEV_NETMAP */ 9790 9791DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 9792MODULE_VERSION(cxgbe, 1); 9793 9794DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0); 9795MODULE_VERSION(cxl, 1); 9796 9797DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0); 9798MODULE_VERSION(cc, 1); 9799 9800DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0); 9801MODULE_VERSION(vcxgbe, 1); 9802 9803DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0); 9804MODULE_VERSION(vcxl, 1); 9805 9806DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0); 9807MODULE_VERSION(vcc, 1); 9808