/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_main.c 318809 2017-05-24 20:29:20Z np $");

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe,		t4_probe),
	DEVMETHOD(device_attach,	t4_attach),
	DEVMETHOD(device_detach,	t4_detach),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};


/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe,		cxgbe_probe),
	DEVMETHOD(device_attach,	cxgbe_attach),
	DEVMETHOD(device_detach,	cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
"cxgbe", 111 cxgbe_methods, 112 sizeof(struct port_info) 113}; 114 115/* T4 VI (vcxgbe) interface */ 116static int vcxgbe_probe(device_t); 117static int vcxgbe_attach(device_t); 118static int vcxgbe_detach(device_t); 119static device_method_t vcxgbe_methods[] = { 120 DEVMETHOD(device_probe, vcxgbe_probe), 121 DEVMETHOD(device_attach, vcxgbe_attach), 122 DEVMETHOD(device_detach, vcxgbe_detach), 123 { 0, 0 } 124}; 125static driver_t vcxgbe_driver = { 126 "vcxgbe", 127 vcxgbe_methods, 128 sizeof(struct vi_info) 129}; 130 131static d_ioctl_t t4_ioctl; 132 133static struct cdevsw t4_cdevsw = { 134 .d_version = D_VERSION, 135 .d_ioctl = t4_ioctl, 136 .d_name = "t4nex", 137}; 138 139/* T5 bus driver interface */ 140static int t5_probe(device_t); 141static device_method_t t5_methods[] = { 142 DEVMETHOD(device_probe, t5_probe), 143 DEVMETHOD(device_attach, t4_attach), 144 DEVMETHOD(device_detach, t4_detach), 145 146 DEVMETHOD_END 147}; 148static driver_t t5_driver = { 149 "t5nex", 150 t5_methods, 151 sizeof(struct adapter) 152}; 153 154 155/* T5 port (cxl) interface */ 156static driver_t cxl_driver = { 157 "cxl", 158 cxgbe_methods, 159 sizeof(struct port_info) 160}; 161 162/* T5 VI (vcxl) interface */ 163static driver_t vcxl_driver = { 164 "vcxl", 165 vcxgbe_methods, 166 sizeof(struct vi_info) 167}; 168 169/* T6 bus driver interface */ 170static int t6_probe(device_t); 171static device_method_t t6_methods[] = { 172 DEVMETHOD(device_probe, t6_probe), 173 DEVMETHOD(device_attach, t4_attach), 174 DEVMETHOD(device_detach, t4_detach), 175 176 DEVMETHOD_END 177}; 178static driver_t t6_driver = { 179 "t6nex", 180 t6_methods, 181 sizeof(struct adapter) 182}; 183 184 185/* T6 port (cc) interface */ 186static driver_t cc_driver = { 187 "cc", 188 cxgbe_methods, 189 sizeof(struct port_info) 190}; 191 192/* T6 VI (vcc) interface */ 193static driver_t vcc_driver = { 194 "vcc", 195 vcxgbe_methods, 196 sizeof(struct vi_info) 197}; 198 199/* ifnet + media interface */ 200static void cxgbe_init(void *); 201static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t); 202static int cxgbe_transmit(struct ifnet *, struct mbuf *); 203static void cxgbe_qflush(struct ifnet *); 204static int cxgbe_media_change(struct ifnet *); 205static void cxgbe_media_status(struct ifnet *, struct ifmediareq *); 206 207MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services"); 208 209/* 210 * Correct lock order when you need to acquire multiple locks is t4_list_lock, 211 * then ADAPTER_LOCK, then t4_uld_list_lock. 212 */ 213static struct sx t4_list_lock; 214SLIST_HEAD(, adapter) t4_list; 215#ifdef TCP_OFFLOAD 216static struct sx t4_uld_list_lock; 217SLIST_HEAD(, uld_info) t4_uld_list; 218#endif 219 220/* 221 * Tunables. See tweak_tunables() too. 222 * 223 * Each tunable is set to a default value here if it's known at compile-time. 224 * Otherwise it is set to -n as an indication to tweak_tunables() that it should 225 * provide a reasonable default (upto n) when the driver is loaded. 226 * 227 * Tunables applicable to both T4 and T5 are under hw.cxgbe. Those specific to 228 * T5 are under hw.cxl. 229 */ 230 231/* 232 * Number of queues for tx and rx, 10G and 1G, NIC and offload. 
/*
 * Number of queues for tx and rx, 10G and 1G, NIC and offload.
 */
#define NTXQ_10G 16
int t4_ntxq10g = -NTXQ_10G;
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);

#define NRXQ_10G 8
int t4_nrxq10g = -NRXQ_10G;
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);

#define NTXQ_1G 4
int t4_ntxq1g = -NTXQ_1G;
TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);

#define NRXQ_1G 2
int t4_nrxq1g = -NRXQ_1G;
TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);

#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);

#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);

static int t4_rsrv_noflowq = 0;
TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);

#ifdef TCP_OFFLOAD
#define NOFLDTXQ_10G 8
static int t4_nofldtxq10g = -NOFLDTXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);

#define NOFLDRXQ_10G 2
static int t4_nofldrxq10g = -NOFLDRXQ_10G;
TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);

#define NOFLDTXQ_1G 2
static int t4_nofldtxq1g = -NOFLDTXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);

#define NOFLDRXQ_1G 1
static int t4_nofldrxq1g = -NOFLDRXQ_1G;
TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
#endif

#ifdef DEV_NETMAP
#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
#endif

/*
 * Holdoff parameters for 10G and 1G ports.
 */
#define TMR_IDX_10G 1
int t4_tmr_idx_10g = TMR_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);

#define PKTC_IDX_10G (-1)
int t4_pktc_idx_10g = PKTC_IDX_10G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);

#define TMR_IDX_1G 1
int t4_tmr_idx_1g = TMR_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);

#define PKTC_IDX_1G (-1)
int t4_pktc_idx_1g = PKTC_IDX_1G;
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);

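/*
 * Example: with INTx, MSI, and MSI-X on bits 0, 1, and 2, the default above
 * allows all three (1 | 2 | 4 = 7).  Setting hw.cxgbe.interrupt_types="4"
 * would restrict the driver to MSI-X only, "2" to plain MSI.
 */
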
/*
 * Configuration file.
 */
#define DEFAULT_CF	"default"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));

/*
 * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 */
static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);

/*
 * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS,
 * FEC_RESERVED respectively).
 * -1 to run with the firmware default.
 *  0 to disable FEC.
 */
static int t4_fec = -1;
TUNABLE_INT("hw.cxgbe.fec", &t4_fec);

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 *  0 to disable.
 *  1 to enable.
 */
static int t4_autoneg = -1;
TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
 * encouraged respectively).
 */
static unsigned int t4_fw_install = 1;
TUNABLE_INT("hw.cxgbe.fw_install", &t4_fw_install);

/*
 * ASIC features that will be used.  Disable the ones you don't want so that the
 * chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);

static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);

static int t4_toecaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);

static int t4_rdmacaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);

static int t4_cryptocaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);

static int t4_iscsicaps_allowed = -1;
TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);

static int t4_fcoecaps_allowed = 0;
TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);

static int t5_write_combine = 0;
TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);

static int t4_num_vis = 1;
TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);

/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};

struct filter_entry {
	uint32_t valid:1;	/* filter allocated and valid */
	uint32_t locked:1;	/* filter is administratively locked */
	uint32_t pending:1;	/* filter action is pending firmware reply */
	uint32_t smtidx:8;	/* Source MAC Table index for smac */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */

	struct t4_filter_specification fs;
};

static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
    int);
static inline int write_via_memwin(struct adapter *, int, uint32_t,
    const uint32_t *, int);
static int validate_mem_range(struct adapter *, uint32_t, int);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
    struct intrs_and_queues *);
static int prep_firmware(struct adapter *);
static int partition_resources(struct adapter *, const struct firmware *,
    const char *);
static int get_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static void build_medialist(struct port_info *, struct ifmedia *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
#ifdef SBUF_DRAIN
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
#endif
#ifdef TCP_OFFLOAD
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
#endif
static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
static uint32_t mode_to_fconf(uint32_t);
static uint32_t mode_to_iconf(uint32_t);
static int check_fspec_against_fconf_iconf(struct adapter *,
    struct t4_filter_specification *);
static int get_filter_mode(struct adapter *, uint32_t *);
static int set_filter_mode(struct adapter *, uint32_t);
static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
static int get_filter(struct adapter *, struct t4_filter *);
static int set_filter(struct adapter *, struct t4_filter *);
static int del_filter(struct adapter *, struct t4_filter *);
static void clear_filter(struct filter_entry *);
static int set_filter_wr(struct adapter *, int);
static int del_filter_wr(struct adapter *, int);
static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);

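/*
 * The tables below pair PCI device IDs with descriptions.  Note that the chip
 * family is encoded in the high byte of the ID (0x44xx = T4, 0x54xx = T5,
 * 0x64xx = T6), which is why t4_attach() can detect a T5 with
 * (pci_get_device(dev) & 0xff00) == 0x5400.
 */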
struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
#ifdef notyet
	{0x5404, "Chelsio T520-BCH"},
	{0x5405, "Chelsio T540-BCH"},
	{0x5406, "Chelsio T540-CH"},
	{0x5408, "Chelsio T520-CX"},
	{0x540b, "Chelsio B520-SR"},
	{0x540c, "Chelsio B504-BT"},
	{0x540f, "Chelsio Amsterdam"},
	{0x5413, "Chelsio T580-CHR"},
#endif
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6400, "Chelsio T6225-DBG"},		/* 2 x 10/25G, debug */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T62100-DBG"},		/* 2 x 40/50/100G, debug */
};

#ifdef TCP_OFFLOAD
/*
 * service_iq() has an iq and needs the fl.  Offset of fl from the iq should be
 * exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t6_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};

void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint8_t *buf;
#ifdef TCP_OFFLOAD
	int ofld_rqidx, ofld_tqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);

		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
	}

	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	t4_add_adapter(sc);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * This is the real PF# to which we're attaching.  Works from within PCI
	 * passthrough environments too, where pci_get_function() could return a
	 * different PF# depending on the passthrough configuration.  We need to
	 * use the real PF# in all our communication with the firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done; /* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * Number of VIs to create per-port.  The first VI is the "main" regular
	 * VI for the port.  The rest are additional virtual interfaces on the
	 * same physical port.  Note that the main VI does not have native
	 * netmap support but the extra VIs do.
	 *
	 * Limit the number of VIs per port to the number of available
	 * MAC addresses per port.
	 */
	if (t4_num_vis >= 1)
		num_vis = t4_num_vis;
	else
		num_vis = 1;
	if (num_vis > nitems(vi_mac_funcs)) {
		num_vis = nitems(vi_mac_funcs);
		device_printf(dev, "Number of VIs limited to %d\n", num_vis);
	}

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	n10g = n1g = 0;
	for_each_port(sc, i) {
		struct port_info *pi;
		struct link_config *lc;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation until
		 * pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		lc = &pi->link_cfg;
		lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
		lc->requested_fc |= t4_pause_settings;
		if (t4_fec != -1) {
			lc->requested_fec = t4_fec &
			    G_FW_PORT_CAP_FEC(lc->supported);
		}
		if (lc->supported & FW_PORT_CAP_ANEG && t4_autoneg != -1) {
			lc->autoneg = t4_autoneg ? AUTONEG_ENABLE :
			    AUTONEG_DISABLE;
		}

		rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
		if (rc != 0) {
			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		pi->tc = malloc(sizeof(struct tx_sched_class) *
		    sc->chip_params->nsched_cls, M_CXGBE, M_ZERO | M_WAITOK);

		if (port_top_speed(pi) >= 10) {
			n10g++;
		} else {
			n1g++;
		}

		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}

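	/*
	 * Worked example (illustrative numbers): a 2-port 10G adapter with the
	 * default nrxq10g = 8, ntxq10g = 16, and a single VI per port gets
	 * s->nrxq = 2 * 8 = 16 and s->ntxq = 2 * 16 = 32 below.  neq counts
	 * both sets (the free list in each rxq is itself an eq) plus one ctrl
	 * queue per port and the mgmt queue, i.e. 32 + 16 + 2 + 1 = 51, and
	 * niq is nrxq plus the firmware event queue, i.e. 16 + 1 = 17.
	 */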
	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */
	if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
		num_vis = 1;

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
	if (num_vis > 1) {
		s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		if (num_vis > 1) {
			s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldrxq_vi;
			s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
			    iaq.nofldtxq_vi;
		}
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	if (num_vis > 1) {
		s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif

	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD
	ofld_rqidx = ofld_tqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			if (port_top_speed(pi) >= 10) {
				vi->tmr_idx = t4_tmr_idx_10g;
				vi->pktc_idx = t4_pktc_idx_10g;
				vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
			} else {
				vi->tmr_idx = t4_tmr_idx_1g;
				vi->pktc_idx = t4_pktc_idx_1g;
				vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
				vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
				vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
			}
			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#ifdef TCP_OFFLOAD
			vi->first_ofld_rxq = ofld_rqidx;
			vi->first_ofld_txq = ofld_tqidx;
			if (port_top_speed(pi) >= 10) {
				vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
				    iaq.nofldtxq_vi;
			} else {
				vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
				vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
				    iaq.nofldrxq_vi;
				vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
				    iaq.nofldtxq_vi;
			}
			ofld_rqidx += vi->nofldrxq;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef DEV_NETMAP
			if (j > 0) {
				vi->first_nm_rxq = nm_rqidx;
				vi->first_nm_txq = nm_tqidx;
				vi->nnmrxq = iaq.nnmrxq_vi;
				vi->nnmtxq = iaq.nnmtxq_vi;
				nm_rqidx += vi->nnmrxq;
				nm_tqidx += vi->nnmtxq;
			}
#endif
		}
	}

	rc = t4_setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach_common(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);

	return (t4_detach_common(dev));
}

int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi->tc, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);

	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);

#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	t4_destroy_dma_tag(sc);
	if (mtx_initialized(&sc->sc_lock)) {
		sx_xlock(&t4_list_lock);
		SLIST_REMOVE(&t4_list, sc, adapter, link);
		sx_xunlock(&t4_list_lock);
		mtx_destroy(&sc->sc_lock);
	}

	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock))
		mtx_destroy(&sc->tids.ftid_lock);
	if (mtx_initialized(&sc->sfl_lock))
		mtx_destroy(&sc->sfl_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);
	if (mtx_initialized(&sc->reg_lock))
		mtx_destroy(&sc->reg_lock);

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

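/*
 * T4_CAP is the full set of capabilities advertised to the network stack;
 * T4_CAP_ENABLE, the subset enabled by default, is currently all of it.
 * Individual capabilities can be toggled at runtime (e.g. with
 * `ifconfig cxgbe0 -txcsum`), which arrives via the SIOCSIFCAP case of
 * cxgbe_ioctl() below.
 */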
#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
	ifp->if_ioctl = cxgbe_ioctl;
	ifp->if_transmit = cxgbe_transmit;
	ifp->if_qflush = cxgbe_qflush;

	ifp->if_capabilities = T4_CAP;
#ifdef TCP_OFFLOAD
	if (vi->nofldrxq != 0)
		ifp->if_capabilities |= IFCAP_TOE;
#endif
	ifp->if_capenable = T4_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
	ifp->if_hw_tsomaxsegsize = 65536;

	/* Initialize ifmedia for this VI */
	ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
	    cxgbe_media_status);
	build_medialist(vi->pi, &vi->media);

	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
	    EVENTHANDLER_PRI_ANY);

	ether_ifattach(ifp, vi->hw_addr);
#ifdef DEV_NETMAP
	if (vi->nnmrxq != 0)
		cxgbe_nm_attach(vi);
#endif
	sb = sbuf_new_auto();
	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
#ifdef TCP_OFFLOAD
	if (ifp->if_capabilities & IFCAP_TOE)
		sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
		    vi->nofldtxq, vi->nofldrxq);
#endif
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
		    vi->nnmtxq, vi->nnmrxq);
#endif
	sbuf_finish(sb);
	device_printf(dev, "%s\n", sbuf_data(sb));
	sbuf_delete(sb);

	vi_sysctls(vi);

	return (0);
}

static int
cxgbe_attach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	struct vi_info *vi;
	int i, rc;

	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);

	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
	if (rc)
		return (rc);

	for_each_vi(pi, i, vi) {
		if (i == 0)
			continue;
		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
		if (vi->dev == NULL) {
			device_printf(dev, "failed to add VI %d\n", i);
			continue;
		}
		device_set_softc(vi->dev, vi);
	}

	cxgbe_sysctls(pi);

	bus_generic_attach(dev);

	return (0);
}

static void
cxgbe_vi_detach(struct vi_info *vi)
{
	struct ifnet *ifp = vi->ifp;

	ether_ifdetach(ifp);

	if (vi->vlan_c)
		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);

	/* Let detach proceed even if these fail. */
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		cxgbe_nm_detach(vi);
#endif
	cxgbe_uninit_synchronized(vi);
	callout_drain(&vi->tick);
	vi_full_uninit(vi);

	ifmedia_removeall(&vi->media);
	if_free(vi->ifp);
	vi->ifp = NULL;
}

static int
cxgbe_detach(device_t dev)
{
	struct port_info *pi = device_get_softc(dev);
	struct adapter *sc = pi->adapter;
	int rc;

	/* Detach the extra VIs first. */
	rc = bus_generic_detach(dev);
	if (rc)
		return (rc);
	device_delete_children(dev);

	doom_vi(sc, &pi->vi[0]);

	if (pi->flags & HAS_TRACEQ) {
		sc->traceq = -1;	/* cloner should not create ifnet */
		t4_tracer_port_detach(sc);
	}

	cxgbe_vi_detach(&pi->vi[0]);
	callout_drain(&pi->tick);

	end_synchronized_op(sc, 0);

	return (0);
}

static void
cxgbe_init(void *arg)
{
	struct vi_info *vi = arg;
	struct adapter *sc = vi->pi->adapter;

	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
		return;
	cxgbe_init_synchronized(vi);
	end_synchronized_op(sc, 0);
}

static int
cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0, mtu, flags, can_sleep;
	struct vi_info *vi = ifp->if_softc;
	struct adapter *sc = vi->pi->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	uint32_t mask;

	switch (cmd) {
	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < ETHERMIN || mtu > MAX_MTU)
			return (EINVAL);

		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
		if (rc)
			return (rc);
		ifp->if_mtu = mtu;
		if (vi->flags & VI_INIT_DONE) {
			t4_update_fl_bufsize(ifp);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_MTU);
		}
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFFLAGS:
		can_sleep = 0;
redo_sifflags:
		rc = begin_synchronized_op(sc, vi,
		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
		if (rc)
			return (rc);

		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				flags = vi->if_flags;
				if ((ifp->if_flags ^ flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					if (can_sleep == 1) {
						end_synchronized_op(sc, 0);
						can_sleep = 0;
						goto redo_sifflags;
					}
					rc = update_mac_settings(ifp,
					    XGMAC_PROMISC | XGMAC_ALLMULTI);
				}
			} else {
				if (can_sleep == 0) {
					end_synchronized_op(sc, LOCK_HELD);
					can_sleep = 1;
					goto redo_sifflags;
				}
				rc = cxgbe_init_synchronized(vi);
			}
			vi->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (can_sleep == 0) {
				end_synchronized_op(sc, LOCK_HELD);
				can_sleep = 1;
				goto redo_sifflags;
			}
			rc = cxgbe_uninit_synchronized(vi);
		}
		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
		if (rc)
			return (rc);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
		end_synchronized_op(sc, LOCK_HELD);
		break;

	case SIOCSIFCAP:
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
		if (rc)
			return (rc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				rc = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
#if defined(INET) || defined(INET6)
			int i;
			struct sge_rxq *rxq;

			ifp->if_capenable ^= IFCAP_LRO;
			for_each_rxq(vi, i, rxq) {
				if (ifp->if_capenable & IFCAP_LRO)
					rxq->iq.flags |= IQ_LRO_ENABLED;
				else
					rxq->iq.flags &= ~IQ_LRO_ENABLED;
			}
#endif
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;

			rc = toe_capability(vi, enable);
			if (rc != 0)
				goto fail;

			ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				rc = update_mac_settings(ifp, XGMAC_VLANEX);
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;

			/* Need to find out how to disable auto-mtu-inflation */
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
fail:
		end_synchronized_op(sc, 0);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
		break;

	case SIOCGI2C: {
		struct ifi2creq i2c;

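		/*
		 * 0xA0 and 0xA2, the only module addresses accepted below, are
		 * the standard SFF-8472 I2C addresses of a transceiver's
		 * EEPROM and diagnostics pages; this is the ioctl that userland
		 * tools such as ifconfig(8) use to read module data.
		 */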
		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
		if (rc != 0)
			break;
		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
			rc = EPERM;
			break;
		}
		if (i2c.len > sizeof(i2c.data)) {
			rc = EINVAL;
			break;
		}
		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
		if (rc)
			return (rc);
		rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
		    i2c.offset, i2c.len, &i2c.data[0]);
		end_synchronized_op(sc, 0);
		if (rc == 0)
			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
		break;
	}

	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_txq *txq;
	void *items[1];
	int rc;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */

	if (__predict_false(pi->link_cfg.link_ok == 0)) {
		m_freem(m);
		return (ENETDOWN);
	}

	rc = parse_pkt(sc, &m);
	if (__predict_false(rc != 0)) {
		MPASS(m == NULL);			/* was freed already */
		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
		return (rc);
	}

	/* Select a txq. */
	txq = &sc->sge.txq[vi->first_txq];
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
		    vi->rsrv_noflowq);

	items[0] = m;
	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
	if (__predict_false(rc != 0))
		m_freem(m);

	return (rc);
}

static void
cxgbe_qflush(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;
	struct sge_txq *txq;
	int i;

	/* queues do not exist if !VI_INIT_DONE. */
	if (vi->flags & VI_INIT_DONE) {
		for_each_txq(vi, i, txq) {
			TXQ_LOCK(txq);
			txq->eq.flags &= ~EQ_ENABLED;
			TXQ_UNLOCK(txq);
			while (!mp_ring_is_idle(txq->r)) {
				mp_ring_check_drainage(txq->r, 0);
				pause("qflush", 1);
			}
		}
	}
	if_qflush(ifp);
}

static int
cxgbe_media_change(struct ifnet *ifp)
{
	struct vi_info *vi = ifp->if_softc;

	device_printf(vi->dev, "%s unimplemented.\n", __func__);

	return (EOPNOTSUPP);
}

static void
cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vi_info *vi = ifp->if_softc;
	struct port_info *pi = vi->pi;
	struct ifmedia_entry *cur;
	int speed = pi->link_cfg.speed;

	cur = vi->media.ifm_cur;

	ifmr->ifm_status = IFM_AVALID;
	if (!pi->link_cfg.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/* active and current will differ iff current media is autoselect. */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;

	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == 10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == 1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == 100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == 10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
		    speed));
}

static int
vcxgbe_probe(device_t dev)
{
	char buf[128];
	struct vi_info *vi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
	    vi - vi->pi->vi);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

static int
vcxgbe_attach(device_t dev)
{
	struct vi_info *vi;
	struct port_info *pi;
	struct adapter *sc;
	int func, index, rc;
	u32 param, val;

	vi = device_get_softc(dev);
	pi = vi->pi;
	sc = pi->adapter;

	index = vi - pi->vi;
	KASSERT(index < nitems(vi_mac_funcs),
	    ("%s: VI %s doesn't have a MAC func", __func__,
	    device_get_nameunit(dev)));
	func = vi_mac_funcs[index];
	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
	    vi->hw_addr, &vi->rss_size, func, 0);
	if (rc < 0) {
		device_printf(dev, "Failed to allocate virtual interface "
		    "for port %d: %d\n", pi->port_id, -rc);
		return (-rc);
	}
	vi->viid = rc;
	if (chip_id(sc) <= CHELSIO_T5)
		vi->smt_idx = (rc & 0x7f) << 1;
	else
		vi->smt_idx = (rc & 0x7f);

	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
	    V_FW_PARAMS_PARAM_YZ(vi->viid);
	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc)
		vi->rss_base = 0xffff;
	else {
		/* MPASS((val >> 16) == rss_size); */
		vi->rss_base = val & 0xffff;
	}

	rc = cxgbe_vi_attach(dev, vi);
	if (rc) {
		t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
		return (rc);
	}
	return (0);
}

static int
vcxgbe_detach(device_t dev)
{
	struct vi_info *vi;
	struct adapter *sc;

	vi = device_get_softc(dev);
	sc = vi->pi->adapter;

	doom_vi(sc, vi);

	cxgbe_vi_detach(vi);
	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);

	end_synchronized_op(sc, 0);

	return (0);
}

void
t4_fatal_err(struct adapter *sc)
{
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	log(LOG_EMERG, "%s: encountered fatal error, adapter stopped.\n",
	    device_get_nameunit(sc->dev));
}

void
t4_add_adapter(struct adapter *sc)
{
	sx_xlock(&t4_list_lock);
	SLIST_INSERT_HEAD(&t4_list, sc, link);
	sx_xunlock(&t4_list_lock);
}

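/*
 * Summary of the PCI BARs used below: BAR0 holds the adapter's register file,
 * BAR2 the user doorbell region (mapped separately in t4_map_bar_2()), and
 * BAR4 the MSI-X BAR.
 */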
(sc->msix_res == NULL) { 1954 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 1955 return (ENXIO); 1956 } 1957 1958 return (0); 1959} 1960 1961int 1962t4_map_bar_2(struct adapter *sc) 1963{ 1964 1965 /* 1966 * T4: only iWARP driver uses the userspace doorbells. There is no need 1967 * to map it if RDMA is disabled. 1968 */ 1969 if (is_t4(sc) && sc->rdmacaps == 0) 1970 return (0); 1971 1972 sc->udbs_rid = PCIR_BAR(2); 1973 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 1974 &sc->udbs_rid, RF_ACTIVE); 1975 if (sc->udbs_res == NULL) { 1976 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 1977 return (ENXIO); 1978 } 1979 sc->udbs_base = rman_get_virtual(sc->udbs_res); 1980 1981 if (chip_id(sc) >= CHELSIO_T5) { 1982 setbit(&sc->doorbells, DOORBELL_UDB); 1983#if defined(__i386__) || defined(__amd64__) 1984 if (t5_write_combine) { 1985 int rc, mode; 1986 1987 /* 1988 * Enable write combining on BAR2. This is the 1989 * userspace doorbell BAR and is split into 128B 1990 * (UDBS_SEG_SIZE) doorbell regions, each associated 1991 * with an egress queue. The first 64B has the doorbell 1992 * and the second 64B can be used to submit a tx work 1993 * request with an implicit doorbell. 1994 */ 1995 1996 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 1997 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 1998 if (rc == 0) { 1999 clrbit(&sc->doorbells, DOORBELL_UDB); 2000 setbit(&sc->doorbells, DOORBELL_WCWR); 2001 setbit(&sc->doorbells, DOORBELL_UDBWC); 2002 } else { 2003 device_printf(sc->dev, 2004 "couldn't enable write combining: %d\n", 2005 rc); 2006 } 2007 2008 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0); 2009 t4_write_reg(sc, A_SGE_STAT_CFG, 2010 V_STATSOURCE_T5(7) | mode); 2011 } 2012#endif 2013 } 2014 2015 return (0); 2016} 2017 2018struct memwin_init { 2019 uint32_t base; 2020 uint32_t aperture; 2021}; 2022 2023static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 2024 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2025 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2026 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 2027}; 2028 2029static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 2030 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2031 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2032 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 2033}; 2034 2035static void 2036setup_memwin(struct adapter *sc) 2037{ 2038 const struct memwin_init *mw_init; 2039 struct memwin *mw; 2040 int i; 2041 uint32_t bar0; 2042 2043 if (is_t4(sc)) { 2044 /* 2045 * Read low 32b of bar0 indirectly via the hardware backdoor 2046 * mechanism. Works from within PCI passthrough environments 2047 * too, where rman_get_start() can return a different value. We 2048 * need to program the T4 memory window decoders with the actual 2049 * addresses that will be coming across the PCIe link. 
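 * As a made-up illustration: if BAR0 on the link is 0xfbd00000, each
 * decoder below is programmed with
 * (mw_base + 0xfbd00000) | V_BIR(0) | V_WINDOW(ilog2(aperture) - 10),
 * i.e. the T4 window registers hold absolute bus addresses.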
2050 */ 2051 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 2052 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 2053 2054 mw_init = &t4_memwin[0]; 2055 } else { 2056 /* T5+ use the relative offset inside the PCIe BAR */ 2057 bar0 = 0; 2058 2059 mw_init = &t5_memwin[0]; 2060 } 2061 2062 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 2063 rw_init(&mw->mw_lock, "memory window access"); 2064 mw->mw_base = mw_init->base; 2065 mw->mw_aperture = mw_init->aperture; 2066 mw->mw_curpos = 0; 2067 t4_write_reg(sc, 2068 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2069 (mw->mw_base + bar0) | V_BIR(0) | 2070 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 2071 rw_wlock(&mw->mw_lock); 2072 position_memwin(sc, i, 0); 2073 rw_wunlock(&mw->mw_lock); 2074 } 2075 2076 /* flush */ 2077 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2078} 2079 2080/* 2081 * Positions the memory window at the given address in the card's address space. 2082 * There are some alignment requirements and the actual position may be at an 2083 * address prior to the requested address. mw->mw_curpos always has the actual 2084 * position of the window. 2085 */ 2086static void 2087position_memwin(struct adapter *sc, int idx, uint32_t addr) 2088{ 2089 struct memwin *mw; 2090 uint32_t pf; 2091 uint32_t reg; 2092 2093 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2094 mw = &sc->memwin[idx]; 2095 rw_assert(&mw->mw_lock, RA_WLOCKED); 2096 2097 if (is_t4(sc)) { 2098 pf = 0; 2099 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 2100 } else { 2101 pf = V_PFNUM(sc->pf); 2102 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 2103 } 2104 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 2105 t4_write_reg(sc, reg, mw->mw_curpos | pf); 2106 t4_read_reg(sc, reg); /* flush */ 2107} 2108 2109static int 2110rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2111 int len, int rw) 2112{ 2113 struct memwin *mw; 2114 uint32_t mw_end, v; 2115 2116 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2117 2118 /* Memory can only be accessed in naturally aligned 4 byte units */ 2119 if (addr & 3 || len & 3 || len <= 0) 2120 return (EINVAL); 2121 2122 mw = &sc->memwin[idx]; 2123 while (len > 0) { 2124 rw_rlock(&mw->mw_lock); 2125 mw_end = mw->mw_curpos + mw->mw_aperture; 2126 if (addr >= mw_end || addr < mw->mw_curpos) { 2127 /* Will need to reposition the window */ 2128 if (!rw_try_upgrade(&mw->mw_lock)) { 2129 rw_runlock(&mw->mw_lock); 2130 rw_wlock(&mw->mw_lock); 2131 } 2132 rw_assert(&mw->mw_lock, RA_WLOCKED); 2133 position_memwin(sc, idx, addr); 2134 rw_downgrade(&mw->mw_lock); 2135 mw_end = mw->mw_curpos + mw->mw_aperture; 2136 } 2137 rw_assert(&mw->mw_lock, RA_RLOCKED); 2138 while (addr < mw_end && len > 0) { 2139 if (rw == 0) { 2140 v = t4_read_reg(sc, mw->mw_base + addr - 2141 mw->mw_curpos); 2142 *val++ = le32toh(v); 2143 } else { 2144 v = *val++; 2145 t4_write_reg(sc, mw->mw_base + addr - 2146 mw->mw_curpos, htole32(v)); 2147 } 2148 addr += 4; 2149 len -= 4; 2150 } 2151 rw_runlock(&mw->mw_lock); 2152 } 2153 2154 return (0); 2155} 2156 2157static inline int 2158read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2159 int len) 2160{ 2161 2162 return (rw_via_memwin(sc, idx, addr, val, len, 0)); 2163} 2164 2165static inline int 2166write_via_memwin(struct adapter *sc, int idx, uint32_t addr, 2167 const uint32_t *val, int len) 2168{ 2169 2170 return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1)); 2171} 2172 2173static int 2174t4_range_cmp(const void *a,
const void *b) 2175 { 2176 return ((const struct t4_range *)a)->start - 2177 ((const struct t4_range *)b)->start; 2178} 2179 2180/* 2181 * Verify that the memory range specified by the addr/len pair is valid within 2182 * the card's address space. 2183 */ 2184static int 2185validate_mem_range(struct adapter *sc, uint32_t addr, int len) 2186{ 2187 struct t4_range mem_ranges[4], *r, *next; 2188 uint32_t em, addr_len; 2189 int i, n, remaining; 2190 2191 /* Memory can only be accessed in naturally aligned 4 byte units */ 2192 if (addr & 3 || len & 3 || len <= 0) 2193 return (EINVAL); 2194 2195 /* Enabled memories */ 2196 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2197 2198 r = &mem_ranges[0]; 2199 n = 0; 2200 bzero(r, sizeof(mem_ranges)); 2201 if (em & F_EDRAM0_ENABLE) { 2202 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2203 r->size = G_EDRAM0_SIZE(addr_len) << 20; 2204 if (r->size > 0) { 2205 r->start = G_EDRAM0_BASE(addr_len) << 20; 2206 if (addr >= r->start && 2207 addr + len <= r->start + r->size) 2208 return (0); 2209 r++; 2210 n++; 2211 } 2212 } 2213 if (em & F_EDRAM1_ENABLE) { 2214 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2215 r->size = G_EDRAM1_SIZE(addr_len) << 20; 2216 if (r->size > 0) { 2217 r->start = G_EDRAM1_BASE(addr_len) << 20; 2218 if (addr >= r->start && 2219 addr + len <= r->start + r->size) 2220 return (0); 2221 r++; 2222 n++; 2223 } 2224 } 2225 if (em & F_EXT_MEM_ENABLE) { 2226 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2227 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 2228 if (r->size > 0) { 2229 r->start = G_EXT_MEM_BASE(addr_len) << 20; 2230 if (addr >= r->start && 2231 addr + len <= r->start + r->size) 2232 return (0); 2233 r++; 2234 n++; 2235 } 2236 } 2237 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 2238 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2239 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 2240 if (r->size > 0) { 2241 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 2242 if (addr >= r->start && 2243 addr + len <= r->start + r->size) 2244 return (0); 2245 r++; 2246 n++; 2247 } 2248 } 2249 MPASS(n <= nitems(mem_ranges)); 2250 2251 if (n > 1) { 2252 /* Sort and merge the ranges. */ 2253 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 2254 2255 /* Start from index 0 and examine the next n - 1 entries. */ 2256 r = &mem_ranges[0]; 2257 for (remaining = n - 1; remaining > 0; remaining--, r++) { 2258 2259 MPASS(r->size > 0); /* r is a valid entry. */ 2260 next = r + 1; 2261 MPASS(next->size > 0); /* and so is the next one. */ 2262 2263 while (r->start + r->size >= next->start) { 2264 /* Merge the next one into the current entry. */ 2265 r->size = max(r->start + r->size, 2266 next->start + next->size) - r->start; 2267 n--; /* One fewer entry in total. */ 2268 if (--remaining == 0) 2269 goto done; /* short circuit */ 2270 next++; 2271 } 2272 if (next != r + 1) { 2273 /* 2274 * Some entries were merged into r and next 2275 * points to the first valid entry that couldn't 2276 * be merged. 2277 */ 2278 MPASS(next->size > 0); /* must be valid */ 2279 memcpy(r + 1, next, remaining * sizeof(*r)); 2280#ifdef INVARIANTS 2281 /* 2282 * This is so that the foo->size assertion in the 2283 * next iteration of the loop does the right 2284 * thing for entries that were pulled up and are 2285 * no longer valid. 2286 */ 2287 MPASS(n < nitems(mem_ranges)); 2288 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 2289 sizeof(struct t4_range)); 2290#endif 2291 } 2292 } 2293done: 2294 /* Done merging the ranges. 
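 * What's left is a sorted list of disjoint ranges; the request is valid
 * only if it lies entirely within one of them.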
*/ 2295 MPASS(n > 0); 2296 r = &mem_ranges[0]; 2297 for (i = 0; i < n; i++, r++) { 2298 if (addr >= r->start && 2299 addr + len <= r->start + r->size) 2300 return (0); 2301 } 2302 } 2303 2304 return (EFAULT); 2305} 2306 2307static int 2308fwmtype_to_hwmtype(int mtype) 2309{ 2310 2311 switch (mtype) { 2312 case FW_MEMTYPE_EDC0: 2313 return (MEM_EDC0); 2314 case FW_MEMTYPE_EDC1: 2315 return (MEM_EDC1); 2316 case FW_MEMTYPE_EXTMEM: 2317 return (MEM_MC0); 2318 case FW_MEMTYPE_EXTMEM1: 2319 return (MEM_MC1); 2320 default: 2321 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 2322 } 2323} 2324 2325/* 2326 * Verify that the memory range specified by the memtype/offset/len pair is 2327 * valid and lies entirely within the memtype specified. The global address of 2328 * the start of the range is returned in addr. 2329 */ 2330static int 2331validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 2332 uint32_t *addr) 2333{ 2334 uint32_t em, addr_len, maddr; 2335 2336 /* Memory can only be accessed in naturally aligned 4 byte units */ 2337 if (off & 3 || len & 3 || len == 0) 2338 return (EINVAL); 2339 2340 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 2341 switch (fwmtype_to_hwmtype(mtype)) { 2342 case MEM_EDC0: 2343 if (!(em & F_EDRAM0_ENABLE)) 2344 return (EINVAL); 2345 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 2346 maddr = G_EDRAM0_BASE(addr_len) << 20; 2347 break; 2348 case MEM_EDC1: 2349 if (!(em & F_EDRAM1_ENABLE)) 2350 return (EINVAL); 2351 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 2352 maddr = G_EDRAM1_BASE(addr_len) << 20; 2353 break; 2354 case MEM_MC: 2355 if (!(em & F_EXT_MEM_ENABLE)) 2356 return (EINVAL); 2357 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 2358 maddr = G_EXT_MEM_BASE(addr_len) << 20; 2359 break; 2360 case MEM_MC1: 2361 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 2362 return (EINVAL); 2363 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 2364 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 2365 break; 2366 default: 2367 return (EINVAL); 2368 } 2369 2370 *addr = maddr + off; /* global address */ 2371 return (validate_mem_range(sc, *addr, len)); 2372} 2373 2374static int 2375fixup_devlog_params(struct adapter *sc) 2376{ 2377 struct devlog_params *dparams = &sc->params.devlog; 2378 int rc; 2379 2380 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 2381 dparams->size, &dparams->addr); 2382 2383 return (rc); 2384} 2385 2386static int 2387cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis, 2388 struct intrs_and_queues *iaq) 2389{ 2390 int rc, itype, navail, nrxq10g, nrxq1g, n; 2391 int nofldrxq10g = 0, nofldrxq1g = 0; 2392 2393 bzero(iaq, sizeof(*iaq)); 2394 2395 iaq->ntxq10g = t4_ntxq10g; 2396 iaq->ntxq1g = t4_ntxq1g; 2397 iaq->ntxq_vi = t4_ntxq_vi; 2398 iaq->nrxq10g = nrxq10g = t4_nrxq10g; 2399 iaq->nrxq1g = nrxq1g = t4_nrxq1g; 2400 iaq->nrxq_vi = t4_nrxq_vi; 2401 iaq->rsrv_noflowq = t4_rsrv_noflowq; 2402#ifdef TCP_OFFLOAD 2403 if (is_offload(sc)) { 2404 iaq->nofldtxq10g = t4_nofldtxq10g; 2405 iaq->nofldtxq1g = t4_nofldtxq1g; 2406 iaq->nofldtxq_vi = t4_nofldtxq_vi; 2407 iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g; 2408 iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g; 2409 iaq->nofldrxq_vi = t4_nofldrxq_vi; 2410 } 2411#endif 2412#ifdef DEV_NETMAP 2413 iaq->nnmtxq_vi = t4_nnmtxq_vi; 2414 iaq->nnmrxq_vi = t4_nnmrxq_vi; 2415#endif 2416 2417 for (itype = INTR_MSIX; itype; itype >>= 1) { 2418 2419 if ((itype & t4_intr_types) == 0) 2420 continue; /* not allowed */ 2421 2422 if (itype == INTR_MSIX) 2423 navail = 
pci_msix_count(sc->dev); 2424 else if (itype == INTR_MSI) 2425 navail = pci_msi_count(sc->dev); 2426 else 2427 navail = 1; 2428restart: 2429 if (navail == 0) 2430 continue; 2431 2432 iaq->intr_type = itype; 2433 iaq->intr_flags_10g = 0; 2434 iaq->intr_flags_1g = 0; 2435 2436 /* 2437 * Best option: an interrupt vector for errors, one for the 2438 * firmware event queue, and one for every rxq (NIC and TOE) of 2439 * every VI. The VIs that support netmap use the same 2440 * interrupts for the NIC rx queues and the netmap rx queues 2441 * because only one set of queues is active at a time. 2442 */ 2443 iaq->nirq = T4_EXTRA_INTR; 2444 iaq->nirq += n10g * (nrxq10g + nofldrxq10g); 2445 iaq->nirq += n1g * (nrxq1g + nofldrxq1g); 2446 iaq->nirq += (n10g + n1g) * (num_vis - 1) * 2447 max(iaq->nrxq_vi, iaq->nnmrxq_vi); /* See comment above. */ 2448 iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi; 2449 if (iaq->nirq <= navail && 2450 (itype != INTR_MSI || powerof2(iaq->nirq))) { 2451 iaq->intr_flags_10g = INTR_ALL; 2452 iaq->intr_flags_1g = INTR_ALL; 2453 goto allocate; 2454 } 2455 2456 /* Disable the VIs (and netmap) if there aren't enough intrs */ 2457 if (num_vis > 1) { 2458 device_printf(sc->dev, "virtual interfaces disabled " 2459 "because num_vis=%u with current settings " 2460 "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, " 2461 "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, " 2462 "nnmrxq_vi=%u) would need %u interrupts but " 2463 "only %u are available.\n", num_vis, nrxq10g, 2464 nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi, 2465 iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq, 2466 navail); 2467 num_vis = 1; 2468 iaq->ntxq_vi = iaq->nrxq_vi = 0; 2469 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; 2470 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; 2471 goto restart; 2472 } 2473 2474 /* 2475 * Second best option: a vector for errors, one for the firmware 2476 * event queue, and vectors for either all the NIC rx queues or 2477 * all the TOE rx queues. The queues that don't get vectors 2478 * will forward their interrupts to those that do. 2479 */ 2480 iaq->nirq = T4_EXTRA_INTR; 2481 if (nrxq10g >= nofldrxq10g) { 2482 iaq->intr_flags_10g = INTR_RXQ; 2483 iaq->nirq += n10g * nrxq10g; 2484 } else { 2485 iaq->intr_flags_10g = INTR_OFLD_RXQ; 2486 iaq->nirq += n10g * nofldrxq10g; 2487 } 2488 if (nrxq1g >= nofldrxq1g) { 2489 iaq->intr_flags_1g = INTR_RXQ; 2490 iaq->nirq += n1g * nrxq1g; 2491 } else { 2492 iaq->intr_flags_1g = INTR_OFLD_RXQ; 2493 iaq->nirq += n1g * nofldrxq1g; 2494 } 2495 if (iaq->nirq <= navail && 2496 (itype != INTR_MSI || powerof2(iaq->nirq))) 2497 goto allocate; 2498 2499 /* 2500 * Next best option: an interrupt vector for errors, one for the 2501 * firmware event queue, and at least one per main-VI. At this 2502 * point we know we'll have to downsize nrxq and/or nofldrxq to 2503 * fit what's available to us. 2504 */ 2505 iaq->nirq = T4_EXTRA_INTR; 2506 iaq->nirq += n10g + n1g; 2507 if (iaq->nirq <= navail) { 2508 int leftover = navail - iaq->nirq; 2509 2510 if (n10g > 0) { 2511 int target = max(nrxq10g, nofldrxq10g); 2512 2513 iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ? 2514 INTR_RXQ : INTR_OFLD_RXQ; 2515 2516 n = 1; 2517 while (n < target && leftover >= n10g) { 2518 leftover -= n10g; 2519 iaq->nirq += n10g; 2520 n++; 2521 } 2522 iaq->nrxq10g = min(n, nrxq10g); 2523#ifdef TCP_OFFLOAD 2524 iaq->nofldrxq10g = min(n, nofldrxq10g); 2525#endif 2526 } 2527 2528 if (n1g > 0) { 2529 int target = max(nrxq1g, nofldrxq1g); 2530 2531 iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ? 
2532 INTR_RXQ : INTR_OFLD_RXQ; 2533 2534 n = 1; 2535 while (n < target && leftover >= n1g) { 2536 leftover -= n1g; 2537 iaq->nirq += n1g; 2538 n++; 2539 } 2540 iaq->nrxq1g = min(n, nrxq1g); 2541#ifdef TCP_OFFLOAD 2542 iaq->nofldrxq1g = min(n, nofldrxq1g); 2543#endif 2544 } 2545 2546 if (itype != INTR_MSI || powerof2(iaq->nirq)) 2547 goto allocate; 2548 } 2549 2550 /* 2551 * Least desirable option: one interrupt vector for everything. 2552 */ 2553 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2554 iaq->intr_flags_10g = iaq->intr_flags_1g = 0; 2555#ifdef TCP_OFFLOAD 2556 if (is_offload(sc)) 2557 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2558#endif 2559allocate: 2560 navail = iaq->nirq; 2561 rc = 0; 2562 if (itype == INTR_MSIX) 2563 rc = pci_alloc_msix(sc->dev, &navail); 2564 else if (itype == INTR_MSI) 2565 rc = pci_alloc_msi(sc->dev, &navail); 2566 2567 if (rc == 0) { 2568 if (navail == iaq->nirq) 2569 return (0); 2570 2571 /* 2572 * Didn't get the number requested. Use whatever number 2573 * the kernel is willing to allocate (it's in navail). 2574 */ 2575 device_printf(sc->dev, "fewer vectors than requested, " 2576 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 2577 itype, iaq->nirq, navail); 2578 pci_release_msi(sc->dev); 2579 goto restart; 2580 } 2581 2582 device_printf(sc->dev, 2583 "failed to allocate vectors: rc=%d, type=%d, req=%d, rcvd=%d\n", 2584 rc, itype, iaq->nirq, navail); 2585 } 2586 2587 device_printf(sc->dev, 2588 "failed to find a usable interrupt type. " 2589 "allowed=%d, msi-x=%d, msi=%d, intx=1\n", t4_intr_types, 2590 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 2591 2592 return (ENXIO); 2593} 2594 2595#define FW_VERSION(chip) ( \ 2596 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 2597 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 2598 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 2599 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 2600#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 2601 2602struct fw_info { 2603 uint8_t chip; 2604 char *kld_name; 2605 char *fw_mod_name; 2606 struct fw_hdr fw_hdr; /* XXX: waste of space, need a sparse struct */ 2607} fw_info[] = { 2608 { 2609 .chip = CHELSIO_T4, 2610 .kld_name = "t4fw_cfg", 2611 .fw_mod_name = "t4fw", 2612 .fw_hdr = { 2613 .chip = FW_HDR_CHIP_T4, 2614 .fw_ver = htobe32_const(FW_VERSION(T4)), 2615 .intfver_nic = FW_INTFVER(T4, NIC), 2616 .intfver_vnic = FW_INTFVER(T4, VNIC), 2617 .intfver_ofld = FW_INTFVER(T4, OFLD), 2618 .intfver_ri = FW_INTFVER(T4, RI), 2619 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 2620 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 2621 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 2622 .intfver_fcoe = FW_INTFVER(T4, FCOE), 2623 }, 2624 }, { 2625 .chip = CHELSIO_T5, 2626 .kld_name = "t5fw_cfg", 2627 .fw_mod_name = "t5fw", 2628 .fw_hdr = { 2629 .chip = FW_HDR_CHIP_T5, 2630 .fw_ver = htobe32_const(FW_VERSION(T5)), 2631 .intfver_nic = FW_INTFVER(T5, NIC), 2632 .intfver_vnic = FW_INTFVER(T5, VNIC), 2633 .intfver_ofld = FW_INTFVER(T5, OFLD), 2634 .intfver_ri = FW_INTFVER(T5, RI), 2635 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 2636 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 2637 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 2638 .intfver_fcoe = FW_INTFVER(T5, FCOE), 2639 }, 2640 }, { 2641 .chip = CHELSIO_T6, 2642 .kld_name = "t6fw_cfg", 2643 .fw_mod_name = "t6fw", 2644 .fw_hdr = { 2645 .chip = FW_HDR_CHIP_T6, 2646 .fw_ver = htobe32_const(FW_VERSION(T6)), 2647 .intfver_nic = FW_INTFVER(T6, NIC), 2648 .intfver_vnic = FW_INTFVER(T6, VNIC), 2649 .intfver_ofld = FW_INTFVER(T6, OFLD), 2650 
.intfver_ri = FW_INTFVER(T6, RI), 2651 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), 2652 .intfver_iscsi = FW_INTFVER(T6, ISCSI), 2653 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), 2654 .intfver_fcoe = FW_INTFVER(T6, FCOE), 2655 }, 2656 } 2657}; 2658 2659static struct fw_info * 2660find_fw_info(int chip) 2661{ 2662 int i; 2663 2664 for (i = 0; i < nitems(fw_info); i++) { 2665 if (fw_info[i].chip == chip) 2666 return (&fw_info[i]); 2667 } 2668 return (NULL); 2669} 2670 2671/* 2672 * Is the given firmware API compatible with the one the driver was compiled 2673 * with? 2674 */ 2675static int 2676fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 2677{ 2678 2679 /* short circuit if it's the exact same firmware version */ 2680 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 2681 return (1); 2682 2683 /* 2684 * XXX: Is this too conservative? Perhaps I should limit this to the 2685 * features that are supported in the driver. 2686 */ 2687#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 2688 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 2689 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 2690 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 2691 return (1); 2692#undef SAME_INTF 2693 2694 return (0); 2695} 2696 2697/* 2698 * The firmware in the KLD is usable, but should it be installed? This routine 2699 * explains itself in detail if it indicates the KLD firmware should be 2700 * installed. 2701 */ 2702static int 2703should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c) 2704{ 2705 const char *reason; 2706 2707 if (!card_fw_usable) { 2708 reason = "incompatible or unusable"; 2709 goto install; 2710 } 2711 2712 if (k > c) { 2713 reason = "older than the version bundled with this driver"; 2714 goto install; 2715 } 2716 2717 if (t4_fw_install == 2 && k != c) { 2718 reason = "different than the version bundled with this driver"; 2719 goto install; 2720 } 2721 2722 return (0); 2723 2724install: 2725 if (t4_fw_install == 0) { 2726 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2727 "but the driver is prohibited from installing a different " 2728 "firmware on the card.\n", 2729 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2730 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 2731 2732 return (0); 2733 } 2734 2735 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 2736 "installing firmware %u.%u.%u.%u on card.\n", 2737 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2738 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 2739 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2740 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2741 2742 return (1); 2743} 2744/* 2745 * Establish contact with the firmware and determine if we are the master driver 2746 * or not, and whether we are responsible for chip initialization. 2747 */ 2748static int 2749prep_firmware(struct adapter *sc) 2750{ 2751 const struct firmware *fw = NULL, *default_cfg; 2752 int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1; 2753 enum dev_state state; 2754 struct fw_info *fw_info; 2755 struct fw_hdr *card_fw; /* fw on the card */ 2756 const struct fw_hdr *kld_fw; /* fw in the KLD */ 2757 const struct fw_hdr *drv_fw; /* fw header the driver was compiled 2758 against */ 2759 2760 /* Contact firmware. 
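 * t4_fw_hello returns the mailbox of the PF that holds (or just won)
 * mastership.  If that's us we become the MASTER_PF and take care of
 * chip configuration further down.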
*/ 2761 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 2762 if (rc < 0 || state == DEV_STATE_ERR) { 2763 rc = -rc; 2764 device_printf(sc->dev, 2765 "failed to connect to the firmware: %d, %d.\n", rc, state); 2766 return (rc); 2767 } 2768 pf = rc; 2769 if (pf == sc->mbox) 2770 sc->flags |= MASTER_PF; 2771 else if (state == DEV_STATE_UNINIT) { 2772 /* 2773 * We didn't get to be the master so we definitely won't be 2774 * configuring the chip. It's a bug if someone else hasn't 2775 * configured it already. 2776 */ 2777 device_printf(sc->dev, "couldn't be master(%d), " 2778 "device not already initialized either(%d).\n", rc, state); 2779 return (EDOOFUS); 2780 } 2781 2782 /* This is the firmware whose headers the driver was compiled against */ 2783 fw_info = find_fw_info(chip_id(sc)); 2784 if (fw_info == NULL) { 2785 device_printf(sc->dev, 2786 "unable to look up firmware information for chip %d.\n", 2787 chip_id(sc)); 2788 return (EINVAL); 2789 } 2790 drv_fw = &fw_info->fw_hdr; 2791 2792 /* 2793 * The firmware KLD contains many modules. The KLD name is also the 2794 * name of the module that contains the default config file. 2795 */ 2796 default_cfg = firmware_get(fw_info->kld_name); 2797 2798 /* Read the header of the firmware on the card */ 2799 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 2800 rc = -t4_read_flash(sc, FLASH_FW_START, 2801 sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1); 2802 if (rc == 0) 2803 card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw); 2804 else { 2805 device_printf(sc->dev, 2806 "Unable to read card's firmware header: %d\n", rc); 2807 card_fw_usable = 0; 2808 } 2809 2810 /* This is the firmware in the KLD */ 2811 fw = firmware_get(fw_info->fw_mod_name); 2812 if (fw != NULL) { 2813 kld_fw = (const void *)fw->data; 2814 kld_fw_usable = fw_compatible(drv_fw, kld_fw); 2815 } else { 2816 kld_fw = NULL; 2817 kld_fw_usable = 0; 2818 } 2819 2820 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 2821 (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) { 2822 /* 2823 * Common case: the firmware on the card is an exact match and 2824 * the KLD is an exact match too, or the KLD is 2825 * absent/incompatible. Note that t4_fw_install = 2 is ignored 2826 * here -- use cxgbetool loadfw if you want to reinstall the 2827 * same firmware as the one on the card. 2828 */ 2829 } else if (kld_fw_usable && state == DEV_STATE_UNINIT && 2830 should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver), 2831 be32toh(card_fw->fw_ver))) { 2832 2833 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 2834 if (rc != 0) { 2835 device_printf(sc->dev, 2836 "failed to install firmware: %d\n", rc); 2837 goto done; 2838 } 2839 2840 /* Installed successfully, update the cached header too. */ 2841 memcpy(card_fw, kld_fw, sizeof(*card_fw)); 2842 card_fw_usable = 1; 2843 need_fw_reset = 0; /* already reset as part of load_fw */ 2844 } 2845 2846 if (!card_fw_usable) { 2847 uint32_t d, c, k; 2848 2849 d = ntohl(drv_fw->fw_ver); 2850 c = ntohl(card_fw->fw_ver); 2851 k = kld_fw ? 
ntohl(kld_fw->fw_ver) : 0; 2852 2853 device_printf(sc->dev, "Cannot find a usable firmware: " 2854 "fw_install %d, chip state %d, " 2855 "driver compiled with %d.%d.%d.%d, " 2856 "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n", 2857 t4_fw_install, state, 2858 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 2859 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d), 2860 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 2861 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), 2862 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 2863 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k)); 2864 rc = EINVAL; 2865 goto done; 2866 } 2867 2868 /* Reset device */ 2869 if (need_fw_reset && 2870 (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) { 2871 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 2872 if (rc != ETIMEDOUT && rc != EIO) 2873 t4_fw_bye(sc, sc->mbox); 2874 goto done; 2875 } 2876 sc->flags |= FW_OK; 2877 2878 rc = get_params__pre_init(sc); 2879 if (rc != 0) 2880 goto done; /* error message displayed already */ 2881 2882 /* Partition adapter resources as specified in the config file. */ 2883 if (state == DEV_STATE_UNINIT) { 2884 2885 KASSERT(sc->flags & MASTER_PF, 2886 ("%s: trying to change chip settings when not master.", 2887 __func__)); 2888 2889 rc = partition_resources(sc, default_cfg, fw_info->kld_name); 2890 if (rc != 0) 2891 goto done; /* error message displayed already */ 2892 2893 t4_tweak_chip_settings(sc); 2894 2895 /* get basic stuff going */ 2896 rc = -t4_fw_initialize(sc, sc->mbox); 2897 if (rc != 0) { 2898 device_printf(sc->dev, "fw init failed: %d.\n", rc); 2899 goto done; 2900 } 2901 } else { 2902 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf); 2903 sc->cfcsum = 0; 2904 } 2905 2906done: 2907 free(card_fw, M_CXGBE); 2908 if (fw != NULL) 2909 firmware_put(fw, FIRMWARE_UNLOAD); 2910 if (default_cfg != NULL) 2911 firmware_put(default_cfg, FIRMWARE_UNLOAD); 2912 2913 return (rc); 2914} 2915 2916#define FW_PARAM_DEV(param) \ 2917 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 2918 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 2919#define FW_PARAM_PFVF(param) \ 2920 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 2921 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 2922 2923/* 2924 * Partition chip resources for use between various PFs, VFs, etc. 2925 */ 2926static int 2927partition_resources(struct adapter *sc, const struct firmware *default_cfg, 2928 const char *name_prefix) 2929{ 2930 const struct firmware *cfg = NULL; 2931 int rc = 0; 2932 struct fw_caps_config_cmd caps; 2933 uint32_t mtype, moff, finicsum, cfcsum; 2934 2935 /* 2936 * Figure out what configuration file to use. Pick the default config 2937 * file for the card if the user hasn't specified one explicitly. 2938 */ 2939 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file); 2940 if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 2941 /* Card specific overrides go here. */ 2942 if (pci_get_device(sc->dev) == 0x440a) 2943 snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF); 2944 if (is_fpga(sc)) 2945 snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF); 2946 } 2947 2948 /* 2949 * We need to load another module if the profile is anything except 2950 * "default" or "flash". 
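 * The module name is kld_name with the profile appended; e.g. a
 * hypothetical profile "rdma" on a T5 card would come from a module
 * named "t5fw_cfg_rdma".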
2951 */ 2952 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 && 2953 strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2954 char s[32]; 2955 2956 snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file); 2957 cfg = firmware_get(s); 2958 if (cfg == NULL) { 2959 if (default_cfg != NULL) { 2960 device_printf(sc->dev, 2961 "unable to load module \"%s\" for " 2962 "configuration profile \"%s\", will use " 2963 "the default config file instead.\n", 2964 s, sc->cfg_file); 2965 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2966 "%s", DEFAULT_CF); 2967 } else { 2968 device_printf(sc->dev, 2969 "unable to load module \"%s\" for " 2970 "configuration profile \"%s\", will use " 2971 "the config file on the card's flash " 2972 "instead.\n", s, sc->cfg_file); 2973 snprintf(sc->cfg_file, sizeof(sc->cfg_file), 2974 "%s", FLASH_CF); 2975 } 2976 } 2977 } 2978 2979 if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 && 2980 default_cfg == NULL) { 2981 device_printf(sc->dev, 2982 "default config file not available, will use the config " 2983 "file on the card's flash instead.\n"); 2984 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF); 2985 } 2986 2987 if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) { 2988 u_int cflen; 2989 const uint32_t *cfdata; 2990 uint32_t param, val, addr; 2991 2992 KASSERT(cfg != NULL || default_cfg != NULL, 2993 ("%s: no config to upload", __func__)); 2994 2995 /* 2996 * Ask the firmware where it wants us to upload the config file. 2997 */ 2998 param = FW_PARAM_DEV(CF); 2999 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3000 if (rc != 0) { 3001 /* No support for config file? Shouldn't happen. */ 3002 device_printf(sc->dev, 3003 "failed to query config file location: %d.\n", rc); 3004 goto done; 3005 } 3006 mtype = G_FW_PARAMS_PARAM_Y(val); 3007 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 3008 3009 /* 3010 * XXX: sheer laziness. We deliberately added 4 bytes of 3011 * useless stuffing/comments at the end of the config file so 3012 * it's ok to simply throw away the last remaining bytes when 3013 * the config file is not an exact multiple of 4. This also 3014 * helps with the validate_mt_off_len check. 3015 */ 3016 if (cfg != NULL) { 3017 cflen = cfg->datasize & ~3; 3018 cfdata = cfg->data; 3019 } else { 3020 cflen = default_cfg->datasize & ~3; 3021 cfdata = default_cfg->data; 3022 } 3023 3024 if (cflen > FLASH_CFG_MAX_SIZE) { 3025 device_printf(sc->dev, 3026 "config file too long (%d, max allowed is %d). " 3027 "Will try to use the config on the card, if any.\n", 3028 cflen, FLASH_CFG_MAX_SIZE); 3029 goto use_config_on_flash; 3030 } 3031 3032 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 3033 if (rc != 0) { 3034 device_printf(sc->dev, 3035 "%s: addr (%d/0x%x) or len %d is not valid: %d. 
" 3036 "Will try to use the config on the card, if any.\n", 3037 __func__, mtype, moff, cflen, rc); 3038 goto use_config_on_flash; 3039 } 3040 write_via_memwin(sc, 2, addr, cfdata, cflen); 3041 } else { 3042use_config_on_flash: 3043 mtype = FW_MEMTYPE_FLASH; 3044 moff = t4_flash_cfg_addr(sc); 3045 } 3046 3047 bzero(&caps, sizeof(caps)); 3048 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3049 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3050 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 3051 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 3052 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps)); 3053 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3054 if (rc != 0) { 3055 device_printf(sc->dev, 3056 "failed to pre-process config file: %d " 3057 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 3058 goto done; 3059 } 3060 3061 finicsum = be32toh(caps.finicsum); 3062 cfcsum = be32toh(caps.cfcsum); 3063 if (finicsum != cfcsum) { 3064 device_printf(sc->dev, 3065 "WARNING: config file checksum mismatch: %08x %08x\n", 3066 finicsum, cfcsum); 3067 } 3068 sc->cfcsum = cfcsum; 3069 3070#define LIMIT_CAPS(x) do { \ 3071 caps.x &= htobe16(t4_##x##_allowed); \ 3072} while (0) 3073 3074 /* 3075 * Let the firmware know what features will (not) be used so it can tune 3076 * things accordingly. 3077 */ 3078 LIMIT_CAPS(nbmcaps); 3079 LIMIT_CAPS(linkcaps); 3080 LIMIT_CAPS(switchcaps); 3081 LIMIT_CAPS(niccaps); 3082 LIMIT_CAPS(toecaps); 3083 LIMIT_CAPS(rdmacaps); 3084 LIMIT_CAPS(cryptocaps); 3085 LIMIT_CAPS(iscsicaps); 3086 LIMIT_CAPS(fcoecaps); 3087#undef LIMIT_CAPS 3088 3089 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3090 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 3091 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3092 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 3093 if (rc != 0) { 3094 device_printf(sc->dev, 3095 "failed to process config file: %d.\n", rc); 3096 } 3097done: 3098 if (cfg != NULL) 3099 firmware_put(cfg, FIRMWARE_UNLOAD); 3100 return (rc); 3101} 3102 3103/* 3104 * Retrieve parameters that are needed (or nice to have) very early. 
3105 */ 3106static int 3107get_params__pre_init(struct adapter *sc) 3108{ 3109 int rc; 3110 uint32_t param[2], val[2]; 3111 3112 t4_get_version_info(sc); 3113 3114 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 3115 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 3116 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 3117 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 3118 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 3119 3120 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", 3121 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), 3122 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), 3123 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), 3124 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); 3125 3126 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", 3127 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 3128 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 3129 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 3130 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 3131 3132 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", 3133 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), 3134 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), 3135 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), 3136 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); 3137 3138 param[0] = FW_PARAM_DEV(PORTVEC); 3139 param[1] = FW_PARAM_DEV(CCLK); 3140 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3141 if (rc != 0) { 3142 device_printf(sc->dev, 3143 "failed to query parameters (pre_init): %d.\n", rc); 3144 return (rc); 3145 } 3146 3147 sc->params.portvec = val[0]; 3148 sc->params.nports = bitcount32(val[0]); 3149 sc->params.vpd.cclk = val[1]; 3150 3151 /* Read device log parameters. */ 3152 rc = -t4_init_devlog_params(sc, 1); 3153 if (rc == 0) 3154 fixup_devlog_params(sc); 3155 else { 3156 device_printf(sc->dev, 3157 "failed to get devlog parameters: %d.\n", rc); 3158 rc = 0; /* devlog isn't critical for device operation */ 3159 } 3160 3161 return (rc); 3162} 3163 3164/* 3165 * Retrieve various parameters that are of interest to the driver. The device 3166 * has been initialized by the firmware at this point. 
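 * Only now can the config-file dependent resources (TID ranges, L2T
 * size, capabilities, offload region sizes) be queried.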
3167 */ 3168static int 3169get_params__post_init(struct adapter *sc) 3170{ 3171 int rc; 3172 uint32_t param[7], val[7]; 3173 struct fw_caps_config_cmd caps; 3174 3175 param[0] = FW_PARAM_PFVF(IQFLINT_START); 3176 param[1] = FW_PARAM_PFVF(EQ_START); 3177 param[2] = FW_PARAM_PFVF(FILTER_START); 3178 param[3] = FW_PARAM_PFVF(FILTER_END); 3179 param[4] = FW_PARAM_PFVF(L2T_START); 3180 param[5] = FW_PARAM_PFVF(L2T_END); 3181 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3182 if (rc != 0) { 3183 device_printf(sc->dev, 3184 "failed to query parameters (post_init): %d.\n", rc); 3185 return (rc); 3186 } 3187 3188 sc->sge.iq_start = val[0]; 3189 sc->sge.eq_start = val[1]; 3190 sc->tids.ftid_base = val[2]; 3191 sc->tids.nftids = val[3] - val[2] + 1; 3192 sc->params.ftid_min = val[2]; 3193 sc->params.ftid_max = val[3]; 3194 sc->vres.l2t.start = val[4]; 3195 sc->vres.l2t.size = val[5] - val[4] + 1; 3196 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 3197 ("%s: L2 table size (%u) larger than expected (%u)", 3198 __func__, sc->vres.l2t.size, L2T_SIZE)); 3199 3200 /* get capabilities */ 3201 bzero(&caps, sizeof(caps)); 3202 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3203 F_FW_CMD_REQUEST | F_FW_CMD_READ); 3204 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 3205 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 3206 if (rc != 0) { 3207 device_printf(sc->dev, 3208 "failed to get card capabilities: %d.\n", rc); 3209 return (rc); 3210 } 3211 3212#define READ_CAPS(x) do { \ 3213 sc->x = htobe16(caps.x); \ 3214} while (0) 3215 READ_CAPS(nbmcaps); 3216 READ_CAPS(linkcaps); 3217 READ_CAPS(switchcaps); 3218 READ_CAPS(niccaps); 3219 READ_CAPS(toecaps); 3220 READ_CAPS(rdmacaps); 3221 READ_CAPS(cryptocaps); 3222 READ_CAPS(iscsicaps); 3223 READ_CAPS(fcoecaps); 3224 3225 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 3226 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 3227 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 3228 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3229 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 3230 if (rc != 0) { 3231 device_printf(sc->dev, 3232 "failed to query NIC parameters: %d.\n", rc); 3233 return (rc); 3234 } 3235 sc->tids.etid_base = val[0]; 3236 sc->params.etid_min = val[0]; 3237 sc->tids.netids = val[1] - val[0] + 1; 3238 sc->params.netids = sc->tids.netids; 3239 sc->params.eo_wr_cred = val[2]; 3240 sc->params.ethoffload = 1; 3241 } 3242 3243 if (sc->toecaps) { 3244 /* query offload-related parameters */ 3245 param[0] = FW_PARAM_DEV(NTID); 3246 param[1] = FW_PARAM_PFVF(SERVER_START); 3247 param[2] = FW_PARAM_PFVF(SERVER_END); 3248 param[3] = FW_PARAM_PFVF(TDDP_START); 3249 param[4] = FW_PARAM_PFVF(TDDP_END); 3250 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 3251 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3252 if (rc != 0) { 3253 device_printf(sc->dev, 3254 "failed to query TOE parameters: %d.\n", rc); 3255 return (rc); 3256 } 3257 sc->tids.ntids = val[0]; 3258 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 3259 sc->tids.stid_base = val[1]; 3260 sc->tids.nstids = val[2] - val[1] + 1; 3261 sc->vres.ddp.start = val[3]; 3262 sc->vres.ddp.size = val[4] - val[3] + 1; 3263 sc->params.ofldq_wr_cred = val[5]; 3264 sc->params.offload = 1; 3265 } 3266 if (sc->rdmacaps) { 3267 param[0] = FW_PARAM_PFVF(STAG_START); 3268 param[1] = FW_PARAM_PFVF(STAG_END); 3269 param[2] = FW_PARAM_PFVF(RQ_START); 3270 param[3] = FW_PARAM_PFVF(RQ_END); 3271 param[4] = FW_PARAM_PFVF(PBL_START); 3272 param[5] = FW_PARAM_PFVF(PBL_END); 3273 rc = 
-t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3274 if (rc != 0) { 3275 device_printf(sc->dev, 3276 "failed to query RDMA parameters(1): %d.\n", rc); 3277 return (rc); 3278 } 3279 sc->vres.stag.start = val[0]; 3280 sc->vres.stag.size = val[1] - val[0] + 1; 3281 sc->vres.rq.start = val[2]; 3282 sc->vres.rq.size = val[3] - val[2] + 1; 3283 sc->vres.pbl.start = val[4]; 3284 sc->vres.pbl.size = val[5] - val[4] + 1; 3285 3286 param[0] = FW_PARAM_PFVF(SQRQ_START); 3287 param[1] = FW_PARAM_PFVF(SQRQ_END); 3288 param[2] = FW_PARAM_PFVF(CQ_START); 3289 param[3] = FW_PARAM_PFVF(CQ_END); 3290 param[4] = FW_PARAM_PFVF(OCQ_START); 3291 param[5] = FW_PARAM_PFVF(OCQ_END); 3292 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 3293 if (rc != 0) { 3294 device_printf(sc->dev, 3295 "failed to query RDMA parameters(2): %d.\n", rc); 3296 return (rc); 3297 } 3298 sc->vres.qp.start = val[0]; 3299 sc->vres.qp.size = val[1] - val[0] + 1; 3300 sc->vres.cq.start = val[2]; 3301 sc->vres.cq.size = val[3] - val[2] + 1; 3302 sc->vres.ocq.start = val[4]; 3303 sc->vres.ocq.size = val[5] - val[4] + 1; 3304 3305 param[0] = FW_PARAM_PFVF(SRQ_START); 3306 param[1] = FW_PARAM_PFVF(SRQ_END); 3307 param[2] = FW_PARAM_DEV(MAXORDIRD_QP); 3308 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER); 3309 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); 3310 if (rc != 0) { 3311 device_printf(sc->dev, 3312 "failed to query RDMA parameters(3): %d.\n", rc); 3313 return (rc); 3314 } 3315 sc->vres.srq.start = val[0]; 3316 sc->vres.srq.size = val[1] - val[0] + 1; 3317 sc->params.max_ordird_qp = val[2]; 3318 sc->params.max_ird_adapter = val[3]; 3319 } 3320 if (sc->iscsicaps) { 3321 param[0] = FW_PARAM_PFVF(ISCSI_START); 3322 param[1] = FW_PARAM_PFVF(ISCSI_END); 3323 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 3324 if (rc != 0) { 3325 device_printf(sc->dev, 3326 "failed to query iSCSI parameters: %d.\n", rc); 3327 return (rc); 3328 } 3329 sc->vres.iscsi.start = val[0]; 3330 sc->vres.iscsi.size = val[1] - val[0] + 1; 3331 } 3332 3333 t4_init_sge_params(sc); 3334 3335 /* 3336 * We've got the params we wanted to query via the firmware. Now grab 3337 * some others directly from the chip. 
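 * (These are register-backed values; see t4_read_chip_settings below.)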
3338 */ 3339 rc = t4_read_chip_settings(sc); 3340 3341 return (rc); 3342} 3343 3344static int 3345set_params__post_init(struct adapter *sc) 3346{ 3347 uint32_t param, val; 3348 3349 /* ask for encapsulated CPLs */ 3350 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 3351 val = 1; 3352 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 3353 3354 return (0); 3355} 3356 3357#undef FW_PARAM_PFVF 3358#undef FW_PARAM_DEV 3359 3360static void 3361t4_set_desc(struct adapter *sc) 3362{ 3363 char buf[128]; 3364 struct adapter_params *p = &sc->params; 3365 3366 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id); 3367 3368 device_set_desc_copy(sc->dev, buf); 3369} 3370 3371static void 3372build_medialist(struct port_info *pi, struct ifmedia *media) 3373{ 3374 int m; 3375 3376 PORT_LOCK(pi); 3377 3378 ifmedia_removeall(media); 3379 3380 m = IFM_ETHER | IFM_FDX; 3381 3382 switch(pi->port_type) { 3383 case FW_PORT_TYPE_BT_XFI: 3384 case FW_PORT_TYPE_BT_XAUI: 3385 ifmedia_add(media, m | IFM_10G_T, 0, NULL); 3386 /* fall through */ 3387 3388 case FW_PORT_TYPE_BT_SGMII: 3389 ifmedia_add(media, m | IFM_1000_T, 0, NULL); 3390 ifmedia_add(media, m | IFM_100_TX, 0, NULL); 3391 ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); 3392 ifmedia_set(media, IFM_ETHER | IFM_AUTO); 3393 break; 3394 3395 case FW_PORT_TYPE_CX4: 3396 ifmedia_add(media, m | IFM_10G_CX4, 0, NULL); 3397 ifmedia_set(media, m | IFM_10G_CX4); 3398 break; 3399 3400 case FW_PORT_TYPE_QSFP_10G: 3401 case FW_PORT_TYPE_SFP: 3402 case FW_PORT_TYPE_FIBER_XFI: 3403 case FW_PORT_TYPE_FIBER_XAUI: 3404 switch (pi->mod_type) { 3405 3406 case FW_PORT_MOD_TYPE_LR: 3407 ifmedia_add(media, m | IFM_10G_LR, 0, NULL); 3408 ifmedia_set(media, m | IFM_10G_LR); 3409 break; 3410 3411 case FW_PORT_MOD_TYPE_SR: 3412 ifmedia_add(media, m | IFM_10G_SR, 0, NULL); 3413 ifmedia_set(media, m | IFM_10G_SR); 3414 break; 3415 3416 case FW_PORT_MOD_TYPE_LRM: 3417 ifmedia_add(media, m | IFM_10G_LRM, 0, NULL); 3418 ifmedia_set(media, m | IFM_10G_LRM); 3419 break; 3420 3421 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3422 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3423 ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL); 3424 ifmedia_set(media, m | IFM_10G_TWINAX); 3425 break; 3426 3427 case FW_PORT_MOD_TYPE_NONE: 3428 m &= ~IFM_FDX; 3429 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3430 ifmedia_set(media, m | IFM_NONE); 3431 break; 3432 3433 case FW_PORT_MOD_TYPE_NA: 3434 case FW_PORT_MOD_TYPE_ER: 3435 default: 3436 device_printf(pi->dev, 3437 "unknown port_type (%d), mod_type (%d)\n", 3438 pi->port_type, pi->mod_type); 3439 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3440 ifmedia_set(media, m | IFM_UNKNOWN); 3441 break; 3442 } 3443 break; 3444 3445 case FW_PORT_TYPE_CR_QSFP: 3446 case FW_PORT_TYPE_SFP28: 3447 case FW_PORT_TYPE_KR_SFP28: 3448 switch (pi->mod_type) { 3449 3450 case FW_PORT_MOD_TYPE_SR: 3451 ifmedia_add(media, m | IFM_25G_SR, 0, NULL); 3452 ifmedia_set(media, m | IFM_25G_SR); 3453 break; 3454 3455 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3456 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3457 ifmedia_add(media, m | IFM_25G_CR, 0, NULL); 3458 ifmedia_set(media, m | IFM_25G_CR); 3459 break; 3460 3461 case FW_PORT_MOD_TYPE_NONE: 3462 m &= ~IFM_FDX; 3463 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3464 ifmedia_set(media, m | IFM_NONE); 3465 break; 3466 3467 default: 3468 device_printf(pi->dev, 3469 "unknown port_type (%d), mod_type (%d)\n", 3470 pi->port_type, pi->mod_type); 3471 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3472 ifmedia_set(media, m | IFM_UNKNOWN); 3473 break; 3474 } 3475 break; 
3476 3477 case FW_PORT_TYPE_QSFP: 3478 switch (pi->mod_type) { 3479 3480 case FW_PORT_MOD_TYPE_LR: 3481 ifmedia_add(media, m | IFM_40G_LR4, 0, NULL); 3482 ifmedia_set(media, m | IFM_40G_LR4); 3483 break; 3484 3485 case FW_PORT_MOD_TYPE_SR: 3486 ifmedia_add(media, m | IFM_40G_SR4, 0, NULL); 3487 ifmedia_set(media, m | IFM_40G_SR4); 3488 break; 3489 3490 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3491 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3492 ifmedia_add(media, m | IFM_40G_CR4, 0, NULL); 3493 ifmedia_set(media, m | IFM_40G_CR4); 3494 break; 3495 3496 case FW_PORT_MOD_TYPE_NONE: 3497 m &= ~IFM_FDX; 3498 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3499 ifmedia_set(media, m | IFM_NONE); 3500 break; 3501 3502 default: 3503 device_printf(pi->dev, 3504 "unknown port_type (%d), mod_type (%d)\n", 3505 pi->port_type, pi->mod_type); 3506 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3507 ifmedia_set(media, m | IFM_UNKNOWN); 3508 break; 3509 } 3510 break; 3511 3512 case FW_PORT_TYPE_KR4_100G: 3513 case FW_PORT_TYPE_CR4_QSFP: 3514 switch (pi->mod_type) { 3515 3516 case FW_PORT_MOD_TYPE_LR: 3517 ifmedia_add(media, m | IFM_100G_LR4, 0, NULL); 3518 ifmedia_set(media, m | IFM_100G_LR4); 3519 break; 3520 3521 case FW_PORT_MOD_TYPE_SR: 3522 ifmedia_add(media, m | IFM_100G_SR4, 0, NULL); 3523 ifmedia_set(media, m | IFM_100G_SR4); 3524 break; 3525 3526 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 3527 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 3528 ifmedia_add(media, m | IFM_100G_CR4, 0, NULL); 3529 ifmedia_set(media, m | IFM_100G_CR4); 3530 break; 3531 3532 case FW_PORT_MOD_TYPE_NONE: 3533 m &= ~IFM_FDX; 3534 ifmedia_add(media, m | IFM_NONE, 0, NULL); 3535 ifmedia_set(media, m | IFM_NONE); 3536 break; 3537 3538 default: 3539 device_printf(pi->dev, 3540 "unknown port_type (%d), mod_type (%d)\n", 3541 pi->port_type, pi->mod_type); 3542 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3543 ifmedia_set(media, m | IFM_UNKNOWN); 3544 break; 3545 } 3546 break; 3547 3548 default: 3549 device_printf(pi->dev, 3550 "unknown port_type (%d), mod_type (%d)\n", pi->port_type, 3551 pi->mod_type); 3552 ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL); 3553 ifmedia_set(media, m | IFM_UNKNOWN); 3554 break; 3555 } 3556 3557 PORT_UNLOCK(pi); 3558} 3559 3560#define FW_MAC_EXACT_CHUNK 7 3561 3562/* 3563 * Program the port's XGMAC based on parameters in ifnet. The caller also 3564 * indicates which parameters should be programmed (the rest are left alone). 3565 */ 3566int 3567update_mac_settings(struct ifnet *ifp, int flags) 3568{ 3569 int rc = 0; 3570 struct vi_info *vi = ifp->if_softc; 3571 struct port_info *pi = vi->pi; 3572 struct adapter *sc = pi->adapter; 3573 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 3574 3575 ASSERT_SYNCHRONIZED_OP(sc); 3576 KASSERT(flags, ("%s: not told what to update.", __func__)); 3577 3578 if (flags & XGMAC_MTU) 3579 mtu = ifp->if_mtu; 3580 3581 if (flags & XGMAC_PROMISC) 3582 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 3583 3584 if (flags & XGMAC_ALLMULTI) 3585 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 3586 3587 if (flags & XGMAC_VLANEX) 3588 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 
1 : 0; 3589 3590 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 3591 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 3592 allmulti, 1, vlanex, false); 3593 if (rc) { 3594 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 3595 rc); 3596 return (rc); 3597 } 3598 } 3599 3600 if (flags & XGMAC_UCADDR) { 3601 uint8_t ucaddr[ETHER_ADDR_LEN]; 3602 3603 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 3604 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 3605 ucaddr, true, true); 3606 if (rc < 0) { 3607 rc = -rc; 3608 if_printf(ifp, "change_mac failed: %d\n", rc); 3609 return (rc); 3610 } else { 3611 vi->xact_addr_filt = rc; 3612 rc = 0; 3613 } 3614 } 3615 3616 if (flags & XGMAC_MCADDRS) { 3617 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 3618 int del = 1; 3619 uint64_t hash = 0; 3620 struct ifmultiaddr *ifma; 3621 int i = 0, j; 3622 3623 if_maddr_rlock(ifp); 3624 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3625 if (ifma->ifma_addr->sa_family != AF_LINK) 3626 continue; 3627 mcaddr[i] = 3628 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 3629 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 3630 i++; 3631 3632 if (i == FW_MAC_EXACT_CHUNK) { 3633 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 3634 del, i, mcaddr, NULL, &hash, 0); 3635 if (rc < 0) { 3636 rc = -rc; 3637 for (j = 0; j < i; j++) { 3638 if_printf(ifp, 3639 "failed to add mc address" 3640 " %02x:%02x:%02x:" 3641 "%02x:%02x:%02x rc=%d\n", 3642 mcaddr[j][0], mcaddr[j][1], 3643 mcaddr[j][2], mcaddr[j][3], 3644 mcaddr[j][4], mcaddr[j][5], 3645 rc); 3646 } 3647 goto mcfail; 3648 } 3649 del = 0; 3650 i = 0; 3651 } 3652 } 3653 if (i > 0) { 3654 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 3655 mcaddr, NULL, &hash, 0); 3656 if (rc < 0) { 3657 rc = -rc; 3658 for (j = 0; j < i; j++) { 3659 if_printf(ifp, 3660 "failed to add mc address" 3661 " %02x:%02x:%02x:" 3662 "%02x:%02x:%02x rc=%d\n", 3663 mcaddr[j][0], mcaddr[j][1], 3664 mcaddr[j][2], mcaddr[j][3], 3665 mcaddr[j][4], mcaddr[j][5], 3666 rc); 3667 } 3668 goto mcfail; 3669 } 3670 } 3671 3672 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 3673 if (rc != 0) 3674 if_printf(ifp, "failed to set mc address hash: %d\n", rc); 3675mcfail: 3676 if_maddr_runlock(ifp); 3677 } 3678 3679 return (rc); 3680} 3681 3682/* 3683 * {begin|end}_synchronized_op must be called from the same thread. 3684 */ 3685int 3686begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 3687 char *wmesg) 3688{ 3689 int rc, pri; 3690 3691#ifdef WITNESS 3692 /* the caller thinks it's ok to sleep, but is it really? */ 3693 if (flags & SLEEP_OK) 3694 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 3695 "begin_synchronized_op"); 3696#endif 3697 3698 if (flags & INTR_OK) 3699 pri = PCATCH; 3700 else 3701 pri = 0; 3702 3703 ADAPTER_LOCK(sc); 3704 for (;;) { 3705 3706 if (vi && IS_DOOMED(vi)) { 3707 rc = ENXIO; 3708 goto done; 3709 } 3710 3711 if (!IS_BUSY(sc)) { 3712 rc = 0; 3713 break; 3714 } 3715 3716 if (!(flags & SLEEP_OK)) { 3717 rc = EBUSY; 3718 goto done; 3719 } 3720 3721 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 3722 rc = EINTR; 3723 goto done; 3724 } 3725 } 3726 3727 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 3728 SET_BUSY(sc); 3729#ifdef INVARIANTS 3730 sc->last_op = wmesg; 3731 sc->last_op_thr = curthread; 3732 sc->last_op_flags = flags; 3733#endif 3734 3735done: 3736 if (!(flags & HOLD_LOCK) || rc) 3737 ADAPTER_UNLOCK(sc); 3738 3739 return (rc); 3740} 3741 3742/* 3743 * Tell if_ioctl and if_init that the VI is going away. 
This is a 3744 * special variant of begin_synchronized_op and must be paired with a 3745 * call to end_synchronized_op. 3746 */ 3747void 3748doom_vi(struct adapter *sc, struct vi_info *vi) 3749{ 3750 3751 ADAPTER_LOCK(sc); 3752 SET_DOOMED(vi); 3753 wakeup(&sc->flags); 3754 while (IS_BUSY(sc)) 3755 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 3756 SET_BUSY(sc); 3757#ifdef INVARIANTS 3758 sc->last_op = "t4detach"; 3759 sc->last_op_thr = curthread; 3760 sc->last_op_flags = 0; 3761#endif 3762 ADAPTER_UNLOCK(sc); 3763} 3764 3765/* 3766 * {begin|end}_synchronized_op must be called from the same thread. 3767 */ 3768void 3769end_synchronized_op(struct adapter *sc, int flags) 3770{ 3771 3772 if (flags & LOCK_HELD) 3773 ADAPTER_LOCK_ASSERT_OWNED(sc); 3774 else 3775 ADAPTER_LOCK(sc); 3776 3777 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 3778 CLR_BUSY(sc); 3779 wakeup(&sc->flags); 3780 ADAPTER_UNLOCK(sc); 3781} 3782 3783static int 3784cxgbe_init_synchronized(struct vi_info *vi) 3785{ 3786 struct port_info *pi = vi->pi; 3787 struct adapter *sc = pi->adapter; 3788 struct ifnet *ifp = vi->ifp; 3789 int rc = 0, i; 3790 struct sge_txq *txq; 3791 3792 ASSERT_SYNCHRONIZED_OP(sc); 3793 3794 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3795 return (0); /* already running */ 3796 3797 if (!(sc->flags & FULL_INIT_DONE) && 3798 ((rc = adapter_full_init(sc)) != 0)) 3799 return (rc); /* error message displayed already */ 3800 3801 if (!(vi->flags & VI_INIT_DONE) && 3802 ((rc = vi_full_init(vi)) != 0)) 3803 return (rc); /* error message displayed already */ 3804 3805 rc = update_mac_settings(ifp, XGMAC_ALL); 3806 if (rc) 3807 goto done; /* error message displayed already */ 3808 3809 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 3810 if (rc != 0) { 3811 if_printf(ifp, "enable_vi failed: %d\n", rc); 3812 goto done; 3813 } 3814 3815 /* 3816 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 3817 * if this changes. 3818 */ 3819 3820 for_each_txq(vi, i, txq) { 3821 TXQ_LOCK(txq); 3822 txq->eq.flags |= EQ_ENABLED; 3823 TXQ_UNLOCK(txq); 3824 } 3825 3826 /* 3827 * The first iq of the first port to come up is used for tracing. 3828 */ 3829 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 3830 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 3831 t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL : 3832 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 3833 V_QUEUENUMBER(sc->traceq)); 3834 pi->flags |= HAS_TRACEQ; 3835 } 3836 3837 /* all ok */ 3838 PORT_LOCK(pi); 3839 ifp->if_drv_flags |= IFF_DRV_RUNNING; 3840 pi->up_vis++; 3841 3842 if (pi->nvi > 1 || sc->flags & IS_VF) 3843 callout_reset(&vi->tick, hz, vi_tick, vi); 3844 else 3845 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 3846 PORT_UNLOCK(pi); 3847done: 3848 if (rc != 0) 3849 cxgbe_uninit_synchronized(vi); 3850 3851 return (rc); 3852} 3853 3854/* 3855 * Idempotent. 3856 */ 3857static int 3858cxgbe_uninit_synchronized(struct vi_info *vi) 3859{ 3860 struct port_info *pi = vi->pi; 3861 struct adapter *sc = pi->adapter; 3862 struct ifnet *ifp = vi->ifp; 3863 int rc, i; 3864 struct sge_txq *txq; 3865 3866 ASSERT_SYNCHRONIZED_OP(sc); 3867 3868 if (!(vi->flags & VI_INIT_DONE)) { 3869 KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING), 3870 ("uninited VI is running")); 3871 return (0); 3872 } 3873 3874 /* 3875 * Disable the VI so that all its data in either direction is discarded 3876 * by the MPS. 
Leave everything else (the queues, interrupts, and 1Hz 3877 * tick) intact as the TP can deliver negative advice or data that it's 3878 * holding in its RAM (for an offloaded connection) even after the VI is 3879 * disabled. 3880 */ 3881 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 3882 if (rc) { 3883 if_printf(ifp, "disable_vi failed: %d\n", rc); 3884 return (rc); 3885 } 3886 3887 for_each_txq(vi, i, txq) { 3888 TXQ_LOCK(txq); 3889 txq->eq.flags &= ~EQ_ENABLED; 3890 TXQ_UNLOCK(txq); 3891 } 3892 3893 PORT_LOCK(pi); 3894 if (pi->nvi > 1 || sc->flags & IS_VF) 3895 callout_stop(&vi->tick); 3896 else 3897 callout_stop(&pi->tick); 3898 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3899 PORT_UNLOCK(pi); 3900 return (0); 3901 } 3902 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3903 pi->up_vis--; 3904 if (pi->up_vis > 0) { 3905 PORT_UNLOCK(pi); 3906 return (0); 3907 } 3908 PORT_UNLOCK(pi); 3909 3910 pi->link_cfg.link_ok = 0; 3911 pi->link_cfg.speed = 0; 3912 pi->link_cfg.link_down_rc = 255; 3913 t4_os_link_changed(sc, pi->port_id, 0); 3914 3915 return (0); 3916} 3917 3918/* 3919 * It is ok for this function to fail midway and return right away. t4_detach 3920 * will walk the entire sc->irq list and clean up whatever is valid. 3921 */ 3922int 3923t4_setup_intr_handlers(struct adapter *sc) 3924{ 3925 int rc, rid, p, q, v; 3926 char s[8]; 3927 struct irq *irq; 3928 struct port_info *pi; 3929 struct vi_info *vi; 3930 struct sge *sge = &sc->sge; 3931 struct sge_rxq *rxq; 3932#ifdef TCP_OFFLOAD 3933 struct sge_ofld_rxq *ofld_rxq; 3934#endif 3935#ifdef DEV_NETMAP 3936 struct sge_nm_rxq *nm_rxq; 3937#endif 3938 3939 /* 3940 * Setup interrupts. 3941 */ 3942 irq = &sc->irq[0]; 3943 rid = sc->intr_type == INTR_INTX ? 0 : 1; 3944 if (sc->intr_count == 1) 3945 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 3946 3947 /* Multiple interrupts. 
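 * The layout below is: one vector for errors (PFs only), one for the
 * firmware event queue, and then one per rx queue (NIC, TOE, or netmap)
 * of every VI.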
*/ 3948 if (sc->flags & IS_VF) 3949 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, 3950 ("%s: too few intr.", __func__)); 3951 else 3952 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 3953 ("%s: too few intr.", __func__)); 3954 3955 /* The first one is always error intr on PFs */ 3956 if (!(sc->flags & IS_VF)) { 3957 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 3958 if (rc != 0) 3959 return (rc); 3960 irq++; 3961 rid++; 3962 } 3963 3964 /* The second one is always the firmware event queue (first on VFs) */ 3965 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); 3966 if (rc != 0) 3967 return (rc); 3968 irq++; 3969 rid++; 3970 3971 for_each_port(sc, p) { 3972 pi = sc->port[p]; 3973 for_each_vi(pi, v, vi) { 3974 vi->first_intr = rid - 1; 3975 3976 if (vi->nnmrxq > 0) { 3977 int n = max(vi->nrxq, vi->nnmrxq); 3978 3979 MPASS(vi->flags & INTR_RXQ); 3980 3981 rxq = &sge->rxq[vi->first_rxq]; 3982#ifdef DEV_NETMAP 3983 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; 3984#endif 3985 for (q = 0; q < n; q++) { 3986 snprintf(s, sizeof(s), "%x%c%x", p, 3987 'a' + v, q); 3988 if (q < vi->nrxq) 3989 irq->rxq = rxq++; 3990#ifdef DEV_NETMAP 3991 if (q < vi->nnmrxq) 3992 irq->nm_rxq = nm_rxq++; 3993#endif 3994 rc = t4_alloc_irq(sc, irq, rid, 3995 t4_vi_intr, irq, s); 3996 if (rc != 0) 3997 return (rc); 3998 irq++; 3999 rid++; 4000 vi->nintr++; 4001 } 4002 } else if (vi->flags & INTR_RXQ) { 4003 for_each_rxq(vi, q, rxq) { 4004 snprintf(s, sizeof(s), "%x%c%x", p, 4005 'a' + v, q); 4006 rc = t4_alloc_irq(sc, irq, rid, 4007 t4_intr, rxq, s); 4008 if (rc != 0) 4009 return (rc); 4010 irq++; 4011 rid++; 4012 vi->nintr++; 4013 } 4014 } 4015#ifdef TCP_OFFLOAD 4016 if (vi->flags & INTR_OFLD_RXQ) { 4017 for_each_ofld_rxq(vi, q, ofld_rxq) { 4018 snprintf(s, sizeof(s), "%x%c%x", p, 4019 'A' + v, q); 4020 rc = t4_alloc_irq(sc, irq, rid, 4021 t4_intr, ofld_rxq, s); 4022 if (rc != 0) 4023 return (rc); 4024 irq++; 4025 rid++; 4026 vi->nintr++; 4027 } 4028 } 4029#endif 4030 } 4031 } 4032 MPASS(irq == &sc->irq[sc->intr_count]); 4033 4034 return (0); 4035} 4036 4037int 4038adapter_full_init(struct adapter *sc) 4039{ 4040 int rc, i; 4041 4042 ASSERT_SYNCHRONIZED_OP(sc); 4043 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4044 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 4045 ("%s: FULL_INIT_DONE already", __func__)); 4046 4047 /* 4048 * queues that belong to the adapter (not any particular port). 
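Chief among
 * these is the firmware event queue; see t4_setup_adapter_queues() in
 * t4_sge.c for the full set.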
4049 */ 4050 rc = t4_setup_adapter_queues(sc); 4051 if (rc != 0) 4052 goto done; 4053 4054 for (i = 0; i < nitems(sc->tq); i++) { 4055 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 4056 taskqueue_thread_enqueue, &sc->tq[i]); 4057 if (sc->tq[i] == NULL) { 4058 device_printf(sc->dev, 4059 "failed to allocate task queue %d\n", i); 4060 rc = ENOMEM; 4061 goto done; 4062 } 4063 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 4064 device_get_nameunit(sc->dev), i); 4065 } 4066 4067 if (!(sc->flags & IS_VF)) 4068 t4_intr_enable(sc); 4069 sc->flags |= FULL_INIT_DONE; 4070done: 4071 if (rc != 0) 4072 adapter_full_uninit(sc); 4073 4074 return (rc); 4075} 4076 4077int 4078adapter_full_uninit(struct adapter *sc) 4079{ 4080 int i; 4081 4082 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 4083 4084 t4_teardown_adapter_queues(sc); 4085 4086 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 4087 taskqueue_free(sc->tq[i]); 4088 sc->tq[i] = NULL; 4089 } 4090 4091 sc->flags &= ~FULL_INIT_DONE; 4092 4093 return (0); 4094} 4095 4096#ifdef RSS 4097#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 4098 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 4099 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 4100 RSS_HASHTYPE_RSS_UDP_IPV6) 4101 4102/* Translates kernel hash types to hardware. */ 4103static int 4104hashconfig_to_hashen(int hashconfig) 4105{ 4106 int hashen = 0; 4107 4108 if (hashconfig & RSS_HASHTYPE_RSS_IPV4) 4109 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 4110 if (hashconfig & RSS_HASHTYPE_RSS_IPV6) 4111 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 4112 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { 4113 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4114 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4115 } 4116 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { 4117 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 4118 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4119 } 4120 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) 4121 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 4122 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) 4123 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 4124 4125 return (hashen); 4126} 4127 4128/* Translates hardware hash types to kernel. */ 4129static int 4130hashen_to_hashconfig(int hashen) 4131{ 4132 int hashconfig = 0; 4133 4134 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { 4135 /* 4136 * If UDP hashing was enabled it must have been enabled for 4137 * either IPv4 or IPv6 (inclusive or). Enabling UDP without 4138 * enabling any 4-tuple hash is nonsense configuration. 
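As a worked
 * example of the mapping below: UDPEN | IP4FOURTUPEN translates back to
 * RSS_HASHTYPE_RSS_UDP_IPV4 | RSS_HASHTYPE_RSS_TCP_IPV4, because the
 * single 4-tuple enable bit for an IP version covers both TCP and UDP.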
4139 */ 4140 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4141 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); 4142 4143 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4144 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; 4145 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4146 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; 4147 } 4148 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 4149 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; 4150 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 4151 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; 4152 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 4153 hashconfig |= RSS_HASHTYPE_RSS_IPV4; 4154 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 4155 hashconfig |= RSS_HASHTYPE_RSS_IPV6; 4156 4157 return (hashconfig); 4158} 4159#endif 4160 4161int 4162vi_full_init(struct vi_info *vi) 4163{ 4164 struct adapter *sc = vi->pi->adapter; 4165 struct ifnet *ifp = vi->ifp; 4166 uint16_t *rss; 4167 struct sge_rxq *rxq; 4168 int rc, i, j, hashen; 4169#ifdef RSS 4170 int nbuckets = rss_getnumbuckets(); 4171 int hashconfig = rss_gethashconfig(); 4172 int extra; 4173 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4174 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 4175#endif 4176 4177 ASSERT_SYNCHRONIZED_OP(sc); 4178 KASSERT((vi->flags & VI_INIT_DONE) == 0, 4179 ("%s: VI_INIT_DONE already", __func__)); 4180 4181 sysctl_ctx_init(&vi->ctx); 4182 vi->flags |= VI_SYSCTL_CTX; 4183 4184 /* 4185 * Allocate tx/rx/fl queues for this VI. 4186 */ 4187 rc = t4_setup_vi_queues(vi); 4188 if (rc != 0) 4189 goto done; /* error message displayed already */ 4190 4191 /* 4192 * Setup RSS for this VI. Save a copy of the RSS table for later use. 4193 */ 4194 if (vi->nrxq > vi->rss_size) { 4195 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " 4196 "some queues will never receive traffic.\n", vi->nrxq, 4197 vi->rss_size); 4198 } else if (vi->rss_size % vi->nrxq) { 4199 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); " 4200 "expect uneven traffic distribution.\n", vi->nrxq, 4201 vi->rss_size); 4202 } 4203#ifdef RSS 4204 MPASS(RSS_KEYSIZE == 40); 4205 if (vi->nrxq != nbuckets) { 4206 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);" 4207 "performance will be impacted.\n", vi->nrxq, nbuckets); 4208 } 4209 4210 rss_getkey((void *)&raw_rss_key[0]); 4211 for (i = 0; i < nitems(rss_key); i++) { 4212 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); 4213 } 4214 t4_write_rss_key(sc, &rss_key[0], -1); 4215#endif 4216 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 4217 for (i = 0; i < vi->rss_size;) { 4218#ifdef RSS 4219 j = rss_get_indirection_to_bucket(i); 4220 j %= vi->nrxq; 4221 rxq = &sc->sge.rxq[vi->first_rxq + j]; 4222 rss[i++] = rxq->iq.abs_id; 4223#else 4224 for_each_rxq(vi, j, rxq) { 4225 rss[i++] = rxq->iq.abs_id; 4226 if (i == vi->rss_size) 4227 break; 4228 } 4229#endif 4230 } 4231 4232 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 4233 vi->rss_size); 4234 if (rc != 0) { 4235 if_printf(ifp, "rss_config failed: %d\n", rc); 4236 goto done; 4237 } 4238 4239#ifdef RSS 4240 hashen = hashconfig_to_hashen(hashconfig); 4241 4242 /* 4243 * We may have had to enable some hashes even though the global config 4244 * wants them disabled. This is a potential problem that must be 4245 * reported to the user. 4246 */ 4247 extra = hashen_to_hashconfig(hashen) ^ hashconfig; 4248 4249 /* 4250 * If we consider only the supported hash types, then the enabled hashes 4251 * are a superset of the requested hashes. 
In other words, there cannot 4252 * be any supported hash that was requested but not enabled, but there 4253 * can be hashes that were not requested but had to be enabled. 4254 */ 4255 extra &= SUPPORTED_RSS_HASHTYPES; 4256 MPASS((extra & hashconfig) == 0); 4257 4258 if (extra) { 4259 if_printf(ifp, 4260 "global RSS config (0x%x) cannot be accommodated.\n", 4261 hashconfig); 4262 } 4263 if (extra & RSS_HASHTYPE_RSS_IPV4) 4264 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 4265 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 4266 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 4267 if (extra & RSS_HASHTYPE_RSS_IPV6) 4268 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 4269 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 4270 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 4271 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 4272 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 4273 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 4274 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 4275#else 4276 hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 4277 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 4278 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 4279 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 4280#endif 4281 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0); 4282 if (rc != 0) { 4283 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 4284 goto done; 4285 } 4286 4287 vi->rss = rss; 4288 vi->flags |= VI_INIT_DONE; 4289done: 4290 if (rc != 0) 4291 vi_full_uninit(vi); 4292 4293 return (rc); 4294} 4295 4296/* 4297 * Idempotent. 4298 */ 4299int 4300vi_full_uninit(struct vi_info *vi) 4301{ 4302 struct port_info *pi = vi->pi; 4303 struct adapter *sc = pi->adapter; 4304 int i; 4305 struct sge_rxq *rxq; 4306 struct sge_txq *txq; 4307#ifdef TCP_OFFLOAD 4308 struct sge_ofld_rxq *ofld_rxq; 4309 struct sge_wrq *ofld_txq; 4310#endif 4311 4312 if (vi->flags & VI_INIT_DONE) { 4313 4314 /* Need to quiesce queues. */ 4315 4316 /* XXX: Only for the first VI? */ 4317 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) 4318 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 4319 4320 for_each_txq(vi, i, txq) { 4321 quiesce_txq(sc, txq); 4322 } 4323 4324#ifdef TCP_OFFLOAD 4325 for_each_ofld_txq(vi, i, ofld_txq) { 4326 quiesce_wrq(sc, ofld_txq); 4327 } 4328#endif 4329 4330 for_each_rxq(vi, i, rxq) { 4331 quiesce_iq(sc, &rxq->iq); 4332 quiesce_fl(sc, &rxq->fl); 4333 } 4334 4335#ifdef TCP_OFFLOAD 4336 for_each_ofld_rxq(vi, i, ofld_rxq) { 4337 quiesce_iq(sc, &ofld_rxq->iq); 4338 quiesce_fl(sc, &ofld_rxq->fl); 4339 } 4340#endif 4341 free(vi->rss, M_CXGBE); 4342 free(vi->nm_rss, M_CXGBE); 4343 } 4344 4345 t4_teardown_vi_queues(vi); 4346 vi->flags &= ~VI_INIT_DONE; 4347 4348 return (0); 4349} 4350 4351static void 4352quiesce_txq(struct adapter *sc, struct sge_txq *txq) 4353{ 4354 struct sge_eq *eq = &txq->eq; 4355 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4356 4357 (void) sc; /* unused */ 4358 4359#ifdef INVARIANTS 4360 TXQ_LOCK(txq); 4361 MPASS((eq->flags & EQ_ENABLED) == 0); 4362 TXQ_UNLOCK(txq); 4363#endif 4364 4365 /* Wait for the mp_ring to empty. */ 4366 while (!mp_ring_is_idle(txq->r)) { 4367 mp_ring_check_drainage(txq->r, 0); 4368 pause("rquiesce", 1); 4369 } 4370 4371 /* Then wait for the hardware to finish. */ 4372 while (spg->cidx != htobe16(eq->pidx)) 4373 pause("equiesce", 1); 4374 4375 /* Finally, wait for the driver to reclaim all descriptors. 
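The three
 * waits drain the queue front to back: the software mp_ring goes idle
 * first, then the hardware's consumer index (read from the status page)
 * catches up with the producer index, and finally eq->cidx == eq->pidx
 * once tx completion handling has reclaimed every sent descriptor.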
*/ 4376 while (eq->cidx != eq->pidx) 4377 pause("dquiesce", 1); 4378} 4379 4380static void 4381quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 4382{ 4383 4384 /* XXXTX */ 4385} 4386 4387static void 4388quiesce_iq(struct adapter *sc, struct sge_iq *iq) 4389{ 4390 (void) sc; /* unused */ 4391 4392 /* Synchronize with the interrupt handler */ 4393 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 4394 pause("iqfree", 1); 4395} 4396 4397static void 4398quiesce_fl(struct adapter *sc, struct sge_fl *fl) 4399{ 4400 mtx_lock(&sc->sfl_lock); 4401 FL_LOCK(fl); 4402 fl->flags |= FL_DOOMED; 4403 FL_UNLOCK(fl); 4404 callout_stop(&sc->sfl_callout); 4405 mtx_unlock(&sc->sfl_lock); 4406 4407 KASSERT((fl->flags & FL_STARVING) == 0, 4408 ("%s: still starving", __func__)); 4409} 4410 4411static int 4412t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 4413 driver_intr_t *handler, void *arg, char *name) 4414{ 4415 int rc; 4416 4417 irq->rid = rid; 4418 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 4419 RF_SHAREABLE | RF_ACTIVE); 4420 if (irq->res == NULL) { 4421 device_printf(sc->dev, 4422 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 4423 return (ENOMEM); 4424 } 4425 4426 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 4427 NULL, handler, arg, &irq->tag); 4428 if (rc != 0) { 4429 device_printf(sc->dev, 4430 "failed to setup interrupt for rid %d, name %s: %d\n", 4431 rid, name, rc); 4432 } else if (name) 4433 bus_describe_intr(sc->dev, irq->res, irq->tag, name); 4434 4435 return (rc); 4436} 4437 4438static int 4439t4_free_irq(struct adapter *sc, struct irq *irq) 4440{ 4441 if (irq->tag) 4442 bus_teardown_intr(sc->dev, irq->res, irq->tag); 4443 if (irq->res) 4444 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 4445 4446 bzero(irq, sizeof(*irq)); 4447 4448 return (0); 4449} 4450 4451static void 4452get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 4453{ 4454 4455 regs->version = chip_id(sc) | chip_rev(sc) << 10; 4456 t4_get_regs(sc, buf, regs->len); 4457} 4458 4459#define A_PL_INDIR_CMD 0x1f8 4460 4461#define S_PL_AUTOINC 31 4462#define M_PL_AUTOINC 0x1U 4463#define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 4464#define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 4465 4466#define S_PL_VFID 20 4467#define M_PL_VFID 0xffU 4468#define V_PL_VFID(x) ((x) << S_PL_VFID) 4469#define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 4470 4471#define S_PL_ADDR 0 4472#define M_PL_ADDR 0xfffffU 4473#define V_PL_ADDR(x) ((x) << S_PL_ADDR) 4474#define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 4475 4476#define A_PL_INDIR_DATA 0x1fc 4477 4478static uint64_t 4479read_vf_stat(struct adapter *sc, unsigned int viid, int reg) 4480{ 4481 u32 stats[2]; 4482 4483 mtx_assert(&sc->reg_lock, MA_OWNED); 4484 if (sc->flags & IS_VF) { 4485 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); 4486 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); 4487 } else { 4488 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4489 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4490 V_PL_ADDR(VF_MPS_REG(reg))); 4491 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 4492 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 4493 } 4494 return (((uint64_t)stats[1]) << 32 | stats[0]); 4495} 4496 4497static void 4498t4_get_vi_stats(struct adapter *sc, unsigned int viid, 4499 struct fw_vi_stats_vf *stats) 4500{ 4501 4502#define GET_STAT(name) \ 4503 read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L) 4504 4505 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 4506 
stats->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES); 4507 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 4508 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 4509 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 4510 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 4511 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 4512 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 4513 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 4514 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 4515 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 4516 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 4517 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 4518 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 4519 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 4520 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 4521 4522#undef GET_STAT 4523} 4524 4525static void 4526t4_clr_vi_stats(struct adapter *sc, unsigned int viid) 4527{ 4528 int reg; 4529 4530 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 4531 V_PL_VFID(G_FW_VIID_VIN(viid)) | 4532 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 4533 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 4534 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 4535 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 4536} 4537 4538static void 4539vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 4540{ 4541 struct ifnet *ifp = vi->ifp; 4542 struct sge_txq *txq; 4543 int i, drops; 4544 struct fw_vi_stats_vf *s = &vi->stats; 4545 struct timeval tv; 4546 const struct timeval interval = {0, 250000}; /* 250ms */ 4547 4548 if (!(vi->flags & VI_INIT_DONE)) 4549 return; 4550 4551 getmicrotime(&tv); 4552 timevalsub(&tv, &interval); 4553 if (timevalcmp(&tv, &vi->last_refreshed, <)) 4554 return; 4555 4556 mtx_lock(&sc->reg_lock); 4557 t4_get_vi_stats(sc, vi->viid, &vi->stats); 4558 4559 ifp->if_ipackets = s->rx_bcast_frames + s->rx_mcast_frames + 4560 s->rx_ucast_frames; 4561 ifp->if_ierrors = s->rx_err_frames; 4562 ifp->if_opackets = s->tx_bcast_frames + s->tx_mcast_frames + 4563 s->tx_ucast_frames + s->tx_offload_frames; 4564 ifp->if_oerrors = s->tx_drop_frames; 4565 ifp->if_ibytes = s->rx_bcast_bytes + s->rx_mcast_bytes + 4566 s->rx_ucast_bytes; 4567 ifp->if_obytes = s->tx_bcast_bytes + s->tx_mcast_bytes + 4568 s->tx_ucast_bytes + s->tx_offload_bytes; 4569 ifp->if_imcasts = s->rx_mcast_frames; 4570 ifp->if_omcasts = s->tx_mcast_frames; 4571 4572 drops = 0; 4573 for_each_txq(vi, i, txq) 4574 drops += counter_u64_fetch(txq->r->drops); 4575 ifp->if_snd.ifq_drops = drops; 4576 4577 getmicrotime(&vi->last_refreshed); 4578 mtx_unlock(&sc->reg_lock); 4579} 4580 4581static void 4582cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 4583{ 4584 struct vi_info *vi = &pi->vi[0]; 4585 struct ifnet *ifp = vi->ifp; 4586 struct sge_txq *txq; 4587 int i, drops; 4588 struct port_stats *s = &pi->stats; 4589 struct timeval tv; 4590 const struct timeval interval = {0, 250000}; /* 250ms */ 4591 4592 getmicrotime(&tv); 4593 timevalsub(&tv, &interval); 4594 if (timevalcmp(&tv, &pi->last_refreshed, <)) 4595 return; 4596 4597 t4_get_port_stats(sc, pi->tx_chan, s); 4598 4599 ifp->if_opackets = s->tx_frames; 4600 ifp->if_ipackets = s->rx_frames; 4601 ifp->if_obytes = s->tx_octets; 4602 ifp->if_ibytes = s->rx_octets; 4603 ifp->if_omcasts = s->tx_mcast_frames; 4604 ifp->if_imcasts = s->rx_mcast_frames; 4605 ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + 4606 s->rx_ovflow3 + s->rx_trunc0 + 
s->rx_trunc1 + s->rx_trunc2 + 4607 s->rx_trunc3; 4608 for (i = 0; i < sc->chip_params->nchan; i++) { 4609 if (pi->rx_chan_map & (1 << i)) { 4610 uint32_t v; 4611 4612 mtx_lock(&sc->reg_lock); 4613 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 4614 1, A_TP_MIB_TNL_CNG_DROP_0 + i); 4615 mtx_unlock(&sc->reg_lock); 4616 ifp->if_iqdrops += v; 4617 } 4618 } 4619 4620 drops = s->tx_drop; 4621 for_each_txq(vi, i, txq) 4622 drops += counter_u64_fetch(txq->r->drops); 4623 ifp->if_snd.ifq_drops = drops; 4624 4625 ifp->if_oerrors = s->tx_error_frames; 4626 ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long + 4627 s->rx_fcs_err + s->rx_len_err; 4628 4629 getmicrotime(&pi->last_refreshed); 4630} 4631 4632static void 4633cxgbe_tick(void *arg) 4634{ 4635 struct port_info *pi = arg; 4636 struct adapter *sc = pi->adapter; 4637 4638 PORT_LOCK_ASSERT_OWNED(pi); 4639 cxgbe_refresh_stats(sc, pi); 4640 4641 callout_schedule(&pi->tick, hz); 4642} 4643 4644void 4645vi_tick(void *arg) 4646{ 4647 struct vi_info *vi = arg; 4648 struct adapter *sc = vi->pi->adapter; 4649 4650 vi_refresh_stats(sc, vi); 4651 4652 callout_schedule(&vi->tick, hz); 4653} 4654 4655static void 4656cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid) 4657{ 4658 struct ifnet *vlan; 4659 4660 if (arg != ifp || ifp->if_type != IFT_ETHER) 4661 return; 4662 4663 vlan = VLAN_DEVAT(ifp, vid); 4664 VLAN_SETCOOKIE(vlan, ifp); 4665} 4666 4667/* 4668 * Should match fw_caps_config_<foo> enums in t4fw_interface.h 4669 */ 4670static char *caps_decoder[] = { 4671 "\20\001IPMI\002NCSI", /* 0: NBM */ 4672 "\20\001PPP\002QFC\003DCBX", /* 1: link */ 4673 "\20\001INGRESS\002EGRESS", /* 2: switch */ 4674 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ 4675 "\006HASHFILTER\007ETHOFLD", 4676 "\20\001TOE", /* 4: TOE */ 4677 "\20\001RDDP\002RDMAC", /* 5: RDMA */ 4678 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ 4679 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" 4680 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" 4681 "\007T10DIF" 4682 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", 4683 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */ 4684 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ 4685 "\004PO_INITIATOR\005PO_TARGET", 4686}; 4687 4688void 4689t4_sysctls(struct adapter *sc) 4690{ 4691 struct sysctl_ctx_list *ctx; 4692 struct sysctl_oid *oid; 4693 struct sysctl_oid_list *children, *c0; 4694 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 4695 4696 ctx = device_get_sysctl_ctx(sc->dev); 4697 4698 /* 4699 * dev.t4nex.X. 
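Everything registered in this block hangs off the nexus
 * device's node, for example (instance number illustrative, names taken
 * from the calls below):
 *
 *	dev.t4nex.0.nports		(RD) # of ports
 *	dev.t4nex.0.core_clock		(RD) core clock frequency
 *	dev.t4nex.0.lro_timeout		(RW) LRO inactive-flush timeout
 *	dev.t4nex.0.misc.*		debug nodes, hidden (CTLFLAG_SKIP)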
4700 */ 4701 oid = device_get_sysctl_tree(sc->dev); 4702 c0 = children = SYSCTL_CHILDREN(oid); 4703 4704 sc->sc_do_rxcopy = 1; 4705 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 4706 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 4707 4708 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 4709 sc->params.nports, "# of ports"); 4710 4711 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 4712 CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells, 4713 sysctl_bitfield, "A", "available doorbells"); 4714 4715 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 4716 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 4717 4718 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 4719 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 4720 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 4721 "interrupt holdoff timer values (us)"); 4722 4723 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 4724 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 4725 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 4726 "interrupt holdoff packet counter values"); 4727 4728 t4_sge_sysctls(sc, ctx, children); 4729 4730 sc->lro_timeout = 100; 4731 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 4732 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 4733 4734 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, 4735 &sc->debug_flags, 0, "flags to enable runtime debugging"); 4736 4737 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", 4738 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); 4739 4740 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 4741 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 4742 4743 if (sc->flags & IS_VF) 4744 return; 4745 4746 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 4747 NULL, chip_rev(sc), "chip hardware revision"); 4748 4749 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", 4750 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); 4751 4752 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", 4753 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); 4754 4755 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", 4756 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); 4757 4758 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", 4759 CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); 4760 4761 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, 4762 sc->er_version, 0, "expansion ROM version"); 4763 4764 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD, 4765 sc->bs_version, 0, "bootstrap firmware version"); 4766 4767 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, 4768 NULL, sc->params.scfg_vers, "serial config version"); 4769 4770 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, 4771 NULL, sc->params.vpd_vers, "VPD version"); 4772 4773 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 4774 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 4775 4776 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 4777 sc->cfcsum, "config file checksum"); 4778 4779#define SYSCTL_CAP(name, n, text) \ 4780 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ 4781 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \ 4782 sysctl_bitfield, "A", "available " text " capabilities") 4783 4784 SYSCTL_CAP(nbmcaps, 0, "NBM"); 4785 SYSCTL_CAP(linkcaps, 1, "link"); 4786 SYSCTL_CAP(switchcaps, 2, "switch"); 4787 
SYSCTL_CAP(niccaps, 3, "NIC"); 4788 SYSCTL_CAP(toecaps, 4, "TCP offload"); 4789 SYSCTL_CAP(rdmacaps, 5, "RDMA"); 4790 SYSCTL_CAP(iscsicaps, 6, "iSCSI"); 4791 SYSCTL_CAP(cryptocaps, 7, "crypto"); 4792 SYSCTL_CAP(fcoecaps, 8, "FCoE"); 4793#undef SYSCTL_CAP 4794 4795 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 4796 NULL, sc->tids.nftids, "number of filters"); 4797 4798 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 4799 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 4800 "chip temperature (in Celsius)"); 4801 4802#ifdef SBUF_DRAIN 4803 /* 4804 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 4805 */ 4806 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 4807 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 4808 "logs and miscellaneous information"); 4809 children = SYSCTL_CHILDREN(oid); 4810 4811 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 4812 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4813 sysctl_cctrl, "A", "congestion control"); 4814 4815 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 4816 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4817 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 4818 4819 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 4820 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 4821 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 4822 4823 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 4824 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 4825 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 4826 4827 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 4828 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 4829 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 4830 4831 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 4832 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 4833 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 4834 4835 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 4836 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 4837 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 4838 4839 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 4840 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4841 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_cim_la : sysctl_cim_la_t6, 4842 "A", "CIM logic analyzer"); 4843 4844 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 4845 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4846 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 4847 4848 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 4849 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 4850 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 4851 4852 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 4853 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 4854 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 4855 4856 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 4857 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 4858 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 4859 4860 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 4861 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 4862 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 4863 4864 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 4865 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 4866 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 4867 4868 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 4869 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 4870 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 4871 4872 if (chip_id(sc) > CHELSIO_T4) { 4873 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 4874 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 4875 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 4876 4877 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 4878 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 4879 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 4880 } 4881 4882 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 4883 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4884 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 4885 4886 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 4887 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4888 sysctl_cim_qcfg, "A", "CIM queue configuration"); 4889 4890 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 4891 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4892 sysctl_cpl_stats, "A", "CPL statistics"); 4893 4894 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats", 4895 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4896 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 4897 4898 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 4899 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4900 sysctl_devlog, "A", "firmware's device log"); 4901 4902 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 4903 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4904 sysctl_fcoe_stats, "A", "FCoE statistics"); 4905 4906 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 4907 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4908 sysctl_hw_sched, "A", "hardware scheduler "); 4909 4910 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 4911 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4912 sysctl_l2t, "A", "hardware L2 table"); 4913 4914 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 4915 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4916 sysctl_lb_stats, "A", "loopback statistics"); 4917 4918 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 4919 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4920 sysctl_meminfo, "A", "memory regions"); 4921 4922 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 4923 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4924 chip_id(sc) <= CHELSIO_T5 ? 
sysctl_mps_tcam : sysctl_mps_tcam_t6, 4925 "A", "MPS TCAM entries"); 4926 4927 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 4928 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4929 sysctl_path_mtus, "A", "path MTUs"); 4930 4931 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 4932 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4933 sysctl_pm_stats, "A", "PM statistics"); 4934 4935 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 4936 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4937 sysctl_rdma_stats, "A", "RDMA statistics"); 4938 4939 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 4940 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4941 sysctl_tcp_stats, "A", "TCP statistics"); 4942 4943 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 4944 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4945 sysctl_tids, "A", "TID information"); 4946 4947 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 4948 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4949 sysctl_tp_err_stats, "A", "TP error statistics"); 4950 4951 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", 4952 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", 4953 "TP logic analyzer event capture mask"); 4954 4955 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 4956 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4957 sysctl_tp_la, "A", "TP logic analyzer"); 4958 4959 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 4960 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4961 sysctl_tx_rate, "A", "Tx rate"); 4962 4963 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 4964 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4965 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 4966 4967 if (chip_id(sc) >= CHELSIO_T5) { 4968 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 4969 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 4970 sysctl_wcwr_stats, "A", "write combined work requests"); 4971 } 4972#endif 4973 4974#ifdef TCP_OFFLOAD 4975 if (is_offload(sc)) { 4976 /* 4977 * dev.t4nex.X.toe. 
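Tunables for offloaded TCP
 * connections, e.g. dev.t4nex.0.toe.ddp (allow DDP) and
 * dev.t4nex.0.toe.rx_coalesce (receive coalescing).  The instance
 * number is illustrative; defaults are assigned right before each knob
 * is registered below.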
4978 */ 4979 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 4980 NULL, "TOE parameters"); 4981 children = SYSCTL_CHILDREN(oid); 4982 4983 sc->tt.sndbuf = 256 * 1024; 4984 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 4985 &sc->tt.sndbuf, 0, "max hardware send buffer size"); 4986 4987 sc->tt.ddp = 0; 4988 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 4989 &sc->tt.ddp, 0, "DDP allowed"); 4990 4991 sc->tt.indsz = G_INDICATESIZE(t4_read_reg(sc, A_TP_PARA_REG5)); 4992 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "indsz", CTLFLAG_RW, 4993 &sc->tt.indsz, 0, "DDP max indicate size allowed"); 4994 4995 sc->tt.ddp_thres = 4996 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)); 4997 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW, 4998 &sc->tt.ddp_thres, 0, "DDP threshold"); 4999 5000 sc->tt.rx_coalesce = 1; 5001 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 5002 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 5003 5004 sc->tt.tx_align = 1; 5005 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 5006 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 5007 5008 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", 5009 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A", 5010 "TP timer tick (us)"); 5011 5012 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", 5013 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A", 5014 "TCP timestamp tick (us)"); 5015 5016 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", 5017 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A", 5018 "DACK tick (us)"); 5019 5020 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", 5021 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer, 5022 "IU", "DACK timer (us)"); 5023 5024 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", 5025 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN, 5026 sysctl_tp_timer, "LU", "Retransmit min (us)"); 5027 5028 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", 5029 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX, 5030 sysctl_tp_timer, "LU", "Retransmit max (us)"); 5031 5032 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", 5033 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN, 5034 sysctl_tp_timer, "LU", "Persist timer min (us)"); 5035 5036 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", 5037 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX, 5038 sysctl_tp_timer, "LU", "Persist timer max (us)"); 5039 5040 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", 5041 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE, 5042 sysctl_tp_timer, "LU", "Keepalive idle timer (us)"); 5043 5044 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_intvl", 5045 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL, 5046 sysctl_tp_timer, "LU", "Keepalive interval (us)"); 5047 5048 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", 5049 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT, 5050 sysctl_tp_timer, "LU", "Initial SRTT (us)"); 5051 5052 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer", 5053 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER, 5054 sysctl_tp_timer, "LU", "FINWAIT2 timer (us)"); 5055 } 5056#endif 5057} 5058 5059void 5060vi_sysctls(struct vi_info *vi) 5061{ 5062 struct sysctl_ctx_list *ctx; 5063 struct sysctl_oid *oid; 5064 struct sysctl_oid_list *children; 5065 5066 ctx = device_get_sysctl_ctx(vi->dev); 5067 5068 /* 5069 * dev.v?(cxgbe|cxl).X. 
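That is, a port's main VI
 * shows up as dev.cxgbe.X (T4) or dev.cxl.X (T5), and any additional
 * VIs as dev.vcxgbe.X / dev.vcxl.X; the same set of knobs is registered
 * for each VI.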
5070 */ 5071 oid = device_get_sysctl_tree(vi->dev); 5072 children = SYSCTL_CHILDREN(oid); 5073 5074 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL, 5075 vi->viid, "VI identifier"); 5076 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD, 5077 &vi->nrxq, 0, "# of rx queues"); 5078 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD, 5079 &vi->ntxq, 0, "# of tx queues"); 5080 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD, 5081 &vi->first_rxq, 0, "index of first rx queue"); 5082 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD, 5083 &vi->first_txq, 0, "index of first tx queue"); 5084 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL, 5085 vi->rss_size, "size of RSS indirection table"); 5086 5087 if (IS_MAIN_VI(vi)) { 5088 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq", 5089 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU", 5090 "Reserve queue 0 for non-flowid packets"); 5091 } 5092 5093#ifdef TCP_OFFLOAD 5094 if (vi->nofldrxq != 0) { 5095 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD, 5096 &vi->nofldrxq, 0, 5097 "# of rx queues for offloaded TCP connections"); 5098 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 5099 &vi->nofldtxq, 0, 5100 "# of tx queues for offloaded TCP connections"); 5101 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq", 5102 CTLFLAG_RD, &vi->first_ofld_rxq, 0, 5103 "index of first TOE rx queue"); 5104 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 5105 CTLFLAG_RD, &vi->first_ofld_txq, 0, 5106 "index of first TOE tx queue"); 5107 } 5108#endif 5109#ifdef DEV_NETMAP 5110 if (vi->nnmrxq != 0) { 5111 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 5112 &vi->nnmrxq, 0, "# of netmap rx queues"); 5113 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 5114 &vi->nnmtxq, 0, "# of netmap tx queues"); 5115 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 5116 CTLFLAG_RD, &vi->first_nm_rxq, 0, 5117 "index of first netmap rx queue"); 5118 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 5119 CTLFLAG_RD, &vi->first_nm_txq, 0, 5120 "index of first netmap tx queue"); 5121 } 5122#endif 5123 5124 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 5125 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 5126 "holdoff timer index"); 5127 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 5128 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 5129 "holdoff packet counter index"); 5130 5131 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 5132 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 5133 "rx queue size"); 5134 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 5135 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 5136 "tx queue size"); 5137} 5138 5139static void 5140cxgbe_sysctls(struct port_info *pi) 5141{ 5142 struct sysctl_ctx_list *ctx; 5143 struct sysctl_oid *oid; 5144 struct sysctl_oid_list *children, *children2; 5145 struct adapter *sc = pi->adapter; 5146 int i; 5147 char name[16]; 5148 5149 ctx = device_get_sysctl_ctx(pi->dev); 5150 5151 /* 5152 * dev.cxgbe.X. 
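Per-port knobs (the node is
 * dev.cxgbe.X or dev.cxl.X depending on the chip): pause_settings, fec,
 * and autoneg below are read-write; max_speed is read-only and reported
 * in Gbps.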
5153 */ 5154 oid = device_get_sysctl_tree(pi->dev); 5155 children = SYSCTL_CHILDREN(oid); 5156 5157 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 5158 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 5159 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 5160 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 5161 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 5162 "PHY temperature (in Celsius)"); 5163 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 5164 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 5165 "PHY firmware version"); 5166 } 5167 5168 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 5169 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A", 5170 "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)"); 5171 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec", 5172 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A", 5173 "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)"); 5174 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg", 5175 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I", 5176 "autonegotiation (-1 = not supported)"); 5177 5178 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 5179 port_top_speed(pi), "max speed (in Gbps)"); 5180 5181 if (sc->flags & IS_VF) 5182 return; 5183 5184 /* 5185 * dev.(cxgbe|cxl).X.tc. 5186 */ 5187 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL, 5188 "Tx scheduler traffic classes"); 5189 for (i = 0; i < sc->chip_params->nsched_cls; i++) { 5190 struct tx_sched_class *tc = &pi->tc[i]; 5191 5192 snprintf(name, sizeof(name), "%d", i); 5193 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, 5194 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, 5195 "traffic class")); 5196 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD, 5197 &tc->flags, 0, "flags"); 5198 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", 5199 CTLFLAG_RD, &tc->refcount, 0, "references to this class"); 5200#ifdef SBUF_DRAIN 5201 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", 5202 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i, 5203 sysctl_tc_params, "A", "traffic class parameters"); 5204#endif 5205 } 5206 5207 /* 5208 * dev.cxgbe.X.stats. 
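Unlike the buffer-group
 * counters at the end of this function, the SYSCTL_ADD_T4_REG64()
 * entries below read the 64-bit MPS port counters straight from
 * hardware on every access (see sysctl_handle_t4_reg64), so they are
 * never stale.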
5209 */ 5210 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 5211 NULL, "port statistics"); 5212 children = SYSCTL_CHILDREN(oid); 5213 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 5214 &pi->tx_parse_error, 0, 5215 "# of tx packets with invalid length or # of segments"); 5216 5217#define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \ 5218 SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \ 5219 CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \ 5220 sysctl_handle_t4_reg64, "QU", desc) 5221 5222 SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames", 5223 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BYTES_L)); 5224 SYSCTL_ADD_T4_REG64(pi, "tx_frames", "total # of good frames", 5225 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_FRAMES_L)); 5226 SYSCTL_ADD_T4_REG64(pi, "tx_bcast_frames", "# of broadcast frames", 5227 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_BCAST_L)); 5228 SYSCTL_ADD_T4_REG64(pi, "tx_mcast_frames", "# of multicast frames", 5229 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_MCAST_L)); 5230 SYSCTL_ADD_T4_REG64(pi, "tx_ucast_frames", "# of unicast frames", 5231 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_UCAST_L)); 5232 SYSCTL_ADD_T4_REG64(pi, "tx_error_frames", "# of error frames", 5233 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_ERROR_L)); 5234 SYSCTL_ADD_T4_REG64(pi, "tx_frames_64", 5235 "# of tx frames in this range", 5236 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_64B_L)); 5237 SYSCTL_ADD_T4_REG64(pi, "tx_frames_65_127", 5238 "# of tx frames in this range", 5239 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_65B_127B_L)); 5240 SYSCTL_ADD_T4_REG64(pi, "tx_frames_128_255", 5241 "# of tx frames in this range", 5242 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_128B_255B_L)); 5243 SYSCTL_ADD_T4_REG64(pi, "tx_frames_256_511", 5244 "# of tx frames in this range", 5245 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_256B_511B_L)); 5246 SYSCTL_ADD_T4_REG64(pi, "tx_frames_512_1023", 5247 "# of tx frames in this range", 5248 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_512B_1023B_L)); 5249 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1024_1518", 5250 "# of tx frames in this range", 5251 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L)); 5252 SYSCTL_ADD_T4_REG64(pi, "tx_frames_1519_max", 5253 "# of tx frames in this range", 5254 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L)); 5255 SYSCTL_ADD_T4_REG64(pi, "tx_drop", "# of dropped tx frames", 5256 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_DROP_L)); 5257 SYSCTL_ADD_T4_REG64(pi, "tx_pause", "# of pause frames transmitted", 5258 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PAUSE_L)); 5259 SYSCTL_ADD_T4_REG64(pi, "tx_ppp0", "# of PPP prio 0 frames transmitted", 5260 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP0_L)); 5261 SYSCTL_ADD_T4_REG64(pi, "tx_ppp1", "# of PPP prio 1 frames transmitted", 5262 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP1_L)); 5263 SYSCTL_ADD_T4_REG64(pi, "tx_ppp2", "# of PPP prio 2 frames transmitted", 5264 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP2_L)); 5265 SYSCTL_ADD_T4_REG64(pi, "tx_ppp3", "# of PPP prio 3 frames transmitted", 5266 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP3_L)); 5267 SYSCTL_ADD_T4_REG64(pi, "tx_ppp4", "# of PPP prio 4 frames transmitted", 5268 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP4_L)); 5269 SYSCTL_ADD_T4_REG64(pi, "tx_ppp5", "# of PPP prio 5 frames transmitted", 5270 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP5_L)); 5271 SYSCTL_ADD_T4_REG64(pi, "tx_ppp6", "# of PPP prio 6 frames 
transmitted", 5272 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP6_L)); 5273 SYSCTL_ADD_T4_REG64(pi, "tx_ppp7", "# of PPP prio 7 frames transmitted", 5274 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_TX_PORT_PPP7_L)); 5275 5276 SYSCTL_ADD_T4_REG64(pi, "rx_octets", "# of octets in good frames", 5277 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BYTES_L)); 5278 SYSCTL_ADD_T4_REG64(pi, "rx_frames", "total # of good frames", 5279 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_FRAMES_L)); 5280 SYSCTL_ADD_T4_REG64(pi, "rx_bcast_frames", "# of broadcast frames", 5281 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_BCAST_L)); 5282 SYSCTL_ADD_T4_REG64(pi, "rx_mcast_frames", "# of multicast frames", 5283 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MCAST_L)); 5284 SYSCTL_ADD_T4_REG64(pi, "rx_ucast_frames", "# of unicast frames", 5285 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_UCAST_L)); 5286 SYSCTL_ADD_T4_REG64(pi, "rx_too_long", "# of frames exceeding MTU", 5287 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L)); 5288 SYSCTL_ADD_T4_REG64(pi, "rx_jabber", "# of jabber frames", 5289 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L)); 5290 SYSCTL_ADD_T4_REG64(pi, "rx_fcs_err", 5291 "# of frames received with bad FCS", 5292 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L)); 5293 SYSCTL_ADD_T4_REG64(pi, "rx_len_err", 5294 "# of frames received with length error", 5295 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L)); 5296 SYSCTL_ADD_T4_REG64(pi, "rx_symbol_err", "symbol errors", 5297 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L)); 5298 SYSCTL_ADD_T4_REG64(pi, "rx_runt", "# of short frames received", 5299 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_LESS_64B_L)); 5300 SYSCTL_ADD_T4_REG64(pi, "rx_frames_64", 5301 "# of rx frames in this range", 5302 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_64B_L)); 5303 SYSCTL_ADD_T4_REG64(pi, "rx_frames_65_127", 5304 "# of rx frames in this range", 5305 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_65B_127B_L)); 5306 SYSCTL_ADD_T4_REG64(pi, "rx_frames_128_255", 5307 "# of rx frames in this range", 5308 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_128B_255B_L)); 5309 SYSCTL_ADD_T4_REG64(pi, "rx_frames_256_511", 5310 "# of rx frames in this range", 5311 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_256B_511B_L)); 5312 SYSCTL_ADD_T4_REG64(pi, "rx_frames_512_1023", 5313 "# of rx frames in this range", 5314 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_512B_1023B_L)); 5315 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1024_1518", 5316 "# of rx frames in this range", 5317 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L)); 5318 SYSCTL_ADD_T4_REG64(pi, "rx_frames_1519_max", 5319 "# of rx frames in this range", 5320 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L)); 5321 SYSCTL_ADD_T4_REG64(pi, "rx_pause", "# of pause frames received", 5322 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PAUSE_L)); 5323 SYSCTL_ADD_T4_REG64(pi, "rx_ppp0", "# of PPP prio 0 frames received", 5324 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP0_L)); 5325 SYSCTL_ADD_T4_REG64(pi, "rx_ppp1", "# of PPP prio 1 frames received", 5326 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP1_L)); 5327 SYSCTL_ADD_T4_REG64(pi, "rx_ppp2", "# of PPP prio 2 frames received", 5328 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP2_L)); 5329 SYSCTL_ADD_T4_REG64(pi, "rx_ppp3", "# of PPP prio 3 frames received", 5330 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP3_L)); 5331 SYSCTL_ADD_T4_REG64(pi, "rx_ppp4", "# of PPP prio 4 frames received", 5332 
PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP4_L)); 5333 SYSCTL_ADD_T4_REG64(pi, "rx_ppp5", "# of PPP prio 5 frames received", 5334 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP5_L)); 5335 SYSCTL_ADD_T4_REG64(pi, "rx_ppp6", "# of PPP prio 6 frames received", 5336 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP6_L)); 5337 SYSCTL_ADD_T4_REG64(pi, "rx_ppp7", "# of PPP prio 7 frames received", 5338 PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_RX_PORT_PPP7_L)); 5339 5340#undef SYSCTL_ADD_T4_REG64 5341 5342#define SYSCTL_ADD_T4_PORTSTAT(name, desc) \ 5343 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 5344 &pi->stats.name, desc) 5345 5346 /* We get these from port_stats and they may be stale by up to 1s */ 5347 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow0, 5348 "# drops due to buffer-group 0 overflows"); 5349 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow1, 5350 "# drops due to buffer-group 1 overflows"); 5351 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow2, 5352 "# drops due to buffer-group 2 overflows"); 5353 SYSCTL_ADD_T4_PORTSTAT(rx_ovflow3, 5354 "# drops due to buffer-group 3 overflows"); 5355 SYSCTL_ADD_T4_PORTSTAT(rx_trunc0, 5356 "# of buffer-group 0 truncated packets"); 5357 SYSCTL_ADD_T4_PORTSTAT(rx_trunc1, 5358 "# of buffer-group 1 truncated packets"); 5359 SYSCTL_ADD_T4_PORTSTAT(rx_trunc2, 5360 "# of buffer-group 2 truncated packets"); 5361 SYSCTL_ADD_T4_PORTSTAT(rx_trunc3, 5362 "# of buffer-group 3 truncated packets"); 5363 5364#undef SYSCTL_ADD_T4_PORTSTAT 5365} 5366 5367static int 5368sysctl_int_array(SYSCTL_HANDLER_ARGS) 5369{ 5370 int rc, *i, space = 0; 5371 struct sbuf sb; 5372 5373 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 5374 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { 5375 if (space) 5376 sbuf_printf(&sb, " "); 5377 sbuf_printf(&sb, "%d", *i); 5378 space = 1; 5379 } 5380 sbuf_finish(&sb); 5381 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 5382 sbuf_delete(&sb); 5383 return (rc); 5384} 5385 5386static int 5387sysctl_bitfield(SYSCTL_HANDLER_ARGS) 5388{ 5389 int rc; 5390 struct sbuf *sb; 5391 5392 rc = sysctl_wire_old_buffer(req, 0); 5393 if (rc != 0) 5394 return(rc); 5395 5396 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5397 if (sb == NULL) 5398 return (ENOMEM); 5399 5400 sbuf_printf(sb, "%b", (int)arg2, (char *)arg1); 5401 rc = sbuf_finish(sb); 5402 sbuf_delete(sb); 5403 5404 return (rc); 5405} 5406 5407static int 5408sysctl_btphy(SYSCTL_HANDLER_ARGS) 5409{ 5410 struct port_info *pi = arg1; 5411 int op = arg2; 5412 struct adapter *sc = pi->adapter; 5413 u_int v; 5414 int rc; 5415 5416 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); 5417 if (rc) 5418 return (rc); 5419 /* XXX: magic numbers */ 5420 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 
0x20 : 0xc820, 5421 &v); 5422 end_synchronized_op(sc, 0); 5423 if (rc) 5424 return (rc); 5425 if (op == 0) 5426 v /= 256; 5427 5428 rc = sysctl_handle_int(oidp, &v, 0, req); 5429 return (rc); 5430} 5431 5432static int 5433sysctl_noflowq(SYSCTL_HANDLER_ARGS) 5434{ 5435 struct vi_info *vi = arg1; 5436 int rc, val; 5437 5438 val = vi->rsrv_noflowq; 5439 rc = sysctl_handle_int(oidp, &val, 0, req); 5440 if (rc != 0 || req->newptr == NULL) 5441 return (rc); 5442 5443 if ((val >= 1) && (vi->ntxq > 1)) 5444 vi->rsrv_noflowq = 1; 5445 else 5446 vi->rsrv_noflowq = 0; 5447 5448 return (rc); 5449} 5450 5451static int 5452sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 5453{ 5454 struct vi_info *vi = arg1; 5455 struct adapter *sc = vi->pi->adapter; 5456 int idx, rc, i; 5457 struct sge_rxq *rxq; 5458#ifdef TCP_OFFLOAD 5459 struct sge_ofld_rxq *ofld_rxq; 5460#endif 5461 uint8_t v; 5462 5463 idx = vi->tmr_idx; 5464 5465 rc = sysctl_handle_int(oidp, &idx, 0, req); 5466 if (rc != 0 || req->newptr == NULL) 5467 return (rc); 5468 5469 if (idx < 0 || idx >= SGE_NTIMERS) 5470 return (EINVAL); 5471 5472 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5473 "t4tmr"); 5474 if (rc) 5475 return (rc); 5476 5477 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 5478 for_each_rxq(vi, i, rxq) { 5479#ifdef atomic_store_rel_8 5480 atomic_store_rel_8(&rxq->iq.intr_params, v); 5481#else 5482 rxq->iq.intr_params = v; 5483#endif 5484 } 5485#ifdef TCP_OFFLOAD 5486 for_each_ofld_rxq(vi, i, ofld_rxq) { 5487#ifdef atomic_store_rel_8 5488 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 5489#else 5490 ofld_rxq->iq.intr_params = v; 5491#endif 5492 } 5493#endif 5494 vi->tmr_idx = idx; 5495 5496 end_synchronized_op(sc, LOCK_HELD); 5497 return (0); 5498} 5499 5500static int 5501sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 5502{ 5503 struct vi_info *vi = arg1; 5504 struct adapter *sc = vi->pi->adapter; 5505 int idx, rc; 5506 5507 idx = vi->pktc_idx; 5508 5509 rc = sysctl_handle_int(oidp, &idx, 0, req); 5510 if (rc != 0 || req->newptr == NULL) 5511 return (rc); 5512 5513 if (idx < -1 || idx >= SGE_NCOUNTERS) 5514 return (EINVAL); 5515 5516 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5517 "t4pktc"); 5518 if (rc) 5519 return (rc); 5520 5521 if (vi->flags & VI_INIT_DONE) 5522 rc = EBUSY; /* cannot be changed once the queues are created */ 5523 else 5524 vi->pktc_idx = idx; 5525 5526 end_synchronized_op(sc, LOCK_HELD); 5527 return (rc); 5528} 5529 5530static int 5531sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 5532{ 5533 struct vi_info *vi = arg1; 5534 struct adapter *sc = vi->pi->adapter; 5535 int qsize, rc; 5536 5537 qsize = vi->qsize_rxq; 5538 5539 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5540 if (rc != 0 || req->newptr == NULL) 5541 return (rc); 5542 5543 if (qsize < 128 || (qsize & 7)) 5544 return (EINVAL); 5545 5546 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5547 "t4rxqs"); 5548 if (rc) 5549 return (rc); 5550 5551 if (vi->flags & VI_INIT_DONE) 5552 rc = EBUSY; /* cannot be changed once the queues are created */ 5553 else 5554 vi->qsize_rxq = qsize; 5555 5556 end_synchronized_op(sc, LOCK_HELD); 5557 return (rc); 5558} 5559 5560static int 5561sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 5562{ 5563 struct vi_info *vi = arg1; 5564 struct adapter *sc = vi->pi->adapter; 5565 int qsize, rc; 5566 5567 qsize = vi->qsize_txq; 5568 5569 rc = sysctl_handle_int(oidp, &qsize, 0, req); 5570 if (rc != 0 || req->newptr == NULL) 5571 return (rc); 5572 5573 if (qsize < 128 || qsize > 
65536) 5574 return (EINVAL); 5575 5576 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 5577 "t4txqs"); 5578 if (rc) 5579 return (rc); 5580 5581 if (vi->flags & VI_INIT_DONE) 5582 rc = EBUSY; /* cannot be changed once the queues are created */ 5583 else 5584 vi->qsize_txq = qsize; 5585 5586 end_synchronized_op(sc, LOCK_HELD); 5587 return (rc); 5588} 5589 5590static int 5591sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 5592{ 5593 struct port_info *pi = arg1; 5594 struct adapter *sc = pi->adapter; 5595 struct link_config *lc = &pi->link_cfg; 5596 int rc; 5597 5598 if (req->newptr == NULL) { 5599 struct sbuf *sb; 5600 static const char *bits = "\20\1PAUSE_RX\2PAUSE_TX"; 5601 5602 rc = sysctl_wire_old_buffer(req, 0); 5603 if (rc != 0) 5604 return (rc); 5605 5606 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5607 if (sb == NULL) 5608 return (ENOMEM); 5609 5610 sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits); 5611 rc = sbuf_finish(sb); 5612 sbuf_delete(sb); 5613 } else { 5614 char s[2]; 5615 int n; 5616 5617 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX)); 5618 s[1] = 0; 5619 5620 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5621 if (rc != 0) 5622 return (rc); 5623 5624 if (s[1] != 0) 5625 return (EINVAL); 5626 if (s[0] < '0' || s[0] > '9') 5627 return (EINVAL); /* not a number */ 5628 n = s[0] - '0'; 5629 if (n & ~(PAUSE_TX | PAUSE_RX)) 5630 return (EINVAL); /* some other bit is set too */ 5631 5632 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5633 "t4PAUSE"); 5634 if (rc) 5635 return (rc); 5636 if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) { 5637 lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX); 5638 lc->requested_fc |= n; 5639 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5640 } 5641 end_synchronized_op(sc, 0); 5642 } 5643 5644 return (rc); 5645} 5646 5647static int 5648sysctl_fec(SYSCTL_HANDLER_ARGS) 5649{ 5650 struct port_info *pi = arg1; 5651 struct adapter *sc = pi->adapter; 5652 struct link_config *lc = &pi->link_cfg; 5653 int rc; 5654 5655 if (req->newptr == NULL) { 5656 struct sbuf *sb; 5657 static const char *bits = "\20\1RS\2BASER_RS\3RESERVED"; 5658 5659 rc = sysctl_wire_old_buffer(req, 0); 5660 if (rc != 0) 5661 return (rc); 5662 5663 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 5664 if (sb == NULL) 5665 return (ENOMEM); 5666 5667 sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits); 5668 rc = sbuf_finish(sb); 5669 sbuf_delete(sb); 5670 } else { 5671 char s[2]; 5672 int n; 5673 5674 s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC); 5675 s[1] = 0; 5676 5677 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 5678 if (rc != 0) 5679 return (rc); 5680 5681 if (s[1] != 0) 5682 return (EINVAL); 5683 if (s[0] < '0' || s[0] > '9') 5684 return (EINVAL); /* not a number */ 5685 n = s[0] - '0'; 5686 if (n & ~M_FW_PORT_CAP_FEC) 5687 return (EINVAL); /* some other bit is set too */ 5688 5689 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5690 "t4fec"); 5691 if (rc) 5692 return (rc); 5693 if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) { 5694 lc->requested_fec = n & 5695 G_FW_PORT_CAP_FEC(lc->supported); 5696 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5697 } 5698 end_synchronized_op(sc, 0); 5699 } 5700 5701 return (rc); 5702} 5703 5704static int 5705sysctl_autoneg(SYSCTL_HANDLER_ARGS) 5706{ 5707 struct port_info *pi = arg1; 5708 struct adapter *sc = pi->adapter; 5709 struct link_config *lc = &pi->link_cfg; 5710 int rc, val, old; 5711 5712 if (lc->supported & FW_PORT_CAP_ANEG) 5713 val = lc->autoneg == AUTONEG_ENABLE ? 1 : 0; 5714 else 5715 val = -1; 5716 rc = sysctl_handle_int(oidp, &val, 0, req); 5717 if (rc != 0 || req->newptr == NULL) 5718 return (rc); 5719 if ((lc->supported & FW_PORT_CAP_ANEG) == 0) 5720 return (ENOTSUP); 5721 5722 val = val ? AUTONEG_ENABLE : AUTONEG_DISABLE; 5723 if (lc->autoneg == val) 5724 return (0); /* no change */ 5725 5726 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 5727 "t4aneg"); 5728 if (rc) 5729 return (rc); 5730 old = lc->autoneg; 5731 lc->autoneg = val; 5732 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 5733 if (rc != 0) 5734 lc->autoneg = old; /* the op begun above must be ended on every exit path */ end_synchronized_op(sc, 0); 5735 return (rc); 5736} 5737 5738static int 5739sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS) 5740{ 5741 struct adapter *sc = arg1; 5742 int reg = arg2; 5743 uint64_t val; 5744 5745 val = t4_read_reg64(sc, reg); 5746 5747 return (sysctl_handle_64(oidp, &val, 0, req)); 5748} 5749 5750static int 5751sysctl_temperature(SYSCTL_HANDLER_ARGS) 5752{ 5753 struct adapter *sc = arg1; 5754 int rc, t; 5755 uint32_t param, val; 5756 5757 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp"); 5758 if (rc) 5759 return (rc); 5760 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5761 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 5762 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP); 5763 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 5764 end_synchronized_op(sc, 0); 5765 if (rc) 5766 return (rc); 5767 5768 /* unknown is returned as 0 but we display -1 in that case */ 5769 t = val == 0 ? -1 : val; 5770 5771 rc = sysctl_handle_int(oidp, &t, 0, req); 5772 return (rc); 5773} 5774 5775#ifdef SBUF_DRAIN 5776static int 5777sysctl_cctrl(SYSCTL_HANDLER_ARGS) 5778{ 5779 struct adapter *sc = arg1; 5780 struct sbuf *sb; 5781 int rc, i; 5782 uint16_t incr[NMTUS][NCCTRL_WIN]; 5783 static const char *dec_fac[] = { 5784 "0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875", 5785 "0.9375" 5786 }; 5787 5788 rc = sysctl_wire_old_buffer(req, 0); 5789 if (rc != 0) 5790 return (rc); 5791 5792 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5793 if (sb == NULL) 5794 return (ENOMEM); 5795 5796 t4_read_cong_tbl(sc, incr); 5797 5798 for (i = 0; i < NCCTRL_WIN; ++i) { 5799 sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i, 5800 incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i], 5801 incr[5][i], incr[6][i], incr[7][i]); 5802 sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n", 5803 incr[8][i], incr[9][i], incr[10][i], incr[11][i], 5804 incr[12][i], incr[13][i], incr[14][i], incr[15][i], 5805 sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]); 5806 } 5807 5808 rc = sbuf_finish(sb); 5809 sbuf_delete(sb); 5810 5811 return (rc); 5812} 5813 5814static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = { 5815 "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */ 5816 "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */ 5817 "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */ 5818}; 5819 5820static int 5821sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS) 5822{ 5823 struct adapter *sc = arg1; 5824 struct sbuf *sb; 5825 int rc, i, n, qid = arg2; 5826 uint32_t *buf, *p; 5827 char *qtype; 5828 u_int cim_num_obq = sc->chip_params->cim_num_obq; 5829 5830 KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq, 5831 ("%s: bad qid %d\n", __func__, qid)); 5832 5833 if (qid < CIM_NUM_IBQ) { 5834 /* inbound queue */ 5835 qtype = "IBQ"; 5836 n = 4 * CIM_IBQ_SIZE; 5837 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5838 rc = t4_read_cim_ibq(sc, qid, buf, n); 5839 } else { 5840
/* outbound queue */ 5841 qtype = "OBQ"; 5842 qid -= CIM_NUM_IBQ; 5843 n = 4 * cim_num_obq * CIM_OBQ_SIZE; 5844 buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK); 5845 rc = t4_read_cim_obq(sc, qid, buf, n); 5846 } 5847 5848 if (rc < 0) { 5849 rc = -rc; 5850 goto done; 5851 } 5852 n = rc * sizeof(uint32_t); /* rc has # of words actually read */ 5853 5854 rc = sysctl_wire_old_buffer(req, 0); 5855 if (rc != 0) 5856 goto done; 5857 5858 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 5859 if (sb == NULL) { 5860 rc = ENOMEM; 5861 goto done; 5862 } 5863 5864 sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]); 5865 for (i = 0, p = buf; i < n; i += 16, p += 4) 5866 sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1], 5867 p[2], p[3]); 5868 5869 rc = sbuf_finish(sb); 5870 sbuf_delete(sb); 5871done: 5872 free(buf, M_CXGBE); 5873 return (rc); 5874} 5875 5876static int 5877sysctl_cim_la(SYSCTL_HANDLER_ARGS) 5878{ 5879 struct adapter *sc = arg1; 5880 u_int cfg; 5881 struct sbuf *sb; 5882 uint32_t *buf, *p; 5883 int rc; 5884 5885 MPASS(chip_id(sc) <= CHELSIO_T5); 5886 5887 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5888 if (rc != 0) 5889 return (rc); 5890 5891 rc = sysctl_wire_old_buffer(req, 0); 5892 if (rc != 0) 5893 return (rc); 5894 5895 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5896 if (sb == NULL) 5897 return (ENOMEM); 5898 5899 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5900 M_ZERO | M_WAITOK); 5901 5902 rc = -t4_cim_read_la(sc, buf, NULL); 5903 if (rc != 0) 5904 goto done; 5905 5906 sbuf_printf(sb, "Status Data PC%s", 5907 cfg & F_UPDBGLACAPTPCONLY ? "" : 5908 " LS0Stat LS0Addr LS0Data"); 5909 5910 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { 5911 if (cfg & F_UPDBGLACAPTPCONLY) { 5912 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 5913 p[6], p[7]); 5914 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 5915 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 5916 p[4] & 0xff, p[5] >> 8); 5917 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 5918 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5919 p[1] & 0xf, p[2] >> 4); 5920 } else { 5921 sbuf_printf(sb, 5922 "\n %02x %x%07x %x%07x %08x %08x " 5923 "%08x%08x%08x%08x", 5924 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 5925 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 5926 p[6], p[7]); 5927 } 5928 } 5929 5930 rc = sbuf_finish(sb); 5931done: 5932 sbuf_delete(sb); /* also reached on error so the sbuf is not leaked */ 5933 free(buf, M_CXGBE); 5934 return (rc); 5935} 5936 5937static int 5938sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS) 5939{ 5940 struct adapter *sc = arg1; 5941 u_int cfg; 5942 struct sbuf *sb; 5943 uint32_t *buf, *p; 5944 int rc; 5945 5946 MPASS(chip_id(sc) > CHELSIO_T5); 5947 5948 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 5949 if (rc != 0) 5950 return (rc); 5951 5952 rc = sysctl_wire_old_buffer(req, 0); 5953 if (rc != 0) 5954 return (rc); 5955 5956 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 5957 if (sb == NULL) 5958 return (ENOMEM); 5959 5960 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 5961 M_ZERO | M_WAITOK); 5962 5963 rc = -t4_cim_read_la(sc, buf, NULL); 5964 if (rc != 0) 5965 goto done; 5966 5967 sbuf_printf(sb, "Status Inst Data PC%s", 5968 cfg & F_UPDBGLACAPTPCONLY ? "" : 5969 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); 5970 5971 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { 5972 if (cfg & F_UPDBGLACAPTPCONLY) { 5973 sbuf_printf(sb, "\n %02x %08x %08x %08x", 5974 p[3] & 0xff, p[2], p[1], p[0]); 5975 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", 5976 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, 5977 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); 5978 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", 5979 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, 5980 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, 5981 p[6] >> 16); 5982 } else { 5983 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " 5984 "%08x %08x %08x %08x %08x %08x", 5985 (p[9] >> 16) & 0xff, 5986 p[9] & 0xffff, p[8] >> 16, 5987 p[8] & 0xffff, p[7] >> 16, 5988 p[7] & 0xffff, p[6] >> 16, 5989 p[2], p[1], p[0], p[5], p[4], p[3]); 5990 } 5991 } 5992 5993 rc = sbuf_finish(sb); 5994done: 5995 sbuf_delete(sb); /* also reached on error so the sbuf is not leaked */ 5996 free(buf, M_CXGBE); 5997 return (rc); 5998} 5999 6000static int 6001sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 6002{ 6003 struct adapter *sc = arg1; 6004 u_int i; 6005 struct sbuf *sb; 6006 uint32_t *buf, *p; 6007 int rc; 6008 6009 rc = sysctl_wire_old_buffer(req, 0); 6010 if (rc != 0) 6011 return (rc); 6012 6013 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6014 if (sb == NULL) 6015 return (ENOMEM); 6016 6017 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 6018 M_ZERO | M_WAITOK); 6019 6020 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 6021 p = buf; 6022 6023 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 6024 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 6025 p[1], p[0]); 6026 } 6027 6028 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 6029 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 6030 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", 6031 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 6032 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 6033 (p[1] >> 2) | ((p[2] & 3) << 30), 6034 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 6035 p[0] & 1); 6036 } 6037 6038 rc = sbuf_finish(sb); 6039 sbuf_delete(sb); 6040 free(buf, M_CXGBE); 6041 return (rc); 6042} 6043 6044static int 6045sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 6046{ 6047 struct adapter *sc = arg1; 6048 u_int i; 6049 struct sbuf *sb; 6050 uint32_t *buf, *p; 6051 int rc; 6052 6053 rc = sysctl_wire_old_buffer(req, 0); 6054 if (rc != 0) 6055 return (rc); 6056 6057 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6058 if (sb == NULL) 6059 return (ENOMEM); 6060 6061 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 6062 M_ZERO | M_WAITOK); 6063 6064 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 6065 p = buf; 6066 6067 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 6068 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6069 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 6070 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 6071 p[4], p[3], p[2], p[1], p[0]); 6072 } 6073 6074 sbuf_printf(sb, "\n\nCntl ID Data"); 6075 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 6076 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 6077 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 6078 } 6079 6080 rc = sbuf_finish(sb); 6081 sbuf_delete(sb); 6082 free(buf, M_CXGBE); 6083 return (rc); 6084} 6085 6086static int 6087sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 6088{ 6089 struct adapter *sc = arg1; 6090 struct sbuf *sb; 6091 int rc, i; 6092 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6093 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 6094
uint16_t thres[CIM_NUM_IBQ]; 6095 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 6096 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 6097 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 6098 6099 cim_num_obq = sc->chip_params->cim_num_obq; 6100 if (is_t4(sc)) { 6101 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 6102 obq_rdaddr = A_UP_OBQ_0_REALADDR; 6103 } else { 6104 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 6105 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 6106 } 6107 nq = CIM_NUM_IBQ + cim_num_obq; 6108 6109 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 6110 if (rc == 0) 6111 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 6112 if (rc != 0) 6113 return (rc); 6114 6115 t4_read_cimq_cfg(sc, base, size, thres); 6116 6117 rc = sysctl_wire_old_buffer(req, 0); 6118 if (rc != 0) 6119 return (rc); 6120 6121 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 6122 if (sb == NULL) 6123 return (ENOMEM); 6124 6125 sbuf_printf(sb, 6126 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 6127 6128 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 6129 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 6130 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 6131 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6132 G_QUEREMFLITS(p[2]) * 16); 6133 for ( ; i < nq; i++, p += 4, wr += 2) 6134 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 6135 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 6136 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 6137 G_QUEREMFLITS(p[2]) * 16); 6138 6139 rc = sbuf_finish(sb); 6140 sbuf_delete(sb); 6141 6142 return (rc); 6143} 6144 6145static int 6146sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 6147{ 6148 struct adapter *sc = arg1; 6149 struct sbuf *sb; 6150 int rc; 6151 struct tp_cpl_stats stats; 6152 6153 rc = sysctl_wire_old_buffer(req, 0); 6154 if (rc != 0) 6155 return (rc); 6156 6157 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6158 if (sb == NULL) 6159 return (ENOMEM); 6160 6161 mtx_lock(&sc->reg_lock); 6162 t4_tp_get_cpl_stats(sc, &stats); 6163 mtx_unlock(&sc->reg_lock); 6164 6165 if (sc->chip_params->nchan > 2) { 6166 sbuf_printf(sb, " channel 0 channel 1" 6167 " channel 2 channel 3"); 6168 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", 6169 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 6170 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", 6171 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 6172 } else { 6173 sbuf_printf(sb, " channel 0 channel 1"); 6174 sbuf_printf(sb, "\nCPL requests: %10u %10u", 6175 stats.req[0], stats.req[1]); 6176 sbuf_printf(sb, "\nCPL responses: %10u %10u", 6177 stats.rsp[0], stats.rsp[1]); 6178 } 6179 6180 rc = sbuf_finish(sb); 6181 sbuf_delete(sb); 6182 6183 return (rc); 6184} 6185 6186static int 6187sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 6188{ 6189 struct adapter *sc = arg1; 6190 struct sbuf *sb; 6191 int rc; 6192 struct tp_usm_stats stats; 6193 6194 rc = sysctl_wire_old_buffer(req, 0); 6195 if (rc != 0) 6196 return (rc); 6197 6198 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6199 if (sb == NULL) 6200 return (ENOMEM); 6201 6202 t4_get_usm_stats(sc, &stats); 6203 6204 sbuf_printf(sb, "Frames: %u\n", stats.frames); 6205 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 6206 sbuf_printf(sb, "Drops: %u", stats.drops); 6207 6208 rc = sbuf_finish(sb); 6209 sbuf_delete(sb); 6210 6211 return (rc); 6212} 6213 6214static const char * const devlog_level_strings[] = { 6215 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 6216 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 6217 [FW_DEVLOG_LEVEL_ERR] = 
"ERR", 6218 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 6219 [FW_DEVLOG_LEVEL_INFO] = "INFO", 6220 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 6221}; 6222 6223static const char * const devlog_facility_strings[] = { 6224 [FW_DEVLOG_FACILITY_CORE] = "CORE", 6225 [FW_DEVLOG_FACILITY_CF] = "CF", 6226 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 6227 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 6228 [FW_DEVLOG_FACILITY_RES] = "RES", 6229 [FW_DEVLOG_FACILITY_HW] = "HW", 6230 [FW_DEVLOG_FACILITY_FLR] = "FLR", 6231 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 6232 [FW_DEVLOG_FACILITY_PHY] = "PHY", 6233 [FW_DEVLOG_FACILITY_MAC] = "MAC", 6234 [FW_DEVLOG_FACILITY_PORT] = "PORT", 6235 [FW_DEVLOG_FACILITY_VI] = "VI", 6236 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 6237 [FW_DEVLOG_FACILITY_ACL] = "ACL", 6238 [FW_DEVLOG_FACILITY_TM] = "TM", 6239 [FW_DEVLOG_FACILITY_QFC] = "QFC", 6240 [FW_DEVLOG_FACILITY_DCB] = "DCB", 6241 [FW_DEVLOG_FACILITY_ETH] = "ETH", 6242 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 6243 [FW_DEVLOG_FACILITY_RI] = "RI", 6244 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 6245 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 6246 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 6247 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", 6248 [FW_DEVLOG_FACILITY_CHNET] = "CHNET", 6249}; 6250 6251static int 6252sysctl_devlog(SYSCTL_HANDLER_ARGS) 6253{ 6254 struct adapter *sc = arg1; 6255 struct devlog_params *dparams = &sc->params.devlog; 6256 struct fw_devlog_e *buf, *e; 6257 int i, j, rc, nentries, first = 0; 6258 struct sbuf *sb; 6259 uint64_t ftstamp = UINT64_MAX; 6260 6261 if (dparams->addr == 0) 6262 return (ENXIO); 6263 6264 buf = malloc(dparams->size, M_CXGBE, M_NOWAIT); 6265 if (buf == NULL) 6266 return (ENOMEM); 6267 6268 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); 6269 if (rc != 0) 6270 goto done; 6271 6272 nentries = dparams->size / sizeof(struct fw_devlog_e); 6273 for (i = 0; i < nentries; i++) { 6274 e = &buf[i]; 6275 6276 if (e->timestamp == 0) 6277 break; /* end */ 6278 6279 e->timestamp = be64toh(e->timestamp); 6280 e->seqno = be32toh(e->seqno); 6281 for (j = 0; j < 8; j++) 6282 e->params[j] = be32toh(e->params[j]); 6283 6284 if (e->timestamp < ftstamp) { 6285 ftstamp = e->timestamp; 6286 first = i; 6287 } 6288 } 6289 6290 if (buf[first].timestamp == 0) 6291 goto done; /* nothing in the log */ 6292 6293 rc = sysctl_wire_old_buffer(req, 0); 6294 if (rc != 0) 6295 goto done; 6296 6297 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6298 if (sb == NULL) { 6299 rc = ENOMEM; 6300 goto done; 6301 } 6302 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 6303 "Seq#", "Tstamp", "Level", "Facility", "Message"); 6304 6305 i = first; 6306 do { 6307 e = &buf[i]; 6308 if (e->timestamp == 0) 6309 break; /* end */ 6310 6311 sbuf_printf(sb, "%10d %15ju %8s %8s ", 6312 e->seqno, e->timestamp, 6313 (e->level < nitems(devlog_level_strings) ? 6314 devlog_level_strings[e->level] : "UNKNOWN"), 6315 (e->facility < nitems(devlog_facility_strings) ? 
6316 devlog_facility_strings[e->facility] : "UNKNOWN")); 6317 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 6318 e->params[2], e->params[3], e->params[4], 6319 e->params[5], e->params[6], e->params[7]); 6320 6321 if (++i == nentries) 6322 i = 0; 6323 } while (i != first); 6324 6325 rc = sbuf_finish(sb); 6326 sbuf_delete(sb); 6327done: 6328 free(buf, M_CXGBE); 6329 return (rc); 6330} 6331 6332static int 6333sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 6334{ 6335 struct adapter *sc = arg1; 6336 struct sbuf *sb; 6337 int rc; 6338 struct tp_fcoe_stats stats[MAX_NCHAN]; 6339 int i, nchan = sc->chip_params->nchan; 6340 6341 rc = sysctl_wire_old_buffer(req, 0); 6342 if (rc != 0) 6343 return (rc); 6344 6345 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6346 if (sb == NULL) 6347 return (ENOMEM); 6348 6349 for (i = 0; i < nchan; i++) 6350 t4_get_fcoe_stats(sc, i, &stats[i]); 6351 6352 if (nchan > 2) { 6353 sbuf_printf(sb, " channel 0 channel 1" 6354 " channel 2 channel 3"); 6355 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", 6356 stats[0].octets_ddp, stats[1].octets_ddp, 6357 stats[2].octets_ddp, stats[3].octets_ddp); 6358 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", 6359 stats[0].frames_ddp, stats[1].frames_ddp, 6360 stats[2].frames_ddp, stats[3].frames_ddp); 6361 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", 6362 stats[0].frames_drop, stats[1].frames_drop, 6363 stats[2].frames_drop, stats[3].frames_drop); 6364 } else { 6365 sbuf_printf(sb, " channel 0 channel 1"); 6366 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", 6367 stats[0].octets_ddp, stats[1].octets_ddp); 6368 sbuf_printf(sb, "\nframesDDP: %16u %16u", 6369 stats[0].frames_ddp, stats[1].frames_ddp); 6370 sbuf_printf(sb, "\nframesDrop: %16u %16u", 6371 stats[0].frames_drop, stats[1].frames_drop); 6372 } 6373 6374 rc = sbuf_finish(sb); 6375 sbuf_delete(sb); 6376 6377 return (rc); 6378} 6379 6380static int 6381sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 6382{ 6383 struct adapter *sc = arg1; 6384 struct sbuf *sb; 6385 int rc, i; 6386 unsigned int map, kbps, ipg, mode; 6387 unsigned int pace_tab[NTX_SCHED]; 6388 6389 rc = sysctl_wire_old_buffer(req, 0); 6390 if (rc != 0) 6391 return (rc); 6392 6393 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 6394 if (sb == NULL) 6395 return (ENOMEM); 6396 6397 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 6398 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 6399 t4_read_pace_tbl(sc, pace_tab); 6400 6401 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 6402 "Class IPG (0.1 ns) Flow IPG (us)"); 6403 6404 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 6405 t4_get_tx_sched(sc, i, &kbps, &ipg); 6406 sbuf_printf(sb, "\n %u %-5s %u ", i, 6407 (mode & (1 << i)) ? 
"flow" : "class", map & 3); 6408 if (kbps) 6409 sbuf_printf(sb, "%9u ", kbps); 6410 else 6411 sbuf_printf(sb, " disabled "); 6412 6413 if (ipg) 6414 sbuf_printf(sb, "%13u ", ipg); 6415 else 6416 sbuf_printf(sb, " disabled "); 6417 6418 if (pace_tab[i]) 6419 sbuf_printf(sb, "%10u", pace_tab[i]); 6420 else 6421 sbuf_printf(sb, " disabled"); 6422 } 6423 6424 rc = sbuf_finish(sb); 6425 sbuf_delete(sb); 6426 6427 return (rc); 6428} 6429 6430static int 6431sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 6432{ 6433 struct adapter *sc = arg1; 6434 struct sbuf *sb; 6435 int rc, i, j; 6436 uint64_t *p0, *p1; 6437 struct lb_port_stats s[2]; 6438 static const char *stat_name[] = { 6439 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 6440 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 6441 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 6442 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 6443 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 6444 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 6445 "BG2FramesTrunc:", "BG3FramesTrunc:" 6446 }; 6447 6448 rc = sysctl_wire_old_buffer(req, 0); 6449 if (rc != 0) 6450 return (rc); 6451 6452 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6453 if (sb == NULL) 6454 return (ENOMEM); 6455 6456 memset(s, 0, sizeof(s)); 6457 6458 for (i = 0; i < sc->chip_params->nchan; i += 2) { 6459 t4_get_lb_stats(sc, i, &s[0]); 6460 t4_get_lb_stats(sc, i + 1, &s[1]); 6461 6462 p0 = &s[0].octets; 6463 p1 = &s[1].octets; 6464 sbuf_printf(sb, "%s Loopback %u" 6465 " Loopback %u", i == 0 ? "" : "\n", i, i + 1); 6466 6467 for (j = 0; j < nitems(stat_name); j++) 6468 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 6469 *p0++, *p1++); 6470 } 6471 6472 rc = sbuf_finish(sb); 6473 sbuf_delete(sb); 6474 6475 return (rc); 6476} 6477 6478static int 6479sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 6480{ 6481 int rc = 0; 6482 struct port_info *pi = arg1; 6483 struct link_config *lc = &pi->link_cfg; 6484 struct sbuf *sb; 6485 6486 rc = sysctl_wire_old_buffer(req, 0); 6487 if (rc != 0) 6488 return(rc); 6489 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 6490 if (sb == NULL) 6491 return (ENOMEM); 6492 6493 if (lc->link_ok || lc->link_down_rc == 255) 6494 sbuf_printf(sb, "n/a"); 6495 else 6496 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); 6497 6498 rc = sbuf_finish(sb); 6499 sbuf_delete(sb); 6500 6501 return (rc); 6502} 6503 6504struct mem_desc { 6505 unsigned int base; 6506 unsigned int limit; 6507 unsigned int idx; 6508}; 6509 6510static int 6511mem_desc_cmp(const void *a, const void *b) 6512{ 6513 return ((const struct mem_desc *)a)->base - 6514 ((const struct mem_desc *)b)->base; 6515} 6516 6517static void 6518mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 6519 unsigned int to) 6520{ 6521 unsigned int size; 6522 6523 if (from == to) 6524 return; 6525 6526 size = to - from + 1; 6527 if (size == 0) 6528 return; 6529 6530 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 6531 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 6532} 6533 6534static int 6535sysctl_meminfo(SYSCTL_HANDLER_ARGS) 6536{ 6537 struct adapter *sc = arg1; 6538 struct sbuf *sb; 6539 int rc, i, n; 6540 uint32_t lo, hi, used, alloc; 6541 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 6542 static const char *region[] = { 6543 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 6544 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 6545 "Tx payload:", "Rx 
payload:", "LE hash:", "iSCSI region:", 6546 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 6547 "RQUDP region:", "PBL region:", "TXPBL region:", 6548 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 6549 "On-chip queues:" 6550 }; 6551 struct mem_desc avail[4]; 6552 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 6553 struct mem_desc *md = mem; 6554 6555 rc = sysctl_wire_old_buffer(req, 0); 6556 if (rc != 0) 6557 return (rc); 6558 6559 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6560 if (sb == NULL) 6561 return (ENOMEM); 6562 6563 for (i = 0; i < nitems(mem); i++) { 6564 mem[i].limit = 0; 6565 mem[i].idx = i; 6566 } 6567 6568 /* Find and sort the populated memory ranges */ 6569 i = 0; 6570 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 6571 if (lo & F_EDRAM0_ENABLE) { 6572 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 6573 avail[i].base = G_EDRAM0_BASE(hi) << 20; 6574 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 6575 avail[i].idx = 0; 6576 i++; 6577 } 6578 if (lo & F_EDRAM1_ENABLE) { 6579 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 6580 avail[i].base = G_EDRAM1_BASE(hi) << 20; 6581 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 6582 avail[i].idx = 1; 6583 i++; 6584 } 6585 if (lo & F_EXT_MEM_ENABLE) { 6586 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 6587 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 6588 avail[i].limit = avail[i].base + 6589 (G_EXT_MEM_SIZE(hi) << 20); 6590 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */ 6591 i++; 6592 } 6593 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { 6594 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 6595 avail[i].base = G_EXT_MEM1_BASE(hi) << 20; 6596 avail[i].limit = avail[i].base + 6597 (G_EXT_MEM1_SIZE(hi) << 20); 6598 avail[i].idx = 4; 6599 i++; 6600 } 6601 if (!i) /* no memory available */ 6602 return 0; 6603 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); 6604 6605 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); 6606 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); 6607 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); 6608 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 6609 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); 6610 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); 6611 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); 6612 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); 6613 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); 6614 6615 /* the next few have explicit upper bounds */ 6616 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); 6617 md->limit = md->base - 1 + 6618 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * 6619 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); 6620 md++; 6621 6622 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); 6623 md->limit = md->base - 1 + 6624 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * 6625 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); 6626 md++; 6627 6628 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 6629 if (chip_id(sc) <= CHELSIO_T5) 6630 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 6631 else 6632 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); 6633 md->limit = 0; 6634 } else { 6635 md->base = 0; 6636 md->idx = nitems(region); /* hide it */ 6637 } 6638 md++; 6639 6640#define ulp_region(reg) \ 6641 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ 6642 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) 6643 6644 ulp_region(RX_ISCSI); 6645 ulp_region(RX_TDDP); 6646 ulp_region(TX_TPT); 6647 ulp_region(RX_STAG); 6648 ulp_region(RX_RQ); 6649 ulp_region(RX_RQUDP); 6650 
ulp_region(RX_PBL); 6651 ulp_region(TX_PBL); 6652#undef ulp_region 6653 6654 md->base = 0; 6655 md->idx = nitems(region); 6656 if (!is_t4(sc)) { 6657 uint32_t size = 0; 6658 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); 6659 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); 6660 6661 if (is_t5(sc)) { 6662 if (sge_ctrl & F_VFIFO_ENABLE) 6663 size = G_DBVFIFO_SIZE(fifo_size); 6664 } else 6665 size = G_T6_DBVFIFO_SIZE(fifo_size); 6666 6667 if (size) { 6668 md->base = G_BASEADDR(t4_read_reg(sc, 6669 A_SGE_DBVFIFO_BADDR)); 6670 md->limit = md->base + (size << 2) - 1; 6671 } 6672 } 6673 md++; 6674 6675 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 6676 md->limit = 0; 6677 md++; 6678 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 6679 md->limit = 0; 6680 md++; 6681 6682 md->base = sc->vres.ocq.start; 6683 if (sc->vres.ocq.size) 6684 md->limit = md->base + sc->vres.ocq.size - 1; 6685 else 6686 md->idx = nitems(region); /* hide it */ 6687 md++; 6688 6689 /* add any address-space holes, there can be up to 3 */ 6690 for (n = 0; n < i - 1; n++) 6691 if (avail[n].limit < avail[n + 1].base) 6692 (md++)->base = avail[n].limit; 6693 if (avail[n].limit) 6694 (md++)->base = avail[n].limit; 6695 6696 n = md - mem; 6697 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 6698 6699 for (lo = 0; lo < i; lo++) 6700 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 6701 avail[lo].limit - 1); 6702 6703 sbuf_printf(sb, "\n"); 6704 for (i = 0; i < n; i++) { 6705 if (mem[i].idx >= nitems(region)) 6706 continue; /* skip holes */ 6707 if (!mem[i].limit) 6708 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 6709 mem_region_show(sb, region[mem[i].idx], mem[i].base, 6710 mem[i].limit); 6711 } 6712 6713 sbuf_printf(sb, "\n"); 6714 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 6715 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 6716 mem_region_show(sb, "uP RAM:", lo, hi); 6717 6718 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 6719 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 6720 mem_region_show(sb, "uP Extmem2:", lo, hi); 6721 6722 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 6723 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 6724 G_PMRXMAXPAGE(lo), 6725 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 6726 (lo & F_PMRXNUMCHN) ? 2 : 1); 6727 6728 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 6729 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 6730 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 6731 G_PMTXMAXPAGE(lo), 6732 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 6733 hi >= (1 << 20) ? 
'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 6734 sbuf_printf(sb, "%u p-structs\n", 6735 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 6736 6737 for (i = 0; i < 4; i++) { 6738 if (chip_id(sc) > CHELSIO_T5) 6739 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); 6740 else 6741 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 6742 if (is_t5(sc)) { 6743 used = G_T5_USED(lo); 6744 alloc = G_T5_ALLOC(lo); 6745 } else { 6746 used = G_USED(lo); 6747 alloc = G_ALLOC(lo); 6748 } 6749 /* For T6 these are MAC buffer groups */ 6750 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 6751 i, used, alloc); 6752 } 6753 for (i = 0; i < sc->chip_params->nchan; i++) { 6754 if (chip_id(sc) > CHELSIO_T5) 6755 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); 6756 else 6757 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 6758 if (is_t5(sc)) { 6759 used = G_T5_USED(lo); 6760 alloc = G_T5_ALLOC(lo); 6761 } else { 6762 used = G_USED(lo); 6763 alloc = G_ALLOC(lo); 6764 } 6765 /* For T6 these are MAC buffer groups */ 6766 sbuf_printf(sb, 6767 "\nLoopback %d using %u pages out of %u allocated", 6768 i, used, alloc); 6769 } 6770 6771 rc = sbuf_finish(sb); 6772 sbuf_delete(sb); 6773 6774 return (rc); 6775} 6776 6777static inline void 6778tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 6779{ 6780 *mask = x | y; 6781 y = htobe64(y); 6782 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 6783} 6784 6785static int 6786sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 6787{ 6788 struct adapter *sc = arg1; 6789 struct sbuf *sb; 6790 int rc, i; 6791 6792 MPASS(chip_id(sc) <= CHELSIO_T5); 6793 6794 rc = sysctl_wire_old_buffer(req, 0); 6795 if (rc != 0) 6796 return (rc); 6797 6798 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6799 if (sb == NULL) 6800 return (ENOMEM); 6801 6802 sbuf_printf(sb, 6803 "Idx Ethernet address Mask Vld Ports PF" 6804 " VF Replication P0 P1 P2 P3 ML"); 6805 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6806 uint64_t tcamx, tcamy, mask; 6807 uint32_t cls_lo, cls_hi; 6808 uint8_t addr[ETHER_ADDR_LEN]; 6809 6810 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 6811 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 6812 if (tcamx & tcamy) 6813 continue; 6814 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6815 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6816 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6817 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 6818 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], 6819 addr[3], addr[4], addr[5], (uintmax_t)mask, 6820 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 6821 G_PORTMAP(cls_hi), G_PF(cls_lo), 6822 (cls_lo & F_VF_VALID) ? 
G_VF(cls_lo) : -1); 6823 6824 if (cls_lo & F_REPLICATE) { 6825 struct fw_ldst_cmd ldst_cmd; 6826 6827 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6828 ldst_cmd.op_to_addrspace = 6829 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6830 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6831 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6832 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6833 ldst_cmd.u.mps.rplc.fid_idx = 6834 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6835 V_FW_LDST_CMD_IDX(i)); 6836 6837 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6838 "t4mps"); 6839 if (rc) 6840 break; 6841 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6842 sizeof(ldst_cmd), &ldst_cmd); 6843 end_synchronized_op(sc, 0); 6844 6845 if (rc != 0) { 6846 sbuf_printf(sb, "%36d", rc); 6847 rc = 0; 6848 } else { 6849 sbuf_printf(sb, " %08x %08x %08x %08x", 6850 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 6851 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 6852 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 6853 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 6854 } 6855 } else 6856 sbuf_printf(sb, "%36s", ""); 6857 6858 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 6859 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 6860 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 6861 } 6862 6863 if (rc) 6864 (void) sbuf_finish(sb); 6865 else 6866 rc = sbuf_finish(sb); 6867 sbuf_delete(sb); 6868 6869 return (rc); 6870} 6871 6872static int 6873sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) 6874{ 6875 struct adapter *sc = arg1; 6876 struct sbuf *sb; 6877 int rc, i; 6878 6879 MPASS(chip_id(sc) > CHELSIO_T5); 6880 6881 rc = sysctl_wire_old_buffer(req, 0); 6882 if (rc != 0) 6883 return (rc); 6884 6885 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 6886 if (sb == NULL) 6887 return (ENOMEM); 6888 6889 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" 6890 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" 6891 " Replication" 6892 " P0 P1 P2 P3 ML\n"); 6893 6894 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 6895 uint8_t dip_hit, vlan_vld, lookup_type, port_num; 6896 uint16_t ivlan; 6897 uint64_t tcamx, tcamy, val, mask; 6898 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; 6899 uint8_t addr[ETHER_ADDR_LEN]; 6900 6901 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); 6902 if (i < 256) 6903 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); 6904 else 6905 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); 6906 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6907 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6908 tcamy = G_DMACH(val) << 32; 6909 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6910 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6911 lookup_type = G_DATALKPTYPE(data2); 6912 port_num = G_DATAPORTNUM(data2); 6913 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6914 /* Inner header VNI */ 6915 vniy = ((data2 & F_DATAVIDH2) << 23) | 6916 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 6917 dip_hit = data2 & F_DATADIPHIT; 6918 vlan_vld = 0; 6919 } else { 6920 vniy = 0; 6921 dip_hit = 0; 6922 vlan_vld = data2 & F_DATAVIDH2; 6923 ivlan = G_VIDL(val); 6924 } 6925 6926 ctl |= V_CTLXYBITSEL(1); 6927 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 6928 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 6929 tcamx = G_DMACH(val) << 32; 6930 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 6931 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 6932 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6933 /* Inner header VNI mask */ 6934 vnix = ((data2 & F_DATAVIDH2) << 23) | 6935 (G_DATAVIDH1(data2) << 
16) | G_VIDL(val); 6936 } else 6937 vnix = 0; 6938 6939 if (tcamx & tcamy) 6940 continue; 6941 tcamxy2valmask(tcamx, tcamy, addr, &mask); 6942 6943 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 6944 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 6945 6946 if (lookup_type && lookup_type != M_DATALKPTYPE) { 6947 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6948 "%012jx %06x %06x - - %3c" 6949 " 'I' %4x %3c %#x%4u%4d", i, addr[0], 6950 addr[1], addr[2], addr[3], addr[4], addr[5], 6951 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', 6952 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6953 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6954 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6955 } else { 6956 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 6957 "%012jx - - ", i, addr[0], addr[1], 6958 addr[2], addr[3], addr[4], addr[5], 6959 (uintmax_t)mask); 6960 6961 if (vlan_vld) 6962 sbuf_printf(sb, "%4u Y ", ivlan); 6963 else 6964 sbuf_printf(sb, " - N "); 6965 6966 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", 6967 lookup_type ? 'I' : 'O', port_num, 6968 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 6969 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 6970 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 6971 } 6972 6973 6974 if (cls_lo & F_T6_REPLICATE) { 6975 struct fw_ldst_cmd ldst_cmd; 6976 6977 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 6978 ldst_cmd.op_to_addrspace = 6979 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 6980 F_FW_CMD_REQUEST | F_FW_CMD_READ | 6981 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 6982 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 6983 ldst_cmd.u.mps.rplc.fid_idx = 6984 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 6985 V_FW_LDST_CMD_IDX(i)); 6986 6987 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 6988 "t6mps"); 6989 if (rc) 6990 break; 6991 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 6992 sizeof(ldst_cmd), &ldst_cmd); 6993 end_synchronized_op(sc, 0); 6994 6995 if (rc != 0) { 6996 sbuf_printf(sb, "%72d", rc); 6997 rc = 0; 6998 } else { 6999 sbuf_printf(sb, " %08x %08x %08x %08x" 7000 " %08x %08x %08x %08x", 7001 be32toh(ldst_cmd.u.mps.rplc.rplc255_224), 7002 be32toh(ldst_cmd.u.mps.rplc.rplc223_192), 7003 be32toh(ldst_cmd.u.mps.rplc.rplc191_160), 7004 be32toh(ldst_cmd.u.mps.rplc.rplc159_128), 7005 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 7006 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 7007 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 7008 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 7009 } 7010 } else 7011 sbuf_printf(sb, "%72s", ""); 7012 7013 sbuf_printf(sb, "%4u%3u%3u%3u %#x", 7014 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), 7015 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), 7016 (cls_lo >> S_T6_MULTILISTEN0) & 0xf); 7017 } 7018 7019 if (rc) 7020 (void) sbuf_finish(sb); 7021 else 7022 rc = sbuf_finish(sb); 7023 sbuf_delete(sb); 7024 7025 return (rc); 7026} 7027 7028static int 7029sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 7030{ 7031 struct adapter *sc = arg1; 7032 struct sbuf *sb; 7033 int rc; 7034 uint16_t mtus[NMTUS]; 7035 7036 rc = sysctl_wire_old_buffer(req, 0); 7037 if (rc != 0) 7038 return (rc); 7039 7040 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7041 if (sb == NULL) 7042 return (ENOMEM); 7043 7044 t4_read_mtu_tbl(sc, mtus, NULL); 7045 7046 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 7047 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 7048 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 7049 mtus[14], mtus[15]); 7050 7051 rc = sbuf_finish(sb); 7052 sbuf_delete(sb); 7053 7054 return (rc); 7055} 7056 7057static int 
7058sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 7059{ 7060 struct adapter *sc = arg1; 7061 struct sbuf *sb; 7062 int rc, i; 7063 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; 7064 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; 7065 static const char *tx_stats[MAX_PM_NSTATS] = { 7066 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", 7067 "Tx FIFO wait", NULL, "Tx latency" 7068 }; 7069 static const char *rx_stats[MAX_PM_NSTATS] = { 7070 "Read:", "Write bypass:", "Write mem:", "Flush:", 7071 "Rx FIFO wait", NULL, "Rx latency" 7072 }; 7073 7074 rc = sysctl_wire_old_buffer(req, 0); 7075 if (rc != 0) 7076 return (rc); 7077 7078 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7079 if (sb == NULL) 7080 return (ENOMEM); 7081 7082 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); 7083 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); 7084 7085 sbuf_printf(sb, " Tx pcmds Tx bytes"); 7086 for (i = 0; i < 4; i++) { 7087 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7088 tx_cyc[i]); 7089 } 7090 7091 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 7092 for (i = 0; i < 4; i++) { 7093 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7094 rx_cyc[i]); 7095 } 7096 7097 if (chip_id(sc) > CHELSIO_T5) { 7098 sbuf_printf(sb, 7099 "\n Total wait Total occupancy"); 7100 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7101 tx_cyc[i]); 7102 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7103 rx_cyc[i]); 7104 7105 i += 2; 7106 MPASS(i < nitems(tx_stats)); 7107 7108 sbuf_printf(sb, 7109 "\n Reads Total wait"); 7110 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 7111 tx_cyc[i]); 7112 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 7113 rx_cyc[i]); 7114 } 7115 7116 rc = sbuf_finish(sb); 7117 sbuf_delete(sb); 7118 7119 return (rc); 7120} 7121 7122static int 7123sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 7124{ 7125 struct adapter *sc = arg1; 7126 struct sbuf *sb; 7127 int rc; 7128 struct tp_rdma_stats stats; 7129 7130 rc = sysctl_wire_old_buffer(req, 0); 7131 if (rc != 0) 7132 return (rc); 7133 7134 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7135 if (sb == NULL) 7136 return (ENOMEM); 7137 7138 mtx_lock(&sc->reg_lock); 7139 t4_tp_get_rdma_stats(sc, &stats); 7140 mtx_unlock(&sc->reg_lock); 7141 7142 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); 7143 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); 7144 7145 rc = sbuf_finish(sb); 7146 sbuf_delete(sb); 7147 7148 return (rc); 7149} 7150 7151static int 7152sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 7153{ 7154 struct adapter *sc = arg1; 7155 struct sbuf *sb; 7156 int rc; 7157 struct tp_tcp_stats v4, v6; 7158 7159 rc = sysctl_wire_old_buffer(req, 0); 7160 if (rc != 0) 7161 return (rc); 7162 7163 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7164 if (sb == NULL) 7165 return (ENOMEM); 7166 7167 mtx_lock(&sc->reg_lock); 7168 t4_tp_get_tcp_stats(sc, &v4, &v6); 7169 mtx_unlock(&sc->reg_lock); 7170 7171 sbuf_printf(sb, 7172 " IP IPv6\n"); 7173 sbuf_printf(sb, "OutRsts: %20u %20u\n", 7174 v4.tcp_out_rsts, v6.tcp_out_rsts); 7175 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 7176 v4.tcp_in_segs, v6.tcp_in_segs); 7177 sbuf_printf(sb, "OutSegs: %20ju %20ju\n", 7178 v4.tcp_out_segs, v6.tcp_out_segs); 7179 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 7180 v4.tcp_retrans_segs, v6.tcp_retrans_segs); 7181 7182 rc = sbuf_finish(sb); 7183 sbuf_delete(sb); 7184 7185 return (rc); 7186} 7187 7188static int 7189sysctl_tids(SYSCTL_HANDLER_ARGS) 7190{ 7191 struct adapter *sc = arg1; 7192 struct sbuf *sb; 7193 
int rc; 7194 struct tid_info *t = &sc->tids; 7195 7196 rc = sysctl_wire_old_buffer(req, 0); 7197 if (rc != 0) 7198 return (rc); 7199 7200 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7201 if (sb == NULL) 7202 return (ENOMEM); 7203 7204 if (t->natids) { 7205 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 7206 t->atids_in_use); 7207 } 7208 7209 if (t->ntids) { 7210 sbuf_printf(sb, "TID range: "); 7211 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 7212 uint32_t b, hb; 7213 7214 if (chip_id(sc) <= CHELSIO_T5) { 7215 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 7216 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; 7217 } else { 7218 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); 7219 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE); 7220 } 7221 7222 if (b) 7223 sbuf_printf(sb, "0-%u, ", b - 1); 7224 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1); 7225 } else 7226 sbuf_printf(sb, "0-%u", t->ntids - 1); 7227 sbuf_printf(sb, ", in use: %u\n", 7228 atomic_load_acq_int(&t->tids_in_use)); 7229 } 7230 7231 if (t->nstids) { 7232 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 7233 t->stid_base + t->nstids - 1, t->stids_in_use); 7234 } 7235 7236 if (t->nftids) { 7237 sbuf_printf(sb, "FTID range: %u-%u\n", t->ftid_base, 7238 t->ftid_base + t->nftids - 1); 7239 } 7240 7241 if (t->netids) { 7242 sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base, 7243 t->etid_base + t->netids - 1); 7244 } 7245 7246 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 7247 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 7248 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 7249 7250 rc = sbuf_finish(sb); 7251 sbuf_delete(sb); 7252 7253 return (rc); 7254} 7255 7256static int 7257sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 7258{ 7259 struct adapter *sc = arg1; 7260 struct sbuf *sb; 7261 int rc; 7262 struct tp_err_stats stats; 7263 7264 rc = sysctl_wire_old_buffer(req, 0); 7265 if (rc != 0) 7266 return (rc); 7267 7268 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7269 if (sb == NULL) 7270 return (ENOMEM); 7271 7272 mtx_lock(&sc->reg_lock); 7273 t4_tp_get_err_stats(sc, &stats); 7274 mtx_unlock(&sc->reg_lock); 7275 7276 if (sc->chip_params->nchan > 2) { 7277 sbuf_printf(sb, " channel 0 channel 1" 7278 " channel 2 channel 3\n"); 7279 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 7280 stats.mac_in_errs[0], stats.mac_in_errs[1], 7281 stats.mac_in_errs[2], stats.mac_in_errs[3]); 7282 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 7283 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 7284 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 7285 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 7286 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 7287 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 7288 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 7289 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 7290 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 7291 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 7292 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 7293 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 7294 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 7295 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 7296 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 7297 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 7298 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], 7299 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 7300 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 7301 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 7302 stats.ofld_chan_drops[2], 
stats.ofld_chan_drops[3]); 7303 } else { 7304 sbuf_printf(sb, " channel 0 channel 1\n"); 7305 sbuf_printf(sb, "macInErrs: %10u %10u\n", 7306 stats.mac_in_errs[0], stats.mac_in_errs[1]); 7307 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 7308 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 7309 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 7310 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 7311 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 7312 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 7313 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 7314 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 7315 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 7316 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 7317 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 7318 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 7319 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 7320 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 7321 } 7322 7323 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 7324 stats.ofld_no_neigh, stats.ofld_cong_defer); 7325 7326 rc = sbuf_finish(sb); 7327 sbuf_delete(sb); 7328 7329 return (rc); 7330} 7331 7332static int 7333sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) 7334{ 7335 struct adapter *sc = arg1; 7336 struct tp_params *tpp = &sc->params.tp; 7337 u_int mask; 7338 int rc; 7339 7340 mask = tpp->la_mask >> 16; 7341 rc = sysctl_handle_int(oidp, &mask, 0, req); 7342 if (rc != 0 || req->newptr == NULL) 7343 return (rc); 7344 if (mask > 0xffff) 7345 return (EINVAL); 7346 tpp->la_mask = mask << 16; 7347 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); 7348 7349 return (0); 7350} 7351 7352struct field_desc { 7353 const char *name; 7354 u_int start; 7355 u_int width; 7356}; 7357 7358static void 7359field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 7360{ 7361 char buf[32]; 7362 int line_size = 0; 7363 7364 while (f->name) { 7365 uint64_t mask = (1ULL << f->width) - 1; 7366 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 7367 ((uintmax_t)v >> f->start) & mask); 7368 7369 if (line_size + len >= 79) { 7370 line_size = 8; 7371 sbuf_printf(sb, "\n "); 7372 } 7373 sbuf_printf(sb, "%s ", buf); 7374 line_size += len + 1; 7375 f++; 7376 } 7377 sbuf_printf(sb, "\n"); 7378} 7379 7380static const struct field_desc tp_la0[] = { 7381 { "RcfOpCodeOut", 60, 4 }, 7382 { "State", 56, 4 }, 7383 { "WcfState", 52, 4 }, 7384 { "RcfOpcSrcOut", 50, 2 }, 7385 { "CRxError", 49, 1 }, 7386 { "ERxError", 48, 1 }, 7387 { "SanityFailed", 47, 1 }, 7388 { "SpuriousMsg", 46, 1 }, 7389 { "FlushInputMsg", 45, 1 }, 7390 { "FlushInputCpl", 44, 1 }, 7391 { "RssUpBit", 43, 1 }, 7392 { "RssFilterHit", 42, 1 }, 7393 { "Tid", 32, 10 }, 7394 { "InitTcb", 31, 1 }, 7395 { "LineNumber", 24, 7 }, 7396 { "Emsg", 23, 1 }, 7397 { "EdataOut", 22, 1 }, 7398 { "Cmsg", 21, 1 }, 7399 { "CdataOut", 20, 1 }, 7400 { "EreadPdu", 19, 1 }, 7401 { "CreadPdu", 18, 1 }, 7402 { "TunnelPkt", 17, 1 }, 7403 { "RcfPeerFin", 16, 1 }, 7404 { "RcfReasonOut", 12, 4 }, 7405 { "TxCchannel", 10, 2 }, 7406 { "RcfTxChannel", 8, 2 }, 7407 { "RxEchannel", 6, 2 }, 7408 { "RcfRxChannel", 5, 1 }, 7409 { "RcfDataOutSrdy", 4, 1 }, 7410 { "RxDvld", 3, 1 }, 7411 { "RxOoDvld", 2, 1 }, 7412 { "RxCongestion", 1, 1 }, 7413 { "TxCongestion", 0, 1 }, 7414 { NULL } 7415}; 7416 7417static const struct field_desc tp_la1[] = { 7418 { "CplCmdIn", 56, 8 }, 7419 { "CplCmdOut", 48, 8 }, 7420 { "ESynOut", 47, 1 }, 7421 { "EAckOut", 46, 1 }, 7422 { "EFinOut", 45, 1 }, 7423 { "ERstOut", 44, 1 }, 7424 { "SynIn", 43, 1 }, 7425 { "AckIn", 42, 1 }, 7426 { 
"FinIn", 41, 1 }, 7427 { "RstIn", 40, 1 }, 7428 { "DataIn", 39, 1 }, 7429 { "DataInVld", 38, 1 }, 7430 { "PadIn", 37, 1 }, 7431 { "RxBufEmpty", 36, 1 }, 7432 { "RxDdp", 35, 1 }, 7433 { "RxFbCongestion", 34, 1 }, 7434 { "TxFbCongestion", 33, 1 }, 7435 { "TxPktSumSrdy", 32, 1 }, 7436 { "RcfUlpType", 28, 4 }, 7437 { "Eread", 27, 1 }, 7438 { "Ebypass", 26, 1 }, 7439 { "Esave", 25, 1 }, 7440 { "Static0", 24, 1 }, 7441 { "Cread", 23, 1 }, 7442 { "Cbypass", 22, 1 }, 7443 { "Csave", 21, 1 }, 7444 { "CPktOut", 20, 1 }, 7445 { "RxPagePoolFull", 18, 2 }, 7446 { "RxLpbkPkt", 17, 1 }, 7447 { "TxLpbkPkt", 16, 1 }, 7448 { "RxVfValid", 15, 1 }, 7449 { "SynLearned", 14, 1 }, 7450 { "SetDelEntry", 13, 1 }, 7451 { "SetInvEntry", 12, 1 }, 7452 { "CpcmdDvld", 11, 1 }, 7453 { "CpcmdSave", 10, 1 }, 7454 { "RxPstructsFull", 8, 2 }, 7455 { "EpcmdDvld", 7, 1 }, 7456 { "EpcmdFlush", 6, 1 }, 7457 { "EpcmdTrimPrefix", 5, 1 }, 7458 { "EpcmdTrimPostfix", 4, 1 }, 7459 { "ERssIp4Pkt", 3, 1 }, 7460 { "ERssIp6Pkt", 2, 1 }, 7461 { "ERssTcpUdpPkt", 1, 1 }, 7462 { "ERssFceFipPkt", 0, 1 }, 7463 { NULL } 7464}; 7465 7466static const struct field_desc tp_la2[] = { 7467 { "CplCmdIn", 56, 8 }, 7468 { "MpsVfVld", 55, 1 }, 7469 { "MpsPf", 52, 3 }, 7470 { "MpsVf", 44, 8 }, 7471 { "SynIn", 43, 1 }, 7472 { "AckIn", 42, 1 }, 7473 { "FinIn", 41, 1 }, 7474 { "RstIn", 40, 1 }, 7475 { "DataIn", 39, 1 }, 7476 { "DataInVld", 38, 1 }, 7477 { "PadIn", 37, 1 }, 7478 { "RxBufEmpty", 36, 1 }, 7479 { "RxDdp", 35, 1 }, 7480 { "RxFbCongestion", 34, 1 }, 7481 { "TxFbCongestion", 33, 1 }, 7482 { "TxPktSumSrdy", 32, 1 }, 7483 { "RcfUlpType", 28, 4 }, 7484 { "Eread", 27, 1 }, 7485 { "Ebypass", 26, 1 }, 7486 { "Esave", 25, 1 }, 7487 { "Static0", 24, 1 }, 7488 { "Cread", 23, 1 }, 7489 { "Cbypass", 22, 1 }, 7490 { "Csave", 21, 1 }, 7491 { "CPktOut", 20, 1 }, 7492 { "RxPagePoolFull", 18, 2 }, 7493 { "RxLpbkPkt", 17, 1 }, 7494 { "TxLpbkPkt", 16, 1 }, 7495 { "RxVfValid", 15, 1 }, 7496 { "SynLearned", 14, 1 }, 7497 { "SetDelEntry", 13, 1 }, 7498 { "SetInvEntry", 12, 1 }, 7499 { "CpcmdDvld", 11, 1 }, 7500 { "CpcmdSave", 10, 1 }, 7501 { "RxPstructsFull", 8, 2 }, 7502 { "EpcmdDvld", 7, 1 }, 7503 { "EpcmdFlush", 6, 1 }, 7504 { "EpcmdTrimPrefix", 5, 1 }, 7505 { "EpcmdTrimPostfix", 4, 1 }, 7506 { "ERssIp4Pkt", 3, 1 }, 7507 { "ERssIp6Pkt", 2, 1 }, 7508 { "ERssTcpUdpPkt", 1, 1 }, 7509 { "ERssFceFipPkt", 0, 1 }, 7510 { NULL } 7511}; 7512 7513static void 7514tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 7515{ 7516 7517 field_desc_show(sb, *p, tp_la0); 7518} 7519 7520static void 7521tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 7522{ 7523 7524 if (idx) 7525 sbuf_printf(sb, "\n"); 7526 field_desc_show(sb, p[0], tp_la0); 7527 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7528 field_desc_show(sb, p[1], tp_la0); 7529} 7530 7531static void 7532tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 7533{ 7534 7535 if (idx) 7536 sbuf_printf(sb, "\n"); 7537 field_desc_show(sb, p[0], tp_la0); 7538 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 7539 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 7540} 7541 7542static int 7543sysctl_tp_la(SYSCTL_HANDLER_ARGS) 7544{ 7545 struct adapter *sc = arg1; 7546 struct sbuf *sb; 7547 uint64_t *buf, *p; 7548 int rc; 7549 u_int i, inc; 7550 void (*show_func)(struct sbuf *, uint64_t *, int); 7551 7552 rc = sysctl_wire_old_buffer(req, 0); 7553 if (rc != 0) 7554 return (rc); 7555 7556 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7557 if (sb == NULL) 7558 return (ENOMEM); 7559 7560 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 7561 7562 t4_tp_read_la(sc, buf, NULL); 7563 p = buf; 7564 7565 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 7566 case 2: 7567 inc = 2; 7568 show_func = tp_la_show2; 7569 break; 7570 case 3: 7571 inc = 2; 7572 show_func = tp_la_show3; 7573 break; 7574 default: 7575 inc = 1; 7576 show_func = tp_la_show; 7577 } 7578 7579 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 7580 (*show_func)(sb, p, i); 7581 7582 rc = sbuf_finish(sb); 7583 sbuf_delete(sb); 7584 free(buf, M_CXGBE); 7585 return (rc); 7586} 7587 7588static int 7589sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 7590{ 7591 struct adapter *sc = arg1; 7592 struct sbuf *sb; 7593 int rc; 7594 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 7595 7596 rc = sysctl_wire_old_buffer(req, 0); 7597 if (rc != 0) 7598 return (rc); 7599 7600 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7601 if (sb == NULL) 7602 return (ENOMEM); 7603 7604 t4_get_chan_txrate(sc, nrate, orate); 7605 7606 if (sc->chip_params->nchan > 2) { 7607 sbuf_printf(sb, " channel 0 channel 1" 7608 " channel 2 channel 3\n"); 7609 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 7610 nrate[0], nrate[1], nrate[2], nrate[3]); 7611 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 7612 orate[0], orate[1], orate[2], orate[3]); 7613 } else { 7614 sbuf_printf(sb, " channel 0 channel 1\n"); 7615 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 7616 nrate[0], nrate[1]); 7617 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 7618 orate[0], orate[1]); 7619 } 7620 7621 rc = sbuf_finish(sb); 7622 sbuf_delete(sb); 7623 7624 return (rc); 7625} 7626 7627static int 7628sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 7629{ 7630 struct adapter *sc = arg1; 7631 struct sbuf *sb; 7632 uint32_t *buf, *p; 7633 int rc, i; 7634 7635 rc = sysctl_wire_old_buffer(req, 0); 7636 if (rc != 0) 7637 return (rc); 7638 7639 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7640 if (sb == NULL) 7641 return (ENOMEM); 7642 7643 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 7644 M_ZERO | M_WAITOK); 7645 7646 t4_ulprx_read_la(sc, buf); 7647 p = buf; 7648 7649 sbuf_printf(sb, " Pcmd Type Message" 7650 " Data"); 7651 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 7652 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 7653 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 7654 } 7655 7656 rc = sbuf_finish(sb); 7657 sbuf_delete(sb); 7658 free(buf, M_CXGBE); 7659 return (rc); 7660} 7661 7662static int 7663sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 7664{ 7665 struct adapter *sc = arg1; 7666 struct sbuf *sb; 7667 int rc, v; 7668 7669 MPASS(chip_id(sc) >= CHELSIO_T5); 7670 7671 rc = sysctl_wire_old_buffer(req, 0); 7672 if (rc != 0) 7673 return (rc); 7674 7675 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7676 if (sb == NULL) 7677 return (ENOMEM); 7678 7679 v = t4_read_reg(sc, A_SGE_STAT_CFG); 7680 if (G_STATSOURCE_T5(v) == 7) { 7681 int mode; 7682 7683 mode = is_t5(sc) ? 
G_STATMODE(v) : G_T6_STATMODE(v); 7684 if (mode == 0) { 7685 sbuf_printf(sb, "total %d, incomplete %d", 7686 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7687 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7688 } else if (mode == 1) { 7689 sbuf_printf(sb, "total %d, data overflow %d", 7690 t4_read_reg(sc, A_SGE_STAT_TOTAL), 7691 t4_read_reg(sc, A_SGE_STAT_MATCH)); 7692 } else { 7693 sbuf_printf(sb, "unknown mode %d", mode); 7694 } 7695 } 7696 rc = sbuf_finish(sb); 7697 sbuf_delete(sb); 7698 7699 return (rc); 7700} 7701 7702static int 7703sysctl_tc_params(SYSCTL_HANDLER_ARGS) 7704{ 7705 struct adapter *sc = arg1; 7706 struct tx_sched_class *tc; 7707 struct t4_sched_class_params p; 7708 struct sbuf *sb; 7709 int i, rc, port_id, flags, mbps, gbps; 7710 7711 rc = sysctl_wire_old_buffer(req, 0); 7712 if (rc != 0) 7713 return (rc); 7714 7715 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7716 if (sb == NULL) 7717 return (ENOMEM); 7718 7719 port_id = arg2 >> 16; 7720 MPASS(port_id < sc->params.nports); 7721 MPASS(sc->port[port_id] != NULL); 7722 i = arg2 & 0xffff; 7723 MPASS(i < sc->chip_params->nsched_cls); 7724 tc = &sc->port[port_id]->tc[i]; 7725 7726 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 7727 "t4tc_p"); 7728 if (rc) 7729 goto done; 7730 flags = tc->flags; 7731 p = tc->params; 7732 end_synchronized_op(sc, LOCK_HELD); 7733 7734 if ((flags & TX_SC_OK) == 0) { 7735 sbuf_printf(sb, "none"); 7736 goto done; 7737 } 7738 7739 if (p.level == SCHED_CLASS_LEVEL_CL_WRR) { 7740 sbuf_printf(sb, "cl-wrr weight %u", p.weight); 7741 goto done; 7742 } else if (p.level == SCHED_CLASS_LEVEL_CL_RL) 7743 sbuf_printf(sb, "cl-rl"); 7744 else if (p.level == SCHED_CLASS_LEVEL_CH_RL) 7745 sbuf_printf(sb, "ch-rl"); 7746 else { 7747 rc = ENXIO; 7748 goto done; 7749 } 7750 7751 if (p.ratemode == SCHED_CLASS_RATEMODE_REL) { 7752 /* XXX: top speed or actual link speed? 
 */
7753			gbps = port_top_speed(sc->port[port_id]);
7754			sbuf_printf(sb, " %u%% of %uGbps", p.maxrate, gbps);
7755		}
7756		else if (p.ratemode == SCHED_CLASS_RATEMODE_ABS) {
7757			switch (p.rateunit) {
7758			case SCHED_CLASS_RATEUNIT_BITS:
7759				mbps = p.maxrate / 1000;
7760				gbps = p.maxrate / 1000000;
7761				if (p.maxrate == gbps * 1000000)
7762					sbuf_printf(sb, " %uGbps", gbps);
7763				else if (p.maxrate == mbps * 1000)
7764					sbuf_printf(sb, " %uMbps", mbps);
7765				else
7766					sbuf_printf(sb, " %uKbps", p.maxrate);
7767				break;
7768			case SCHED_CLASS_RATEUNIT_PKTS:
7769				sbuf_printf(sb, " %upps", p.maxrate);
7770				break;
7771			default:
7772				rc = ENXIO;
7773				goto done;
7774			}
7775		}
7776
7777		switch (p.mode) {
7778		case SCHED_CLASS_MODE_CLASS:
7779			sbuf_printf(sb, " aggregate");
7780			break;
7781		case SCHED_CLASS_MODE_FLOW:
7782			sbuf_printf(sb, " per-flow");
7783			break;
7784		default:
7785			rc = ENXIO;
7786			goto done;
7787		}
7788
7789	done:
7790		if (rc == 0)
7791			rc = sbuf_finish(sb);
7792		sbuf_delete(sb);
7793
7794		return (rc);
7795	}
7796#endif
7797
7798#ifdef TCP_OFFLOAD
7799	static void
7800	unit_conv(char *buf, size_t len, u_int val, u_int factor)
7801	{
7802		u_int rem = val % factor;
7803
7804		if (rem == 0)
7805			snprintf(buf, len, "%u", val / factor);
7806		else {
			int digits = 0;
			u_int f;

			/*
			 * Preserve the leading zeroes of the fractional part;
			 * stripping trailing zeroes from rem alone would
			 * display 1.024 as "1.24", for example.
			 */
			for (f = factor; f > 1; f /= 10)
				digits++;
			while (digits > 1 && rem % 10 == 0) {
				rem /= 10;
				digits--;
			}
			snprintf(buf, len, "%u.%0*u", val / factor, digits,
			    rem);
7810		}
7811	}
7812
7813	static int
7814	sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
7815	{
7816		struct adapter *sc = arg1;
7817		char buf[16];
7818		u_int res, re;
7819		u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;	/* cclk is in kHz -> period in ps */
7820
7821		res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
7822		switch (arg2) {
7823		case 0:
7824			/* timer_tick */
7825			re = G_TIMERRESOLUTION(res);
7826			break;
7827		case 1:
7828			/* TCP timestamp tick */
7829			re = G_TIMESTAMPRESOLUTION(res);
7830			break;
7831		case 2:
7832			/* DACK tick */
7833			re = G_DELAYEDACKRESOLUTION(res);
7834			break;
7835		default:
7836			return (EDOOFUS);
7837		}
7838
7839		unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);	/* ps -> us */
7840
7841		return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
7842	}
7843
7844	static int
7845	sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
7846	{
7847		struct adapter *sc = arg1;
7848		u_int res, dack_re, v;
7849		u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
7850
7851		res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
7852		dack_re = G_DELAYEDACKRESOLUTION(res);
7853		v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
7854
7855		return (sysctl_handle_int(oidp, &v, 0, req));
7856	}
7857
7858	static int
7859	sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
7860	{
7861		struct adapter *sc = arg1;
7862		int reg = arg2;
7863		u_int tre;
7864		u_long tp_tick_us, v;
7865		u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
7866
7867		MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
7868		    reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX ||
7869		    reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
7870		    reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
7871
7872		tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
7873		tp_tick_us = (cclk_ps << tre) / 1000000;
7874
7875		if (reg == A_TP_INIT_SRTT)
7876			v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
7877		else
7878			v = tp_tick_us * t4_read_reg(sc, reg);
7879
7880		return (sysctl_handle_long(oidp, &v, 0, req));
7881	}
7882#endif
7883
7884	static uint32_t
7885	fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf)
7886	{
7887		uint32_t mode;
7888
7889		mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
7890		    T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
7891
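	/*
	 * The bits above are reported unconditionally: IP version,
	 * addresses, and ports are available to every filter regardless of
	 * configuration.  Everything below depends on the fields compiled
	 * into the compressed filter tuple (fconf) and on the ingress
	 * configuration (iconf).
	 */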
7892 if (fconf & F_FRAGMENTATION) 7893 mode |= T4_FILTER_IP_FRAGMENT; 7894 7895 if (fconf & F_MPSHITTYPE) 7896 mode |= T4_FILTER_MPS_HIT_TYPE; 7897 7898 if (fconf & F_MACMATCH) 7899 mode |= T4_FILTER_MAC_IDX; 7900 7901 if (fconf & F_ETHERTYPE) 7902 mode |= T4_FILTER_ETH_TYPE; 7903 7904 if (fconf & F_PROTOCOL) 7905 mode |= T4_FILTER_IP_PROTO; 7906 7907 if (fconf & F_TOS) 7908 mode |= T4_FILTER_IP_TOS; 7909 7910 if (fconf & F_VLAN) 7911 mode |= T4_FILTER_VLAN; 7912 7913 if (fconf & F_VNIC_ID) { 7914 mode |= T4_FILTER_VNIC; 7915 if (iconf & F_VNIC) 7916 mode |= T4_FILTER_IC_VNIC; 7917 } 7918 7919 if (fconf & F_PORT) 7920 mode |= T4_FILTER_PORT; 7921 7922 if (fconf & F_FCOE) 7923 mode |= T4_FILTER_FCoE; 7924 7925 return (mode); 7926} 7927 7928static uint32_t 7929mode_to_fconf(uint32_t mode) 7930{ 7931 uint32_t fconf = 0; 7932 7933 if (mode & T4_FILTER_IP_FRAGMENT) 7934 fconf |= F_FRAGMENTATION; 7935 7936 if (mode & T4_FILTER_MPS_HIT_TYPE) 7937 fconf |= F_MPSHITTYPE; 7938 7939 if (mode & T4_FILTER_MAC_IDX) 7940 fconf |= F_MACMATCH; 7941 7942 if (mode & T4_FILTER_ETH_TYPE) 7943 fconf |= F_ETHERTYPE; 7944 7945 if (mode & T4_FILTER_IP_PROTO) 7946 fconf |= F_PROTOCOL; 7947 7948 if (mode & T4_FILTER_IP_TOS) 7949 fconf |= F_TOS; 7950 7951 if (mode & T4_FILTER_VLAN) 7952 fconf |= F_VLAN; 7953 7954 if (mode & T4_FILTER_VNIC) 7955 fconf |= F_VNIC_ID; 7956 7957 if (mode & T4_FILTER_PORT) 7958 fconf |= F_PORT; 7959 7960 if (mode & T4_FILTER_FCoE) 7961 fconf |= F_FCOE; 7962 7963 return (fconf); 7964} 7965 7966static uint32_t 7967mode_to_iconf(uint32_t mode) 7968{ 7969 7970 if (mode & T4_FILTER_IC_VNIC) 7971 return (F_VNIC); 7972 return (0); 7973} 7974 7975static int check_fspec_against_fconf_iconf(struct adapter *sc, 7976 struct t4_filter_specification *fs) 7977{ 7978 struct tp_params *tpp = &sc->params.tp; 7979 uint32_t fconf = 0; 7980 7981 if (fs->val.frag || fs->mask.frag) 7982 fconf |= F_FRAGMENTATION; 7983 7984 if (fs->val.matchtype || fs->mask.matchtype) 7985 fconf |= F_MPSHITTYPE; 7986 7987 if (fs->val.macidx || fs->mask.macidx) 7988 fconf |= F_MACMATCH; 7989 7990 if (fs->val.ethtype || fs->mask.ethtype) 7991 fconf |= F_ETHERTYPE; 7992 7993 if (fs->val.proto || fs->mask.proto) 7994 fconf |= F_PROTOCOL; 7995 7996 if (fs->val.tos || fs->mask.tos) 7997 fconf |= F_TOS; 7998 7999 if (fs->val.vlan_vld || fs->mask.vlan_vld) 8000 fconf |= F_VLAN; 8001 8002 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) { 8003 fconf |= F_VNIC_ID; 8004 if (tpp->ingress_config & F_VNIC) 8005 return (EINVAL); 8006 } 8007 8008 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) { 8009 fconf |= F_VNIC_ID; 8010 if ((tpp->ingress_config & F_VNIC) == 0) 8011 return (EINVAL); 8012 } 8013 8014 if (fs->val.iport || fs->mask.iport) 8015 fconf |= F_PORT; 8016 8017 if (fs->val.fcoe || fs->mask.fcoe) 8018 fconf |= F_FCOE; 8019 8020 if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map) 8021 return (E2BIG); 8022 8023 return (0); 8024} 8025 8026static int 8027get_filter_mode(struct adapter *sc, uint32_t *mode) 8028{ 8029 struct tp_params *tpp = &sc->params.tp; 8030 8031 /* 8032 * We trust the cached values of the relevant TP registers. This means 8033 * things work reliably only if writes to those registers are always via 8034 * t4_set_filter_mode. 
8035 */ 8036 *mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config); 8037 8038 return (0); 8039} 8040 8041static int 8042set_filter_mode(struct adapter *sc, uint32_t mode) 8043{ 8044 struct tp_params *tpp = &sc->params.tp; 8045 uint32_t fconf, iconf; 8046 int rc; 8047 8048 iconf = mode_to_iconf(mode); 8049 if ((iconf ^ tpp->ingress_config) & F_VNIC) { 8050 /* 8051 * For now we just complain if A_TP_INGRESS_CONFIG is not 8052 * already set to the correct value for the requested filter 8053 * mode. It's not clear if it's safe to write to this register 8054 * on the fly. (And we trust the cached value of the register). 8055 */ 8056 return (EBUSY); 8057 } 8058 8059 fconf = mode_to_fconf(mode); 8060 8061 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8062 "t4setfm"); 8063 if (rc) 8064 return (rc); 8065 8066 if (sc->tids.ftids_in_use > 0) { 8067 rc = EBUSY; 8068 goto done; 8069 } 8070 8071#ifdef TCP_OFFLOAD 8072 if (uld_active(sc, ULD_TOM)) { 8073 rc = EBUSY; 8074 goto done; 8075 } 8076#endif 8077 8078 rc = -t4_set_filter_mode(sc, fconf); 8079done: 8080 end_synchronized_op(sc, LOCK_HELD); 8081 return (rc); 8082} 8083 8084static inline uint64_t 8085get_filter_hits(struct adapter *sc, uint32_t fid) 8086{ 8087 uint32_t tcb_addr; 8088 8089 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + 8090 (fid + sc->tids.ftid_base) * TCB_SIZE; 8091 8092 if (is_t4(sc)) { 8093 uint64_t hits; 8094 8095 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8); 8096 return (be64toh(hits)); 8097 } else { 8098 uint32_t hits; 8099 8100 read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4); 8101 return (be32toh(hits)); 8102 } 8103} 8104 8105static int 8106get_filter(struct adapter *sc, struct t4_filter *t) 8107{ 8108 int i, rc, nfilters = sc->tids.nftids; 8109 struct filter_entry *f; 8110 8111 rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK, 8112 "t4getf"); 8113 if (rc) 8114 return (rc); 8115 8116 if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL || 8117 t->idx >= nfilters) { 8118 t->idx = 0xffffffff; 8119 goto done; 8120 } 8121 8122 f = &sc->tids.ftid_tab[t->idx]; 8123 for (i = t->idx; i < nfilters; i++, f++) { 8124 if (f->valid) { 8125 t->idx = i; 8126 t->l2tidx = f->l2t ? 
f->l2t->idx : 0; 8127 t->smtidx = f->smtidx; 8128 if (f->fs.hitcnts) 8129 t->hits = get_filter_hits(sc, t->idx); 8130 else 8131 t->hits = UINT64_MAX; 8132 t->fs = f->fs; 8133 8134 goto done; 8135 } 8136 } 8137 8138 t->idx = 0xffffffff; 8139done: 8140 end_synchronized_op(sc, LOCK_HELD); 8141 return (0); 8142} 8143 8144static int 8145set_filter(struct adapter *sc, struct t4_filter *t) 8146{ 8147 unsigned int nfilters, nports; 8148 struct filter_entry *f; 8149 int i, rc; 8150 8151 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf"); 8152 if (rc) 8153 return (rc); 8154 8155 nfilters = sc->tids.nftids; 8156 nports = sc->params.nports; 8157 8158 if (nfilters == 0) { 8159 rc = ENOTSUP; 8160 goto done; 8161 } 8162 8163 if (t->idx >= nfilters) { 8164 rc = EINVAL; 8165 goto done; 8166 } 8167 8168 /* Validate against the global filter mode and ingress config */ 8169 rc = check_fspec_against_fconf_iconf(sc, &t->fs); 8170 if (rc != 0) 8171 goto done; 8172 8173 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) { 8174 rc = EINVAL; 8175 goto done; 8176 } 8177 8178 if (t->fs.val.iport >= nports) { 8179 rc = EINVAL; 8180 goto done; 8181 } 8182 8183 /* Can't specify an iq if not steering to it */ 8184 if (!t->fs.dirsteer && t->fs.iq) { 8185 rc = EINVAL; 8186 goto done; 8187 } 8188 8189 /* IPv6 filter idx must be 4 aligned */ 8190 if (t->fs.type == 1 && 8191 ((t->idx & 0x3) || t->idx + 4 >= nfilters)) { 8192 rc = EINVAL; 8193 goto done; 8194 } 8195 8196 if (!(sc->flags & FULL_INIT_DONE) && 8197 ((rc = adapter_full_init(sc)) != 0)) 8198 goto done; 8199 8200 if (sc->tids.ftid_tab == NULL) { 8201 KASSERT(sc->tids.ftids_in_use == 0, 8202 ("%s: no memory allocated but filters_in_use > 0", 8203 __func__)); 8204 8205 sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) * 8206 nfilters, M_CXGBE, M_NOWAIT | M_ZERO); 8207 if (sc->tids.ftid_tab == NULL) { 8208 rc = ENOMEM; 8209 goto done; 8210 } 8211 mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF); 8212 } 8213 8214 for (i = 0; i < 4; i++) { 8215 f = &sc->tids.ftid_tab[t->idx + i]; 8216 8217 if (f->pending || f->valid) { 8218 rc = EBUSY; 8219 goto done; 8220 } 8221 if (f->locked) { 8222 rc = EPERM; 8223 goto done; 8224 } 8225 8226 if (t->fs.type == 0) 8227 break; 8228 } 8229 8230 f = &sc->tids.ftid_tab[t->idx]; 8231 f->fs = t->fs; 8232 8233 rc = set_filter_wr(sc, t->idx); 8234done: 8235 end_synchronized_op(sc, 0); 8236 8237 if (rc == 0) { 8238 mtx_lock(&sc->tids.ftid_lock); 8239 for (;;) { 8240 if (f->pending == 0) { 8241 rc = f->valid ? 
0 : EIO; 8242 break; 8243 } 8244 8245 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8246 PCATCH, "t4setfw", 0)) { 8247 rc = EINPROGRESS; 8248 break; 8249 } 8250 } 8251 mtx_unlock(&sc->tids.ftid_lock); 8252 } 8253 return (rc); 8254} 8255 8256static int 8257del_filter(struct adapter *sc, struct t4_filter *t) 8258{ 8259 unsigned int nfilters; 8260 struct filter_entry *f; 8261 int rc; 8262 8263 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf"); 8264 if (rc) 8265 return (rc); 8266 8267 nfilters = sc->tids.nftids; 8268 8269 if (nfilters == 0) { 8270 rc = ENOTSUP; 8271 goto done; 8272 } 8273 8274 if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 || 8275 t->idx >= nfilters) { 8276 rc = EINVAL; 8277 goto done; 8278 } 8279 8280 if (!(sc->flags & FULL_INIT_DONE)) { 8281 rc = EAGAIN; 8282 goto done; 8283 } 8284 8285 f = &sc->tids.ftid_tab[t->idx]; 8286 8287 if (f->pending) { 8288 rc = EBUSY; 8289 goto done; 8290 } 8291 if (f->locked) { 8292 rc = EPERM; 8293 goto done; 8294 } 8295 8296 if (f->valid) { 8297 t->fs = f->fs; /* extra info for the caller */ 8298 rc = del_filter_wr(sc, t->idx); 8299 } 8300 8301done: 8302 end_synchronized_op(sc, 0); 8303 8304 if (rc == 0) { 8305 mtx_lock(&sc->tids.ftid_lock); 8306 for (;;) { 8307 if (f->pending == 0) { 8308 rc = f->valid ? EIO : 0; 8309 break; 8310 } 8311 8312 if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock, 8313 PCATCH, "t4delfw", 0)) { 8314 rc = EINPROGRESS; 8315 break; 8316 } 8317 } 8318 mtx_unlock(&sc->tids.ftid_lock); 8319 } 8320 8321 return (rc); 8322} 8323 8324static void 8325clear_filter(struct filter_entry *f) 8326{ 8327 if (f->l2t) 8328 t4_l2t_release(f->l2t); 8329 8330 bzero(f, sizeof (*f)); 8331} 8332 8333static int 8334set_filter_wr(struct adapter *sc, int fidx) 8335{ 8336 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8337 struct fw_filter_wr *fwr; 8338 unsigned int ftid, vnic_vld, vnic_vld_mask; 8339 struct wrq_cookie cookie; 8340 8341 ASSERT_SYNCHRONIZED_OP(sc); 8342 8343 if (f->fs.newdmac || f->fs.newvlan) { 8344 /* This filter needs an L2T entry; allocate one. 
*/ 8345 f->l2t = t4_l2t_alloc_switching(sc->l2t); 8346 if (f->l2t == NULL) 8347 return (EAGAIN); 8348 if (t4_l2t_set_switching(sc, f->l2t, f->fs.vlan, f->fs.eport, 8349 f->fs.dmac)) { 8350 t4_l2t_release(f->l2t); 8351 f->l2t = NULL; 8352 return (ENOMEM); 8353 } 8354 } 8355 8356 /* Already validated against fconf, iconf */ 8357 MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0); 8358 MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0); 8359 if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld) 8360 vnic_vld = 1; 8361 else 8362 vnic_vld = 0; 8363 if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld) 8364 vnic_vld_mask = 1; 8365 else 8366 vnic_vld_mask = 0; 8367 8368 ftid = sc->tids.ftid_base + fidx; 8369 8370 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8371 if (fwr == NULL) 8372 return (ENOMEM); 8373 bzero(fwr, sizeof(*fwr)); 8374 8375 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR)); 8376 fwr->len16_pkd = htobe32(FW_LEN16(*fwr)); 8377 fwr->tid_to_iq = 8378 htobe32(V_FW_FILTER_WR_TID(ftid) | 8379 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 8380 V_FW_FILTER_WR_NOREPLY(0) | 8381 V_FW_FILTER_WR_IQ(f->fs.iq)); 8382 fwr->del_filter_to_l2tix = 8383 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 8384 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 8385 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 8386 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 8387 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 8388 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 8389 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 8390 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 8391 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 8392 f->fs.newvlan == VLAN_REWRITE) | 8393 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 8394 f->fs.newvlan == VLAN_REWRITE) | 8395 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 8396 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 8397 V_FW_FILTER_WR_PRIO(f->fs.prio) | 8398 V_FW_FILTER_WR_L2TIX(f->l2t ? 
f->l2t->idx : 0)); 8399 fwr->ethtype = htobe16(f->fs.val.ethtype); 8400 fwr->ethtypem = htobe16(f->fs.mask.ethtype); 8401 fwr->frag_to_ovlan_vldm = 8402 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 8403 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 8404 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) | 8405 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) | 8406 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) | 8407 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask)); 8408 fwr->smac_sel = 0; 8409 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) | 8410 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id)); 8411 fwr->maci_to_matchtypem = 8412 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 8413 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 8414 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 8415 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 8416 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 8417 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 8418 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 8419 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 8420 fwr->ptcl = f->fs.val.proto; 8421 fwr->ptclm = f->fs.mask.proto; 8422 fwr->ttyp = f->fs.val.tos; 8423 fwr->ttypm = f->fs.mask.tos; 8424 fwr->ivlan = htobe16(f->fs.val.vlan); 8425 fwr->ivlanm = htobe16(f->fs.mask.vlan); 8426 fwr->ovlan = htobe16(f->fs.val.vnic); 8427 fwr->ovlanm = htobe16(f->fs.mask.vnic); 8428 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip)); 8429 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm)); 8430 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip)); 8431 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm)); 8432 fwr->lp = htobe16(f->fs.val.dport); 8433 fwr->lpm = htobe16(f->fs.mask.dport); 8434 fwr->fp = htobe16(f->fs.val.sport); 8435 fwr->fpm = htobe16(f->fs.mask.sport); 8436 if (f->fs.newsmac) 8437 bcopy(f->fs.smac, fwr->sma, sizeof (fwr->sma)); 8438 8439 f->pending = 1; 8440 sc->tids.ftids_in_use++; 8441 8442 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8443 return (0); 8444} 8445 8446static int 8447del_filter_wr(struct adapter *sc, int fidx) 8448{ 8449 struct filter_entry *f = &sc->tids.ftid_tab[fidx]; 8450 struct fw_filter_wr *fwr; 8451 unsigned int ftid; 8452 struct wrq_cookie cookie; 8453 8454 ftid = sc->tids.ftid_base + fidx; 8455 8456 fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie); 8457 if (fwr == NULL) 8458 return (ENOMEM); 8459 bzero(fwr, sizeof (*fwr)); 8460 8461 t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id); 8462 8463 f->pending = 1; 8464 commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie); 8465 return (0); 8466} 8467 8468int 8469t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8470{ 8471 struct adapter *sc = iq->adapter; 8472 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1); 8473 unsigned int idx = GET_TID(rpl); 8474 unsigned int rc; 8475 struct filter_entry *f; 8476 8477 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 8478 rss->opcode)); 8479 MPASS(iq == &sc->sge.fwq); 8480 MPASS(is_ftid(sc, idx)); 8481 8482 idx -= sc->tids.ftid_base; 8483 f = &sc->tids.ftid_tab[idx]; 8484 rc = G_COOKIE(rpl->cookie); 8485 8486 mtx_lock(&sc->tids.ftid_lock); 8487 if (rc == FW_FILTER_WR_FLT_ADDED) { 8488 KASSERT(f->pending, ("%s: filter[%u] isn't pending.", 8489 __func__, idx)); 8490 f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff; 8491 f->pending = 0; /* asynchronous setup completed */ 8492 f->valid = 1; 8493 } else { 8494 if (rc != FW_FILTER_WR_FLT_DELETED) { 8495 /* Add or delete failed, display an error */ 8496 log(LOG_ERR, 8497 "filter %u setup failed with error %u\n", 8498 idx, rc); 8499 } 8500 8501 clear_filter(f); 
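		/*
		 * Reached for both a failed addition and a completed
		 * deletion; either way the entry no longer occupies an ftid.
		 */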
8502 sc->tids.ftids_in_use--; 8503 } 8504 wakeup(&sc->tids.ftid_tab); 8505 mtx_unlock(&sc->tids.ftid_lock); 8506 8507 return (0); 8508} 8509 8510static int 8511set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8512{ 8513 8514 MPASS(iq->set_tcb_rpl != NULL); 8515 return (iq->set_tcb_rpl(iq, rss, m)); 8516} 8517 8518static int 8519l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 8520{ 8521 8522 MPASS(iq->l2t_write_rpl != NULL); 8523 return (iq->l2t_write_rpl(iq, rss, m)); 8524} 8525 8526static int 8527get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 8528{ 8529 int rc; 8530 8531 if (cntxt->cid > M_CTXTQID) 8532 return (EINVAL); 8533 8534 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 8535 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 8536 return (EINVAL); 8537 8538 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 8539 if (rc) 8540 return (rc); 8541 8542 if (sc->flags & FW_OK) { 8543 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 8544 &cntxt->data[0]); 8545 if (rc == 0) 8546 goto done; 8547 } 8548 8549 /* 8550 * Read via firmware failed or wasn't even attempted. Read directly via 8551 * the backdoor. 8552 */ 8553 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 8554done: 8555 end_synchronized_op(sc, 0); 8556 return (rc); 8557} 8558 8559static int 8560load_fw(struct adapter *sc, struct t4_data *fw) 8561{ 8562 int rc; 8563 uint8_t *fw_data; 8564 8565 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 8566 if (rc) 8567 return (rc); 8568 8569 if (sc->flags & FULL_INIT_DONE) { 8570 rc = EBUSY; 8571 goto done; 8572 } 8573 8574 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 8575 if (fw_data == NULL) { 8576 rc = ENOMEM; 8577 goto done; 8578 } 8579 8580 rc = copyin(fw->data, fw_data, fw->len); 8581 if (rc == 0) 8582 rc = -t4_load_fw(sc, fw_data, fw->len); 8583 8584 free(fw_data, M_CXGBE); 8585done: 8586 end_synchronized_op(sc, 0); 8587 return (rc); 8588} 8589 8590static int 8591load_cfg(struct adapter *sc, struct t4_data *cfg) 8592{ 8593 int rc; 8594 uint8_t *cfg_data = NULL; 8595 8596 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); 8597 if (rc) 8598 return (rc); 8599 8600 if (cfg->len == 0) { 8601 /* clear */ 8602 rc = -t4_load_cfg(sc, NULL, 0); 8603 goto done; 8604 } 8605 8606 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); 8607 if (cfg_data == NULL) { 8608 rc = ENOMEM; 8609 goto done; 8610 } 8611 8612 rc = copyin(cfg->data, cfg_data, cfg->len); 8613 if (rc == 0) 8614 rc = -t4_load_cfg(sc, cfg_data, cfg->len); 8615 8616 free(cfg_data, M_CXGBE); 8617done: 8618 end_synchronized_op(sc, 0); 8619 return (rc); 8620} 8621 8622#define MAX_READ_BUF_SIZE (128 * 1024) 8623static int 8624read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 8625{ 8626 uint32_t addr, remaining, n; 8627 uint32_t *buf; 8628 int rc; 8629 uint8_t *dst; 8630 8631 rc = validate_mem_range(sc, mr->addr, mr->len); 8632 if (rc != 0) 8633 return (rc); 8634 8635 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); 8636 addr = mr->addr; 8637 remaining = mr->len; 8638 dst = (void *)mr->data; 8639 8640 while (remaining) { 8641 n = min(remaining, MAX_READ_BUF_SIZE); 8642 read_via_memwin(sc, 2, addr, buf, n); 8643 8644 rc = copyout(buf, dst, n); 8645 if (rc != 0) 8646 break; 8647 8648 dst += n; 8649 remaining -= n; 8650 addr += n; 8651 } 8652 8653 free(buf, M_CXGBE); 8654 return (rc); 8655} 8656#undef MAX_READ_BUF_SIZE 8657 
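/*
 * Illustrative userland sketch (an assumption for exposition, not part of
 * the driver): read_card_mem() above is reached via the CHELSIO_T4_GET_MEM
 * ioctl on the adapter's nexus device node (e.g. /dev/t4nex0); cxgbetool
 * drives this same path.  Field types are per t4_ioctl.h and error handling
 * is omitted.
 *
 *	struct t4_mem_range mr;
 *	uint32_t data[1024];
 *
 *	mr.addr = 0;			// must pass validate_mem_range()
 *	mr.len = sizeof(data);		// read and copied out in 128KB chunks
 *	mr.data = data;
 *	ioctl(fd, CHELSIO_T4_GET_MEM, &mr);
 */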
8658static int 8659read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 8660{ 8661 int rc; 8662 8663 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 8664 return (EINVAL); 8665 8666 if (i2cd->len > sizeof(i2cd->data)) 8667 return (EFBIG); 8668 8669 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 8670 if (rc) 8671 return (rc); 8672 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 8673 i2cd->offset, i2cd->len, &i2cd->data[0]); 8674 end_synchronized_op(sc, 0); 8675 8676 return (rc); 8677} 8678 8679static int 8680in_range(int val, int lo, int hi) 8681{ 8682 8683 return (val < 0 || (val <= hi && val >= lo)); 8684} 8685 8686static int 8687set_sched_class_config(struct adapter *sc, int minmax) 8688{ 8689 int rc; 8690 8691 if (minmax < 0) 8692 return (EINVAL); 8693 8694 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc"); 8695 if (rc) 8696 return (rc); 8697 rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1); 8698 end_synchronized_op(sc, 0); 8699 8700 return (rc); 8701} 8702 8703static int 8704set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p, 8705 int sleep_ok) 8706{ 8707 int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode; 8708 struct port_info *pi; 8709 struct tx_sched_class *tc; 8710 8711 if (p->level == SCHED_CLASS_LEVEL_CL_RL) 8712 fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL; 8713 else if (p->level == SCHED_CLASS_LEVEL_CL_WRR) 8714 fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR; 8715 else if (p->level == SCHED_CLASS_LEVEL_CH_RL) 8716 fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL; 8717 else 8718 return (EINVAL); 8719 8720 if (p->mode == SCHED_CLASS_MODE_CLASS) 8721 fw_mode = FW_SCHED_PARAMS_MODE_CLASS; 8722 else if (p->mode == SCHED_CLASS_MODE_FLOW) 8723 fw_mode = FW_SCHED_PARAMS_MODE_FLOW; 8724 else 8725 return (EINVAL); 8726 8727 if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS) 8728 fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE; 8729 else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS) 8730 fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE; 8731 else 8732 return (EINVAL); 8733 8734 if (p->ratemode == SCHED_CLASS_RATEMODE_REL) 8735 fw_ratemode = FW_SCHED_PARAMS_RATE_REL; 8736 else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS) 8737 fw_ratemode = FW_SCHED_PARAMS_RATE_ABS; 8738 else 8739 return (EINVAL); 8740 8741 /* Vet our parameters ... */ 8742 if (!in_range(p->channel, 0, sc->chip_params->nchan - 1)) 8743 return (ERANGE); 8744 8745 pi = sc->port[sc->chan_map[p->channel]]; 8746 if (pi == NULL) 8747 return (ENXIO); 8748 MPASS(pi->tx_chan == p->channel); 8749 top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */ 8750 8751 if (!in_range(p->cl, 0, sc->chip_params->nsched_cls) || 8752 !in_range(p->minrate, 0, top_speed) || 8753 !in_range(p->maxrate, 0, top_speed) || 8754 !in_range(p->weight, 0, 100)) 8755 return (ERANGE); 8756 8757 /* 8758 * Translate any unset parameters into the firmware's 8759 * nomenclature and/or fail the call if the parameters 8760 * are required ... 
8761 */ 8762 if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0) 8763 return (EINVAL); 8764 8765 if (p->minrate < 0) 8766 p->minrate = 0; 8767 if (p->maxrate < 0) { 8768 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 8769 p->level == SCHED_CLASS_LEVEL_CH_RL) 8770 return (EINVAL); 8771 else 8772 p->maxrate = 0; 8773 } 8774 if (p->weight < 0) { 8775 if (p->level == SCHED_CLASS_LEVEL_CL_WRR) 8776 return (EINVAL); 8777 else 8778 p->weight = 0; 8779 } 8780 if (p->pktsize < 0) { 8781 if (p->level == SCHED_CLASS_LEVEL_CL_RL || 8782 p->level == SCHED_CLASS_LEVEL_CH_RL) 8783 return (EINVAL); 8784 else 8785 p->pktsize = 0; 8786 } 8787 8788 rc = begin_synchronized_op(sc, NULL, 8789 sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp"); 8790 if (rc) 8791 return (rc); 8792 tc = &pi->tc[p->cl]; 8793 tc->params = *p; 8794 rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode, 8795 fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate, 8796 p->weight, p->pktsize, sleep_ok); 8797 if (rc == 0) 8798 tc->flags |= TX_SC_OK; 8799 else { 8800 /* 8801 * Unknown state at this point, see tc->params for what was 8802 * attempted. 8803 */ 8804 tc->flags &= ~TX_SC_OK; 8805 } 8806 end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD); 8807 8808 return (rc); 8809} 8810 8811int 8812t4_set_sched_class(struct adapter *sc, struct t4_sched_params *p) 8813{ 8814 8815 if (p->type != SCHED_CLASS_TYPE_PACKET) 8816 return (EINVAL); 8817 8818 if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG) 8819 return (set_sched_class_config(sc, p->u.config.minmax)); 8820 8821 if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS) 8822 return (set_sched_class_params(sc, &p->u.params, 1)); 8823 8824 return (EINVAL); 8825} 8826 8827int 8828t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p) 8829{ 8830 struct port_info *pi = NULL; 8831 struct vi_info *vi; 8832 struct sge_txq *txq; 8833 uint32_t fw_mnem, fw_queue, fw_class; 8834 int i, rc; 8835 8836 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq"); 8837 if (rc) 8838 return (rc); 8839 8840 if (p->port >= sc->params.nports) { 8841 rc = EINVAL; 8842 goto done; 8843 } 8844 8845 /* XXX: Only supported for the main VI. */ 8846 pi = sc->port[p->port]; 8847 vi = &pi->vi[0]; 8848 if (!(vi->flags & VI_INIT_DONE)) { 8849 /* tx queues not set up yet */ 8850 rc = EAGAIN; 8851 goto done; 8852 } 8853 8854 if (!in_range(p->queue, 0, vi->ntxq - 1) || 8855 !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) { 8856 rc = EINVAL; 8857 goto done; 8858 } 8859 8860 /* 8861 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX 8862 * Scheduling Class in this case). 8863 */ 8864 fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 8865 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH)); 8866 fw_class = p->cl < 0 ? 0xffffffff : p->cl; 8867 8868 /* 8869 * If op.queue is non-negative, then we're only changing the scheduling 8870 * on a single specified TX queue. 8871 */ 8872 if (p->queue >= 0) { 8873 txq = &sc->sge.txq[vi->first_txq + p->queue]; 8874 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 8875 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 8876 &fw_class); 8877 goto done; 8878 } 8879 8880 /* 8881 * Change the scheduling on all the TX queues for the 8882 * interface. 
8883 */ 8884 for_each_txq(vi, i, txq) { 8885 fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id)); 8886 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, 8887 &fw_class); 8888 if (rc) 8889 goto done; 8890 } 8891 8892 rc = 0; 8893done: 8894 end_synchronized_op(sc, 0); 8895 return (rc); 8896} 8897 8898int 8899t4_os_find_pci_capability(struct adapter *sc, int cap) 8900{ 8901 int i; 8902 8903 return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0); 8904} 8905 8906int 8907t4_os_pci_save_state(struct adapter *sc) 8908{ 8909 device_t dev; 8910 struct pci_devinfo *dinfo; 8911 8912 dev = sc->dev; 8913 dinfo = device_get_ivars(dev); 8914 8915 pci_cfg_save(dev, dinfo, 0); 8916 return (0); 8917} 8918 8919int 8920t4_os_pci_restore_state(struct adapter *sc) 8921{ 8922 device_t dev; 8923 struct pci_devinfo *dinfo; 8924 8925 dev = sc->dev; 8926 dinfo = device_get_ivars(dev); 8927 8928 pci_cfg_restore(dev, dinfo); 8929 return (0); 8930} 8931 8932void 8933t4_os_portmod_changed(const struct adapter *sc, int idx) 8934{ 8935 struct port_info *pi = sc->port[idx]; 8936 struct vi_info *vi; 8937 struct ifnet *ifp; 8938 int v; 8939 static const char *mod_str[] = { 8940 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 8941 }; 8942 8943 for_each_vi(pi, v, vi) { 8944 build_medialist(pi, &vi->media); 8945 } 8946 8947 ifp = pi->vi[0].ifp; 8948 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 8949 if_printf(ifp, "transceiver unplugged.\n"); 8950 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 8951 if_printf(ifp, "unknown transceiver inserted.\n"); 8952 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 8953 if_printf(ifp, "unsupported transceiver inserted.\n"); 8954 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 8955 if_printf(ifp, "%s transceiver inserted.\n", 8956 mod_str[pi->mod_type]); 8957 } else { 8958 if_printf(ifp, "transceiver (type %d) inserted.\n", 8959 pi->mod_type); 8960 } 8961} 8962 8963void 8964t4_os_link_changed(struct adapter *sc, int idx, int link_stat) 8965{ 8966 struct port_info *pi = sc->port[idx]; 8967 struct vi_info *vi; 8968 struct ifnet *ifp; 8969 int v; 8970 8971 for_each_vi(pi, v, vi) { 8972 ifp = vi->ifp; 8973 if (ifp == NULL) 8974 continue; 8975 8976 if (link_stat) { 8977 ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed); 8978 if_link_state_change(ifp, LINK_STATE_UP); 8979 } else { 8980 if_link_state_change(ifp, LINK_STATE_DOWN); 8981 } 8982 } 8983} 8984 8985void 8986t4_iterate(void (*func)(struct adapter *, void *), void *arg) 8987{ 8988 struct adapter *sc; 8989 8990 sx_slock(&t4_list_lock); 8991 SLIST_FOREACH(sc, &t4_list, link) { 8992 /* 8993 * func should not make any assumptions about what state sc is 8994 * in - the only guarantee is that sc->sc_lock is a valid lock. 
8995 */ 8996 func(sc, arg); 8997 } 8998 sx_sunlock(&t4_list_lock); 8999} 9000 9001static int 9002t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 9003 struct thread *td) 9004{ 9005 int rc; 9006 struct adapter *sc = dev->si_drv1; 9007 9008 rc = priv_check(td, PRIV_DRIVER); 9009 if (rc != 0) 9010 return (rc); 9011 9012 switch (cmd) { 9013 case CHELSIO_T4_GETREG: { 9014 struct t4_reg *edata = (struct t4_reg *)data; 9015 9016 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9017 return (EFAULT); 9018 9019 if (edata->size == 4) 9020 edata->val = t4_read_reg(sc, edata->addr); 9021 else if (edata->size == 8) 9022 edata->val = t4_read_reg64(sc, edata->addr); 9023 else 9024 return (EINVAL); 9025 9026 break; 9027 } 9028 case CHELSIO_T4_SETREG: { 9029 struct t4_reg *edata = (struct t4_reg *)data; 9030 9031 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 9032 return (EFAULT); 9033 9034 if (edata->size == 4) { 9035 if (edata->val & 0xffffffff00000000) 9036 return (EINVAL); 9037 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 9038 } else if (edata->size == 8) 9039 t4_write_reg64(sc, edata->addr, edata->val); 9040 else 9041 return (EINVAL); 9042 break; 9043 } 9044 case CHELSIO_T4_REGDUMP: { 9045 struct t4_regdump *regs = (struct t4_regdump *)data; 9046 int reglen = t4_get_regs_len(sc); 9047 uint8_t *buf; 9048 9049 if (regs->len < reglen) { 9050 regs->len = reglen; /* hint to the caller */ 9051 return (ENOBUFS); 9052 } 9053 9054 regs->len = reglen; 9055 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 9056 get_regs(sc, regs, buf); 9057 rc = copyout(buf, regs->data, reglen); 9058 free(buf, M_CXGBE); 9059 break; 9060 } 9061 case CHELSIO_T4_GET_FILTER_MODE: 9062 rc = get_filter_mode(sc, (uint32_t *)data); 9063 break; 9064 case CHELSIO_T4_SET_FILTER_MODE: 9065 rc = set_filter_mode(sc, *(uint32_t *)data); 9066 break; 9067 case CHELSIO_T4_GET_FILTER: 9068 rc = get_filter(sc, (struct t4_filter *)data); 9069 break; 9070 case CHELSIO_T4_SET_FILTER: 9071 rc = set_filter(sc, (struct t4_filter *)data); 9072 break; 9073 case CHELSIO_T4_DEL_FILTER: 9074 rc = del_filter(sc, (struct t4_filter *)data); 9075 break; 9076 case CHELSIO_T4_GET_SGE_CONTEXT: 9077 rc = get_sge_context(sc, (struct t4_sge_context *)data); 9078 break; 9079 case CHELSIO_T4_LOAD_FW: 9080 rc = load_fw(sc, (struct t4_data *)data); 9081 break; 9082 case CHELSIO_T4_GET_MEM: 9083 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 9084 break; 9085 case CHELSIO_T4_GET_I2C: 9086 rc = read_i2c(sc, (struct t4_i2c_data *)data); 9087 break; 9088 case CHELSIO_T4_CLEAR_STATS: { 9089 int i, v; 9090 u_int port_id = *(uint32_t *)data; 9091 struct port_info *pi; 9092 struct vi_info *vi; 9093 9094 if (port_id >= sc->params.nports) 9095 return (EINVAL); 9096 pi = sc->port[port_id]; 9097 if (pi == NULL) 9098 return (EIO); 9099 9100 /* MAC stats */ 9101 t4_clr_port_stats(sc, pi->tx_chan); 9102 pi->tx_parse_error = 0; 9103 mtx_lock(&sc->reg_lock); 9104 for_each_vi(pi, v, vi) { 9105 if (vi->flags & VI_INIT_DONE) 9106 t4_clr_vi_stats(sc, vi->viid); 9107 } 9108 mtx_unlock(&sc->reg_lock); 9109 9110 /* 9111 * Since this command accepts a port, clear stats for 9112 * all VIs on this port. 
9113 */ 9114 for_each_vi(pi, v, vi) { 9115 if (vi->flags & VI_INIT_DONE) { 9116 struct sge_rxq *rxq; 9117 struct sge_txq *txq; 9118 struct sge_wrq *wrq; 9119 9120 for_each_rxq(vi, i, rxq) { 9121#if defined(INET) || defined(INET6) 9122 rxq->lro.lro_queued = 0; 9123 rxq->lro.lro_flushed = 0; 9124#endif 9125 rxq->rxcsum = 0; 9126 rxq->vlan_extraction = 0; 9127 } 9128 9129 for_each_txq(vi, i, txq) { 9130 txq->txcsum = 0; 9131 txq->tso_wrs = 0; 9132 txq->vlan_insertion = 0; 9133 txq->imm_wrs = 0; 9134 txq->sgl_wrs = 0; 9135 txq->txpkt_wrs = 0; 9136 txq->txpkts0_wrs = 0; 9137 txq->txpkts1_wrs = 0; 9138 txq->txpkts0_pkts = 0; 9139 txq->txpkts1_pkts = 0; 9140 mp_ring_reset_stats(txq->r); 9141 } 9142 9143#ifdef TCP_OFFLOAD 9144 /* nothing to clear for each ofld_rxq */ 9145 9146 for_each_ofld_txq(vi, i, wrq) { 9147 wrq->tx_wrs_direct = 0; 9148 wrq->tx_wrs_copied = 0; 9149 } 9150#endif 9151 9152 if (IS_MAIN_VI(vi)) { 9153 wrq = &sc->sge.ctrlq[pi->port_id]; 9154 wrq->tx_wrs_direct = 0; 9155 wrq->tx_wrs_copied = 0; 9156 } 9157 } 9158 } 9159 break; 9160 } 9161 case CHELSIO_T4_SCHED_CLASS: 9162 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); 9163 break; 9164 case CHELSIO_T4_SCHED_QUEUE: 9165 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); 9166 break; 9167 case CHELSIO_T4_GET_TRACER: 9168 rc = t4_get_tracer(sc, (struct t4_tracer *)data); 9169 break; 9170 case CHELSIO_T4_SET_TRACER: 9171 rc = t4_set_tracer(sc, (struct t4_tracer *)data); 9172 break; 9173 case CHELSIO_T4_LOAD_CFG: 9174 rc = load_cfg(sc, (struct t4_data *)data); 9175 break; 9176 default: 9177 rc = ENOTTY; 9178 } 9179 9180 return (rc); 9181} 9182 9183void 9184t4_db_full(struct adapter *sc) 9185{ 9186 9187 CXGBE_UNIMPLEMENTED(__func__); 9188} 9189 9190void 9191t4_db_dropped(struct adapter *sc) 9192{ 9193 9194 CXGBE_UNIMPLEMENTED(__func__); 9195} 9196 9197#ifdef TCP_OFFLOAD 9198void 9199t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order) 9200{ 9201 9202 t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask); 9203 t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) | 9204 V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) | 9205 V_HPZ3(pgsz_order[3])); 9206} 9207 9208static int 9209toe_capability(struct vi_info *vi, int enable) 9210{ 9211 int rc; 9212 struct port_info *pi = vi->pi; 9213 struct adapter *sc = pi->adapter; 9214 9215 ASSERT_SYNCHRONIZED_OP(sc); 9216 9217 if (!is_offload(sc)) 9218 return (ENODEV); 9219 9220 if (enable) { 9221 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { 9222 /* TOE is already enabled. */ 9223 return (0); 9224 } 9225 9226 /* 9227 * We need the port's queues around so that we're able to send 9228 * and receive CPLs to/from the TOE even if the ifnet for this 9229 * port has never been UP'd administratively. 9230 */ 9231 if (!(vi->flags & VI_INIT_DONE)) { 9232 rc = vi_full_init(vi); 9233 if (rc) 9234 return (rc); 9235 } 9236 if (!(pi->vi[0].flags & VI_INIT_DONE)) { 9237 rc = vi_full_init(&pi->vi[0]); 9238 if (rc) 9239 return (rc); 9240 } 9241 9242 if (isset(&sc->offload_map, pi->port_id)) { 9243 /* TOE is enabled on another VI of this port. 
 */
9244			pi->uld_vis++;
9245			return (0);
9246		}
9247
9248		if (!uld_active(sc, ULD_TOM)) {
9249			rc = t4_activate_uld(sc, ULD_TOM);
9250			if (rc == EAGAIN) {
9251				log(LOG_WARNING,
9252				    "You must kldload t4_tom.ko before trying "
9253				    "to enable TOE on a cxgbe interface.\n");
9254			}
9255			if (rc != 0)
9256				return (rc);
9257			KASSERT(sc->tom_softc != NULL,
9258			    ("%s: TOM activated but softc NULL", __func__));
9259			KASSERT(uld_active(sc, ULD_TOM),
9260			    ("%s: TOM activated but flag not set", __func__));
9261		}
9262
9263		/* Activate iWARP and iSCSI too, if the modules are loaded. */
9264		if (!uld_active(sc, ULD_IWARP))
9265			(void) t4_activate_uld(sc, ULD_IWARP);
9266		if (!uld_active(sc, ULD_ISCSI))
9267			(void) t4_activate_uld(sc, ULD_ISCSI);
9268
9269		pi->uld_vis++;
9270		setbit(&sc->offload_map, pi->port_id);
9271	} else {
9272		pi->uld_vis--;
9273
9274		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
9275			return (0);
9276
9277		KASSERT(uld_active(sc, ULD_TOM),
9278		    ("%s: TOM never initialized?", __func__));
9279		clrbit(&sc->offload_map, pi->port_id);
9280	}
9281
9282	return (0);
9283}
9284
9285/*
9286 * Add an upper layer driver to the global list.
9287 */
9288int
9289t4_register_uld(struct uld_info *ui)
9290{
9291	int rc = 0;
9292	struct uld_info *u;
9293
9294	sx_xlock(&t4_uld_list_lock);
9295	SLIST_FOREACH(u, &t4_uld_list, link) {
9296		if (u->uld_id == ui->uld_id) {
9297			rc = EEXIST;
9298			goto done;
9299		}
9300	}
9301
9302	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
9303	ui->refcount = 0;
9304done:
9305	sx_xunlock(&t4_uld_list_lock);
9306	return (rc);
9307}
9308
9309int
9310t4_unregister_uld(struct uld_info *ui)
9311{
9312	int rc = EINVAL;
9313	struct uld_info *u;
9314
9315	sx_xlock(&t4_uld_list_lock);
9316
9317	SLIST_FOREACH(u, &t4_uld_list, link) {
9318		if (u == ui) {
9319			if (ui->refcount > 0) {
9320				rc = EBUSY;
9321				goto done;
9322			}
9323
9324			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
9325			rc = 0;
9326			goto done;
9327		}
9328	}
9329done:
9330	sx_xunlock(&t4_uld_list_lock);
9331	return (rc);
9332}
9333
9334int
9335t4_activate_uld(struct adapter *sc, int id)
9336{
9337	int rc;
9338	struct uld_info *ui;
9339
9340	ASSERT_SYNCHRONIZED_OP(sc);
9341
9342	if (id < 0 || id > ULD_MAX)
9343		return (EINVAL);
9344	rc = EAGAIN;	/* kldload the module with this ULD and try again. */
9345
9346	sx_slock(&t4_uld_list_lock);
9347
9348	SLIST_FOREACH(ui, &t4_uld_list, link) {
9349		if (ui->uld_id == id) {
9350			if (!(sc->flags & FULL_INIT_DONE)) {
9351				rc = adapter_full_init(sc);
9352				if (rc != 0)
9353					break;
9354			}
9355
9356			rc = ui->activate(sc);
9357			if (rc == 0) {
9358				setbit(&sc->active_ulds, id);
9359				ui->refcount++;
9360			}
9361			break;
9362		}
9363	}
9364
9365	sx_sunlock(&t4_uld_list_lock);
9366
9367	return (rc);
9368}
9369
9370int
9371t4_deactivate_uld(struct adapter *sc, int id)
9372{
9373	int rc;
9374	struct uld_info *ui;
9375
9376	ASSERT_SYNCHRONIZED_OP(sc);
9377
9378	if (id < 0 || id > ULD_MAX)
9379		return (EINVAL);
9380	rc = ENXIO;
9381
9382	sx_slock(&t4_uld_list_lock);
9383
9384	SLIST_FOREACH(ui, &t4_uld_list, link) {
9385		if (ui->uld_id == id) {
9386			rc = ui->deactivate(sc);
9387			if (rc == 0) {
9388				clrbit(&sc->active_ulds, id);
9389				ui->refcount--;
9390			}
9391			break;
9392		}
9393	}
9394
9395	sx_sunlock(&t4_uld_list_lock);
9396
9397	return (rc);
9398}
9399
9400int
9401uld_active(struct adapter *sc, int uld_id)
9402{
9403
9404	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
9405
9406	return (isset(&sc->active_ulds, uld_id));
9407}
9408#endif
9409
9410/*
9411 * t = ptr to tunable.
9412 * nc = number of CPUs. 9413 * c = compiled in default for that tunable. 9414 */ 9415static void 9416calculate_nqueues(int *t, int nc, const int c) 9417{ 9418 int nq; 9419 9420 if (*t > 0) 9421 return; 9422 nq = *t < 0 ? -*t : c; 9423 *t = min(nc, nq); 9424} 9425 9426/* 9427 * Come up with reasonable defaults for some of the tunables, provided they're 9428 * not set by the user (in which case we'll use the values as is). 9429 */ 9430static void 9431tweak_tunables(void) 9432{ 9433 int nc = mp_ncpus; /* our snapshot of the number of CPUs */ 9434 9435 if (t4_ntxq10g < 1) { 9436#ifdef RSS 9437 t4_ntxq10g = rss_getnumbuckets(); 9438#else 9439 calculate_nqueues(&t4_ntxq10g, nc, NTXQ_10G); 9440#endif 9441 } 9442 9443 if (t4_ntxq1g < 1) { 9444#ifdef RSS 9445 /* XXX: way too many for 1GbE? */ 9446 t4_ntxq1g = rss_getnumbuckets(); 9447#else 9448 calculate_nqueues(&t4_ntxq1g, nc, NTXQ_1G); 9449#endif 9450 } 9451 9452 calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI); 9453 9454 if (t4_nrxq10g < 1) { 9455#ifdef RSS 9456 t4_nrxq10g = rss_getnumbuckets(); 9457#else 9458 calculate_nqueues(&t4_nrxq10g, nc, NRXQ_10G); 9459#endif 9460 } 9461 9462 if (t4_nrxq1g < 1) { 9463#ifdef RSS 9464 /* XXX: way too many for 1GbE? */ 9465 t4_nrxq1g = rss_getnumbuckets(); 9466#else 9467 calculate_nqueues(&t4_nrxq1g, nc, NRXQ_1G); 9468#endif 9469 } 9470 9471 calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI); 9472 9473#ifdef TCP_OFFLOAD 9474 calculate_nqueues(&t4_nofldtxq10g, nc, NOFLDTXQ_10G); 9475 calculate_nqueues(&t4_nofldtxq1g, nc, NOFLDTXQ_1G); 9476 calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI); 9477 calculate_nqueues(&t4_nofldrxq10g, nc, NOFLDRXQ_10G); 9478 calculate_nqueues(&t4_nofldrxq1g, nc, NOFLDRXQ_1G); 9479 calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI); 9480 9481 if (t4_toecaps_allowed == -1) 9482 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; 9483 9484 if (t4_rdmacaps_allowed == -1) { 9485 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | 9486 FW_CAPS_CONFIG_RDMA_RDMAC; 9487 } 9488 9489 if (t4_iscsicaps_allowed == -1) { 9490 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | 9491 FW_CAPS_CONFIG_ISCSI_TARGET_PDU | 9492 FW_CAPS_CONFIG_ISCSI_T10DIF; 9493 } 9494#else 9495 if (t4_toecaps_allowed == -1) 9496 t4_toecaps_allowed = 0; 9497 9498 if (t4_rdmacaps_allowed == -1) 9499 t4_rdmacaps_allowed = 0; 9500 9501 if (t4_iscsicaps_allowed == -1) 9502 t4_iscsicaps_allowed = 0; 9503#endif 9504 9505#ifdef DEV_NETMAP 9506 calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI); 9507 calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI); 9508#endif 9509 9510 if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS) 9511 t4_tmr_idx_10g = TMR_IDX_10G; 9512 9513 if (t4_pktc_idx_10g < -1 || t4_pktc_idx_10g >= SGE_NCOUNTERS) 9514 t4_pktc_idx_10g = PKTC_IDX_10G; 9515 9516 if (t4_tmr_idx_1g < 0 || t4_tmr_idx_1g >= SGE_NTIMERS) 9517 t4_tmr_idx_1g = TMR_IDX_1G; 9518 9519 if (t4_pktc_idx_1g < -1 || t4_pktc_idx_1g >= SGE_NCOUNTERS) 9520 t4_pktc_idx_1g = PKTC_IDX_1G; 9521 9522 if (t4_qsize_txq < 128) 9523 t4_qsize_txq = 128; 9524 9525 if (t4_qsize_rxq < 128) 9526 t4_qsize_rxq = 128; 9527 while (t4_qsize_rxq & 7) 9528 t4_qsize_rxq++; 9529 9530 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; 9531} 9532 9533#ifdef DDB 9534static void 9535t4_dump_tcb(struct adapter *sc, int tid) 9536{ 9537 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos; 9538 9539 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); 9540 save = t4_read_reg(sc, reg); 9541 base = sc->memwin[2].mw_base; 9542 9543 /* Dump TCB for the tid */ 9544 tcb_addr = 
t4_read_reg(sc, A_TP_CMM_TCB_BASE); 9545 tcb_addr += tid * TCB_SIZE; 9546 9547 if (is_t4(sc)) { 9548 pf = 0; 9549 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */ 9550 } else { 9551 pf = V_PFNUM(sc->pf); 9552 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */ 9553 } 9554 t4_write_reg(sc, reg, win_pos | pf); 9555 t4_read_reg(sc, reg); 9556 9557 off = tcb_addr - win_pos; 9558 for (i = 0; i < 4; i++) { 9559 uint32_t buf[8]; 9560 for (j = 0; j < 8; j++, off += 4) 9561 buf[j] = htonl(t4_read_reg(sc, base + off)); 9562 9563 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", 9564 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 9565 buf[7]); 9566 } 9567 9568 t4_write_reg(sc, reg, save); 9569 t4_read_reg(sc, reg); 9570} 9571 9572static void 9573t4_dump_devlog(struct adapter *sc) 9574{ 9575 struct devlog_params *dparams = &sc->params.devlog; 9576 struct fw_devlog_e e; 9577 int i, first, j, m, nentries, rc; 9578 uint64_t ftstamp = UINT64_MAX; 9579 9580 if (dparams->start == 0) { 9581 db_printf("devlog params not valid\n"); 9582 return; 9583 } 9584 9585 nentries = dparams->size / sizeof(struct fw_devlog_e); 9586 m = fwmtype_to_hwmtype(dparams->memtype); 9587 9588 /* Find the first entry. */ 9589 first = -1; 9590 for (i = 0; i < nentries && !db_pager_quit; i++) { 9591 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9592 sizeof(e), (void *)&e); 9593 if (rc != 0) 9594 break; 9595 9596 if (e.timestamp == 0) 9597 break; 9598 9599 e.timestamp = be64toh(e.timestamp); 9600 if (e.timestamp < ftstamp) { 9601 ftstamp = e.timestamp; 9602 first = i; 9603 } 9604 } 9605 9606 if (first == -1) 9607 return; 9608 9609 i = first; 9610 do { 9611 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 9612 sizeof(e), (void *)&e); 9613 if (rc != 0) 9614 return; 9615 9616 if (e.timestamp == 0) 9617 return; 9618 9619 e.timestamp = be64toh(e.timestamp); 9620 e.seqno = be32toh(e.seqno); 9621 for (j = 0; j < 8; j++) 9622 e.params[j] = be32toh(e.params[j]); 9623 9624 db_printf("%10d %15ju %8s %8s ", 9625 e.seqno, e.timestamp, 9626 (e.level < nitems(devlog_level_strings) ? 9627 devlog_level_strings[e.level] : "UNKNOWN"), 9628 (e.facility < nitems(devlog_facility_strings) ? 
9629		    devlog_facility_strings[e.facility] : "UNKNOWN"));
9630		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
9631		    e.params[3], e.params[4], e.params[5], e.params[6],
9632		    e.params[7]);
9633
9634		if (++i == nentries)
9635			i = 0;
9636	} while (i != first && !db_pager_quit);
9637}
9638
9639static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
9640_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);
9641
9642DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
9643{
9644	device_t dev;
9645	int t;
9646	bool valid;
9647
9648	valid = false;
9649	t = db_read_token();
9650	if (t == tIDENT) {
9651		dev = device_lookup_by_name(db_tok_string);
9652		valid = true;
9653	}
9654	db_skip_to_eol();
9655	if (!valid) {
9656		db_printf("usage: show t4 devlog <nexus>\n");
9657		return;
9658	}
9659
9660	if (dev == NULL) {
9661		db_printf("device not found\n");
9662		return;
9663	}
9664
9665	t4_dump_devlog(device_get_softc(dev));
9666}
9667
9668DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
9669{
9670	device_t dev;
9671	int radix, tid, t;
9672	bool valid;
9673
9674	valid = false;
9675	radix = db_radix;
9676	db_radix = 10;
9677	t = db_read_token();
9678	if (t == tIDENT) {
9679		dev = device_lookup_by_name(db_tok_string);
9680		t = db_read_token();
9681		if (t == tNUMBER) {
9682			tid = db_tok_number;
9683			valid = true;
9684		}
9685	}
9686	db_radix = radix;
9687	db_skip_to_eol();
9688	if (!valid) {
9689		db_printf("usage: show t4 tcb <nexus> <tid>\n");
9690		return;
9691	}
9692
9693	if (dev == NULL) {
9694		db_printf("device not found\n");
9695		return;
9696	}
9697	if (tid < 0) {
9698		db_printf("invalid tid\n");
9699		return;
9700	}
9701
9702	t4_dump_tcb(device_get_softc(dev), tid);
9703}
9704#endif
9705
9706static struct sx mlu;	/* mod load unload */
9707SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
9708
9709static int
9710mod_event(module_t mod, int cmd, void *arg)
9711{
9712	int rc = 0;
9713	static int loaded = 0;
9714
9715	switch (cmd) {
9716	case MOD_LOAD:
9717		sx_xlock(&mlu);
9718		if (loaded++ == 0) {
9719			t4_sge_modload();
9720			t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl);
9721			t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl);
9722			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
9723			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
9724			sx_init(&t4_list_lock, "T4/T5 adapters");
9725			SLIST_INIT(&t4_list);
9726#ifdef TCP_OFFLOAD
9727			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
9728			SLIST_INIT(&t4_uld_list);
9729#endif
9730			t4_tracer_modload();
9731			tweak_tunables();
9732		}
9733		sx_xunlock(&mlu);
9734		break;
9735
9736	case MOD_UNLOAD:
9737		sx_xlock(&mlu);
9738		if (--loaded == 0) {
9739			int tries;
9740
9741			sx_slock(&t4_list_lock);
9742			if (!SLIST_EMPTY(&t4_list)) {
9743				rc = EBUSY;
9744				sx_sunlock(&t4_list_lock);
9745				goto done_unload;
9746			}
9747#ifdef TCP_OFFLOAD
9748			sx_slock(&t4_uld_list_lock);
9749			if (!SLIST_EMPTY(&t4_uld_list)) {
9750				rc = EBUSY;
9751				sx_sunlock(&t4_uld_list_lock);
9752				sx_sunlock(&t4_list_lock);
9753				goto done_unload;
9754			}
9755#endif
9756			tries = 0;
9757			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
9758				uprintf("%ju clusters with custom free routine "
9759				    "still in use.\n", t4_sge_extfree_refs());
9760				pause("t4unload", 2 * hz);
9761			}
9762#ifdef TCP_OFFLOAD
9763			sx_sunlock(&t4_uld_list_lock);
9764#endif
9765			sx_sunlock(&t4_list_lock);
9766
9767			if (t4_sge_extfree_refs() == 0) {
9768				t4_tracer_modunload();
9769#ifdef TCP_OFFLOAD
9770				sx_destroy(&t4_uld_list_lock);
9771#endif
9772				sx_destroy(&t4_list_lock);
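				/*
				 * No adapters, no ULDs, and no outstanding
				 * cluster references remain at this point, so
				 * it is safe to tear down the SGE module
				 * state.
				 */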
9773 t4_sge_modunload(); 9774 loaded = 0; 9775 } else { 9776 rc = EBUSY; 9777 loaded++; /* undo earlier decrement */ 9778 } 9779 } 9780done_unload: 9781 sx_xunlock(&mlu); 9782 break; 9783 } 9784 9785 return (rc); 9786} 9787 9788static devclass_t t4_devclass, t5_devclass, t6_devclass; 9789static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass; 9790static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass; 9791 9792DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0); 9793MODULE_VERSION(t4nex, 1); 9794MODULE_DEPEND(t4nex, firmware, 1, 1, 1); 9795 9796DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0); 9797MODULE_VERSION(t5nex, 1); 9798MODULE_DEPEND(t5nex, firmware, 1, 1, 1); 9799 9800DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0); 9801MODULE_VERSION(t6nex, 1); 9802MODULE_DEPEND(t6nex, firmware, 1, 1, 1); 9803#ifdef DEV_NETMAP 9804MODULE_DEPEND(t6nex, netmap, 1, 1, 1); 9805#endif /* DEV_NETMAP */ 9806 9807DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 9808MODULE_VERSION(cxgbe, 1); 9809 9810DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0); 9811MODULE_VERSION(cxl, 1); 9812 9813DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0); 9814MODULE_VERSION(cc, 1); 9815 9816DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0); 9817MODULE_VERSION(vcxgbe, 1); 9818 9819DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0); 9820MODULE_VERSION(vcxl, 1); 9821 9822DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0); 9823MODULE_VERSION(vcc, 1); 9824
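/*
 * Module layering (summary): t4nex/t5nex/t6nex attach to the PCI device and
 * own the adapter, cxgbe/cxl/cc attach to the nexus and provide the ports'
 * ifnets, and vcxgbe/vcxl/vcc sit on top of the ports for additional virtual
 * interfaces.  All of these are normally built into a single if_cxgbe.ko, so
 * "kldload if_cxgbe" (the usual way to load this driver) brings in the whole
 * stack together with the firmware dependency declared above.
 */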