/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/taskqueue.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_vlan_var.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#include <netinet/in.h>
#include <netinet/ip.h>
#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_lex.h>
#endif

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "cudbg/cudbg.h"
#include "t4_clip.h"
#include "t4_ioctl.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"
#include "t4_if.h"
#include "t4_smt.h"

/* T4 bus driver interface */
static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_child_location_str(device_t, device_t, char *, size_t);
static int t4_ready(device_t);
static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
	DEVMETHOD(device_probe, t4_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD(bus_child_location_str, t4_child_location_str),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t4_driver = {
	"t4nex",
	t4_methods,
	sizeof(struct adapter)
};
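/*
 * Note: the driver_t tables in this file are hooked into newbus further down
 * in the driver.  A minimal sketch of what that registration looks like (the
 * devclass variable name here is illustrative, not taken from this excerpt):
 *
 *	static devclass_t t4_devclass;
 *	DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
 */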
/* T4 port (cxgbe) interface */
static int cxgbe_probe(device_t);
static int cxgbe_attach(device_t);
static int cxgbe_detach(device_t);
device_method_t cxgbe_methods[] = {
	DEVMETHOD(device_probe, cxgbe_probe),
	DEVMETHOD(device_attach, cxgbe_attach),
	DEVMETHOD(device_detach, cxgbe_detach),
	{ 0, 0 }
};
static driver_t cxgbe_driver = {
	"cxgbe",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T4 VI (vcxgbe) interface */
static int vcxgbe_probe(device_t);
static int vcxgbe_attach(device_t);
static int vcxgbe_detach(device_t);
static device_method_t vcxgbe_methods[] = {
	DEVMETHOD(device_probe, vcxgbe_probe),
	DEVMETHOD(device_attach, vcxgbe_attach),
	DEVMETHOD(device_detach, vcxgbe_detach),
	{ 0, 0 }
};
static driver_t vcxgbe_driver = {
	"vcxgbe",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

static d_ioctl_t t4_ioctl;

static struct cdevsw t4_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = t4_ioctl,
	.d_name = "t4nex",
};

/* T5 bus driver interface */
static int t5_probe(device_t);
static device_method_t t5_methods[] = {
	DEVMETHOD(device_probe, t5_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD(bus_child_location_str, t4_child_location_str),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t5_driver = {
	"t5nex",
	t5_methods,
	sizeof(struct adapter)
};

/* T5 port (cxl) interface */
static driver_t cxl_driver = {
	"cxl",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T5 VI (vcxl) interface */
static driver_t vcxl_driver = {
	"vcxl",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* T6 bus driver interface */
static int t6_probe(device_t);
static device_method_t t6_methods[] = {
	DEVMETHOD(device_probe, t6_probe),
	DEVMETHOD(device_attach, t4_attach),
	DEVMETHOD(device_detach, t4_detach),

	DEVMETHOD(bus_child_location_str, t4_child_location_str),

	DEVMETHOD(t4_is_main_ready, t4_ready),
	DEVMETHOD(t4_read_port_device, t4_read_port_device),

	DEVMETHOD_END
};
static driver_t t6_driver = {
	"t6nex",
	t6_methods,
	sizeof(struct adapter)
};

/* T6 port (cc) interface */
static driver_t cc_driver = {
	"cc",
	cxgbe_methods,
	sizeof(struct port_info)
};

/* T6 VI (vcc) interface */
static driver_t vcc_driver = {
	"vcc",
	vcxgbe_methods,
	sizeof(struct vi_info)
};

/* ifnet interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgbe_transmit(struct ifnet *, struct mbuf *);
static void cxgbe_qflush(struct ifnet *);

MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");

/*
 * Correct lock order when you need to acquire multiple locks is t4_list_lock,
 * then ADAPTER_LOCK, then t4_uld_list_lock.
 */
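/*
 * For example, a hypothetical function that needs all three locks should
 * acquire and release them like this (a sketch of the rule above, not code
 * taken from elsewhere in the driver):
 *
 *	sx_slock(&t4_list_lock);
 *	ADAPTER_LOCK(sc);
 *	sx_slock(&t4_uld_list_lock);
 *	...
 *	sx_sunlock(&t4_uld_list_lock);
 *	ADAPTER_UNLOCK(sc);
 *	sx_sunlock(&t4_list_lock);
 */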
static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
SLIST_HEAD(, uld_info) t4_uld_list;
#endif

/*
 * Tunables.  See tweak_tunables() too.
 *
 * Each tunable is set to a default value here if it's known at compile-time.
 * Otherwise it is set to -n as an indication to tweak_tunables() that it
 * should provide a reasonable default (up to n) when the driver is loaded.
 *
 * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific
 * to T5 are under hw.cxl.
 */
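/*
 * For example, t4_ntxq below defaults to -NTXQ (i.e. -16): unless
 * hw.cxgbe.ntxq is set explicitly in loader.conf, tweak_tunables() replaces
 * the -16 with a default of up to 16 TX queues (typically derived from the
 * number of CPU cores).  An explicit setting such as
 *
 *	hw.cxgbe.ntxq="8"
 *
 * in loader.conf is used as-is.
 */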
SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe(4) parameters");
SYSCTL_NODE(_hw, OID_AUTO, cxl, CTLFLAG_RD, 0, "cxgbe(4) T5+ parameters");
SYSCTL_NODE(_hw_cxgbe, OID_AUTO, toe, CTLFLAG_RD, 0, "cxgbe(4) TOE parameters");

/*
 * Number of queues for tx and rx, NIC and offload.
 */
#define NTXQ 16
int t4_ntxq = -NTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq, CTLFLAG_RDTUN, &t4_ntxq, 0,
    "Number of TX queues per port");
TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq);	/* Old name, undocumented */

#define NRXQ 8
int t4_nrxq = -NRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq, CTLFLAG_RDTUN, &t4_nrxq, 0,
    "Number of RX queues per port");
TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq);	/* Old name, undocumented */

#define NTXQ_VI 1
static int t4_ntxq_vi = -NTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, ntxq_vi, CTLFLAG_RDTUN, &t4_ntxq_vi, 0,
    "Number of TX queues per VI");

#define NRXQ_VI 1
static int t4_nrxq_vi = -NRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nrxq_vi, CTLFLAG_RDTUN, &t4_nrxq_vi, 0,
    "Number of RX queues per VI");

static int t4_rsrv_noflowq = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rsrv_noflowq, CTLFLAG_RDTUN, &t4_rsrv_noflowq,
    0, "Reserve TX queue 0 of each VI for non-flowid packets");

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
#define NOFLDTXQ 8
static int t4_nofldtxq = -NOFLDTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0,
    "Number of offload TX queues per port");

#define NOFLDRXQ 2
static int t4_nofldrxq = -NOFLDRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0,
    "Number of offload RX queues per port");

#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0,
    "Number of offload TX queues per VI");

#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0,
    "Number of offload RX queues per VI");

#define TMR_IDX_OFLD 1
int t4_tmr_idx_ofld = TMR_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN,
    &t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues");

#define PKTC_IDX_OFLD (-1)
int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN,
    &t4_pktc_idx_ofld, 0, "holdoff packet counter index for offload queues");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_idle = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_idle, CTLFLAG_RDTUN,
    &t4_toe_keepalive_idle, 0, "TOE keepalive idle timer (us)");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_keepalive_interval = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, keepalive_interval, CTLFLAG_RDTUN,
    &t4_toe_keepalive_interval, 0, "TOE keepalive interval timer (us)");

/* 0 means chip/fw default, non-zero number is # of keepalives before abort */
static int t4_toe_keepalive_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, keepalive_count, CTLFLAG_RDTUN,
    &t4_toe_keepalive_count, 0, "Number of TOE keepalive probes before abort");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_min = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_min, CTLFLAG_RDTUN,
    &t4_toe_rexmt_min, 0, "Minimum TOE retransmit interval (us)");

/* 0 means chip/fw default, non-zero number is value in microseconds */
static u_long t4_toe_rexmt_max = 0;
SYSCTL_ULONG(_hw_cxgbe_toe, OID_AUTO, rexmt_max, CTLFLAG_RDTUN,
    &t4_toe_rexmt_max, 0, "Maximum TOE retransmit interval (us)");

/* 0 means chip/fw default, non-zero number is # of rexmt before abort */
static int t4_toe_rexmt_count = 0;
SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, rexmt_count, CTLFLAG_RDTUN,
    &t4_toe_rexmt_count, 0, "Number of TOE retransmissions before abort");

/* -1 means chip/fw default, other values are raw backoff values to use */
static int t4_toe_rexmt_backoff[16] = {
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
SYSCTL_NODE(_hw_cxgbe_toe, OID_AUTO, rexmt_backoff, CTLFLAG_RD, 0,
    "cxgbe(4) TOE retransmit backoff values");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 0, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[0], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 1, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[1], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 2, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[2], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 3, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[3], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 4, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[4], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 5, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[5], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 6, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[6], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 7, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[7], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 8, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[8], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 9, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[9], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 10, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[10], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 11, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[11], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 12, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[12], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 13, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[13], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 14, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[14], 0, "");
SYSCTL_INT(_hw_cxgbe_toe_rexmt_backoff, OID_AUTO, 15, CTLFLAG_RDTUN,
    &t4_toe_rexmt_backoff[15], 0, "");
#endif

#ifdef DEV_NETMAP
#define NN_MAIN_VI	(1 << 0)	/* Native netmap on the main VI */
#define NN_EXTRA_VI	(1 << 1)	/* Native netmap on the extra VI(s) */
static int t4_native_netmap = NN_EXTRA_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, native_netmap, CTLFLAG_RDTUN, &t4_native_netmap,
    0, "Native netmap support.  bit 0 = main VI, bit 1 = extra VIs");
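/*
 * For example, hw.cxgbe.native_netmap="3" (NN_MAIN_VI | NN_EXTRA_VI) in
 * loader.conf enables native netmap on both the main VI and the extra VIs,
 * "0" disables it everywhere, and the default (NN_EXTRA_VI) enables it on
 * the extra VIs only.
 */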
#define NNMTXQ 8
static int t4_nnmtxq = -NNMTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq, CTLFLAG_RDTUN, &t4_nnmtxq, 0,
    "Number of netmap TX queues");

#define NNMRXQ 8
static int t4_nnmrxq = -NNMRXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq, CTLFLAG_RDTUN, &t4_nnmrxq, 0,
    "Number of netmap RX queues");

#define NNMTXQ_VI 2
static int t4_nnmtxq_vi = -NNMTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmtxq_vi, CTLFLAG_RDTUN, &t4_nnmtxq_vi, 0,
    "Number of netmap TX queues per VI");

#define NNMRXQ_VI 2
static int t4_nnmrxq_vi = -NNMRXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nnmrxq_vi, CTLFLAG_RDTUN, &t4_nnmrxq_vi, 0,
    "Number of netmap RX queues per VI");
#endif

/*
 * Holdoff parameters for ports.
 */
#define TMR_IDX 1
int t4_tmr_idx = TMR_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx, CTLFLAG_RDTUN, &t4_tmr_idx,
    0, "Holdoff timer index");
TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx);	/* Old name */

#define PKTC_IDX (-1)
int t4_pktc_idx = PKTC_IDX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx, CTLFLAG_RDTUN, &t4_pktc_idx,
    0, "Holdoff packet counter index");
TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx);	/* Old name */

/*
 * Size (# of entries) of each tx and rx queue.
 */
unsigned int t4_qsize_txq = TX_EQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_txq, CTLFLAG_RDTUN, &t4_qsize_txq, 0,
    "Number of descriptors in each TX queue");

unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, qsize_rxq, CTLFLAG_RDTUN, &t4_qsize_rxq, 0,
    "Number of descriptors in each RX queue");

/*
 * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
 */
int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, interrupt_types, CTLFLAG_RDTUN, &t4_intr_types,
    0, "Interrupt types allowed (bit 0 = INTx, 1 = MSI, 2 = MSI-X)");

/*
 * Configuration file.  All the _CF names here are special.
 */
#define DEFAULT_CF	"default"
#define BUILTIN_CF	"built-in"
#define FLASH_CF	"flash"
#define UWIRE_CF	"uwire"
#define FPGA_CF		"fpga"
static char t4_cfg_file[32] = DEFAULT_CF;
SYSCTL_STRING(_hw_cxgbe, OID_AUTO, config_file, CTLFLAG_RDTUN, t4_cfg_file,
    sizeof(t4_cfg_file), "Firmware configuration file");

/*
 * PAUSE settings (bit 0, 1, 2 = rx_pause, tx_pause, pause_autoneg
 * respectively).
 * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
 * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
 *            mark or when signalled to do so, 0 to never emit PAUSE.
 * pause_autoneg = 1 means PAUSE will be negotiated if possible and the
 *                 negotiated settings will override rx_pause/tx_pause.
 *                 Otherwise rx_pause/tx_pause are applied forcibly.
 */
static int t4_pause_settings = PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pause_settings, CTLFLAG_RDTUN,
    &t4_pause_settings, 0,
    "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)");
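/*
 * For example, the default above is PAUSE_RX | PAUSE_TX | PAUSE_AUTONEG,
 * i.e. the value 7 (bits 0, 1, and 2 all set).  A loader.conf entry of
 * hw.cxgbe.pause_settings="1" would force rx_pause on and tx_pause off,
 * with no PAUSE autonegotiation.
 */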
/*
 * Forward Error Correction settings (bit 0, 1 = RS, BASER respectively).
 * -1 to run with the firmware default.  Same as FEC_AUTO (bit 5).
 *  0 to disable FEC.
 */
static int t4_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
    "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");

/*
 * Link autonegotiation.
 * -1 to run with the firmware default.
 *  0 to disable.
 *  1 to enable.
 */
static int t4_autoneg = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, autoneg, CTLFLAG_RDTUN, &t4_autoneg, 0,
    "Link autonegotiation");

/*
 * Firmware auto-install by driver during attach (0, 1, 2 = prohibited,
 * allowed, encouraged respectively).  '-n' is the same as 'n' except the
 * firmware version used in the checks is read from the firmware bundled
 * with the driver.
 */
static int t4_fw_install = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fw_install, CTLFLAG_RDTUN, &t4_fw_install, 0,
    "Firmware auto-install (0 = prohibited, 1 = allowed, 2 = encouraged)");

/*
 * ASIC features that will be used.  Disable the ones you don't want so that
 * the chip resources aren't wasted on features that will not be used.
 */
static int t4_nbmcaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nbmcaps_allowed, CTLFLAG_RDTUN,
    &t4_nbmcaps_allowed, 0, "Default NBM capabilities");

static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
SYSCTL_INT(_hw_cxgbe, OID_AUTO, linkcaps_allowed, CTLFLAG_RDTUN,
    &t4_linkcaps_allowed, 0, "Default link capabilities");

static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
    FW_CAPS_CONFIG_SWITCH_EGRESS;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
    &t4_switchcaps_allowed, 0, "Default switch capabilities");

#ifdef RATELIMIT
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
    FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
#else
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
    FW_CAPS_CONFIG_NIC_HASHFILTER;
#endif
SYSCTL_INT(_hw_cxgbe, OID_AUTO, niccaps_allowed, CTLFLAG_RDTUN,
    &t4_niccaps_allowed, 0, "Default NIC capabilities");

static int t4_toecaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, toecaps_allowed, CTLFLAG_RDTUN,
    &t4_toecaps_allowed, 0, "Default TCP offload capabilities");

static int t4_rdmacaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, rdmacaps_allowed, CTLFLAG_RDTUN,
    &t4_rdmacaps_allowed, 0, "Default RDMA capabilities");

static int t4_cryptocaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cryptocaps_allowed, CTLFLAG_RDTUN,
    &t4_cryptocaps_allowed, 0, "Default crypto capabilities");

static int t4_iscsicaps_allowed = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, iscsicaps_allowed, CTLFLAG_RDTUN,
    &t4_iscsicaps_allowed, 0, "Default iSCSI capabilities");

static int t4_fcoecaps_allowed = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fcoecaps_allowed, CTLFLAG_RDTUN,
    &t4_fcoecaps_allowed, 0, "Default FCoE capabilities");

static int t5_write_combine = 0;
SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine,
    0, "Use WC instead of UC for BAR2");

static int t4_num_vis = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0,
    "Number of VIs per port");
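/*
 * For example (an illustration, not a recommendation), booting with
 * hw.cxgbe.num_vis="2" makes the driver create one extra virtual interface
 * per port; the extra VIs appear as the vcxgbe/vcxl/vcc ifnets next to the
 * main cxgbe/cxl/cc ifnet of each port.
 */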
/*
 * PCIe Relaxed Ordering.
 * -1: driver should figure out a good value.
 *  0: disable RO.
 *  1: enable RO.
 *  2: leave RO alone.
 */
static int pcie_relaxed_ordering = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, pcie_relaxed_ordering, CTLFLAG_RDTUN,
    &pcie_relaxed_ordering, 0,
    "PCIe Relaxed Ordering: 0 = disable, 1 = enable, 2 = leave alone");

static int t4_panic_on_fatal_err = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, panic_on_fatal_err, CTLFLAG_RDTUN,
    &t4_panic_on_fatal_err, 0, "panic on fatal errors");

static int t4_tx_vm_wr = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_vm_wr, CTLFLAG_RWTUN, &t4_tx_vm_wr, 0,
    "Use VM work requests to transmit packets.");

/*
 * Set to non-zero to enable the attack filter.  A packet that matches any of
 * these conditions will get dropped on ingress:
 * 1) IP && source address == destination address.
 * 2) TCP/IP && source address is not a unicast address.
 * 3) TCP/IP && destination address is not a unicast address.
 * 4) IP && source address is loopback (127.x.y.z).
 * 5) IP && destination address is loopback (127.x.y.z).
 * 6) IPv6 && source address == destination address.
 * 7) IPv6 && source address is not a unicast address.
 * 8) IPv6 && source address is loopback (::1/128).
 * 9) IPv6 && destination address is loopback (::1/128).
 * 10) IPv6 && source address is unspecified (::/128).
 * 11) IPv6 && destination address is unspecified (::/128).
 * 12) TCP/IPv6 && source address is multicast (ff00::/8).
 * 13) TCP/IPv6 && destination address is multicast (ff00::/8).
 */
static int t4_attack_filter = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, attack_filter, CTLFLAG_RDTUN,
    &t4_attack_filter, 0, "Drop suspicious traffic");

static int t4_drop_ip_fragments = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_ip_fragments, CTLFLAG_RDTUN,
    &t4_drop_ip_fragments, 0, "Drop IP fragments");

static int t4_drop_pkts_with_l2_errors = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l2_errors, CTLFLAG_RDTUN,
    &t4_drop_pkts_with_l2_errors, 0,
    "Drop all frames with Layer 2 length or checksum errors");

static int t4_drop_pkts_with_l3_errors = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l3_errors, CTLFLAG_RDTUN,
    &t4_drop_pkts_with_l3_errors, 0,
    "Drop all frames with IP version, length, or checksum errors");

static int t4_drop_pkts_with_l4_errors = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l4_errors, CTLFLAG_RDTUN,
    &t4_drop_pkts_with_l4_errors, 0,
    "Drop all frames with Layer 4 length, checksum, or other errors");

#ifdef TCP_OFFLOAD
/*
 * TOE tunables.
 */
static int t4_cop_managed_offloading = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN,
    &t4_cop_managed_offloading, 0,
    "COP (Connection Offload Policy) controls all TOE offload");
#endif
/*
 * Functions used by VIs to obtain unique MAC addresses for each VI.
 */
static int vi_mac_funcs[] = {
	FW_VI_FUNC_ETH,
	FW_VI_FUNC_OFLD,
	FW_VI_FUNC_IWARP,
	FW_VI_FUNC_OPENISCSI,
	FW_VI_FUNC_OPENFCOE,
	FW_VI_FUNC_FOISCSI,
	FW_VI_FUNC_FOFCOE,
};

struct intrs_and_queues {
	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
	uint16_t num_vis;	/* number of VIs for each port */
	uint16_t nirq;		/* Total # of vectors */
	uint16_t ntxq;		/* # of NIC txq's for each port */
	uint16_t nrxq;		/* # of NIC rxq's for each port */
	uint16_t nofldtxq;	/* # of TOE/ETHOFLD txq's for each port */
	uint16_t nofldrxq;	/* # of TOE rxq's for each port */
	uint16_t nnmtxq;	/* # of netmap txq's */
	uint16_t nnmrxq;	/* # of netmap rxq's */

	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
	uint16_t ntxq_vi;	/* # of NIC txq's */
	uint16_t nrxq_vi;	/* # of NIC rxq's */
	uint16_t nofldtxq_vi;	/* # of TOE txq's */
	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
	uint16_t nnmtxq_vi;	/* # of netmap txq's */
	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
};

static void setup_memwin(struct adapter *);
static void position_memwin(struct adapter *, int, uint32_t);
static int validate_mem_range(struct adapter *, uint32_t, uint32_t);
static int fwmtype_to_hwmtype(int);
static int validate_mt_off_len(struct adapter *, int, uint32_t, uint32_t,
    uint32_t *);
static int fixup_devlog_params(struct adapter *);
static int cfg_itype_and_nqueues(struct adapter *, struct intrs_and_queues *);
static int contact_firmware(struct adapter *);
static int partition_resources(struct adapter *);
static int get_params__pre_init(struct adapter *);
static int set_params__pre_init(struct adapter *);
static int get_params__post_init(struct adapter *);
static int set_params__post_init(struct adapter *);
static void t4_set_desc(struct adapter *);
static bool fixed_ifmedia(struct port_info *);
static void build_medialist(struct port_info *);
static void init_link_config(struct port_info *);
static int fixup_link_config(struct port_info *);
static int apply_link_config(struct port_info *);
static int cxgbe_init_synchronized(struct vi_info *);
static int cxgbe_uninit_synchronized(struct vi_info *);
static void quiesce_txq(struct adapter *, struct sge_txq *);
static void quiesce_wrq(struct adapter *, struct sge_wrq *);
static void quiesce_iq(struct adapter *, struct sge_iq *);
static void quiesce_fl(struct adapter *, struct sge_fl *);
static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
    driver_intr_t *, void *, char *);
static int t4_free_irq(struct adapter *, struct irq *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct adapter *, struct vi_info *);
static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
static void cxgbe_tick(void *);
static void cxgbe_sysctls(struct port_info *);
static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS);
static int sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS);
static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
static int sysctl_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_module_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_vdd(SYSCTL_HANDLER_ARGS);
static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_devlog(SYSCTL_HANDLER_ARGS);
static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tids(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_cpus(SYSCTL_HANDLER_ARGS);
#ifdef TCP_OFFLOAD
static int sysctl_tls(SYSCTL_HANDLER_ARGS);
static int sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS);
static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
static int get_sge_context(struct adapter *, struct t4_sge_context *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
static int load_bootcfg(struct adapter *, struct t4_data *);
static int cudbg_dump(struct adapter *, struct t4_cudbg_dump *);
static void free_offload_policy(struct t4_offload_policy *);
static int set_offload_policy(struct adapter *, struct t4_offload_policy *);
static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int clear_stats(struct adapter *, u_int);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, int);
#endif
static int mod_event(module_t, int, void *);
static int notify_siblings(device_t, int);

struct {
	uint16_t device;
	char *desc;
} t4_pciids[] = {
	{0xa000, "Chelsio Terminator 4 FPGA"},
	{0x4400, "Chelsio T440-dbg"},
	{0x4401, "Chelsio T420-CR"},
	{0x4402, "Chelsio T422-CR"},
	{0x4403, "Chelsio T440-CR"},
	{0x4404, "Chelsio T420-BCH"},
	{0x4405, "Chelsio T440-BCH"},
	{0x4406, "Chelsio T440-CH"},
	{0x4407, "Chelsio T420-SO"},
	{0x4408, "Chelsio T420-CX"},
	{0x4409, "Chelsio T420-BT"},
	{0x440a, "Chelsio T404-BT"},
	{0x440e, "Chelsio T440-LP-CR"},
}, t5_pciids[] = {
	{0xb000, "Chelsio Terminator 5 FPGA"},
	{0x5400, "Chelsio T580-dbg"},
	{0x5401, "Chelsio T520-CR"},		/* 2 x 10G */
	{0x5402, "Chelsio T522-CR"},		/* 2 x 10G, 2 X 1G */
	{0x5403, "Chelsio T540-CR"},		/* 4 x 10G */
	{0x5407, "Chelsio T520-SO"},		/* 2 x 10G, nomem */
	{0x5409, "Chelsio T520-BT"},		/* 2 x 10GBaseT */
	{0x540a, "Chelsio T504-BT"},		/* 4 x 1G */
	{0x540d, "Chelsio T580-CR"},		/* 2 x 40G */
	{0x540e, "Chelsio T540-LP-CR"},		/* 4 x 10G */
	{0x5410, "Chelsio T580-LP-CR"},		/* 2 x 40G */
	{0x5411, "Chelsio T520-LL-CR"},		/* 2 x 10G */
	{0x5412, "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
	{0x5414, "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
	{0x5415, "Chelsio T502-BT"},		/* 2 x 1G */
	{0x5418, "Chelsio T540-BT"},		/* 4 x 10GBaseT */
	{0x5419, "Chelsio T540-LP-BT"},		/* 4 x 10GBaseT */
	{0x541a, "Chelsio T540-SO-BT"},		/* 4 x 10GBaseT, nomem */
	{0x541b, "Chelsio T540-SO-CR"},		/* 4 x 10G, nomem */

	/* Custom */
	{0x5483, "Custom T540-CR"},
	{0x5484, "Custom T540-BT"},
}, t6_pciids[] = {
	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */

	/* Custom */
	{0x6480, "Custom T6225-CR"},
	{0x6481, "Custom T62100-CR"},
	{0x6482, "Custom T6225-CR"},
	{0x6483, "Custom T62100-CR"},
	{0x6484, "Custom T64100-CR"},
	{0x6485, "Custom T6240-SO"},
	{0x6486, "Custom T6225-SO-CR"},
	{0x6487, "Custom T6225-CR"},
};
#ifdef TCP_OFFLOAD
/*
 * service_iq_fl() has an iq and needs the fl.  Offset of fl from the iq should
 * be exactly the same for both rxq and ofld_rxq.
 */
CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
#endif
CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);

static int
t4_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xa000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t4_pciids); i++) {
		if (d == t4_pciids[i].device) {
			device_set_desc(dev, t4_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t5_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);
	uint8_t f = pci_get_function(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	/* Attach only to PF0 of the FPGA */
	if (d == 0xb000 && f != 0)
		return (ENXIO);

	for (i = 0; i < nitems(t5_pciids); i++) {
		if (d == t5_pciids[i].device) {
			device_set_desc(dev, t5_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
t6_probe(device_t dev)
{
	int i;
	uint16_t v = pci_get_vendor(dev);
	uint16_t d = pci_get_device(dev);

	if (v != PCI_VENDOR_ID_CHELSIO)
		return (ENXIO);

	for (i = 0; i < nitems(t6_pciids); i++) {
		if (d == t6_pciids[i].device) {
			device_set_desc(dev, t6_pciids[i].desc);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static void
t5_attribute_workaround(device_t dev)
{
	device_t root_port;
	uint32_t v;

	/*
	 * The T5 chips do not properly echo the No Snoop and Relaxed
	 * Ordering attributes when replying to a TLP from a Root
	 * Port.  As a workaround, find the parent Root Port and
	 * disable No Snoop and Relaxed Ordering.  Note that this
	 * affects all devices under this root port.
	 */
	root_port = pci_find_pcie_root_port(dev);
	if (root_port == NULL) {
		device_printf(dev, "Unable to find parent root port\n");
		return;
	}

	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
	    0)
		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
		    device_get_nameunit(root_port));
}

static const struct devnames devnames[] = {
	{
		.nexus_name = "t4nex",
		.ifnet_name = "cxgbe",
		.vi_ifnet_name = "vcxgbe",
		.pf03_drv_name = "t4iov",
		.vf_nexus_name = "t4vf",
		.vf_ifnet_name = "cxgbev"
	}, {
		.nexus_name = "t5nex",
		.ifnet_name = "cxl",
		.vi_ifnet_name = "vcxl",
		.pf03_drv_name = "t5iov",
		.vf_nexus_name = "t5vf",
		.vf_ifnet_name = "cxlv"
	}, {
		.nexus_name = "t6nex",
		.ifnet_name = "cc",
		.vi_ifnet_name = "vcc",
		.pf03_drv_name = "t6iov",
		.vf_nexus_name = "t6vf",
		.vf_ifnet_name = "ccv"
	}
};

void
t4_init_devnames(struct adapter *sc)
{
	int id;

	id = chip_id(sc);
	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
		sc->names = &devnames[id - CHELSIO_T4];
	else {
		device_printf(sc->dev, "chip id %d is not supported.\n", id);
		sc->names = NULL;
	}
}

static int
t4_ifnet_unit(struct adapter *sc, struct port_info *pi)
{
	const char *parent, *name;
	long value;
	int line, unit;

	line = 0;
	parent = device_get_nameunit(sc->dev);
	name = sc->names->ifnet_name;
	while (resource_find_dev(&line, name, &unit, "at", parent) == 0) {
		if (resource_long_value(name, unit, "port", &value) == 0 &&
		    value == pi->port_id)
			return (unit);
	}
	return (-1);
}

static int
t4_attach(device_t dev)
{
	struct adapter *sc;
	int rc = 0, i, j, rqidx, tqidx, nports;
	struct make_dev_args mda;
	struct intrs_and_queues iaq;
	struct sge *s;
	uint32_t *buf;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	int ofld_tqidx;
#endif
#ifdef TCP_OFFLOAD
	int ofld_rqidx;
#endif
#ifdef DEV_NETMAP
	int nm_rqidx, nm_tqidx;
#endif
	int num_vis;

	sc = device_get_softc(dev);
	sc->dev = dev;
	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);

	if ((pci_get_device(dev) & 0xff00) == 0x5400)
		t5_attribute_workaround(dev);
	pci_enable_busmaster(dev);
	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
		uint32_t v;

		pci_set_max_read_req(dev, 4096);
		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
		if (pcie_relaxed_ordering == 0 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) != 0) {
			v &= ~PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		} else if (pcie_relaxed_ordering == 1 &&
		    (v & PCIEM_CTL_RELAXED_ORD_ENABLE) == 0) {
			v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
			pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
		}
	}
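	/*
	 * A worked example of the MPS decode above: PCIEM_CTL_MAX_PAYLOAD is
	 * the 3-bit encoded max payload size field at bits 7:5 of the PCIe
	 * device control register, where 0 = 128 bytes, 1 = 256, 2 = 512,
	 * and so on.  128 << encoding recovers the byte count, e.g. an
	 * encoding of 2 gives 128 << 2 = 512 bytes.
	 */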
	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
	sc->traceq = -1;
	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
	    device_get_nameunit(dev));

	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
	    device_get_nameunit(dev));
	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
	t4_add_adapter(sc);

	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
	TAILQ_INIT(&sc->sfl);
	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);

	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);

	sc->policy = NULL;
	rw_init(&sc->policy_lock, "connection offload policy");

	refcount_init(&sc->vxlan_refcount, 0);

	rc = t4_map_bars_0_and_4(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));

	/* Prepare the adapter for operation. */
	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
	rc = -t4_prep_adapter(sc, buf);
	free(buf, M_CXGBE);
	if (rc != 0) {
		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
		goto done;
	}

	/*
	 * This is the real PF# to which we're attaching.  Works from within
	 * PCI passthrough environments too, where pci_get_function() could
	 * return a different PF# depending on the passthrough configuration.
	 * We need to use the real PF# in all our communication with the
	 * firmware.
	 */
	j = t4_read_reg(sc, A_PL_WHOAMI);
	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
	sc->mbox = sc->pf;

	t4_init_devnames(sc);
	if (sc->names == NULL) {
		rc = ENOTSUP;
		goto done;	/* error message displayed already */
	}

	/*
	 * Do this really early, with the memory windows set up even before the
	 * character device.  The userland tool's register i/o and mem read
	 * will work even in "recovery mode".
	 */
	setup_memwin(sc);
	if (t4_init_devlog_params(sc, 0) == 0)
		fixup_devlog_params(sc);
	make_dev_args_init(&mda);
	mda.mda_devsw = &t4_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = sc;
	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
	if (rc != 0)
		device_printf(dev, "failed to create nexus char device: %d.\n",
		    rc);

	/* Go no further if recovery mode has been requested. */
	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
		device_printf(dev, "recovery mode.\n");
		goto done;
	}

#if defined(__i386__)
	if ((cpu_feature & CPUID_CX8) == 0) {
		device_printf(dev, "64 bit atomics not available.\n");
		rc = ENOTSUP;
		goto done;
	}
#endif
	/* Contact the firmware and try to become the master driver. */
	rc = contact_firmware(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */
	MPASS(sc->flags & FW_OK);

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	if (sc->flags & MASTER_PF) {
		rc = partition_resources(sc);
		if (rc != 0)
			goto done;	/* error message displayed already */
		t4_intr_clear(sc);
	}

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = t4_map_bar_2(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	rc = t4_create_dma_tag(sc);
	if (rc != 0)
		goto done;	/* error message displayed already */

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.
	 */
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		pi->port_id = i;
		/*
		 * XXX: vi[0] is special so we can't delay this allocation
		 * until pi->nvi's final value is known.
		 */
		pi->vi = malloc(sizeof(struct vi_info) * t4_num_vis, M_CXGBE,
		    M_ZERO | M_WAITOK);

		/*
		 * Allocate the "main" VI and initialize parameters
		 * like mac addr.
		 */
		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
		if (rc != 0) {
			device_printf(dev, "unable to initialize port %d: %d\n",
			    i, rc);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
			sc->port[i] = NULL;
			goto done;
		}

		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
		    device_get_nameunit(dev), i);
		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
		sc->chan_map[pi->tx_chan] = i;

		/*
		 * The MPS counter for FCS errors doesn't work correctly on the
		 * T6 so we use the MAC counter here.  Which MAC is in use
		 * depends on the link settings which will be known when the
		 * link comes up.
		 */
		if (is_t6(sc)) {
			pi->fcs_reg = -1;
		} else if (is_t4(sc)) {
			pi->fcs_reg = PORT_REG(pi->tx_chan,
			    A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
		} else {
			pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
			    A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
		}
		pi->fcs_base = 0;

		/* All VIs on this port share this media. */
		ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
		    cxgbe_media_status);

		PORT_LOCK(pi);
		init_link_config(pi);
		fixup_link_config(pi);
		build_medialist(pi);
		if (fixed_ifmedia(pi))
			pi->flags |= FIXED_IFMEDIA;
		PORT_UNLOCK(pi);

		pi->dev = device_add_child(dev, sc->names->ifnet_name,
		    t4_ifnet_unit(sc, pi));
		if (pi->dev == NULL) {
			device_printf(dev,
			    "failed to add device for port %d.\n", i);
			rc = ENXIO;
			goto done;
		}
		pi->vi[0].dev = pi->dev;
		device_set_softc(pi->dev, pi);
	}
	/*
	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
	 */
	nports = sc->params.nports;
	rc = cfg_itype_and_nqueues(sc, &iaq);
	if (rc != 0)
		goto done;	/* error message displayed already */

	num_vis = iaq.num_vis;
	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	s = &sc->sge;
	s->nrxq = nports * iaq.nrxq;
	s->ntxq = nports * iaq.ntxq;
	if (num_vis > 1) {
		s->nrxq += nports * (num_vis - 1) * iaq.nrxq_vi;
		s->ntxq += nports * (num_vis - 1) * iaq.ntxq_vi;
	}
	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
	s->neq += nports;		/* ctrl queues: 1 per port */
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	if (is_offload(sc) || is_ethoffload(sc)) {
		s->nofldtxq = nports * iaq.nofldtxq;
		if (num_vis > 1)
			s->nofldtxq += nports * (num_vis - 1) * iaq.nofldtxq_vi;
		s->neq += s->nofldtxq;

		s->ofld_txq = malloc(s->nofldtxq * sizeof(struct sge_wrq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef TCP_OFFLOAD
	if (is_offload(sc)) {
		s->nofldrxq = nports * iaq.nofldrxq;
		if (num_vis > 1)
			s->nofldrxq += nports * (num_vis - 1) * iaq.nofldrxq_vi;
		s->neq += s->nofldrxq;	/* free list */
		s->niq += s->nofldrxq;

		s->ofld_rxq = malloc(s->nofldrxq * sizeof(struct sge_ofld_rxq),
		    M_CXGBE, M_ZERO | M_WAITOK);
	}
#endif
#ifdef DEV_NETMAP
	s->nnmrxq = 0;
	s->nnmtxq = 0;
	if (t4_native_netmap & NN_MAIN_VI) {
		s->nnmrxq += nports * iaq.nnmrxq;
		s->nnmtxq += nports * iaq.nnmtxq;
	}
	if (num_vis > 1 && t4_native_netmap & NN_EXTRA_VI) {
		s->nnmrxq += nports * (num_vis - 1) * iaq.nnmrxq_vi;
		s->nnmtxq += nports * (num_vis - 1) * iaq.nnmtxq_vi;
	}
	s->neq += s->nnmtxq + s->nnmrxq;
	s->niq += s->nnmrxq;

	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
	    M_CXGBE, M_ZERO | M_WAITOK);
	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
	    M_CXGBE, M_ZERO | M_WAITOK);
#endif
	MPASS(s->niq <= s->iqmap_sz);
	MPASS(s->neq <= s->eqmap_sz);
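	/*
	 * A worked example of the queue accounting above (numbers chosen for
	 * illustration only): a 2-port adapter with num_vis = 1, nrxq = 8,
	 * ntxq = 16, and no offload or netmap queues ends up with
	 *	s->nrxq = 2 * 8 = 16, s->ntxq = 2 * 16 = 32,
	 *	s->neq = 32 + 16 + 2 = 50 (one ctrl queue per port), and
	 *	s->niq = 16 + 1 = 17 (one extra for the fw event queue).
	 */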
	s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE,
	    M_ZERO | M_WAITOK);
	s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE,
	    M_ZERO | M_WAITOK);

	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
	    M_ZERO | M_WAITOK);

	t4_init_l2t(sc, M_WAITOK);
	t4_init_smt(sc, M_WAITOK);
	t4_init_tx_sched(sc);
#ifdef RATELIMIT
	t4_init_etid_table(sc);
#endif
#ifdef INET6
	t4_init_clip_table(sc);
#endif
	if (sc->vres.key.size != 0)
		sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
		    sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	ofld_tqidx = 0;
#endif
#ifdef TCP_OFFLOAD
	ofld_rqidx = 0;
#endif
#ifdef DEV_NETMAP
	nm_rqidx = nm_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];
		struct vi_info *vi;

		if (pi == NULL)
			continue;

		pi->nvi = num_vis;
		for_each_vi(pi, j, vi) {
			vi->pi = pi;
			vi->adapter = sc;
			vi->qsize_rxq = t4_qsize_rxq;
			vi->qsize_txq = t4_qsize_txq;

			vi->first_rxq = rqidx;
			vi->first_txq = tqidx;
			vi->tmr_idx = t4_tmr_idx;
			vi->pktc_idx = t4_pktc_idx;
			vi->nrxq = j == 0 ? iaq.nrxq : iaq.nrxq_vi;
			vi->ntxq = j == 0 ? iaq.ntxq : iaq.ntxq_vi;

			rqidx += vi->nrxq;
			tqidx += vi->ntxq;

			if (j == 0 && vi->ntxq > 1)
				vi->rsrv_noflowq = t4_rsrv_noflowq ? 1 : 0;
			else
				vi->rsrv_noflowq = 0;

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
			vi->first_ofld_txq = ofld_tqidx;
			vi->nofldtxq = j == 0 ? iaq.nofldtxq : iaq.nofldtxq_vi;
			ofld_tqidx += vi->nofldtxq;
#endif
#ifdef TCP_OFFLOAD
			vi->ofld_tmr_idx = t4_tmr_idx_ofld;
			vi->ofld_pktc_idx = t4_pktc_idx_ofld;
			vi->first_ofld_rxq = ofld_rqidx;
			vi->nofldrxq = j == 0 ? iaq.nofldrxq : iaq.nofldrxq_vi;

			ofld_rqidx += vi->nofldrxq;
#endif
#ifdef DEV_NETMAP
			vi->first_nm_rxq = nm_rqidx;
			vi->first_nm_txq = nm_tqidx;
			if (j == 0) {
				vi->nnmrxq = iaq.nnmrxq;
				vi->nnmtxq = iaq.nnmtxq;
			} else {
				vi->nnmrxq = iaq.nnmrxq_vi;
				vi->nnmtxq = iaq.nnmtxq_vi;
			}
			nm_rqidx += vi->nnmrxq;
			nm_tqidx += vi->nnmtxq;
#endif
		}
	}

	rc = t4_setup_intr_handlers(sc);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt handlers: %d\n", rc);
		goto done;
	}

	rc = bus_generic_probe(dev);
	if (rc != 0) {
		device_printf(dev, "failed to probe child drivers: %d\n", rc);
		goto done;
	}

	/*
	 * Ensure thread-safe mailbox access (in debug builds).
	 *
	 * So far this was the only thread accessing the mailbox but various
	 * ifnets and sysctls are about to be created and their handlers/ioctls
	 * will access the mailbox from different threads.
	 */
	sc->flags |= CHK_MBOX_ACCESS;

	rc = bus_generic_attach(dev);
	if (rc != 0) {
		device_printf(dev,
		    "failed to attach all child ports: %d\n", rc);
		goto done;
	}

	device_printf(dev,
	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);

	t4_set_desc(sc);

	notify_siblings(dev, 0);

done:
	if (rc != 0 && sc->cdev) {
		/* cdev was created and so cxgbetool works; recover that way. */
		device_printf(dev,
		    "error during attach, adapter is now in recovery mode.\n");
		rc = 0;
	}

	if (rc != 0)
		t4_detach_common(dev);
	else
		t4_sysctls(sc);

	return (rc);
}

static int
t4_child_location_str(device_t bus, device_t dev, char *buf, size_t buflen)
{
	struct adapter *sc;
	struct port_info *pi;
	int i;

	sc = device_get_softc(bus);
	buf[0] = '\0';
	for_each_port(sc, i) {
		pi = sc->port[i];
		if (pi != NULL && pi->dev == dev) {
			snprintf(buf, buflen, "port=%d", pi->port_id);
			break;
		}
	}
	return (0);
}

static int
t4_ready(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);
	if (sc->flags & FW_OK)
		return (0);
	return (ENXIO);
}

static int
t4_read_port_device(device_t dev, int port, device_t *child)
{
	struct adapter *sc;
	struct port_info *pi;

	sc = device_get_softc(dev);
	if (port < 0 || port >= MAX_NPORTS)
		return (EINVAL);
	pi = sc->port[port];
	if (pi == NULL || pi->dev == NULL)
		return (ENXIO);
	*child = pi->dev;
	return (0);
}

static int
notify_siblings(device_t dev, int detaching)
{
	device_t sibling;
	int error, i;

	error = 0;
	for (i = 0; i < PCI_FUNCMAX; i++) {
		if (i == pci_get_function(dev))
			continue;
		sibling = pci_find_dbsf(pci_get_domain(dev), pci_get_bus(dev),
		    pci_get_slot(dev), i);
		if (sibling == NULL || !device_is_attached(sibling))
			continue;
		if (detaching)
			error = T4_DETACH_CHILD(sibling);
		else
			(void)T4_ATTACH_CHILD(sibling);
		if (error)
			break;
	}
	return (error);
}

/*
 * Idempotent
 */
static int
t4_detach(device_t dev)
{
	struct adapter *sc;
	int rc;

	sc = device_get_softc(dev);

	rc = notify_siblings(dev, 1);
	if (rc) {
		device_printf(dev,
		    "failed to detach sibling devices: %d\n", rc);
		return (rc);
	}

	return (t4_detach_common(dev));
}

int
t4_detach_common(device_t dev)
{
	struct adapter *sc;
	struct port_info *pi;
	int i, rc;

	sc = device_get_softc(dev);

	if (sc->cdev) {
		destroy_dev(sc->cdev);
		sc->cdev = NULL;
	}

	sx_xlock(&t4_list_lock);
	SLIST_REMOVE(&t4_list, sc, adapter, link);
	sx_xunlock(&t4_list_lock);

	sc->flags &= ~CHK_MBOX_ACCESS;
	if (sc->flags & FULL_INIT_DONE) {
		if (!(sc->flags & IS_VF))
			t4_intr_disable(sc);
	}

	if (device_is_attached(dev)) {
		rc = bus_generic_detach(dev);
		if (rc) {
			device_printf(dev,
			    "failed to detach child devices: %d\n", rc);
			return (rc);
		}
	}

	for (i = 0; i < sc->intr_count; i++)
		t4_free_irq(sc, &sc->irq[i]);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_free_tx_sched(sc);

	for (i = 0; i < MAX_NPORTS; i++) {
		pi = sc->port[i];
		if (pi) {
			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
			if (pi->dev)
				device_delete_child(dev, pi->dev);

			mtx_destroy(&pi->pi_lock);
			free(pi->vi, M_CXGBE);
			free(pi, M_CXGBE);
		}
	}

	device_delete_children(dev);

	if (sc->flags & FULL_INIT_DONE)
		adapter_full_uninit(sc);

	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
		t4_fw_bye(sc, sc->mbox);
	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
		pci_release_msi(dev);

	if (sc->regs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	if (sc->udbs_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->msix_res)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
		    sc->msix_res);

	if (sc->l2t)
		t4_free_l2t(sc->l2t);
	if (sc->smt)
		t4_free_smt(sc->smt);
#ifdef RATELIMIT
	t4_free_etid_table(sc);
#endif
	if (sc->key_map)
		vmem_destroy(sc->key_map);
#ifdef INET6
	t4_destroy_clip_table(sc);
#endif

#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	free(sc->sge.ofld_txq, M_CXGBE);
#endif
#ifdef TCP_OFFLOAD
	free(sc->sge.ofld_rxq, M_CXGBE);
#endif
#ifdef DEV_NETMAP
	free(sc->sge.nm_rxq, M_CXGBE);
	free(sc->sge.nm_txq, M_CXGBE);
#endif
	free(sc->irq, M_CXGBE);
	free(sc->sge.rxq, M_CXGBE);
	free(sc->sge.txq, M_CXGBE);
	free(sc->sge.ctrlq, M_CXGBE);
	free(sc->sge.iqmap, M_CXGBE);
	free(sc->sge.eqmap, M_CXGBE);
	free(sc->tids.ftid_tab, M_CXGBE);
	free(sc->tids.hpftid_tab, M_CXGBE);
	free_hftid_hash(&sc->tids);
	free(sc->tids.atid_tab, M_CXGBE);
	free(sc->tids.tid_tab, M_CXGBE);
	free(sc->tt.tls_rx_ports, M_CXGBE);
	t4_destroy_dma_tag(sc);

	callout_drain(&sc->sfl_callout);
	if (mtx_initialized(&sc->tids.ftid_lock)) {
		mtx_destroy(&sc->tids.ftid_lock);
		cv_destroy(&sc->tids.ftid_cv);
	}
	if (mtx_initialized(&sc->tids.atid_lock))
		mtx_destroy(&sc->tids.atid_lock);
	if (mtx_initialized(&sc->ifp_lock))
		mtx_destroy(&sc->ifp_lock);

	if (rw_initialized(&sc->policy_lock)) {
		rw_destroy(&sc->policy_lock);
#ifdef TCP_OFFLOAD
		if (sc->policy != NULL)
			free_offload_policy(sc->policy);
#endif
	}

	for (i = 0; i < NUM_MEMWIN; i++) {
		struct memwin *mw = &sc->memwin[i];

		if (rw_initialized(&mw->mw_lock))
			rw_destroy(&mw->mw_lock);
	}

	mtx_destroy(&sc->sfl_lock);
	mtx_destroy(&sc->reg_lock);
	mtx_destroy(&sc->sc_lock);

	bzero(sc, sizeof(*sc));

	return (0);
}

static int
cxgbe_probe(device_t dev)
{
	char buf[128];
	struct port_info *pi = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "port %d", pi->port_id);
	device_set_desc_copy(dev, buf);

	return (BUS_PROBE_DEFAULT);
}

#define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS | \
    IFCAP_HWRXTSTMP | IFCAP_NOMAP)
#define T4_CAP_ENABLE (T4_CAP)

static int
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
	struct ifnet *ifp;
	struct sbuf *sb;
	struct adapter *sc = vi->adapter;

	vi->xact_addr_filt = -1;
	callout_init(&vi->tick, 1);
	if (sc->flags & IS_VF || t4_tx_vm_wr != 0)
		vi->flags |= TX_USES_VM_WR;

	/* Allocate an ifnet and set it up */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}
	vi->ifp = ifp;
	ifp->if_softc = vi;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;

	ifp->if_init = cxgbe_init;
ifp->if_ioctl = cxgbe_ioctl; 1753 ifp->if_transmit = cxgbe_transmit; 1754 ifp->if_qflush = cxgbe_qflush; 1755 ifp->if_get_counter = cxgbe_get_counter; 1756#ifdef RATELIMIT 1757 ifp->if_snd_tag_alloc = cxgbe_snd_tag_alloc; 1758 ifp->if_snd_tag_modify = cxgbe_snd_tag_modify; 1759 ifp->if_snd_tag_query = cxgbe_snd_tag_query; 1760 ifp->if_snd_tag_free = cxgbe_snd_tag_free; 1761#endif 1762 1763 ifp->if_capabilities = T4_CAP; 1764 ifp->if_capenable = T4_CAP_ENABLE; 1765 ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO | 1766 CSUM_UDP_IPV6 | CSUM_TCP_IPV6; 1767 if (chip_id(sc) >= CHELSIO_T6) { 1768 ifp->if_capabilities |= IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO; 1769 ifp->if_capenable |= IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO; 1770 ifp->if_hwassist |= CSUM_INNER_IP6_UDP | CSUM_INNER_IP6_TCP | 1771 CSUM_INNER_IP6_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP | 1772 CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_ENCAP_VXLAN; 1773 } 1774 1775#ifdef TCP_OFFLOAD 1776 if (vi->nofldrxq != 0) 1777 ifp->if_capabilities |= IFCAP_TOE; 1778#endif 1779#ifdef RATELIMIT 1780 if (is_ethoffload(sc) && vi->nofldtxq != 0) { 1781 ifp->if_capabilities |= IFCAP_TXRTLMT; 1782 ifp->if_capenable |= IFCAP_TXRTLMT; 1783 } 1784#endif 1785 1786 ifp->if_hw_tsomax = IP_MAXPACKET; 1787 if (vi->flags & TX_USES_VM_WR) 1788 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_VM_TSO; 1789 else 1790 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_TSO; 1791#ifdef RATELIMIT 1792 if (is_ethoffload(sc) && vi->nofldtxq != 0) 1793 ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_EO_TSO; 1794#endif 1795 ifp->if_hw_tsomaxsegsize = 65536; 1796 1797 ether_ifattach(ifp, vi->hw_addr); 1798#ifdef DEV_NETMAP 1799 if (vi->nnmrxq != 0) 1800 cxgbe_nm_attach(vi); 1801#endif 1802 sb = sbuf_new_auto(); 1803 sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq); 1804#if defined(TCP_OFFLOAD) || defined(RATELIMIT) 1805 switch (ifp->if_capabilities & (IFCAP_TOE | IFCAP_TXRTLMT)) { 1806 case IFCAP_TOE: 1807 sbuf_printf(sb, "; %d txq (TOE)", vi->nofldtxq); 1808 break; 1809 case IFCAP_TOE | IFCAP_TXRTLMT: 1810 sbuf_printf(sb, "; %d txq (TOE/ETHOFLD)", vi->nofldtxq); 1811 break; 1812 case IFCAP_TXRTLMT: 1813 sbuf_printf(sb, "; %d txq (ETHOFLD)", vi->nofldtxq); 1814 break; 1815 } 1816#endif 1817#ifdef TCP_OFFLOAD 1818 if (ifp->if_capabilities & IFCAP_TOE) 1819 sbuf_printf(sb, ", %d rxq (TOE)", vi->nofldrxq); 1820#endif 1821#ifdef DEV_NETMAP 1822 if (ifp->if_capabilities & IFCAP_NETMAP) 1823 sbuf_printf(sb, "; %d txq, %d rxq (netmap)", 1824 vi->nnmtxq, vi->nnmrxq); 1825#endif 1826 sbuf_finish(sb); 1827 device_printf(dev, "%s\n", sbuf_data(sb)); 1828 sbuf_delete(sb); 1829 1830 vi_sysctls(vi); 1831 1832 return (0); 1833} 1834 1835static int 1836cxgbe_attach(device_t dev) 1837{ 1838 struct port_info *pi = device_get_softc(dev); 1839 struct adapter *sc = pi->adapter; 1840 struct vi_info *vi; 1841 int i, rc; 1842 1843 callout_init_mtx(&pi->tick, &pi->pi_lock, 0); 1844 1845 rc = cxgbe_vi_attach(dev, &pi->vi[0]); 1846 if (rc) 1847 return (rc); 1848 1849 for_each_vi(pi, i, vi) { 1850 if (i == 0) 1851 continue; 1852 vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1); 1853 if (vi->dev == NULL) { 1854 device_printf(dev, "failed to add VI %d\n", i); 1855 continue; 1856 } 1857 device_set_softc(vi->dev, vi); 1858 } 1859 1860 cxgbe_sysctls(pi); 1861 1862 bus_generic_attach(dev); 1863 1864 return (0); 1865} 1866 1867static void 1868cxgbe_vi_detach(struct vi_info *vi) 1869{ 1870 struct ifnet *ifp = vi->ifp; 1871 1872 ether_ifdetach(ifp); 1873 1874 /* Let detach proceed even if these fail. 
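	 * The interface is already off the stack (ether_ifdetach above), so a
	 * failure here can at worst leak resources.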
*/ 1875#ifdef DEV_NETMAP 1876 if (ifp->if_capabilities & IFCAP_NETMAP) 1877 cxgbe_nm_detach(vi); 1878#endif 1879 cxgbe_uninit_synchronized(vi); 1880 callout_drain(&vi->tick); 1881 vi_full_uninit(vi); 1882 1883 if_free(vi->ifp); 1884 vi->ifp = NULL; 1885} 1886 1887static int 1888cxgbe_detach(device_t dev) 1889{ 1890 struct port_info *pi = device_get_softc(dev); 1891 struct adapter *sc = pi->adapter; 1892 int rc; 1893 1894 /* Detach the extra VIs first. */ 1895 rc = bus_generic_detach(dev); 1896 if (rc) 1897 return (rc); 1898 device_delete_children(dev); 1899 1900 doom_vi(sc, &pi->vi[0]); 1901 1902 if (pi->flags & HAS_TRACEQ) { 1903 sc->traceq = -1; /* cloner should not create ifnet */ 1904 t4_tracer_port_detach(sc); 1905 } 1906 1907 cxgbe_vi_detach(&pi->vi[0]); 1908 callout_drain(&pi->tick); 1909 ifmedia_removeall(&pi->media); 1910 1911 end_synchronized_op(sc, 0); 1912 1913 return (0); 1914} 1915 1916static void 1917cxgbe_init(void *arg) 1918{ 1919 struct vi_info *vi = arg; 1920 struct adapter *sc = vi->adapter; 1921 1922 if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0) 1923 return; 1924 cxgbe_init_synchronized(vi); 1925 end_synchronized_op(sc, 0); 1926} 1927 1928static int 1929cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data) 1930{ 1931 int rc = 0, mtu, flags; 1932 struct vi_info *vi = ifp->if_softc; 1933 struct port_info *pi = vi->pi; 1934 struct adapter *sc = pi->adapter; 1935 struct ifreq *ifr = (struct ifreq *)data; 1936 uint32_t mask; 1937 1938 switch (cmd) { 1939 case SIOCSIFMTU: 1940 mtu = ifr->ifr_mtu; 1941 if (mtu < ETHERMIN || mtu > MAX_MTU) 1942 return (EINVAL); 1943 1944 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu"); 1945 if (rc) 1946 return (rc); 1947 ifp->if_mtu = mtu; 1948 if (vi->flags & VI_INIT_DONE) { 1949 t4_update_fl_bufsize(ifp); 1950 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1951 rc = update_mac_settings(ifp, XGMAC_MTU); 1952 } 1953 end_synchronized_op(sc, 0); 1954 break; 1955 1956 case SIOCSIFFLAGS: 1957 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4flg"); 1958 if (rc) 1959 return (rc); 1960 1961 if (ifp->if_flags & IFF_UP) { 1962 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1963 flags = vi->if_flags; 1964 if ((ifp->if_flags ^ flags) & 1965 (IFF_PROMISC | IFF_ALLMULTI)) { 1966 rc = update_mac_settings(ifp, 1967 XGMAC_PROMISC | XGMAC_ALLMULTI); 1968 } 1969 } else { 1970 rc = cxgbe_init_synchronized(vi); 1971 } 1972 vi->if_flags = ifp->if_flags; 1973 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1974 rc = cxgbe_uninit_synchronized(vi); 1975 } 1976 end_synchronized_op(sc, 0); 1977 break; 1978 1979 case SIOCADDMULTI: 1980 case SIOCDELMULTI: 1981 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi"); 1982 if (rc) 1983 return (rc); 1984 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1985 rc = update_mac_settings(ifp, XGMAC_MCADDRS); 1986 end_synchronized_op(sc, 0); 1987 break; 1988 1989 case SIOCSIFCAP: 1990 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap"); 1991 if (rc) 1992 return (rc); 1993 1994 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1995 if (mask & IFCAP_TXCSUM) { 1996 ifp->if_capenable ^= IFCAP_TXCSUM; 1997 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 1998 1999 if (IFCAP_TSO4 & ifp->if_capenable && 2000 !(IFCAP_TXCSUM & ifp->if_capenable)) { 2001 mask &= ~IFCAP_TSO4; 2002 ifp->if_capenable &= ~IFCAP_TSO4; 2003 if_printf(ifp, 2004 "tso4 disabled due to -txcsum.\n"); 2005 } 2006 } 2007 if (mask & IFCAP_TXCSUM_IPV6) { 2008 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; 2009 
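			/* Keep if_hwassist consistent with the toggled capability. */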
ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 2010 2011 if (IFCAP_TSO6 & ifp->if_capenable && 2012 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 2013 mask &= ~IFCAP_TSO6; 2014 ifp->if_capenable &= ~IFCAP_TSO6; 2015 if_printf(ifp, 2016 "tso6 disabled due to -txcsum6.\n"); 2017 } 2018 } 2019 if (mask & IFCAP_RXCSUM) 2020 ifp->if_capenable ^= IFCAP_RXCSUM; 2021 if (mask & IFCAP_RXCSUM_IPV6) 2022 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; 2023 2024 /* 2025 * Note that we leave CSUM_TSO alone (it is always set). The 2026 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before 2027 * sending a TSO request our way, so it's sufficient to toggle 2028 * IFCAP_TSOx only. 2029 */ 2030 if (mask & IFCAP_TSO4) { 2031 if (!(IFCAP_TSO4 & ifp->if_capenable) && 2032 !(IFCAP_TXCSUM & ifp->if_capenable)) { 2033 if_printf(ifp, "enable txcsum first.\n"); 2034 rc = EAGAIN; 2035 goto fail; 2036 } 2037 ifp->if_capenable ^= IFCAP_TSO4; 2038 } 2039 if (mask & IFCAP_TSO6) { 2040 if (!(IFCAP_TSO6 & ifp->if_capenable) && 2041 !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { 2042 if_printf(ifp, "enable txcsum6 first.\n"); 2043 rc = EAGAIN; 2044 goto fail; 2045 } 2046 ifp->if_capenable ^= IFCAP_TSO6; 2047 } 2048 if (mask & IFCAP_LRO) { 2049#if defined(INET) || defined(INET6) 2050 int i; 2051 struct sge_rxq *rxq; 2052 2053 ifp->if_capenable ^= IFCAP_LRO; 2054 for_each_rxq(vi, i, rxq) { 2055 if (ifp->if_capenable & IFCAP_LRO) 2056 rxq->iq.flags |= IQ_LRO_ENABLED; 2057 else 2058 rxq->iq.flags &= ~IQ_LRO_ENABLED; 2059 } 2060#endif 2061 } 2062#ifdef TCP_OFFLOAD 2063 if (mask & IFCAP_TOE) { 2064 int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE; 2065 2066 rc = toe_capability(vi, enable); 2067 if (rc != 0) 2068 goto fail; 2069 2070 ifp->if_capenable ^= mask; 2071 } 2072#endif 2073 if (mask & IFCAP_VLAN_HWTAGGING) { 2074 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2075 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2076 rc = update_mac_settings(ifp, XGMAC_VLANEX); 2077 } 2078 if (mask & IFCAP_VLAN_MTU) { 2079 ifp->if_capenable ^= IFCAP_VLAN_MTU; 2080 2081 /* Need to find out how to disable auto-mtu-inflation */ 2082 } 2083 if (mask & IFCAP_VLAN_HWTSO) 2084 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2085 if (mask & IFCAP_VLAN_HWCSUM) 2086 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2087#ifdef RATELIMIT 2088 if (mask & IFCAP_TXRTLMT) 2089 ifp->if_capenable ^= IFCAP_TXRTLMT; 2090#endif 2091 if (mask & IFCAP_HWRXTSTMP) { 2092 int i; 2093 struct sge_rxq *rxq; 2094 2095 ifp->if_capenable ^= IFCAP_HWRXTSTMP; 2096 for_each_rxq(vi, i, rxq) { 2097 if (ifp->if_capenable & IFCAP_HWRXTSTMP) 2098 rxq->iq.flags |= IQ_RX_TIMESTAMP; 2099 else 2100 rxq->iq.flags &= ~IQ_RX_TIMESTAMP; 2101 } 2102 } 2103 if (mask & IFCAP_NOMAP) 2104 ifp->if_capenable ^= IFCAP_NOMAP; 2105 2106 if (mask & IFCAP_VXLAN_HWCSUM) { 2107 ifp->if_capenable ^= IFCAP_VXLAN_HWCSUM; 2108 ifp->if_hwassist ^= CSUM_INNER_IP6_UDP | 2109 CSUM_INNER_IP6_TCP | CSUM_INNER_IP | 2110 CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP; 2111 } 2112 if (mask & IFCAP_VXLAN_HWTSO) { 2113 ifp->if_capenable ^= IFCAP_VXLAN_HWTSO; 2114 ifp->if_hwassist ^= CSUM_INNER_IP6_TSO | 2115 CSUM_INNER_IP_TSO; 2116 } 2117 2118#ifdef VLAN_CAPABILITIES 2119 VLAN_CAPABILITIES(ifp); 2120#endif 2121fail: 2122 end_synchronized_op(sc, 0); 2123 break; 2124 2125 case SIOCSIFMEDIA: 2126 case SIOCGIFMEDIA: 2127 case SIOCGIFXMEDIA: 2128 ifmedia_ioctl(ifp, ifr, &pi->media, cmd); 2129 break; 2130 2131 case SIOCGI2C: { 2132 struct ifi2creq i2c; 2133 2134 rc = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2135 if (rc != 0) 2136 break; 2137 
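		/*
		 * Only the two well-known SFF-8472 module EEPROM addresses
		 * are allowed: 0xA0 (identification) and 0xA2 (diagnostics).
		 */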
if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 2138 rc = EPERM; 2139 break; 2140 } 2141 if (i2c.len > sizeof(i2c.data)) { 2142 rc = EINVAL; 2143 break; 2144 } 2145 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c"); 2146 if (rc) 2147 return (rc); 2148 rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr, 2149 i2c.offset, i2c.len, &i2c.data[0]); 2150 end_synchronized_op(sc, 0); 2151 if (rc == 0) 2152 rc = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2153 break; 2154 } 2155 2156 default: 2157 rc = ether_ioctl(ifp, cmd, data); 2158 } 2159 2160 return (rc); 2161} 2162 2163static int 2164cxgbe_transmit(struct ifnet *ifp, struct mbuf *m) 2165{ 2166 struct vi_info *vi = ifp->if_softc; 2167 struct port_info *pi = vi->pi; 2168 struct adapter *sc; 2169 struct sge_txq *txq; 2170 void *items[1]; 2171 int rc; 2172 2173 M_ASSERTPKTHDR(m); 2174 MPASS(m->m_nextpkt == NULL); /* not quite ready for this yet */ 2175 2176 if (__predict_false(pi->link_cfg.link_ok == false)) { 2177 m_freem(m); 2178 return (ENETDOWN); 2179 } 2180 2181 rc = parse_pkt(&m, vi->flags & TX_USES_VM_WR); 2182 if (__predict_false(rc != 0)) { 2183 MPASS(m == NULL); /* was freed already */ 2184 atomic_add_int(&pi->tx_parse_error, 1); /* rare, atomic is ok */ 2185 return (rc); 2186 } 2187#ifdef RATELIMIT 2188 if (m->m_pkthdr.snd_tag != NULL) { 2189 /* EAGAIN tells the stack we are not the correct interface. */ 2190 if (__predict_false(ifp != m->m_pkthdr.snd_tag->ifp)) { 2191 m_freem(m); 2192 return (EAGAIN); 2193 } 2194 2195 return (ethofld_transmit(ifp, m)); 2196 } 2197#endif 2198 2199 /* Select a txq. */ 2200 sc = vi->adapter; 2201 txq = &sc->sge.txq[vi->first_txq]; 2202 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) 2203 txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) + 2204 vi->rsrv_noflowq); 2205 2206 items[0] = m; 2207 rc = mp_ring_enqueue(txq->r, items, 1, 256); 2208 if (__predict_false(rc != 0)) 2209 m_freem(m); 2210 2211 return (rc); 2212} 2213 2214static void 2215cxgbe_qflush(struct ifnet *ifp) 2216{ 2217 struct vi_info *vi = ifp->if_softc; 2218 struct sge_txq *txq; 2219 int i; 2220 2221 /* queues do not exist if !VI_INIT_DONE. 
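	 * There is nothing to drain in that case; if_qflush() below still
	 * runs to discard anything buffered at the ifnet layer.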
*/ 2222 if (vi->flags & VI_INIT_DONE) { 2223 for_each_txq(vi, i, txq) { 2224 TXQ_LOCK(txq); 2225 txq->eq.flags |= EQ_QFLUSH; 2226 TXQ_UNLOCK(txq); 2227 while (!mp_ring_is_idle(txq->r)) { 2228 mp_ring_check_drainage(txq->r, 4096); 2229 pause("qflush", 1); 2230 } 2231 TXQ_LOCK(txq); 2232 txq->eq.flags &= ~EQ_QFLUSH; 2233 TXQ_UNLOCK(txq); 2234 } 2235 } 2236 if_qflush(ifp); 2237} 2238 2239static uint64_t 2240vi_get_counter(struct ifnet *ifp, ift_counter c) 2241{ 2242 struct vi_info *vi = ifp->if_softc; 2243 struct fw_vi_stats_vf *s = &vi->stats; 2244 2245 vi_refresh_stats(vi->adapter, vi); 2246 2247 switch (c) { 2248 case IFCOUNTER_IPACKETS: 2249 return (s->rx_bcast_frames + s->rx_mcast_frames + 2250 s->rx_ucast_frames); 2251 case IFCOUNTER_IERRORS: 2252 return (s->rx_err_frames); 2253 case IFCOUNTER_OPACKETS: 2254 return (s->tx_bcast_frames + s->tx_mcast_frames + 2255 s->tx_ucast_frames + s->tx_offload_frames); 2256 case IFCOUNTER_OERRORS: 2257 return (s->tx_drop_frames); 2258 case IFCOUNTER_IBYTES: 2259 return (s->rx_bcast_bytes + s->rx_mcast_bytes + 2260 s->rx_ucast_bytes); 2261 case IFCOUNTER_OBYTES: 2262 return (s->tx_bcast_bytes + s->tx_mcast_bytes + 2263 s->tx_ucast_bytes + s->tx_offload_bytes); 2264 case IFCOUNTER_IMCASTS: 2265 return (s->rx_mcast_frames); 2266 case IFCOUNTER_OMCASTS: 2267 return (s->tx_mcast_frames); 2268 case IFCOUNTER_OQDROPS: { 2269 uint64_t drops; 2270 2271 drops = 0; 2272 if (vi->flags & VI_INIT_DONE) { 2273 int i; 2274 struct sge_txq *txq; 2275 2276 for_each_txq(vi, i, txq) 2277 drops += counter_u64_fetch(txq->r->dropped); 2278 } 2279 2280 return (drops); 2281 2282 } 2283 2284 default: 2285 return (if_get_counter_default(ifp, c)); 2286 } 2287} 2288 2289uint64_t 2290cxgbe_get_counter(struct ifnet *ifp, ift_counter c) 2291{ 2292 struct vi_info *vi = ifp->if_softc; 2293 struct port_info *pi = vi->pi; 2294 struct adapter *sc = pi->adapter; 2295 struct port_stats *s = &pi->stats; 2296 2297 if (pi->nvi > 1 || sc->flags & IS_VF) 2298 return (vi_get_counter(ifp, c)); 2299 2300 cxgbe_refresh_stats(sc, pi); 2301 2302 switch (c) { 2303 case IFCOUNTER_IPACKETS: 2304 return (s->rx_frames); 2305 2306 case IFCOUNTER_IERRORS: 2307 return (s->rx_jabber + s->rx_runt + s->rx_too_long + 2308 s->rx_fcs_err + s->rx_len_err); 2309 2310 case IFCOUNTER_OPACKETS: 2311 return (s->tx_frames); 2312 2313 case IFCOUNTER_OERRORS: 2314 return (s->tx_error_frames); 2315 2316 case IFCOUNTER_IBYTES: 2317 return (s->rx_octets); 2318 2319 case IFCOUNTER_OBYTES: 2320 return (s->tx_octets); 2321 2322 case IFCOUNTER_IMCASTS: 2323 return (s->rx_mcast_frames); 2324 2325 case IFCOUNTER_OMCASTS: 2326 return (s->tx_mcast_frames); 2327 2328 case IFCOUNTER_IQDROPS: 2329 return (s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 + 2330 s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 + 2331 s->rx_trunc3 + pi->tnl_cong_drops); 2332 2333 case IFCOUNTER_OQDROPS: { 2334 uint64_t drops; 2335 2336 drops = s->tx_drop; 2337 if (vi->flags & VI_INIT_DONE) { 2338 int i; 2339 struct sge_txq *txq; 2340 2341 for_each_txq(vi, i, txq) 2342 drops += counter_u64_fetch(txq->r->dropped); 2343 } 2344 2345 return (drops); 2346 2347 } 2348 2349 default: 2350 return (if_get_counter_default(ifp, c)); 2351 } 2352} 2353 2354/* 2355 * The kernel picks a media from the list we provided but we still validate 2356 * the request.
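 * (e.g. autoselect is rejected with ENOTSUP when the transceiver does
 * not support autonegotiation).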
2357 */ 2358int 2359cxgbe_media_change(struct ifnet *ifp) 2360{ 2361 struct vi_info *vi = ifp->if_softc; 2362 struct port_info *pi = vi->pi; 2363 struct ifmedia *ifm = &pi->media; 2364 struct link_config *lc = &pi->link_cfg; 2365 struct adapter *sc = pi->adapter; 2366 int rc; 2367 2368 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mec"); 2369 if (rc != 0) 2370 return (rc); 2371 PORT_LOCK(pi); 2372 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 2373 /* ifconfig .. media autoselect */ 2374 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { 2375 rc = ENOTSUP; /* AN not supported by transceiver */ 2376 goto done; 2377 } 2378 lc->requested_aneg = AUTONEG_ENABLE; 2379 lc->requested_speed = 0; 2380 lc->requested_fc |= PAUSE_AUTONEG; 2381 } else { 2382 lc->requested_aneg = AUTONEG_DISABLE; 2383 lc->requested_speed = 2384 ifmedia_baudrate(ifm->ifm_media) / 1000000; 2385 lc->requested_fc = 0; 2386 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE) 2387 lc->requested_fc |= PAUSE_RX; 2388 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE) 2389 lc->requested_fc |= PAUSE_TX; 2390 } 2391 if (pi->up_vis > 0) { 2392 fixup_link_config(pi); 2393 rc = apply_link_config(pi); 2394 } 2395done: 2396 PORT_UNLOCK(pi); 2397 end_synchronized_op(sc, 0); 2398 return (rc); 2399} 2400 2401/* 2402 * Base media word (without ETHER, pause, link active, etc.) for the port at the 2403 * given speed. 2404 */ 2405static int 2406port_mword(struct port_info *pi, uint32_t speed) 2407{ 2408 2409 MPASS(speed & M_FW_PORT_CAP32_SPEED); 2410 MPASS(powerof2(speed)); 2411 2412 switch(pi->port_type) { 2413 case FW_PORT_TYPE_BT_SGMII: 2414 case FW_PORT_TYPE_BT_XFI: 2415 case FW_PORT_TYPE_BT_XAUI: 2416 /* BaseT */ 2417 switch (speed) { 2418 case FW_PORT_CAP32_SPEED_100M: 2419 return (IFM_100_T); 2420 case FW_PORT_CAP32_SPEED_1G: 2421 return (IFM_1000_T); 2422 case FW_PORT_CAP32_SPEED_10G: 2423 return (IFM_10G_T); 2424 } 2425 break; 2426 case FW_PORT_TYPE_KX4: 2427 if (speed == FW_PORT_CAP32_SPEED_10G) 2428 return (IFM_10G_KX4); 2429 break; 2430 case FW_PORT_TYPE_CX4: 2431 if (speed == FW_PORT_CAP32_SPEED_10G) 2432 return (IFM_10G_CX4); 2433 break; 2434 case FW_PORT_TYPE_KX: 2435 if (speed == FW_PORT_CAP32_SPEED_1G) 2436 return (IFM_1000_KX); 2437 break; 2438 case FW_PORT_TYPE_KR: 2439 case FW_PORT_TYPE_BP_AP: 2440 case FW_PORT_TYPE_BP4_AP: 2441 case FW_PORT_TYPE_BP40_BA: 2442 case FW_PORT_TYPE_KR4_100G: 2443 case FW_PORT_TYPE_KR_SFP28: 2444 case FW_PORT_TYPE_KR_XLAUI: 2445 switch (speed) { 2446 case FW_PORT_CAP32_SPEED_1G: 2447 return (IFM_1000_KX); 2448 case FW_PORT_CAP32_SPEED_10G: 2449 return (IFM_10G_KR); 2450 case FW_PORT_CAP32_SPEED_25G: 2451 return (IFM_25G_KR); 2452 case FW_PORT_CAP32_SPEED_40G: 2453 return (IFM_40G_KR4); 2454 case FW_PORT_CAP32_SPEED_50G: 2455 return (IFM_50G_KR2); 2456 case FW_PORT_CAP32_SPEED_100G: 2457 return (IFM_100G_KR4); 2458 } 2459 break; 2460 case FW_PORT_TYPE_FIBER_XFI: 2461 case FW_PORT_TYPE_FIBER_XAUI: 2462 case FW_PORT_TYPE_SFP: 2463 case FW_PORT_TYPE_QSFP_10G: 2464 case FW_PORT_TYPE_QSA: 2465 case FW_PORT_TYPE_QSFP: 2466 case FW_PORT_TYPE_CR4_QSFP: 2467 case FW_PORT_TYPE_CR_QSFP: 2468 case FW_PORT_TYPE_CR2_QSFP: 2469 case FW_PORT_TYPE_SFP28: 2470 /* Pluggable transceiver */ 2471 switch (pi->mod_type) { 2472 case FW_PORT_MOD_TYPE_LR: 2473 switch (speed) { 2474 case FW_PORT_CAP32_SPEED_1G: 2475 return (IFM_1000_LX); 2476 case FW_PORT_CAP32_SPEED_10G: 2477 return (IFM_10G_LR); 2478 case FW_PORT_CAP32_SPEED_25G: 2479 return (IFM_25G_LR); 2480 case FW_PORT_CAP32_SPEED_40G: 2481 return (IFM_40G_LR4); 
2482 case FW_PORT_CAP32_SPEED_50G: 2483 return (IFM_50G_LR2); 2484 case FW_PORT_CAP32_SPEED_100G: 2485 return (IFM_100G_LR4); 2486 } 2487 break; 2488 case FW_PORT_MOD_TYPE_SR: 2489 switch (speed) { 2490 case FW_PORT_CAP32_SPEED_1G: 2491 return (IFM_1000_SX); 2492 case FW_PORT_CAP32_SPEED_10G: 2493 return (IFM_10G_SR); 2494 case FW_PORT_CAP32_SPEED_25G: 2495 return (IFM_25G_SR); 2496 case FW_PORT_CAP32_SPEED_40G: 2497 return (IFM_40G_SR4); 2498 case FW_PORT_CAP32_SPEED_50G: 2499 return (IFM_50G_SR2); 2500 case FW_PORT_CAP32_SPEED_100G: 2501 return (IFM_100G_SR4); 2502 } 2503 break; 2504 case FW_PORT_MOD_TYPE_ER: 2505 if (speed == FW_PORT_CAP32_SPEED_10G) 2506 return (IFM_10G_ER); 2507 break; 2508 case FW_PORT_MOD_TYPE_TWINAX_PASSIVE: 2509 case FW_PORT_MOD_TYPE_TWINAX_ACTIVE: 2510 switch (speed) { 2511 case FW_PORT_CAP32_SPEED_1G: 2512 return (IFM_1000_CX); 2513 case FW_PORT_CAP32_SPEED_10G: 2514 return (IFM_10G_TWINAX); 2515 case FW_PORT_CAP32_SPEED_25G: 2516 return (IFM_25G_CR); 2517 case FW_PORT_CAP32_SPEED_40G: 2518 return (IFM_40G_CR4); 2519 case FW_PORT_CAP32_SPEED_50G: 2520 return (IFM_50G_CR2); 2521 case FW_PORT_CAP32_SPEED_100G: 2522 return (IFM_100G_CR4); 2523 } 2524 break; 2525 case FW_PORT_MOD_TYPE_LRM: 2526 if (speed == FW_PORT_CAP32_SPEED_10G) 2527 return (IFM_10G_LRM); 2528 break; 2529 case FW_PORT_MOD_TYPE_NA: 2530 MPASS(0); /* Not pluggable? */ 2531 /* fall through */ 2532 case FW_PORT_MOD_TYPE_ERROR: 2533 case FW_PORT_MOD_TYPE_UNKNOWN: 2534 case FW_PORT_MOD_TYPE_NOTSUPPORTED: 2535 break; 2536 case FW_PORT_MOD_TYPE_NONE: 2537 return (IFM_NONE); 2538 } 2539 break; 2540 case FW_PORT_TYPE_NONE: 2541 return (IFM_NONE); 2542 } 2543 2544 return (IFM_UNKNOWN); 2545} 2546 2547void 2548cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2549{ 2550 struct vi_info *vi = ifp->if_softc; 2551 struct port_info *pi = vi->pi; 2552 struct adapter *sc = pi->adapter; 2553 struct link_config *lc = &pi->link_cfg; 2554 2555 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4med") != 0) 2556 return; 2557 PORT_LOCK(pi); 2558 2559 if (pi->up_vis == 0) { 2560 /* 2561 * If all the interfaces are administratively down the firmware 2562 * does not report transceiver changes. Refresh port info here 2563 * so that ifconfig displays accurate ifmedia at all times. 2564 * This is the only reason we have a synchronized op in this 2565 * function. Just PORT_LOCK would have been enough otherwise.
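		 * t4_update_port_info() asks the firmware for the current
		 * transceiver and link capabilities before the media list
		 * is rebuilt.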
2566 */ 2567 t4_update_port_info(pi); 2568 build_medialist(pi); 2569 } 2570 2571 /* ifm_status */ 2572 ifmr->ifm_status = IFM_AVALID; 2573 if (lc->link_ok == false) 2574 goto done; 2575 ifmr->ifm_status |= IFM_ACTIVE; 2576 2577 /* ifm_active */ 2578 ifmr->ifm_active = IFM_ETHER | IFM_FDX; 2579 ifmr->ifm_active &= ~(IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE); 2580 if (lc->fc & PAUSE_RX) 2581 ifmr->ifm_active |= IFM_ETH_RXPAUSE; 2582 if (lc->fc & PAUSE_TX) 2583 ifmr->ifm_active |= IFM_ETH_TXPAUSE; 2584 ifmr->ifm_active |= port_mword(pi, speed_to_fwcap(lc->speed)); 2585done: 2586 PORT_UNLOCK(pi); 2587 end_synchronized_op(sc, 0); 2588} 2589 2590static int 2591vcxgbe_probe(device_t dev) 2592{ 2593 char buf[128]; 2594 struct vi_info *vi = device_get_softc(dev); 2595 2596 snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id, 2597 vi - vi->pi->vi); 2598 device_set_desc_copy(dev, buf); 2599 2600 return (BUS_PROBE_DEFAULT); 2601} 2602 2603static int 2604alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi) 2605{ 2606 int func, index, rc; 2607 uint32_t param, val; 2608 2609 ASSERT_SYNCHRONIZED_OP(sc); 2610 2611 index = vi - pi->vi; 2612 MPASS(index > 0); /* This function deals with _extra_ VIs only */ 2613 KASSERT(index < nitems(vi_mac_funcs), 2614 ("%s: VI %s doesn't have a MAC func", __func__, 2615 device_get_nameunit(vi->dev))); 2616 func = vi_mac_funcs[index]; 2617 rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1, 2618 vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0); 2619 if (rc < 0) { 2620 device_printf(vi->dev, "failed to allocate virtual interface %d" 2621 "for port %d: %d\n", index, pi->port_id, -rc); 2622 return (-rc); 2623 } 2624 vi->viid = rc; 2625 2626 if (vi->rss_size == 1) { 2627 /* 2628 * This VI didn't get a slice of the RSS table. Reduce the 2629 * number of VIs being created (hw.cxgbe.num_vis) or modify the 2630 * configuration file (nvi, rssnvi for this PF) if this is a 2631 * problem. 
2632 */ 2633 device_printf(vi->dev, "RSS table not available.\n"); 2634 vi->rss_base = 0xffff; 2635 2636 return (0); 2637 } 2638 2639 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 2640 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) | 2641 V_FW_PARAMS_PARAM_YZ(vi->viid); 2642 rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 2643 if (rc) 2644 vi->rss_base = 0xffff; 2645 else { 2646 MPASS((val >> 16) == vi->rss_size); 2647 vi->rss_base = val & 0xffff; 2648 } 2649 2650 return (0); 2651} 2652 2653static int 2654vcxgbe_attach(device_t dev) 2655{ 2656 struct vi_info *vi; 2657 struct port_info *pi; 2658 struct adapter *sc; 2659 int rc; 2660 2661 vi = device_get_softc(dev); 2662 pi = vi->pi; 2663 sc = pi->adapter; 2664 2665 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4via"); 2666 if (rc) 2667 return (rc); 2668 rc = alloc_extra_vi(sc, pi, vi); 2669 end_synchronized_op(sc, 0); 2670 if (rc) 2671 return (rc); 2672 2673 rc = cxgbe_vi_attach(dev, vi); 2674 if (rc) { 2675 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2676 return (rc); 2677 } 2678 return (0); 2679} 2680 2681static int 2682vcxgbe_detach(device_t dev) 2683{ 2684 struct vi_info *vi; 2685 struct adapter *sc; 2686 2687 vi = device_get_softc(dev); 2688 sc = vi->adapter; 2689 2690 doom_vi(sc, vi); 2691 2692 cxgbe_vi_detach(vi); 2693 t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid); 2694 2695 end_synchronized_op(sc, 0); 2696 2697 return (0); 2698} 2699 2700static struct callout fatal_callout; 2701 2702static void 2703delayed_panic(void *arg) 2704{ 2705 struct adapter *sc = arg; 2706 2707 panic("%s: panic on fatal error", device_get_nameunit(sc->dev)); 2708} 2709 2710void 2711t4_fatal_err(struct adapter *sc, bool fw_error) 2712{ 2713 2714 t4_shutdown_adapter(sc); 2715 log(LOG_ALERT, "%s: encountered fatal error, adapter stopped.\n", 2716 device_get_nameunit(sc->dev)); 2717 if (fw_error) { 2718 ASSERT_SYNCHRONIZED_OP(sc); 2719 sc->flags |= ADAP_ERR; 2720 } else { 2721 ADAPTER_LOCK(sc); 2722 sc->flags |= ADAP_ERR; 2723 ADAPTER_UNLOCK(sc); 2724 } 2725 2726 if (t4_panic_on_fatal_err) { 2727 log(LOG_ALERT, "%s: panic on fatal error after 30s", 2728 device_get_nameunit(sc->dev)); 2729 callout_reset(&fatal_callout, hz * 30, delayed_panic, sc); 2730 } 2731} 2732 2733void 2734t4_add_adapter(struct adapter *sc) 2735{ 2736 sx_xlock(&t4_list_lock); 2737 SLIST_INSERT_HEAD(&t4_list, sc, link); 2738 sx_xunlock(&t4_list_lock); 2739} 2740 2741int 2742t4_map_bars_0_and_4(struct adapter *sc) 2743{ 2744 sc->regs_rid = PCIR_BAR(0); 2745 sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2746 &sc->regs_rid, RF_ACTIVE); 2747 if (sc->regs_res == NULL) { 2748 device_printf(sc->dev, "cannot map registers.\n"); 2749 return (ENXIO); 2750 } 2751 sc->bt = rman_get_bustag(sc->regs_res); 2752 sc->bh = rman_get_bushandle(sc->regs_res); 2753 sc->mmio_len = rman_get_size(sc->regs_res); 2754 setbit(&sc->doorbells, DOORBELL_KDB); 2755 2756 sc->msix_rid = PCIR_BAR(4); 2757 sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2758 &sc->msix_rid, RF_ACTIVE); 2759 if (sc->msix_res == NULL) { 2760 device_printf(sc->dev, "cannot map MSI-X BAR.\n"); 2761 return (ENXIO); 2762 } 2763 2764 return (0); 2765} 2766 2767int 2768t4_map_bar_2(struct adapter *sc) 2769{ 2770 2771 /* 2772 * T4: only iWARP driver uses the userspace doorbells. There is no need 2773 * to map it if RDMA is disabled. 
2774 */ 2775 if (is_t4(sc) && sc->rdmacaps == 0) 2776 return (0); 2777 2778 sc->udbs_rid = PCIR_BAR(2); 2779 sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 2780 &sc->udbs_rid, RF_ACTIVE); 2781 if (sc->udbs_res == NULL) { 2782 device_printf(sc->dev, "cannot map doorbell BAR.\n"); 2783 return (ENXIO); 2784 } 2785 sc->udbs_base = rman_get_virtual(sc->udbs_res); 2786 2787 if (chip_id(sc) >= CHELSIO_T5) { 2788 setbit(&sc->doorbells, DOORBELL_UDB); 2789#if defined(__i386__) || defined(__amd64__) 2790 if (t5_write_combine) { 2791 int rc, mode; 2792 2793 /* 2794 * Enable write combining on BAR2. This is the 2795 * userspace doorbell BAR and is split into 128B 2796 * (UDBS_SEG_SIZE) doorbell regions, each associated 2797 * with an egress queue. The first 64B has the doorbell 2798 * and the second 64B can be used to submit a tx work 2799 * request with an implicit doorbell. 2800 */ 2801 2802 rc = pmap_change_attr((vm_offset_t)sc->udbs_base, 2803 rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING); 2804 if (rc == 0) { 2805 clrbit(&sc->doorbells, DOORBELL_UDB); 2806 setbit(&sc->doorbells, DOORBELL_WCWR); 2807 setbit(&sc->doorbells, DOORBELL_UDBWC); 2808 } else { 2809 device_printf(sc->dev, 2810 "couldn't enable write combining: %d\n", 2811 rc); 2812 } 2813 2814 mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0); 2815 t4_write_reg(sc, A_SGE_STAT_CFG, 2816 V_STATSOURCE_T5(7) | mode); 2817 } 2818#endif 2819 } 2820 sc->iwt.wc_en = isset(&sc->doorbells, DOORBELL_UDBWC) ? 1 : 0; 2821 2822 return (0); 2823} 2824 2825struct memwin_init { 2826 uint32_t base; 2827 uint32_t aperture; 2828}; 2829 2830static const struct memwin_init t4_memwin[NUM_MEMWIN] = { 2831 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2832 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2833 { MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 } 2834}; 2835 2836static const struct memwin_init t5_memwin[NUM_MEMWIN] = { 2837 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 2838 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 2839 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 2840}; 2841 2842static void 2843setup_memwin(struct adapter *sc) 2844{ 2845 const struct memwin_init *mw_init; 2846 struct memwin *mw; 2847 int i; 2848 uint32_t bar0; 2849 2850 if (is_t4(sc)) { 2851 /* 2852 * Read low 32b of bar0 indirectly via the hardware backdoor 2853 * mechanism. Works from within PCI passthrough environments 2854 * too, where rman_get_start() can return a different value. We 2855 * need to program the T4 memory window decoders with the actual 2856 * addresses that will be coming across the PCIe link. 2857 */ 2858 bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0)); 2859 bar0 &= (uint32_t) PCIM_BAR_MEM_BASE; 2860 2861 mw_init = &t4_memwin[0]; 2862 } else { 2863 /* T5+ use the relative offset inside the PCIe BAR */ 2864 bar0 = 0; 2865 2866 mw_init = &t5_memwin[0]; 2867 } 2868 2869 for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) { 2870 rw_init(&mw->mw_lock, "memory window access"); 2871 mw->mw_base = mw_init->base; 2872 mw->mw_aperture = mw_init->aperture; 2873 mw->mw_curpos = 0; 2874 t4_write_reg(sc, 2875 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i), 2876 (mw->mw_base + bar0) | V_BIR(0) | 2877 V_WINDOW(ilog2(mw->mw_aperture) - 10)); 2878 rw_wlock(&mw->mw_lock); 2879 position_memwin(sc, i, 0); 2880 rw_wunlock(&mw->mw_lock); 2881 } 2882 2883 /* flush */ 2884 t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 2885} 2886 2887/* 2888 * Positions the memory window at the given address in the card's address space. 
2889 * There are some alignment requirements and the actual position may be at an 2890 * address prior to the requested address. mw->mw_curpos always has the actual 2891 * position of the window. 2892 */ 2893static void 2894position_memwin(struct adapter *sc, int idx, uint32_t addr) 2895{ 2896 struct memwin *mw; 2897 uint32_t pf; 2898 uint32_t reg; 2899 2900 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2901 mw = &sc->memwin[idx]; 2902 rw_assert(&mw->mw_lock, RA_WLOCKED); 2903 2904 if (is_t4(sc)) { 2905 pf = 0; 2906 mw->mw_curpos = addr & ~0xf; /* start must be 16B aligned */ 2907 } else { 2908 pf = V_PFNUM(sc->pf); 2909 mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */ 2910 } 2911 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx); 2912 t4_write_reg(sc, reg, mw->mw_curpos | pf); 2913 t4_read_reg(sc, reg); /* flush */ 2914} 2915 2916int 2917rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val, 2918 int len, int rw) 2919{ 2920 struct memwin *mw; 2921 uint32_t mw_end, v; 2922 2923 MPASS(idx >= 0 && idx < NUM_MEMWIN); 2924 2925 /* Memory can only be accessed in naturally aligned 4 byte units */ 2926 if (addr & 3 || len & 3 || len <= 0) 2927 return (EINVAL); 2928 2929 mw = &sc->memwin[idx]; 2930 while (len > 0) { 2931 rw_rlock(&mw->mw_lock); 2932 mw_end = mw->mw_curpos + mw->mw_aperture; 2933 if (addr >= mw_end || addr < mw->mw_curpos) { 2934 /* Will need to reposition the window */ 2935 if (!rw_try_upgrade(&mw->mw_lock)) { 2936 rw_runlock(&mw->mw_lock); 2937 rw_wlock(&mw->mw_lock); 2938 } 2939 rw_assert(&mw->mw_lock, RA_WLOCKED); 2940 position_memwin(sc, idx, addr); 2941 rw_downgrade(&mw->mw_lock); 2942 mw_end = mw->mw_curpos + mw->mw_aperture; 2943 } 2944 rw_assert(&mw->mw_lock, RA_RLOCKED); 2945 while (addr < mw_end && len > 0) { 2946 if (rw == 0) { 2947 v = t4_read_reg(sc, mw->mw_base + addr - 2948 mw->mw_curpos); 2949 *val++ = le32toh(v); 2950 } else { 2951 v = *val++; 2952 t4_write_reg(sc, mw->mw_base + addr - 2953 mw->mw_curpos, htole32(v)); 2954 } 2955 addr += 4; 2956 len -= 4; 2957 } 2958 rw_runlock(&mw->mw_lock); 2959 } 2960 2961 return (0); 2962} 2963 2964int 2965alloc_atid_tab(struct tid_info *t, int flags) 2966{ 2967 int i; 2968 2969 MPASS(t->natids > 0); 2970 MPASS(t->atid_tab == NULL); 2971 2972 t->atid_tab = malloc(t->natids * sizeof(*t->atid_tab), M_CXGBE, 2973 M_ZERO | flags); 2974 if (t->atid_tab == NULL) 2975 return (ENOMEM); 2976 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF); 2977 t->afree = t->atid_tab; 2978 t->atids_in_use = 0; 2979 for (i = 1; i < t->natids; i++) 2980 t->atid_tab[i - 1].next = &t->atid_tab[i]; 2981 t->atid_tab[t->natids - 1].next = NULL; 2982 2983 return (0); 2984} 2985 2986void 2987free_atid_tab(struct tid_info *t) 2988{ 2989 2990 KASSERT(t->atids_in_use == 0, 2991 ("%s: %d atids still in use.", __func__, t->atids_in_use)); 2992 2993 if (mtx_initialized(&t->atid_lock)) 2994 mtx_destroy(&t->atid_lock); 2995 free(t->atid_tab, M_CXGBE); 2996 t->atid_tab = NULL; 2997} 2998 2999int 3000alloc_atid(struct adapter *sc, void *ctx) 3001{ 3002 struct tid_info *t = &sc->tids; 3003 int atid = -1; 3004 3005 mtx_lock(&t->atid_lock); 3006 if (t->afree) { 3007 union aopen_entry *p = t->afree; 3008 3009 atid = p - t->atid_tab; 3010 MPASS(atid <= M_TID_TID); 3011 t->afree = p->next; 3012 p->data = ctx; 3013 t->atids_in_use++; 3014 } 3015 mtx_unlock(&t->atid_lock); 3016 return (atid); 3017} 3018 3019void * 3020lookup_atid(struct adapter *sc, int atid) 3021{ 3022 struct tid_info *t = &sc->tids; 3023 3024 return (t->atid_tab[atid].data); 
3025} 3026 3027void 3028free_atid(struct adapter *sc, int atid) 3029{ 3030 struct tid_info *t = &sc->tids; 3031 union aopen_entry *p = &t->atid_tab[atid]; 3032 3033 mtx_lock(&t->atid_lock); 3034 p->next = t->afree; 3035 t->afree = p; 3036 t->atids_in_use--; 3037 mtx_unlock(&t->atid_lock); 3038} 3039 3040static void 3041queue_tid_release(struct adapter *sc, int tid) 3042{ 3043 3044 CXGBE_UNIMPLEMENTED("deferred tid release"); 3045} 3046 3047void 3048release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq) 3049{ 3050 struct wrqe *wr; 3051 struct cpl_tid_release *req; 3052 3053 wr = alloc_wrqe(sizeof(*req), ctrlq); 3054 if (wr == NULL) { 3055 queue_tid_release(sc, tid); /* defer */ 3056 return; 3057 } 3058 req = wrtod(wr); 3059 3060 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid); 3061 3062 t4_wrq_tx(sc, wr); 3063} 3064 3065static int 3066t4_range_cmp(const void *a, const void *b) 3067{ 3068 return ((const struct t4_range *)a)->start - 3069 ((const struct t4_range *)b)->start; 3070} 3071 3072/* 3073 * Verify that the memory range specified by the addr/len pair is valid within 3074 * the card's address space. 3075 */ 3076static int 3077validate_mem_range(struct adapter *sc, uint32_t addr, uint32_t len) 3078{ 3079 struct t4_range mem_ranges[4], *r, *next; 3080 uint32_t em, addr_len; 3081 int i, n, remaining; 3082 3083 /* Memory can only be accessed in naturally aligned 4 byte units */ 3084 if (addr & 3 || len & 3 || len == 0) 3085 return (EINVAL); 3086 3087 /* Enabled memories */ 3088 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 3089 3090 r = &mem_ranges[0]; 3091 n = 0; 3092 bzero(r, sizeof(mem_ranges)); 3093 if (em & F_EDRAM0_ENABLE) { 3094 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 3095 r->size = G_EDRAM0_SIZE(addr_len) << 20; 3096 if (r->size > 0) { 3097 r->start = G_EDRAM0_BASE(addr_len) << 20; 3098 if (addr >= r->start && 3099 addr + len <= r->start + r->size) 3100 return (0); 3101 r++; 3102 n++; 3103 } 3104 } 3105 if (em & F_EDRAM1_ENABLE) { 3106 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 3107 r->size = G_EDRAM1_SIZE(addr_len) << 20; 3108 if (r->size > 0) { 3109 r->start = G_EDRAM1_BASE(addr_len) << 20; 3110 if (addr >= r->start && 3111 addr + len <= r->start + r->size) 3112 return (0); 3113 r++; 3114 n++; 3115 } 3116 } 3117 if (em & F_EXT_MEM_ENABLE) { 3118 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 3119 r->size = G_EXT_MEM_SIZE(addr_len) << 20; 3120 if (r->size > 0) { 3121 r->start = G_EXT_MEM_BASE(addr_len) << 20; 3122 if (addr >= r->start && 3123 addr + len <= r->start + r->size) 3124 return (0); 3125 r++; 3126 n++; 3127 } 3128 } 3129 if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) { 3130 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 3131 r->size = G_EXT_MEM1_SIZE(addr_len) << 20; 3132 if (r->size > 0) { 3133 r->start = G_EXT_MEM1_BASE(addr_len) << 20; 3134 if (addr >= r->start && 3135 addr + len <= r->start + r->size) 3136 return (0); 3137 r++; 3138 n++; 3139 } 3140 } 3141 MPASS(n <= nitems(mem_ranges)); 3142 3143 if (n > 1) { 3144 /* Sort and merge the ranges. */ 3145 qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp); 3146 3147 /* Start from index 0 and examine the next n - 1 entries. */ 3148 r = &mem_ranges[0]; 3149 for (remaining = n - 1; remaining > 0; remaining--, r++) { 3150 3151 MPASS(r->size > 0); /* r is a valid entry. */ 3152 next = r + 1; 3153 MPASS(next->size > 0); /* and so is the next one. */ 3154 3155 while (r->start + r->size >= next->start) { 3156 /* Merge the next one into the current entry. 
*/ 3157 r->size = max(r->start + r->size, 3158 next->start + next->size) - r->start; 3159 n--; /* One fewer entry in total. */ 3160 if (--remaining == 0) 3161 goto done; /* short circuit */ 3162 next++; 3163 } 3164 if (next != r + 1) { 3165 /* 3166 * Some entries were merged into r and next 3167 * points to the first valid entry that couldn't 3168 * be merged. 3169 */ 3170 MPASS(next->size > 0); /* must be valid */ 3171 memcpy(r + 1, next, remaining * sizeof(*r)); 3172#ifdef INVARIANTS 3173 /* 3174 * This is so that the foo->size assertion in the 3175 * next iteration of the loop does the right 3176 * thing for entries that were pulled up and are 3177 * no longer valid. 3178 */ 3179 MPASS(n < nitems(mem_ranges)); 3180 bzero(&mem_ranges[n], (nitems(mem_ranges) - n) * 3181 sizeof(struct t4_range)); 3182#endif 3183 } 3184 } 3185done: 3186 /* Done merging the ranges. */ 3187 MPASS(n > 0); 3188 r = &mem_ranges[0]; 3189 for (i = 0; i < n; i++, r++) { 3190 if (addr >= r->start && 3191 addr + len <= r->start + r->size) 3192 return (0); 3193 } 3194 } 3195 3196 return (EFAULT); 3197} 3198 3199static int 3200fwmtype_to_hwmtype(int mtype) 3201{ 3202 3203 switch (mtype) { 3204 case FW_MEMTYPE_EDC0: 3205 return (MEM_EDC0); 3206 case FW_MEMTYPE_EDC1: 3207 return (MEM_EDC1); 3208 case FW_MEMTYPE_EXTMEM: 3209 return (MEM_MC0); 3210 case FW_MEMTYPE_EXTMEM1: 3211 return (MEM_MC1); 3212 default: 3213 panic("%s: cannot translate fw mtype %d.", __func__, mtype); 3214 } 3215} 3216 3217/* 3218 * Verify that the memory range specified by the memtype/offset/len pair is 3219 * valid and lies entirely within the memtype specified. The global address of 3220 * the start of the range is returned in addr. 3221 */ 3222static int 3223validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, uint32_t len, 3224 uint32_t *addr) 3225{ 3226 uint32_t em, addr_len, maddr; 3227 3228 /* Memory can only be accessed in naturally aligned 4 byte units */ 3229 if (off & 3 || len & 3 || len == 0) 3230 return (EINVAL); 3231 3232 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 3233 switch (fwmtype_to_hwmtype(mtype)) { 3234 case MEM_EDC0: 3235 if (!(em & F_EDRAM0_ENABLE)) 3236 return (EINVAL); 3237 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 3238 maddr = G_EDRAM0_BASE(addr_len) << 20; 3239 break; 3240 case MEM_EDC1: 3241 if (!(em & F_EDRAM1_ENABLE)) 3242 return (EINVAL); 3243 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 3244 maddr = G_EDRAM1_BASE(addr_len) << 20; 3245 break; 3246 case MEM_MC: 3247 if (!(em & F_EXT_MEM_ENABLE)) 3248 return (EINVAL); 3249 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 3250 maddr = G_EXT_MEM_BASE(addr_len) << 20; 3251 break; 3252 case MEM_MC1: 3253 if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE)) 3254 return (EINVAL); 3255 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 3256 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 3257 break; 3258 default: 3259 return (EINVAL); 3260 } 3261 3262 *addr = maddr + off; /* global address */ 3263 return (validate_mem_range(sc, *addr, len)); 3264} 3265 3266static int 3267fixup_devlog_params(struct adapter *sc) 3268{ 3269 struct devlog_params *dparams = &sc->params.devlog; 3270 int rc; 3271 3272 rc = validate_mt_off_len(sc, dparams->memtype, dparams->start, 3273 dparams->size, &dparams->addr); 3274 3275 return (rc); 3276} 3277 3278static void 3279update_nirq(struct intrs_and_queues *iaq, int nports) 3280{ 3281 3282 iaq->nirq = T4_EXTRA_INTR; 3283 iaq->nirq += nports * max(iaq->nrxq, iaq->nnmrxq); 3284 iaq->nirq += nports * iaq->nofldrxq; 3285 iaq->nirq += nports * (iaq->num_vis -
1) * 3286 max(iaq->nrxq_vi, iaq->nnmrxq_vi); 3287 iaq->nirq += nports * (iaq->num_vis - 1) * iaq->nofldrxq_vi; 3288} 3289 3290/* 3291 * Adjust requirements to fit the number of interrupts available. 3292 */ 3293static void 3294calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype, 3295 int navail) 3296{ 3297 int old_nirq; 3298 const int nports = sc->params.nports; 3299 3300 MPASS(nports > 0); 3301 MPASS(navail > 0); 3302 3303 bzero(iaq, sizeof(*iaq)); 3304 iaq->intr_type = itype; 3305 iaq->num_vis = t4_num_vis; 3306 iaq->ntxq = t4_ntxq; 3307 iaq->ntxq_vi = t4_ntxq_vi; 3308 iaq->nrxq = t4_nrxq; 3309 iaq->nrxq_vi = t4_nrxq_vi; 3310#if defined(TCP_OFFLOAD) || defined(RATELIMIT) 3311 if (is_offload(sc) || is_ethoffload(sc)) { 3312 iaq->nofldtxq = t4_nofldtxq; 3313 iaq->nofldtxq_vi = t4_nofldtxq_vi; 3314 } 3315#endif 3316#ifdef TCP_OFFLOAD 3317 if (is_offload(sc)) { 3318 iaq->nofldrxq = t4_nofldrxq; 3319 iaq->nofldrxq_vi = t4_nofldrxq_vi; 3320 } 3321#endif 3322#ifdef DEV_NETMAP 3323 if (t4_native_netmap & NN_MAIN_VI) { 3324 iaq->nnmtxq = t4_nnmtxq; 3325 iaq->nnmrxq = t4_nnmrxq; 3326 } 3327 if (t4_native_netmap & NN_EXTRA_VI) { 3328 iaq->nnmtxq_vi = t4_nnmtxq_vi; 3329 iaq->nnmrxq_vi = t4_nnmrxq_vi; 3330 } 3331#endif 3332 3333 update_nirq(iaq, nports); 3334 if (iaq->nirq <= navail && 3335 (itype != INTR_MSI || powerof2(iaq->nirq))) { 3336 /* 3337 * This is the normal case -- there are enough interrupts for 3338 * everything. 3339 */ 3340 goto done; 3341 } 3342 3343 /* 3344 * If extra VIs have been configured try reducing their count and see if 3345 * that works. 3346 */ 3347 while (iaq->num_vis > 1) { 3348 iaq->num_vis--; 3349 update_nirq(iaq, nports); 3350 if (iaq->nirq <= navail && 3351 (itype != INTR_MSI || powerof2(iaq->nirq))) { 3352 device_printf(sc->dev, "virtual interfaces per port " 3353 "reduced to %d from %d. nrxq=%u, nofldrxq=%u, " 3354 "nrxq_vi=%u nofldrxq_vi=%u, nnmrxq_vi=%u. " 3355 "itype %d, navail %u, nirq %d.\n", 3356 iaq->num_vis, t4_num_vis, iaq->nrxq, iaq->nofldrxq, 3357 iaq->nrxq_vi, iaq->nofldrxq_vi, iaq->nnmrxq_vi, 3358 itype, navail, iaq->nirq); 3359 goto done; 3360 } 3361 } 3362 3363 /* 3364 * Extra VIs will not be created. Log a message if they were requested. 3365 */ 3366 MPASS(iaq->num_vis == 1); 3367 iaq->ntxq_vi = iaq->nrxq_vi = 0; 3368 iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0; 3369 iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0; 3370 if (iaq->num_vis != t4_num_vis) { 3371 device_printf(sc->dev, "extra virtual interfaces disabled. " 3372 "nrxq=%u, nofldrxq=%u, nrxq_vi=%u nofldrxq_vi=%u, " 3373 "nnmrxq_vi=%u. itype %d, navail %u, nirq %d.\n", 3374 iaq->nrxq, iaq->nofldrxq, iaq->nrxq_vi, iaq->nofldrxq_vi, 3375 iaq->nnmrxq_vi, itype, navail, iaq->nirq); 3376 } 3377 3378 /* 3379 * Keep reducing the number of NIC rx queues to the next lower power of 3380 * 2 (for even RSS distribution) and halving the TOE rx queues and see 3381 * if that works. 3382 */ 3383 do { 3384 if (iaq->nrxq > 1) { 3385 do { 3386 iaq->nrxq--; 3387 } while (!powerof2(iaq->nrxq)); 3388 if (iaq->nnmrxq > iaq->nrxq) 3389 iaq->nnmrxq = iaq->nrxq; 3390 } 3391 if (iaq->nofldrxq > 1) 3392 iaq->nofldrxq >>= 1; 3393 3394 old_nirq = iaq->nirq; 3395 update_nirq(iaq, nports); 3396 if (iaq->nirq <= navail && 3397 (itype != INTR_MSI || powerof2(iaq->nirq))) { 3398 device_printf(sc->dev, "running with reduced number of " 3399 "rx queues because of shortage of interrupts. " 3400 "nrxq=%u, nofldrxq=%u. 
" 3401 "itype %d, navail %u, nirq %d.\n", iaq->nrxq, 3402 iaq->nofldrxq, itype, navail, iaq->nirq); 3403 goto done; 3404 } 3405 } while (old_nirq != iaq->nirq); 3406 3407 /* One interrupt for everything. Ugh. */ 3408 device_printf(sc->dev, "running with minimal number of queues. " 3409 "itype %d, navail %u.\n", itype, navail); 3410 iaq->nirq = 1; 3411 iaq->nrxq = 1; 3412 iaq->ntxq = 1; 3413 if (iaq->nofldrxq > 0) { 3414 iaq->nofldrxq = 1; 3415 iaq->nofldtxq = 1; 3416 } 3417 iaq->nnmtxq = 0; 3418 iaq->nnmrxq = 0; 3419done: 3420 MPASS(iaq->num_vis > 0); 3421 if (iaq->num_vis > 1) { 3422 MPASS(iaq->nrxq_vi > 0); 3423 MPASS(iaq->ntxq_vi > 0); 3424 } 3425 MPASS(iaq->nirq > 0); 3426 MPASS(iaq->nrxq > 0); 3427 MPASS(iaq->ntxq > 0); 3428 if (itype == INTR_MSI) { 3429 MPASS(powerof2(iaq->nirq)); 3430 } 3431} 3432 3433static int 3434cfg_itype_and_nqueues(struct adapter *sc, struct intrs_and_queues *iaq) 3435{ 3436 int rc, itype, navail, nalloc; 3437 3438 for (itype = INTR_MSIX; itype; itype >>= 1) { 3439 3440 if ((itype & t4_intr_types) == 0) 3441 continue; /* not allowed */ 3442 3443 if (itype == INTR_MSIX) 3444 navail = pci_msix_count(sc->dev); 3445 else if (itype == INTR_MSI) 3446 navail = pci_msi_count(sc->dev); 3447 else 3448 navail = 1; 3449restart: 3450 if (navail == 0) 3451 continue; 3452 3453 calculate_iaq(sc, iaq, itype, navail); 3454 nalloc = iaq->nirq; 3455 rc = 0; 3456 if (itype == INTR_MSIX) 3457 rc = pci_alloc_msix(sc->dev, &nalloc); 3458 else if (itype == INTR_MSI) 3459 rc = pci_alloc_msi(sc->dev, &nalloc); 3460 3461 if (rc == 0 && nalloc > 0) { 3462 if (nalloc == iaq->nirq) 3463 return (0); 3464 3465 /* 3466 * Didn't get the number requested. Use whatever number 3467 * the kernel is willing to allocate. 3468 */ 3469 device_printf(sc->dev, "fewer vectors than requested, " 3470 "type=%d, req=%d, rcvd=%d; will downshift req.\n", 3471 itype, iaq->nirq, nalloc); 3472 pci_release_msi(sc->dev); 3473 navail = nalloc; 3474 goto restart; 3475 } 3476 3477 device_printf(sc->dev, 3478 "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n", 3479 itype, rc, iaq->nirq, nalloc); 3480 } 3481 3482 device_printf(sc->dev, 3483 "failed to find a usable interrupt type. " 3484 "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types, 3485 pci_msix_count(sc->dev), pci_msi_count(sc->dev)); 3486 3487 return (ENXIO); 3488} 3489 3490#define FW_VERSION(chip) ( \ 3491 V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \ 3492 V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \ 3493 V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \ 3494 V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD)) 3495#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf) 3496 3497/* Just enough of fw_hdr to cover all version info. */ 3498struct fw_h { 3499 __u8 ver; 3500 __u8 chip; 3501 __be16 len512; 3502 __be32 fw_ver; 3503 __be32 tp_microcode_ver; 3504 __u8 intfver_nic; 3505 __u8 intfver_vnic; 3506 __u8 intfver_ofld; 3507 __u8 intfver_ri; 3508 __u8 intfver_iscsipdu; 3509 __u8 intfver_iscsi; 3510 __u8 intfver_fcoepdu; 3511 __u8 intfver_fcoe; 3512}; 3513/* Spot check a couple of fields. 
*/ 3514CTASSERT(offsetof(struct fw_h, fw_ver) == offsetof(struct fw_hdr, fw_ver)); 3515CTASSERT(offsetof(struct fw_h, intfver_nic) == offsetof(struct fw_hdr, intfver_nic)); 3516CTASSERT(offsetof(struct fw_h, intfver_fcoe) == offsetof(struct fw_hdr, intfver_fcoe)); 3517 3518struct fw_info { 3519 uint8_t chip; 3520 char *kld_name; 3521 char *fw_mod_name; 3522 struct fw_h fw_h; 3523} fw_info[] = { 3524 { 3525 .chip = CHELSIO_T4, 3526 .kld_name = "t4fw_cfg", 3527 .fw_mod_name = "t4fw", 3528 .fw_h = { 3529 .chip = FW_HDR_CHIP_T4, 3530 .fw_ver = htobe32(FW_VERSION(T4)), 3531 .intfver_nic = FW_INTFVER(T4, NIC), 3532 .intfver_vnic = FW_INTFVER(T4, VNIC), 3533 .intfver_ofld = FW_INTFVER(T4, OFLD), 3534 .intfver_ri = FW_INTFVER(T4, RI), 3535 .intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU), 3536 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 3537 .intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU), 3538 .intfver_fcoe = FW_INTFVER(T4, FCOE), 3539 }, 3540 }, { 3541 .chip = CHELSIO_T5, 3542 .kld_name = "t5fw_cfg", 3543 .fw_mod_name = "t5fw", 3544 .fw_h = { 3545 .chip = FW_HDR_CHIP_T5, 3546 .fw_ver = htobe32(FW_VERSION(T5)), 3547 .intfver_nic = FW_INTFVER(T5, NIC), 3548 .intfver_vnic = FW_INTFVER(T5, VNIC), 3549 .intfver_ofld = FW_INTFVER(T5, OFLD), 3550 .intfver_ri = FW_INTFVER(T5, RI), 3551 .intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU), 3552 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 3553 .intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU), 3554 .intfver_fcoe = FW_INTFVER(T5, FCOE), 3555 }, 3556 }, { 3557 .chip = CHELSIO_T6, 3558 .kld_name = "t6fw_cfg", 3559 .fw_mod_name = "t6fw", 3560 .fw_h = { 3561 .chip = FW_HDR_CHIP_T6, 3562 .fw_ver = htobe32(FW_VERSION(T6)), 3563 .intfver_nic = FW_INTFVER(T6, NIC), 3564 .intfver_vnic = FW_INTFVER(T6, VNIC), 3565 .intfver_ofld = FW_INTFVER(T6, OFLD), 3566 .intfver_ri = FW_INTFVER(T6, RI), 3567 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU), 3568 .intfver_iscsi = FW_INTFVER(T6, ISCSI), 3569 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU), 3570 .intfver_fcoe = FW_INTFVER(T6, FCOE), 3571 }, 3572 } 3573}; 3574 3575static struct fw_info * 3576find_fw_info(int chip) 3577{ 3578 int i; 3579 3580 for (i = 0; i < nitems(fw_info); i++) { 3581 if (fw_info[i].chip == chip) 3582 return (&fw_info[i]); 3583 } 3584 return (NULL); 3585} 3586 3587/* 3588 * Is the given firmware API compatible with the one the driver was compiled 3589 * with? 3590 */ 3591static int 3592fw_compatible(const struct fw_h *hdr1, const struct fw_h *hdr2) 3593{ 3594 3595 /* short circuit if it's the exact same firmware version */ 3596 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 3597 return (1); 3598 3599 /* 3600 * XXX: Is this too conservative? Perhaps I should limit this to the 3601 * features that are supported in the driver. 
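 * As written, the chip and every interface version in the two headers
 * must match for the firmwares to be considered compatible.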
3602 */ 3603#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 3604 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 3605 SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) && 3606 SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe)) 3607 return (1); 3608#undef SAME_INTF 3609 3610 return (0); 3611} 3612 3613static int 3614load_fw_module(struct adapter *sc, const struct firmware **dcfg, 3615 const struct firmware **fw) 3616{ 3617 struct fw_info *fw_info; 3618 3619 *dcfg = NULL; 3620 if (fw != NULL) 3621 *fw = NULL; 3622 3623 fw_info = find_fw_info(chip_id(sc)); 3624 if (fw_info == NULL) { 3625 device_printf(sc->dev, 3626 "unable to look up firmware information for chip %d.\n", 3627 chip_id(sc)); 3628 return (EINVAL); 3629 } 3630 3631 *dcfg = firmware_get(fw_info->kld_name); 3632 if (*dcfg != NULL) { 3633 if (fw != NULL) 3634 *fw = firmware_get(fw_info->fw_mod_name); 3635 return (0); 3636 } 3637 3638 return (ENOENT); 3639} 3640 3641static void 3642unload_fw_module(struct adapter *sc, const struct firmware *dcfg, 3643 const struct firmware *fw) 3644{ 3645 3646 if (fw != NULL) 3647 firmware_put(fw, FIRMWARE_UNLOAD); 3648 if (dcfg != NULL) 3649 firmware_put(dcfg, FIRMWARE_UNLOAD); 3650} 3651 3652/* 3653 * Return values: 3654 * 0 means no firmware install attempted. 3655 * ERESTART means a firmware install was attempted and was successful. 3656 * +ve errno means a firmware install was attempted but failed. 3657 */ 3658static int 3659install_kld_firmware(struct adapter *sc, struct fw_h *card_fw, 3660 const struct fw_h *drv_fw, const char *reason, int *already) 3661{ 3662 const struct firmware *cfg, *fw; 3663 const uint32_t c = be32toh(card_fw->fw_ver); 3664 uint32_t d, k; 3665 int rc, fw_install; 3666 struct fw_h bundled_fw; 3667 bool load_attempted; 3668 3669 cfg = fw = NULL; 3670 load_attempted = false; 3671 fw_install = t4_fw_install < 0 ? -t4_fw_install : t4_fw_install; 3672 3673 memcpy(&bundled_fw, drv_fw, sizeof(bundled_fw)); 3674 if (t4_fw_install < 0) { 3675 rc = load_fw_module(sc, &cfg, &fw); 3676 if (rc != 0 || fw == NULL) { 3677 device_printf(sc->dev, 3678 "failed to load firmware module: %d. cfg %p, fw %p;" 3679 " will use compiled-in firmware version for" 3680 " hw.cxgbe.fw_install checks.\n", 3681 rc, cfg, fw); 3682 } else { 3683 memcpy(&bundled_fw, fw->data, sizeof(bundled_fw)); 3684 } 3685 load_attempted = true; 3686 } 3687 d = be32toh(bundled_fw.fw_ver); 3688 3689 if (reason != NULL) 3690 goto install; 3691 3692 if ((sc->flags & FW_OK) == 0) { 3693 3694 if (c == 0xffffffff) { 3695 reason = "missing"; 3696 goto install; 3697 } 3698 3699 rc = 0; 3700 goto done; 3701 } 3702 3703 if (!fw_compatible(card_fw, &bundled_fw)) { 3704 reason = "incompatible or unusable"; 3705 goto install; 3706 } 3707 3708 if (d > c) { 3709 reason = "older than the version bundled with this driver"; 3710 goto install; 3711 } 3712 3713 if (fw_install == 2 && d != c) { 3714 reason = "different than the version bundled with this driver"; 3715 goto install; 3716 } 3717 3718 /* No reason to do anything to the firmware already on the card.
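	 * It is compatible, at least as new as the bundled version, and no
	 * install policy demands a replacement.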
*/ 3719 rc = 0; 3720 goto done; 3721 3722install: 3723 rc = 0; 3724 if ((*already)++) 3725 goto done; 3726 3727 if (fw_install == 0) { 3728 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 3729 "but the driver is prohibited from installing a firmware " 3730 "on the card.\n", 3731 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3732 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 3733 3734 goto done; 3735 } 3736 3737 /* 3738 * We'll attempt to install a firmware. Load the module first (if it 3739 * hasn't been loaded already). 3740 */ 3741 if (!load_attempted) { 3742 rc = load_fw_module(sc, &cfg, &fw); 3743 if (rc != 0 || fw == NULL) { 3744 device_printf(sc->dev, 3745 "failed to load firmware module: %d. cfg %p, fw %p\n", 3746 rc, cfg, fw); 3747 /* carry on */ 3748 } 3749 } 3750 if (fw == NULL) { 3751 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 3752 "but the driver cannot take corrective action because it " 3753 "is unable to load the firmware module.\n", 3754 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3755 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason); 3756 rc = sc->flags & FW_OK ? 0 : ENOENT; 3757 goto done; 3758 } 3759 k = be32toh(((const struct fw_hdr *)fw->data)->fw_ver); 3760 if (k != d) { 3761 MPASS(t4_fw_install > 0); 3762 device_printf(sc->dev, 3763 "firmware in KLD (%u.%u.%u.%u) is not what the driver was " 3764 "expecting (%u.%u.%u.%u) and will not be used.\n", 3765 G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k), 3766 G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k), 3767 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 3768 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); 3769 rc = sc->flags & FW_OK ? 0 : EINVAL; 3770 goto done; 3771 } 3772 3773 device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, " 3774 "installing firmware %u.%u.%u.%u on card.\n", 3775 G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c), 3776 G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason, 3777 G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d), 3778 G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d)); 3779 3780 rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0); 3781 if (rc != 0) { 3782 device_printf(sc->dev, "failed to install firmware: %d\n", rc); 3783 } else { 3784 /* Installed successfully, update the cached header too. */ 3785 rc = ERESTART; 3786 memcpy(card_fw, fw->data, sizeof(*card_fw)); 3787 } 3788done: 3789 unload_fw_module(sc, cfg, fw); 3790 3791 return (rc); 3792} 3793 3794/* 3795 * Establish contact with the firmware and attempt to become the master driver. 3796 * 3797 * A firmware will be installed to the card if needed (if the driver is allowed 3798 * to do so). 
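 * A successful install returns ERESTART and this function re-reads the
 * firmware header from flash before trying again.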
3799 */ 3800static int 3801contact_firmware(struct adapter *sc) 3802{ 3803 int rc, already = 0; 3804 enum dev_state state; 3805 struct fw_info *fw_info; 3806 struct fw_hdr *card_fw; /* fw on the card */ 3807 const struct fw_h *drv_fw; 3808 3809 fw_info = find_fw_info(chip_id(sc)); 3810 if (fw_info == NULL) { 3811 device_printf(sc->dev, 3812 "unable to look up firmware information for chip %d.\n", 3813 chip_id(sc)); 3814 return (EINVAL); 3815 } 3816 drv_fw = &fw_info->fw_h; 3817 3818 /* Read the header of the firmware on the card */ 3819 card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK); 3820restart: 3821 rc = -t4_get_fw_hdr(sc, card_fw); 3822 if (rc != 0) { 3823 device_printf(sc->dev, 3824 "unable to read firmware header from card's flash: %d\n", 3825 rc); 3826 goto done; 3827 } 3828 3829 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, NULL, 3830 &already); 3831 if (rc == ERESTART) 3832 goto restart; 3833 if (rc != 0) 3834 goto done; 3835 3836 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state); 3837 if (rc < 0 || state == DEV_STATE_ERR) { 3838 rc = -rc; 3839 device_printf(sc->dev, 3840 "failed to connect to the firmware: %d, %d. " 3841 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); 3842#if 0 3843 if (install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, 3844 "not responding properly to HELLO", &already) == ERESTART) 3845 goto restart; 3846#endif 3847 goto done; 3848 } 3849 MPASS(be32toh(card_fw->flags) & FW_HDR_FLAGS_RESET_HALT); 3850 sc->flags |= FW_OK; /* The firmware responded to the FW_HELLO. */ 3851 3852 if (rc == sc->pf) { 3853 sc->flags |= MASTER_PF; 3854 rc = install_kld_firmware(sc, (struct fw_h *)card_fw, drv_fw, 3855 NULL, &already); 3856 if (rc == ERESTART) 3857 rc = 0; 3858 else if (rc != 0) 3859 goto done; 3860 } else if (state == DEV_STATE_UNINIT) { 3861 /* 3862 * We didn't get to be the master so we definitely won't be 3863 * configuring the chip. It's a bug if someone else hasn't 3864 * configured it already. 3865 */ 3866 device_printf(sc->dev, "couldn't be master(%d), " 3867 "device not already initialized either(%d). " 3868 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); 3869 rc = EPROTO; 3870 goto done; 3871 } else { 3872 /* 3873 * Some other PF is the master and has configured the chip. 3874 * This is allowed but untested. 3875 */ 3876 device_printf(sc->dev, "PF%d is master, device state %d. " 3877 "PCIE_FW 0x%08x\n", rc, state, t4_read_reg(sc, A_PCIE_FW)); 3878 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", rc); 3879 sc->cfcsum = 0; 3880 rc = 0; 3881 } 3882done: 3883 if (rc != 0 && sc->flags & FW_OK) { 3884 t4_fw_bye(sc, sc->mbox); 3885 sc->flags &= ~FW_OK; 3886 } 3887 free(card_fw, M_CXGBE); 3888 return (rc); 3889} 3890 3891static int 3892copy_cfg_file_to_card(struct adapter *sc, char *cfg_file, 3893 uint32_t mtype, uint32_t moff) 3894{ 3895 struct fw_info *fw_info; 3896 const struct firmware *dcfg, *rcfg = NULL; 3897 const uint32_t *cfdata; 3898 uint32_t cflen, addr; 3899 int rc; 3900 3901 load_fw_module(sc, &dcfg, NULL); 3902 3903 /* Card specific interpretation of "default". 
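 * (Device id 0x440a is pointed at the uwire profile and FPGAs at the
 * FPGA profile; see UWIRE_CF and FPGA_CF just below.)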
*/ 3904 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 3905 if (pci_get_device(sc->dev) == 0x440a) 3906 snprintf(cfg_file, sizeof(t4_cfg_file), UWIRE_CF); 3907 if (is_fpga(sc)) 3908 snprintf(cfg_file, sizeof(t4_cfg_file), FPGA_CF); 3909 } 3910 3911 if (strncmp(cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) { 3912 if (dcfg == NULL) { 3913 device_printf(sc->dev, 3914 "KLD with default config is not available.\n"); 3915 rc = ENOENT; 3916 goto done; 3917 } 3918 cfdata = dcfg->data; 3919 cflen = dcfg->datasize & ~3; 3920 } else { 3921 char s[32]; 3922 3923 fw_info = find_fw_info(chip_id(sc)); 3924 if (fw_info == NULL) { 3925 device_printf(sc->dev, 3926 "unable to look up firmware information for chip %d.\n", 3927 chip_id(sc)); 3928 rc = EINVAL; 3929 goto done; 3930 } 3931 snprintf(s, sizeof(s), "%s_%s", fw_info->kld_name, cfg_file); 3932 3933 rcfg = firmware_get(s); 3934 if (rcfg == NULL) { 3935 device_printf(sc->dev, 3936 "unable to load module \"%s\" for configuration " 3937 "profile \"%s\".\n", s, cfg_file); 3938 rc = ENOENT; 3939 goto done; 3940 } 3941 cfdata = rcfg->data; 3942 cflen = rcfg->datasize & ~3; 3943 } 3944 3945 if (cflen > FLASH_CFG_MAX_SIZE) { 3946 device_printf(sc->dev, 3947 "config file too long (%d, max allowed is %d).\n", 3948 cflen, FLASH_CFG_MAX_SIZE); 3949 rc = EINVAL; 3950 goto done; 3951 } 3952 3953 rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr); 3954 if (rc != 0) { 3955 device_printf(sc->dev, 3956 "%s: addr (%d/0x%x) or len %d is not valid: %d.\n", 3957 __func__, mtype, moff, cflen, rc); 3958 rc = EINVAL; 3959 goto done; 3960 } 3961 write_via_memwin(sc, 2, addr, cfdata, cflen); 3962done: 3963 if (rcfg != NULL) 3964 firmware_put(rcfg, FIRMWARE_UNLOAD); 3965 unload_fw_module(sc, dcfg, NULL); 3966 return (rc); 3967} 3968 3969struct caps_allowed { 3970 uint16_t nbmcaps; 3971 uint16_t linkcaps; 3972 uint16_t switchcaps; 3973 uint16_t niccaps; 3974 uint16_t toecaps; 3975 uint16_t rdmacaps; 3976 uint16_t cryptocaps; 3977 uint16_t iscsicaps; 3978 uint16_t fcoecaps; 3979}; 3980 3981#define FW_PARAM_DEV(param) \ 3982 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 3983 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 3984#define FW_PARAM_PFVF(param) \ 3985 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 3986 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 3987 3988/* 3989 * Provide a configuration profile to the firmware and have it initialize the 3990 * chip accordingly. This may involve uploading a configuration file to the 3991 * card. 
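 *
 * Three config sources are handled below: the firmware's built-in
 * config (BUILTIN_CF, nothing to upload), a config already present in
 * flash (FLASH_CF), or a file from a KLD that gets uploaded to a memory
 * location the firmware chooses (queried via FW_PARAM_DEV(CF)).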
*/ 3993static int 3994apply_cfg_and_initialize(struct adapter *sc, char *cfg_file, 3995 const struct caps_allowed *caps_allowed) 3996{ 3997 int rc; 3998 struct fw_caps_config_cmd caps; 3999 uint32_t mtype, moff, finicsum, cfcsum, param, val; 4000 4001 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); 4002 if (rc != 0) { 4003 device_printf(sc->dev, "firmware reset failed: %d.\n", rc); 4004 return (rc); 4005 } 4006 4007 bzero(&caps, sizeof(caps)); 4008 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 4009 F_FW_CMD_REQUEST | F_FW_CMD_READ); 4010 if (strncmp(cfg_file, BUILTIN_CF, sizeof(t4_cfg_file)) == 0) { 4011 mtype = 0; 4012 moff = 0; 4013 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 4014 } else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) { 4015 mtype = FW_MEMTYPE_FLASH; 4016 moff = t4_flash_cfg_addr(sc); 4017 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 4018 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 4019 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | 4020 FW_LEN16(caps)); 4021 } else { 4022 /* 4023 * Ask the firmware where it wants us to upload the config file. 4024 */ 4025 param = FW_PARAM_DEV(CF); 4026 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 4027 if (rc != 0) { 4028 /* No support for config file? Shouldn't happen. */ 4029 device_printf(sc->dev, 4030 "failed to query config file location: %d.\n", rc); 4031 goto done; 4032 } 4033 mtype = G_FW_PARAMS_PARAM_Y(val); 4034 moff = G_FW_PARAMS_PARAM_Z(val) << 16; 4035 caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID | 4036 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 4037 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | 4038 FW_LEN16(caps)); 4039 4040 rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff); 4041 if (rc != 0) { 4042 device_printf(sc->dev, 4043 "failed to upload config file to card: %d.\n", rc); 4044 goto done; 4045 } 4046 } 4047 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 4048 if (rc != 0) { 4049 device_printf(sc->dev, "failed to pre-process config file: %d " 4050 "(mtype %d, moff 0x%x).\n", rc, mtype, moff); 4051 goto done; 4052 } 4053 4054 finicsum = be32toh(caps.finicsum); 4055 cfcsum = be32toh(caps.cfcsum); /* actual */ 4056 if (finicsum != cfcsum) { 4057 device_printf(sc->dev, 4058 "WARNING: config file checksum mismatch: %08x %08x\n", 4059 finicsum, cfcsum); 4060 } 4061 sc->cfcsum = cfcsum; 4062 snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", cfg_file); 4063 4064 /* 4065 * Let the firmware know what features will (not) be used so it can tune 4066 * things accordingly. 4067 */ 4068#define LIMIT_CAPS(x) do { \ 4069 caps.x##caps &= htobe16(caps_allowed->x##caps); \ 4070} while (0) 4071 LIMIT_CAPS(nbm); 4072 LIMIT_CAPS(link); 4073 LIMIT_CAPS(switch); 4074 LIMIT_CAPS(nic); 4075 LIMIT_CAPS(toe); 4076 LIMIT_CAPS(rdma); 4077 LIMIT_CAPS(crypto); 4078 LIMIT_CAPS(iscsi); 4079 LIMIT_CAPS(fcoe); 4080#undef LIMIT_CAPS 4081 if (caps.niccaps & htobe16(FW_CAPS_CONFIG_NIC_HASHFILTER)) { 4082 /* 4083 * TOE and hashfilters are mutually exclusive. It is a config 4084 * file or firmware bug if both are reported as available. Try 4085 * to cope with the situation in non-debug builds by disabling 4086 * TOE.
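 * RDMA and iSCSI depend on the TOE, so their caps are cleared along
 * with toecaps below.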
4087 */ 4088 MPASS(caps.toecaps == 0); 4089 4090 caps.toecaps = 0; 4091 caps.rdmacaps = 0; 4092 caps.iscsicaps = 0; 4093 } 4094 4095 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 4096 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 4097 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 4098 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), NULL); 4099 if (rc != 0) { 4100 device_printf(sc->dev, 4101 "failed to process config file: %d.\n", rc); 4102 goto done; 4103 } 4104 4105 t4_tweak_chip_settings(sc); 4106 set_params__pre_init(sc); 4107 4108 /* get basic stuff going */ 4109 rc = -t4_fw_initialize(sc, sc->mbox); 4110 if (rc != 0) { 4111 device_printf(sc->dev, "fw_initialize failed: %d.\n", rc); 4112 goto done; 4113 } 4114done: 4115 return (rc); 4116} 4117 4118/* 4119 * Partition chip resources for use between various PFs, VFs, etc. 4120 */ 4121static int 4122partition_resources(struct adapter *sc) 4123{ 4124 char cfg_file[sizeof(t4_cfg_file)]; 4125 struct caps_allowed caps_allowed; 4126 int rc; 4127 bool fallback; 4128 4129 /* Only the master driver gets to configure the chip resources. */ 4130 MPASS(sc->flags & MASTER_PF); 4131 4132#define COPY_CAPS(x) do { \ 4133 caps_allowed.x##caps = t4_##x##caps_allowed; \ 4134} while (0) 4135 bzero(&caps_allowed, sizeof(caps_allowed)); 4136 COPY_CAPS(nbm); 4137 COPY_CAPS(link); 4138 COPY_CAPS(switch); 4139 COPY_CAPS(nic); 4140 COPY_CAPS(toe); 4141 COPY_CAPS(rdma); 4142 COPY_CAPS(crypto); 4143 COPY_CAPS(iscsi); 4144 COPY_CAPS(fcoe); 4145 fallback = sc->debug_flags & DF_DISABLE_CFG_RETRY ? false : true; 4146 snprintf(cfg_file, sizeof(cfg_file), "%s", t4_cfg_file); 4147retry: 4148 rc = apply_cfg_and_initialize(sc, cfg_file, &caps_allowed); 4149 if (rc != 0 && fallback) { 4150 device_printf(sc->dev, 4151 "failed (%d) to configure card with \"%s\" profile, " 4152 "will fall back to a basic configuration and retry.\n", 4153 rc, cfg_file); 4154 snprintf(cfg_file, sizeof(cfg_file), "%s", BUILTIN_CF); 4155 bzero(&caps_allowed, sizeof(caps_allowed)); 4156 COPY_CAPS(switch); 4157 caps_allowed.niccaps = FW_CAPS_CONFIG_NIC; 4158 fallback = false; 4159 goto retry; 4160 } 4161#undef COPY_CAPS 4162 return (rc); 4163} 4164 4165/* 4166 * Retrieve parameters that are needed (or nice to have) very early. 
*/ 4168static int 4169get_params__pre_init(struct adapter *sc) 4170{ 4171 int rc; 4172 uint32_t param[2], val[2]; 4173 4174 t4_get_version_info(sc); 4175 4176 snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u", 4177 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 4178 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 4179 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 4180 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 4181 4182 snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u", 4183 G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers), 4184 G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers), 4185 G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers), 4186 G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers)); 4187 4188 snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u", 4189 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 4190 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 4191 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 4192 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 4193 4194 snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u", 4195 G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers), 4196 G_FW_HDR_FW_VER_MINOR(sc->params.er_vers), 4197 G_FW_HDR_FW_VER_MICRO(sc->params.er_vers), 4198 G_FW_HDR_FW_VER_BUILD(sc->params.er_vers)); 4199 4200 param[0] = FW_PARAM_DEV(PORTVEC); 4201 param[1] = FW_PARAM_DEV(CCLK); 4202 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 4203 if (rc != 0) { 4204 device_printf(sc->dev, 4205 "failed to query parameters (pre_init): %d.\n", rc); 4206 return (rc); 4207 } 4208 4209 sc->params.portvec = val[0]; 4210 sc->params.nports = bitcount32(val[0]); 4211 sc->params.vpd.cclk = val[1]; 4212 4213 /* Read device log parameters. */ 4214 rc = -t4_init_devlog_params(sc, 1); 4215 if (rc == 0) 4216 fixup_devlog_params(sc); 4217 else { 4218 device_printf(sc->dev, 4219 "failed to get devlog parameters: %d.\n", rc); 4220 rc = 0; /* devlog isn't critical for device operation */ 4221 } 4222 4223 return (rc); 4224} 4225 4226/* 4227 * Any params that need to be set before FW_INITIALIZE. 4228 */ 4229static int 4230set_params__pre_init(struct adapter *sc) 4231{ 4232 int rc = 0; 4233 uint32_t param, val; 4234 4235 if (chip_id(sc) >= CHELSIO_T6) { 4236 param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT); 4237 val = 1; 4238 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 4239 /* firmwares < 1.20.1.0 do not have this param. */ 4240 if (rc == FW_EINVAL && 4241 sc->params.fw_vers < FW_VERSION32(1, 20, 1, 0)) { 4242 rc = 0; 4243 } 4244 if (rc != 0) { 4245 device_printf(sc->dev, 4246 "failed to enable high priority filters: %d.\n", 4247 rc); 4248 } 4249 } 4250 4251 /* Enable opaque VIIDs with firmwares that support it. */ 4252 param = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN); 4253 val = 1; 4254 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 4255 if (rc == 0 && val == 1) 4256 sc->params.viid_smt_extn_support = true; 4257 else 4258 sc->params.viid_smt_extn_support = false; 4259 4260 return (rc); 4261} 4262 4263/* 4264 * Retrieve various parameters that are of interest to the driver. The device 4265 * has been initialized by the firmware at this point.
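 *
 * Most resources come back as {START, END} pairs; the driver stores
 * size = END - START + 1 and treats a range whose END does not exceed
 * its START as "resource not available".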
4266 */ 4267static int 4268get_params__post_init(struct adapter *sc) 4269{ 4270 int rc; 4271 uint32_t param[7], val[7]; 4272 struct fw_caps_config_cmd caps; 4273 4274 param[0] = FW_PARAM_PFVF(IQFLINT_START); 4275 param[1] = FW_PARAM_PFVF(EQ_START); 4276 param[2] = FW_PARAM_PFVF(FILTER_START); 4277 param[3] = FW_PARAM_PFVF(FILTER_END); 4278 param[4] = FW_PARAM_PFVF(L2T_START); 4279 param[5] = FW_PARAM_PFVF(L2T_END); 4280 param[6] = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 4281 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) | 4282 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD); 4283 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 7, param, val); 4284 if (rc != 0) { 4285 device_printf(sc->dev, 4286 "failed to query parameters (post_init): %d.\n", rc); 4287 return (rc); 4288 } 4289 4290 sc->sge.iq_start = val[0]; 4291 sc->sge.eq_start = val[1]; 4292 if ((int)val[3] > (int)val[2]) { 4293 sc->tids.ftid_base = val[2]; 4294 sc->tids.ftid_end = val[3]; 4295 sc->tids.nftids = val[3] - val[2] + 1; 4296 } 4297 sc->vres.l2t.start = val[4]; 4298 sc->vres.l2t.size = val[5] - val[4] + 1; 4299 KASSERT(sc->vres.l2t.size <= L2T_SIZE, 4300 ("%s: L2 table size (%u) larger than expected (%u)", 4301 __func__, sc->vres.l2t.size, L2T_SIZE)); 4302 sc->params.core_vdd = val[6]; 4303 4304 param[0] = FW_PARAM_PFVF(IQFLINT_END); 4305 param[1] = FW_PARAM_PFVF(EQ_END); 4306 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 4307 if (rc != 0) { 4308 device_printf(sc->dev, 4309 "failed to query parameters (post_init2): %d.\n", rc); 4310 return (rc); 4311 } 4312 MPASS((int)val[0] >= sc->sge.iq_start); 4313 sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1; 4314 MPASS((int)val[1] >= sc->sge.eq_start); 4315 sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1; 4316 4317 if (chip_id(sc) >= CHELSIO_T6) { 4318 4319 sc->tids.tid_base = t4_read_reg(sc, 4320 A_LE_DB_ACTIVE_TABLE_START_INDEX); 4321 4322 param[0] = FW_PARAM_PFVF(HPFILTER_START); 4323 param[1] = FW_PARAM_PFVF(HPFILTER_END); 4324 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 4325 if (rc != 0) { 4326 device_printf(sc->dev, 4327 "failed to query hpfilter parameters: %d.\n", rc); 4328 return (rc); 4329 } 4330 if ((int)val[1] > (int)val[0]) { 4331 sc->tids.hpftid_base = val[0]; 4332 sc->tids.hpftid_end = val[1]; 4333 sc->tids.nhpftids = val[1] - val[0] + 1; 4334 4335 /* 4336 * These should go off if the layout changes and the 4337 * driver needs to catch up. 4338 */ 4339 MPASS(sc->tids.hpftid_base == 0); 4340 MPASS(sc->tids.tid_base == sc->tids.nhpftids); 4341 } 4342 4343 param[0] = FW_PARAM_PFVF(RAWF_START); 4344 param[1] = FW_PARAM_PFVF(RAWF_END); 4345 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 4346 if (rc != 0) { 4347 device_printf(sc->dev, 4348 "failed to query rawf parameters: %d.\n", rc); 4349 return (rc); 4350 } 4351 if ((int)val[1] > (int)val[0]) { 4352 sc->rawf_base = val[0]; 4353 sc->nrawf = val[1] - val[0] + 1; 4354 } 4355 } 4356 4357 /* 4358 * MPSBGMAP is queried separately because only recent firmwares support 4359 * it as a parameter and we don't want the compound query above to fail 4360 * on older firmwares. 4361 */ 4362 param[0] = FW_PARAM_DEV(MPSBGMAP); 4363 val[0] = 0; 4364 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); 4365 if (rc == 0) 4366 sc->params.mps_bg_map = val[0]; 4367 else 4368 sc->params.mps_bg_map = 0; 4369 4370 /* 4371 * Determine whether the firmware supports the filter2 work request. 4372 * This is queried separately for the same reason as MPSBGMAP above. 
4373 */ 4374 param[0] = FW_PARAM_DEV(FILTER2_WR); 4375 val[0] = 0; 4376 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); 4377 if (rc == 0) 4378 sc->params.filter2_wr_support = val[0] != 0; 4379 else 4380 sc->params.filter2_wr_support = 0; 4381 4382 /* 4383 * Find out whether we're allowed to use the ULPTX MEMWRITE DSGL. 4384 * This is queried separately for the same reason as other params above. 4385 */ 4386 param[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); 4387 val[0] = 0; 4388 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); 4389 if (rc == 0) 4390 sc->params.ulptx_memwrite_dsgl = val[0] != 0; 4391 else 4392 sc->params.ulptx_memwrite_dsgl = false; 4393 4394 /* FW_RI_FR_NSMR_TPTE_WR support */ 4395 param[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR); 4396 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); 4397 if (rc == 0) 4398 sc->params.fr_nsmr_tpte_wr_support = val[0] != 0; 4399 else 4400 sc->params.fr_nsmr_tpte_wr_support = false; 4401 4402 param[0] = FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR); 4403 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); 4404 if (rc == 0) 4405 sc->params.max_pkts_per_eth_tx_pkts_wr = val[0]; 4406 else 4407 sc->params.max_pkts_per_eth_tx_pkts_wr = 15; 4408 4409 /* get capabilites */ 4410 bzero(&caps, sizeof(caps)); 4411 caps.op_to_write = htobe32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 4412 F_FW_CMD_REQUEST | F_FW_CMD_READ); 4413 caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps)); 4414 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps); 4415 if (rc != 0) { 4416 device_printf(sc->dev, 4417 "failed to get card capabilities: %d.\n", rc); 4418 return (rc); 4419 } 4420 4421#define READ_CAPS(x) do { \ 4422 sc->x = htobe16(caps.x); \ 4423} while (0) 4424 READ_CAPS(nbmcaps); 4425 READ_CAPS(linkcaps); 4426 READ_CAPS(switchcaps); 4427 READ_CAPS(niccaps); 4428 READ_CAPS(toecaps); 4429 READ_CAPS(rdmacaps); 4430 READ_CAPS(cryptocaps); 4431 READ_CAPS(iscsicaps); 4432 READ_CAPS(fcoecaps); 4433 4434 if (sc->niccaps & FW_CAPS_CONFIG_NIC_HASHFILTER) { 4435 MPASS(chip_id(sc) > CHELSIO_T4); 4436 MPASS(sc->toecaps == 0); 4437 sc->toecaps = 0; 4438 4439 param[0] = FW_PARAM_DEV(NTID); 4440 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val); 4441 if (rc != 0) { 4442 device_printf(sc->dev, 4443 "failed to query HASHFILTER parameters: %d.\n", rc); 4444 return (rc); 4445 } 4446 sc->tids.ntids = val[0]; 4447 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) { 4448 MPASS(sc->tids.ntids >= sc->tids.nhpftids); 4449 sc->tids.ntids -= sc->tids.nhpftids; 4450 } 4451 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 4452 sc->params.hash_filter = 1; 4453 } 4454 if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) { 4455 param[0] = FW_PARAM_PFVF(ETHOFLD_START); 4456 param[1] = FW_PARAM_PFVF(ETHOFLD_END); 4457 param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 4458 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val); 4459 if (rc != 0) { 4460 device_printf(sc->dev, 4461 "failed to query NIC parameters: %d.\n", rc); 4462 return (rc); 4463 } 4464 if ((int)val[1] > (int)val[0]) { 4465 sc->tids.etid_base = val[0]; 4466 sc->tids.etid_end = val[1]; 4467 sc->tids.netids = val[1] - val[0] + 1; 4468 sc->params.eo_wr_cred = val[2]; 4469 sc->params.ethoffload = 1; 4470 } 4471 } 4472 if (sc->toecaps) { 4473 /* query offload-related parameters */ 4474 param[0] = FW_PARAM_DEV(NTID); 4475 param[1] = FW_PARAM_PFVF(SERVER_START); 4476 param[2] = FW_PARAM_PFVF(SERVER_END); 4477 param[3] = FW_PARAM_PFVF(TDDP_START); 4478 param[4] = FW_PARAM_PFVF(TDDP_END); 4479 
param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 4480 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 4481 if (rc != 0) { 4482 device_printf(sc->dev, 4483 "failed to query TOE parameters: %d.\n", rc); 4484 return (rc); 4485 } 4486 sc->tids.ntids = val[0]; 4487 if (sc->params.fw_vers < FW_VERSION32(1, 20, 5, 0)) { 4488 MPASS(sc->tids.ntids >= sc->tids.nhpftids); 4489 sc->tids.ntids -= sc->tids.nhpftids; 4490 } 4491 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 4492 if ((int)val[2] > (int)val[1]) { 4493 sc->tids.stid_base = val[1]; 4494 sc->tids.nstids = val[2] - val[1] + 1; 4495 } 4496 sc->vres.ddp.start = val[3]; 4497 sc->vres.ddp.size = val[4] - val[3] + 1; 4498 sc->params.ofldq_wr_cred = val[5]; 4499 sc->params.offload = 1; 4500 } else { 4501 /* 4502 * The firmware attempts memfree TOE configuration for -SO cards 4503 * and will report toecaps=0 if it runs out of resources (this 4504 * depends on the config file). It may not report 0 for other 4505 * capabilities dependent on the TOE in this case. Set them to 4506 * 0 here so that the driver doesn't bother tracking resources 4507 * that will never be used. 4508 */ 4509 sc->iscsicaps = 0; 4510 sc->rdmacaps = 0; 4511 } 4512 if (sc->rdmacaps) { 4513 param[0] = FW_PARAM_PFVF(STAG_START); 4514 param[1] = FW_PARAM_PFVF(STAG_END); 4515 param[2] = FW_PARAM_PFVF(RQ_START); 4516 param[3] = FW_PARAM_PFVF(RQ_END); 4517 param[4] = FW_PARAM_PFVF(PBL_START); 4518 param[5] = FW_PARAM_PFVF(PBL_END); 4519 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 4520 if (rc != 0) { 4521 device_printf(sc->dev, 4522 "failed to query RDMA parameters(1): %d.\n", rc); 4523 return (rc); 4524 } 4525 sc->vres.stag.start = val[0]; 4526 sc->vres.stag.size = val[1] - val[0] + 1; 4527 sc->vres.rq.start = val[2]; 4528 sc->vres.rq.size = val[3] - val[2] + 1; 4529 sc->vres.pbl.start = val[4]; 4530 sc->vres.pbl.size = val[5] - val[4] + 1; 4531 4532 param[0] = FW_PARAM_PFVF(SQRQ_START); 4533 param[1] = FW_PARAM_PFVF(SQRQ_END); 4534 param[2] = FW_PARAM_PFVF(CQ_START); 4535 param[3] = FW_PARAM_PFVF(CQ_END); 4536 param[4] = FW_PARAM_PFVF(OCQ_START); 4537 param[5] = FW_PARAM_PFVF(OCQ_END); 4538 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 4539 if (rc != 0) { 4540 device_printf(sc->dev, 4541 "failed to query RDMA parameters(2): %d.\n", rc); 4542 return (rc); 4543 } 4544 sc->vres.qp.start = val[0]; 4545 sc->vres.qp.size = val[1] - val[0] + 1; 4546 sc->vres.cq.start = val[2]; 4547 sc->vres.cq.size = val[3] - val[2] + 1; 4548 sc->vres.ocq.start = val[4]; 4549 sc->vres.ocq.size = val[5] - val[4] + 1; 4550 4551 param[0] = FW_PARAM_PFVF(SRQ_START); 4552 param[1] = FW_PARAM_PFVF(SRQ_END); 4553 param[2] = FW_PARAM_DEV(MAXORDIRD_QP); 4554 param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER); 4555 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val); 4556 if (rc != 0) { 4557 device_printf(sc->dev, 4558 "failed to query RDMA parameters(3): %d.\n", rc); 4559 return (rc); 4560 } 4561 sc->vres.srq.start = val[0]; 4562 sc->vres.srq.size = val[1] - val[0] + 1; 4563 sc->params.max_ordird_qp = val[2]; 4564 sc->params.max_ird_adapter = val[3]; 4565 } 4566 if (sc->iscsicaps) { 4567 param[0] = FW_PARAM_PFVF(ISCSI_START); 4568 param[1] = FW_PARAM_PFVF(ISCSI_END); 4569 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 4570 if (rc != 0) { 4571 device_printf(sc->dev, 4572 "failed to query iSCSI parameters: %d.\n", rc); 4573 return (rc); 4574 } 4575 sc->vres.iscsi.start = val[0]; 4576 sc->vres.iscsi.size = val[1] - val[0] + 1; 4577 } 4578 if 
(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) { 4579 param[0] = FW_PARAM_PFVF(TLS_START); 4580 param[1] = FW_PARAM_PFVF(TLS_END); 4581 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 4582 if (rc != 0) { 4583 device_printf(sc->dev, 4584 "failed to query TLS parameters: %d.\n", rc); 4585 return (rc); 4586 } 4587 sc->vres.key.start = val[0]; 4588 sc->vres.key.size = val[1] - val[0] + 1; 4589 } 4590 4591 t4_init_sge_params(sc); 4592 4593 /* 4594 * We've got the params we wanted to query via the firmware. Now grab 4595 * some others directly from the chip. 4596 */ 4597 rc = t4_read_chip_settings(sc); 4598 4599 return (rc); 4600} 4601 4602static int 4603set_params__post_init(struct adapter *sc) 4604{ 4605 uint32_t mask, param, val; 4606#ifdef TCP_OFFLOAD 4607 int i, v, shift; 4608#endif 4609 4610 /* ask for encapsulated CPLs */ 4611 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 4612 val = 1; 4613 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 4614 4615 /* Enable 32b port caps if the firmware supports it. */ 4616 param = FW_PARAM_PFVF(PORT_CAPS32); 4617 val = 1; 4618 if (t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val) == 0) 4619 sc->params.port_caps32 = 1; 4620 4621 /* Let filter + maskhash steer to a part of the VI's RSS region. */ 4622 val = 1 << (G_MASKSIZE(t4_read_reg(sc, A_TP_RSS_CONFIG_TNL)) - 1); 4623 t4_set_reg_field(sc, A_TP_RSS_CONFIG_TNL, V_MASKFILTER(M_MASKFILTER), 4624 V_MASKFILTER(val - 1)); 4625 4626 mask = F_DROPERRORANY | F_DROPERRORMAC | F_DROPERRORIPVER | 4627 F_DROPERRORFRAG | F_DROPERRORATTACK | F_DROPERRORETHHDRLEN | 4628 F_DROPERRORIPHDRLEN | F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN | 4629 F_DROPERRORTCPOPT | F_DROPERRORCSUMIP | F_DROPERRORCSUM; 4630 val = 0; 4631 if (chip_id(sc) < CHELSIO_T6 && t4_attack_filter != 0) { 4632 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_ATTACKFILTERENABLE, 4633 F_ATTACKFILTERENABLE); 4634 val |= F_DROPERRORATTACK; 4635 } 4636 if (t4_drop_ip_fragments != 0) { 4637 t4_set_reg_field(sc, A_TP_GLOBAL_CONFIG, F_FRAGMENTDROP, 4638 F_FRAGMENTDROP); 4639 val |= F_DROPERRORFRAG; 4640 } 4641 if (t4_drop_pkts_with_l2_errors != 0) 4642 val |= F_DROPERRORMAC | F_DROPERRORETHHDRLEN; 4643 if (t4_drop_pkts_with_l3_errors != 0) { 4644 val |= F_DROPERRORIPVER | F_DROPERRORIPHDRLEN | 4645 F_DROPERRORCSUMIP; 4646 } 4647 if (t4_drop_pkts_with_l4_errors != 0) { 4648 val |= F_DROPERRORTCPHDRLEN | F_DROPERRORPKTLEN | 4649 F_DROPERRORTCPOPT | F_DROPERRORCSUM; 4650 } 4651 t4_set_reg_field(sc, A_TP_ERR_CONFIG, mask, val); 4652 4653#ifdef TCP_OFFLOAD 4654 /* 4655 * Override the TOE timers with user provided tunables. This is not the 4656 * recommended way to change the timers (the firmware config file is) so 4657 * these tunables are not documented. 4658 * 4659 * All the timer tunables are in microseconds.
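 *
 * A hedged loader.conf example (tunable names inferred from the
 * variables below; values are microseconds):
 *
 *	hw.cxgbe.toe.keepalive_idle="7200000000"	# 2 hours
 *	hw.cxgbe.toe.rexmt_min="1000000"		# 1 second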
4660 */ 4661 if (t4_toe_keepalive_idle != 0) { 4662 v = us_to_tcp_ticks(sc, t4_toe_keepalive_idle); 4663 v &= M_KEEPALIVEIDLE; 4664 t4_set_reg_field(sc, A_TP_KEEP_IDLE, 4665 V_KEEPALIVEIDLE(M_KEEPALIVEIDLE), V_KEEPALIVEIDLE(v)); 4666 } 4667 if (t4_toe_keepalive_interval != 0) { 4668 v = us_to_tcp_ticks(sc, t4_toe_keepalive_interval); 4669 v &= M_KEEPALIVEINTVL; 4670 t4_set_reg_field(sc, A_TP_KEEP_INTVL, 4671 V_KEEPALIVEINTVL(M_KEEPALIVEINTVL), V_KEEPALIVEINTVL(v)); 4672 } 4673 if (t4_toe_keepalive_count != 0) { 4674 v = t4_toe_keepalive_count & M_KEEPALIVEMAXR2; 4675 t4_set_reg_field(sc, A_TP_SHIFT_CNT, 4676 V_KEEPALIVEMAXR1(M_KEEPALIVEMAXR1) | 4677 V_KEEPALIVEMAXR2(M_KEEPALIVEMAXR2), 4678 V_KEEPALIVEMAXR1(1) | V_KEEPALIVEMAXR2(v)); 4679 } 4680 if (t4_toe_rexmt_min != 0) { 4681 v = us_to_tcp_ticks(sc, t4_toe_rexmt_min); 4682 v &= M_RXTMIN; 4683 t4_set_reg_field(sc, A_TP_RXT_MIN, 4684 V_RXTMIN(M_RXTMIN), V_RXTMIN(v)); 4685 } 4686 if (t4_toe_rexmt_max != 0) { 4687 v = us_to_tcp_ticks(sc, t4_toe_rexmt_max); 4688 v &= M_RXTMAX; 4689 t4_set_reg_field(sc, A_TP_RXT_MAX, 4690 V_RXTMAX(M_RXTMAX), V_RXTMAX(v)); 4691 } 4692 if (t4_toe_rexmt_count != 0) { 4693 v = t4_toe_rexmt_count & M_RXTSHIFTMAXR2; 4694 t4_set_reg_field(sc, A_TP_SHIFT_CNT, 4695 V_RXTSHIFTMAXR1(M_RXTSHIFTMAXR1) | 4696 V_RXTSHIFTMAXR2(M_RXTSHIFTMAXR2), 4697 V_RXTSHIFTMAXR1(1) | V_RXTSHIFTMAXR2(v)); 4698 } 4699 for (i = 0; i < nitems(t4_toe_rexmt_backoff); i++) { 4700 if (t4_toe_rexmt_backoff[i] != -1) { 4701 v = t4_toe_rexmt_backoff[i] & M_TIMERBACKOFFINDEX0; 4702 shift = (i & 3) << 3; 4703 t4_set_reg_field(sc, A_TP_TCP_BACKOFF_REG0 + (i & ~3), 4704 M_TIMERBACKOFFINDEX0 << shift, v << shift); 4705 } 4706 } 4707#endif 4708 return (0); 4709} 4710 4711#undef FW_PARAM_PFVF 4712#undef FW_PARAM_DEV 4713 4714static void 4715t4_set_desc(struct adapter *sc) 4716{ 4717 char buf[128]; 4718 struct adapter_params *p = &sc->params; 4719 4720 snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id); 4721 4722 device_set_desc_copy(sc->dev, buf); 4723} 4724 4725static inline void 4726ifmedia_add4(struct ifmedia *ifm, int m) 4727{ 4728 4729 ifmedia_add(ifm, m, 0, NULL); 4730 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE, 0, NULL); 4731 ifmedia_add(ifm, m | IFM_ETH_RXPAUSE, 0, NULL); 4732 ifmedia_add(ifm, m | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, 0, NULL); 4733} 4734 4735/* 4736 * This is the selected media, which is not quite the same as the active media. 4737 * The media line in ifconfig is "media: Ethernet selected (active)" if selected 4738 * and active are not the same, and "media: Ethernet selected" otherwise. 4739 */ 4740static void 4741set_current_media(struct port_info *pi) 4742{ 4743 struct link_config *lc; 4744 struct ifmedia *ifm; 4745 int mword; 4746 u_int speed; 4747 4748 PORT_LOCK_ASSERT_OWNED(pi); 4749 4750 /* Leave current media alone if it's already set to IFM_NONE. 
*/ 4751 ifm = &pi->media; 4752 if (ifm->ifm_cur != NULL && 4753 IFM_SUBTYPE(ifm->ifm_cur->ifm_media) == IFM_NONE) 4754 return; 4755 4756 lc = &pi->link_cfg; 4757 if (lc->requested_aneg != AUTONEG_DISABLE && 4758 lc->pcaps & FW_PORT_CAP32_ANEG) { 4759 ifmedia_set(ifm, IFM_ETHER | IFM_AUTO); 4760 return; 4761 } 4762 mword = IFM_ETHER | IFM_FDX; 4763 if (lc->requested_fc & PAUSE_TX) 4764 mword |= IFM_ETH_TXPAUSE; 4765 if (lc->requested_fc & PAUSE_RX) 4766 mword |= IFM_ETH_RXPAUSE; 4767 if (lc->requested_speed == 0) 4768 speed = port_top_speed(pi) * 1000; /* Gbps -> Mbps */ 4769 else 4770 speed = lc->requested_speed; 4771 mword |= port_mword(pi, speed_to_fwcap(speed)); 4772 ifmedia_set(ifm, mword); 4773} 4774 4775/* 4776 * Returns true if the ifmedia list for the port cannot change. 4777 */ 4778static bool 4779fixed_ifmedia(struct port_info *pi) 4780{ 4781 4782 return (pi->port_type == FW_PORT_TYPE_BT_SGMII || 4783 pi->port_type == FW_PORT_TYPE_BT_XFI || 4784 pi->port_type == FW_PORT_TYPE_BT_XAUI || 4785 pi->port_type == FW_PORT_TYPE_KX4 || 4786 pi->port_type == FW_PORT_TYPE_KX || 4787 pi->port_type == FW_PORT_TYPE_KR || 4788 pi->port_type == FW_PORT_TYPE_BP_AP || 4789 pi->port_type == FW_PORT_TYPE_BP4_AP || 4790 pi->port_type == FW_PORT_TYPE_BP40_BA || 4791 pi->port_type == FW_PORT_TYPE_KR4_100G || 4792 pi->port_type == FW_PORT_TYPE_KR_SFP28 || 4793 pi->port_type == FW_PORT_TYPE_KR_XLAUI); 4794} 4795 4796static void 4797build_medialist(struct port_info *pi) 4798{ 4799 uint32_t ss, speed; 4800 int unknown, mword, bit; 4801 struct link_config *lc; 4802 struct ifmedia *ifm; 4803 4804 PORT_LOCK_ASSERT_OWNED(pi); 4805 4806 if (pi->flags & FIXED_IFMEDIA) 4807 return; 4808 4809 /* 4810 * Rebuild the ifmedia list. 4811 */ 4812 ifm = &pi->media; 4813 ifmedia_removeall(ifm); 4814 lc = &pi->link_cfg; 4815 ss = G_FW_PORT_CAP32_SPEED(lc->pcaps); /* Supported Speeds */ 4816 if (__predict_false(ss == 0)) { /* not supposed to happen. */ 4817 MPASS(ss != 0); 4818no_media: 4819 MPASS(LIST_EMPTY(&ifm->ifm_list)); 4820 ifmedia_add(ifm, IFM_ETHER | IFM_NONE, 0, NULL); 4821 ifmedia_set(ifm, IFM_ETHER | IFM_NONE); 4822 return; 4823 } 4824 4825 unknown = 0; 4826 for (bit = S_FW_PORT_CAP32_SPEED; bit < fls(ss); bit++) { 4827 speed = 1 << bit; 4828 MPASS(speed & M_FW_PORT_CAP32_SPEED); 4829 if (ss & speed) { 4830 mword = port_mword(pi, speed); 4831 if (mword == IFM_NONE) { 4832 goto no_media; 4833 } else if (mword == IFM_UNKNOWN) 4834 unknown++; 4835 else 4836 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | mword); 4837 } 4838 } 4839 if (unknown > 0) /* Add one unknown for all unknown media types. */ 4840 ifmedia_add4(ifm, IFM_ETHER | IFM_FDX | IFM_UNKNOWN); 4841 if (lc->pcaps & FW_PORT_CAP32_ANEG) 4842 ifmedia_add(ifm, IFM_ETHER | IFM_AUTO, 0, NULL); 4843 4844 set_current_media(pi); 4845} 4846 4847/* 4848 * Initialize the requested fields in the link config based on driver tunables. 
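 *
 * Tunable semantics as encoded below: t4_autoneg 0 disables and 1
 * enables autonegotiation, any other value leaves it to the firmware
 * (AUTONEG_AUTO); t4_fec with FEC_AUTO set (including -1) requests
 * automatic FEC, 0 requests none, and other values are masked down to
 * the known FEC bits.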
4849 */ 4850static void 4851init_link_config(struct port_info *pi) 4852{ 4853 struct link_config *lc = &pi->link_cfg; 4854 4855 PORT_LOCK_ASSERT_OWNED(pi); 4856 4857 lc->requested_speed = 0; 4858 4859 if (t4_autoneg == 0) 4860 lc->requested_aneg = AUTONEG_DISABLE; 4861 else if (t4_autoneg == 1) 4862 lc->requested_aneg = AUTONEG_ENABLE; 4863 else 4864 lc->requested_aneg = AUTONEG_AUTO; 4865 4866 lc->requested_fc = t4_pause_settings & (PAUSE_TX | PAUSE_RX | 4867 PAUSE_AUTONEG); 4868 4869 if (t4_fec & FEC_AUTO) 4870 lc->requested_fec = FEC_AUTO; 4871 else if (t4_fec == 0) 4872 lc->requested_fec = FEC_NONE; 4873 else { 4874 /* -1 is handled by the FEC_AUTO block above and not here. */ 4875 lc->requested_fec = t4_fec & 4876 (FEC_RS | FEC_BASER_RS | FEC_NONE | FEC_MODULE); 4877 if (lc->requested_fec == 0) 4878 lc->requested_fec = FEC_AUTO; 4879 } 4880} 4881 4882/* 4883 * Makes sure that all requested settings comply with what's supported by the 4884 * port. Returns the number of settings that were invalid and had to be fixed. 4885 */ 4886static int 4887fixup_link_config(struct port_info *pi) 4888{ 4889 int n = 0; 4890 struct link_config *lc = &pi->link_cfg; 4891 uint32_t fwspeed; 4892 4893 PORT_LOCK_ASSERT_OWNED(pi); 4894 4895 /* Speed (when not autonegotiating) */ 4896 if (lc->requested_speed != 0) { 4897 fwspeed = speed_to_fwcap(lc->requested_speed); 4898 if ((fwspeed & lc->pcaps) == 0) { 4899 n++; 4900 lc->requested_speed = 0; 4901 } 4902 } 4903 4904 /* Link autonegotiation */ 4905 MPASS(lc->requested_aneg == AUTONEG_ENABLE || 4906 lc->requested_aneg == AUTONEG_DISABLE || 4907 lc->requested_aneg == AUTONEG_AUTO); 4908 if (lc->requested_aneg == AUTONEG_ENABLE && 4909 !(lc->pcaps & FW_PORT_CAP32_ANEG)) { 4910 n++; 4911 lc->requested_aneg = AUTONEG_AUTO; 4912 } 4913 4914 /* Flow control */ 4915 MPASS((lc->requested_fc & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) == 0); 4916 if (lc->requested_fc & PAUSE_TX && 4917 !(lc->pcaps & FW_PORT_CAP32_FC_TX)) { 4918 n++; 4919 lc->requested_fc &= ~PAUSE_TX; 4920 } 4921 if (lc->requested_fc & PAUSE_RX && 4922 !(lc->pcaps & FW_PORT_CAP32_FC_RX)) { 4923 n++; 4924 lc->requested_fc &= ~PAUSE_RX; 4925 } 4926 if (!(lc->requested_fc & PAUSE_AUTONEG) && 4927 !(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)) { 4928 n++; 4929 lc->requested_fc |= PAUSE_AUTONEG; 4930 } 4931 4932 /* FEC */ 4933 if ((lc->requested_fec & FEC_RS && 4934 !(lc->pcaps & FW_PORT_CAP32_FEC_RS)) || 4935 (lc->requested_fec & FEC_BASER_RS && 4936 !(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS))) { 4937 n++; 4938 lc->requested_fec = FEC_AUTO; 4939 } 4940 4941 return (n); 4942} 4943 4944/* 4945 * Apply the requested L1 settings, which are expected to be valid, to the 4946 * hardware. 
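 *
 * A typical caller sanitizes the request first and holds the port lock
 * throughout; a sketch that mirrors cxgbe_init_synchronized:
 *
 *	PORT_LOCK(pi);
 *	fixup_link_config(pi);
 *	build_medialist(pi);
 *	apply_link_config(pi);
 *	PORT_UNLOCK(pi);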
4947 */ 4948static int 4949apply_link_config(struct port_info *pi) 4950{ 4951 struct adapter *sc = pi->adapter; 4952 struct link_config *lc = &pi->link_cfg; 4953 int rc; 4954 4955#ifdef INVARIANTS 4956 ASSERT_SYNCHRONIZED_OP(sc); 4957 PORT_LOCK_ASSERT_OWNED(pi); 4958 4959 if (lc->requested_aneg == AUTONEG_ENABLE) 4960 MPASS(lc->pcaps & FW_PORT_CAP32_ANEG); 4961 if (!(lc->requested_fc & PAUSE_AUTONEG)) 4962 MPASS(lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE); 4963 if (lc->requested_fc & PAUSE_TX) 4964 MPASS(lc->pcaps & FW_PORT_CAP32_FC_TX); 4965 if (lc->requested_fc & PAUSE_RX) 4966 MPASS(lc->pcaps & FW_PORT_CAP32_FC_RX); 4967 if (lc->requested_fec & FEC_RS) 4968 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_RS); 4969 if (lc->requested_fec & FEC_BASER_RS) 4970 MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS); 4971#endif 4972 rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc); 4973 if (rc != 0) { 4974 /* Don't complain if the VF driver gets back an EPERM. */ 4975 if (!(sc->flags & IS_VF) || rc != FW_EPERM) 4976 device_printf(pi->dev, "l1cfg failed: %d\n", rc); 4977 } else { 4978 /* 4979 * An L1_CFG will almost always result in a link-change event if 4980 * the link is up, and the driver will refresh the actual 4981 * fec/fc/etc. when the notification is processed. If the link 4982 * is down then the actual settings are meaningless. 4983 * 4984 * This takes care of the case where a change in the L1 settings 4985 * may not result in a notification. 4986 */ 4987 if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG)) 4988 lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX); 4989 } 4990 return (rc); 4991} 4992 4993#define FW_MAC_EXACT_CHUNK 7 4994 4995/* 4996 * Program the port's XGMAC based on parameters in ifnet. The caller also 4997 * indicates which parameters should be programmed (the rest are left alone). 4998 */ 4999int 5000update_mac_settings(struct ifnet *ifp, int flags) 5001{ 5002 int rc = 0; 5003 struct vi_info *vi = ifp->if_softc; 5004 struct port_info *pi = vi->pi; 5005 struct adapter *sc = pi->adapter; 5006 int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1; 5007 uint8_t match_all_mac[ETHER_ADDR_LEN] = {0}; 5008 5009 ASSERT_SYNCHRONIZED_OP(sc); 5010 KASSERT(flags, ("%s: not told what to update.", __func__)); 5011 5012 if (flags & XGMAC_MTU) 5013 mtu = ifp->if_mtu; 5014 5015 if (flags & XGMAC_PROMISC) 5016 promisc = ifp->if_flags & IFF_PROMISC ? 1 : 0; 5017 5018 if (flags & XGMAC_ALLMULTI) 5019 allmulti = ifp->if_flags & IFF_ALLMULTI ? 1 : 0; 5020 5021 if (flags & XGMAC_VLANEX) 5022 vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 
1 : 0; 5023 5024 if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) { 5025 rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc, 5026 allmulti, 1, vlanex, false); 5027 if (rc) { 5028 if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, 5029 rc); 5030 return (rc); 5031 } 5032 } 5033 5034 if (flags & XGMAC_UCADDR) { 5035 uint8_t ucaddr[ETHER_ADDR_LEN]; 5036 5037 bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr)); 5038 rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt, 5039 ucaddr, true, &vi->smt_idx); 5040 if (rc < 0) { 5041 rc = -rc; 5042 if_printf(ifp, "change_mac failed: %d\n", rc); 5043 return (rc); 5044 } else { 5045 vi->xact_addr_filt = rc; 5046 rc = 0; 5047 } 5048 } 5049 5050 if (flags & XGMAC_MCADDRS) { 5051 const uint8_t *mcaddr[FW_MAC_EXACT_CHUNK]; 5052 int del = 1; 5053 uint64_t hash = 0; 5054 struct ifmultiaddr *ifma; 5055 int i = 0, j; 5056 5057 if_maddr_rlock(ifp); 5058 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 5059 if (ifma->ifma_addr->sa_family != AF_LINK) 5060 continue; 5061 mcaddr[i] = 5062 LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 5063 MPASS(ETHER_IS_MULTICAST(mcaddr[i])); 5064 i++; 5065 5066 if (i == FW_MAC_EXACT_CHUNK) { 5067 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, 5068 del, i, mcaddr, NULL, &hash, 0); 5069 if (rc < 0) { 5070 rc = -rc; 5071 for (j = 0; j < i; j++) { 5072 if_printf(ifp, 5073 "failed to add mc address" 5074 " %02x:%02x:%02x:" 5075 "%02x:%02x:%02x rc=%d\n", 5076 mcaddr[j][0], mcaddr[j][1], 5077 mcaddr[j][2], mcaddr[j][3], 5078 mcaddr[j][4], mcaddr[j][5], 5079 rc); 5080 } 5081 goto mcfail; 5082 } 5083 del = 0; 5084 i = 0; 5085 } 5086 } 5087 if (i > 0) { 5088 rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i, 5089 mcaddr, NULL, &hash, 0); 5090 if (rc < 0) { 5091 rc = -rc; 5092 for (j = 0; j < i; j++) { 5093 if_printf(ifp, 5094 "failed to add mcast address" 5095 " %02x:%02x:%02x:" 5096 "%02x:%02x:%02x rc=%d\n", 5097 mcaddr[j][0], mcaddr[j][1], 5098 mcaddr[j][2], mcaddr[j][3], 5099 mcaddr[j][4], mcaddr[j][5], 5100 rc); 5101 } 5102 goto mcfail; 5103 } 5104 del = 0; 5105 } 5106 5107 rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0); 5108 if (rc != 0) 5109 if_printf(ifp, "failed to set mcast address hash: %d\n", 5110 rc); 5111 if (del == 0) { 5112 /* We clobbered the VXLAN entry if there was one. */ 5113 pi->vxlan_tcam_entry = false; 5114 } 5115mcfail: 5116 if_maddr_runlock(ifp); 5117 } 5118 5119 if (IS_MAIN_VI(vi) && sc->vxlan_refcount > 0 && 5120 pi->vxlan_tcam_entry == false) { 5121 rc = t4_alloc_raw_mac_filt(sc, vi->viid, match_all_mac, 5122 match_all_mac, sc->rawf_base + pi->port_id, 1, pi->port_id, 5123 true); 5124 if (rc < 0) { 5125 rc = -rc; 5126 if_printf(ifp, "failed to add VXLAN TCAM entry: %d.\n", 5127 rc); 5128 } else { 5129 MPASS(rc == sc->rawf_base + pi->port_id); 5130 rc = 0; 5131 pi->vxlan_tcam_entry = true; 5132 } 5133 } 5134 5135 return (rc); 5136} 5137 5138/* 5139 * {begin|end}_synchronized_op must be called from the same thread. 5140 */ 5141int 5142begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags, 5143 char *wmesg) 5144{ 5145 int rc, pri; 5146 5147#ifdef WITNESS 5148 /* the caller thinks it's ok to sleep, but is it really? 
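 * WITNESS_WARN below catches a caller that passes SLEEP_OK while
 * holding a non-sleepable lock.  A minimal sketch of the expected
 * pairing (the wmesg "t4xyz" is made up):
 *
 *	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4xyz");
 *	if (rc == 0) {
 *		... work with the adapter or VI ...
 *		end_synchronized_op(sc, 0);
 *	}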
*/ 5149 if (flags & SLEEP_OK) 5150 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, 5151 "begin_synchronized_op"); 5152#endif 5153 5154 if (INTR_OK) 5155 pri = PCATCH; 5156 else 5157 pri = 0; 5158 5159 ADAPTER_LOCK(sc); 5160 for (;;) { 5161 5162 if (vi && IS_DOOMED(vi)) { 5163 rc = ENXIO; 5164 goto done; 5165 } 5166 5167 if (!IS_BUSY(sc)) { 5168 rc = 0; 5169 break; 5170 } 5171 5172 if (!(flags & SLEEP_OK)) { 5173 rc = EBUSY; 5174 goto done; 5175 } 5176 5177 if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) { 5178 rc = EINTR; 5179 goto done; 5180 } 5181 } 5182 5183 KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); 5184 SET_BUSY(sc); 5185#ifdef INVARIANTS 5186 sc->last_op = wmesg; 5187 sc->last_op_thr = curthread; 5188 sc->last_op_flags = flags; 5189#endif 5190 5191done: 5192 if (!(flags & HOLD_LOCK) || rc) 5193 ADAPTER_UNLOCK(sc); 5194 5195 return (rc); 5196} 5197 5198/* 5199 * Tell if_ioctl and if_init that the VI is going away. This is 5200 * special variant of begin_synchronized_op and must be paired with a 5201 * call to end_synchronized_op. 5202 */ 5203void 5204doom_vi(struct adapter *sc, struct vi_info *vi) 5205{ 5206 5207 ADAPTER_LOCK(sc); 5208 SET_DOOMED(vi); 5209 wakeup(&sc->flags); 5210 while (IS_BUSY(sc)) 5211 mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0); 5212 SET_BUSY(sc); 5213#ifdef INVARIANTS 5214 sc->last_op = "t4detach"; 5215 sc->last_op_thr = curthread; 5216 sc->last_op_flags = 0; 5217#endif 5218 ADAPTER_UNLOCK(sc); 5219} 5220 5221/* 5222 * {begin|end}_synchronized_op must be called from the same thread. 5223 */ 5224void 5225end_synchronized_op(struct adapter *sc, int flags) 5226{ 5227 5228 if (flags & LOCK_HELD) 5229 ADAPTER_LOCK_ASSERT_OWNED(sc); 5230 else 5231 ADAPTER_LOCK(sc); 5232 5233 KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); 5234 CLR_BUSY(sc); 5235 wakeup(&sc->flags); 5236 ADAPTER_UNLOCK(sc); 5237} 5238 5239static int 5240cxgbe_init_synchronized(struct vi_info *vi) 5241{ 5242 struct port_info *pi = vi->pi; 5243 struct adapter *sc = pi->adapter; 5244 struct ifnet *ifp = vi->ifp; 5245 int rc = 0, i; 5246 struct sge_txq *txq; 5247 5248 ASSERT_SYNCHRONIZED_OP(sc); 5249 5250 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 5251 return (0); /* already running */ 5252 5253 if (!(sc->flags & FULL_INIT_DONE) && 5254 ((rc = adapter_full_init(sc)) != 0)) 5255 return (rc); /* error message displayed already */ 5256 5257 if (!(vi->flags & VI_INIT_DONE) && 5258 ((rc = vi_full_init(vi)) != 0)) 5259 return (rc); /* error message displayed already */ 5260 5261 rc = update_mac_settings(ifp, XGMAC_ALL); 5262 if (rc) 5263 goto done; /* error message displayed already */ 5264 5265 PORT_LOCK(pi); 5266 if (pi->up_vis == 0) { 5267 t4_update_port_info(pi); 5268 fixup_link_config(pi); 5269 build_medialist(pi); 5270 apply_link_config(pi); 5271 } 5272 5273 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true); 5274 if (rc != 0) { 5275 if_printf(ifp, "enable_vi failed: %d\n", rc); 5276 PORT_UNLOCK(pi); 5277 goto done; 5278 } 5279 5280 /* 5281 * Can't fail from this point onwards. Review cxgbe_uninit_synchronized 5282 * if this changes. 5283 */ 5284 5285 for_each_txq(vi, i, txq) { 5286 TXQ_LOCK(txq); 5287 txq->eq.flags |= EQ_ENABLED; 5288 TXQ_UNLOCK(txq); 5289 } 5290 5291 /* 5292 * The first iq of the first port to come up is used for tracing. 5293 */ 5294 if (sc->traceq < 0 && IS_MAIN_VI(vi)) { 5295 sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id; 5296 t4_write_reg(sc, is_t4(sc) ? 
A_MPS_TRC_RSS_CONTROL : 5297 A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) | 5298 V_QUEUENUMBER(sc->traceq)); 5299 pi->flags |= HAS_TRACEQ; 5300 } 5301 5302 /* all ok */ 5303 pi->up_vis++; 5304 ifp->if_drv_flags |= IFF_DRV_RUNNING; 5305 5306 if (pi->nvi > 1 || sc->flags & IS_VF) 5307 callout_reset(&vi->tick, hz, vi_tick, vi); 5308 else 5309 callout_reset(&pi->tick, hz, cxgbe_tick, pi); 5310 if (pi->link_cfg.link_ok) 5311 t4_os_link_changed(pi); 5312 PORT_UNLOCK(pi); 5313done: 5314 if (rc != 0) 5315 cxgbe_uninit_synchronized(vi); 5316 5317 return (rc); 5318} 5319 5320/* 5321 * Idempotent. 5322 */ 5323static int 5324cxgbe_uninit_synchronized(struct vi_info *vi) 5325{ 5326 struct port_info *pi = vi->pi; 5327 struct adapter *sc = pi->adapter; 5328 struct ifnet *ifp = vi->ifp; 5329 int rc, i; 5330 struct sge_txq *txq; 5331 5332 ASSERT_SYNCHRONIZED_OP(sc); 5333 5334 if (!(vi->flags & VI_INIT_DONE)) { 5335 if (__predict_false(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 5336 KASSERT(0, ("uninited VI is running")); 5337 if_printf(ifp, "uninited VI with running ifnet. " 5338 "vi->flags 0x%016lx, if_flags 0x%08x, " 5339 "if_drv_flags 0x%08x\n", vi->flags, ifp->if_flags, 5340 ifp->if_drv_flags); 5341 } 5342 return (0); 5343 } 5344 5345 /* 5346 * Disable the VI so that all its data in either direction is discarded 5347 * by the MPS. Leave everything else (the queues, interrupts, and 1Hz 5348 * tick) intact as the TP can deliver negative advice or data that it's 5349 * holding in its RAM (for an offloaded connection) even after the VI is 5350 * disabled. 5351 */ 5352 rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false); 5353 if (rc) { 5354 if_printf(ifp, "disable_vi failed: %d\n", rc); 5355 return (rc); 5356 } 5357 5358 for_each_txq(vi, i, txq) { 5359 TXQ_LOCK(txq); 5360 txq->eq.flags &= ~EQ_ENABLED; 5361 TXQ_UNLOCK(txq); 5362 } 5363 5364 PORT_LOCK(pi); 5365 if (pi->nvi > 1 || sc->flags & IS_VF) 5366 callout_stop(&vi->tick); 5367 else 5368 callout_stop(&pi->tick); 5369 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 5370 PORT_UNLOCK(pi); 5371 return (0); 5372 } 5373 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 5374 pi->up_vis--; 5375 if (pi->up_vis > 0) { 5376 PORT_UNLOCK(pi); 5377 return (0); 5378 } 5379 5380 pi->link_cfg.link_ok = false; 5381 pi->link_cfg.speed = 0; 5382 pi->link_cfg.link_down_rc = 255; 5383 t4_os_link_changed(pi); 5384 PORT_UNLOCK(pi); 5385 5386 return (0); 5387} 5388 5389/* 5390 * It is ok for this function to fail midway and return right away. t4_detach 5391 * will walk the entire sc->irq list and clean up whatever is valid. 5392 */ 5393int 5394t4_setup_intr_handlers(struct adapter *sc) 5395{ 5396 int rc, rid, p, q, v; 5397 char s[8]; 5398 struct irq *irq; 5399 struct port_info *pi; 5400 struct vi_info *vi; 5401 struct sge *sge = &sc->sge; 5402 struct sge_rxq *rxq; 5403#ifdef TCP_OFFLOAD 5404 struct sge_ofld_rxq *ofld_rxq; 5405#endif 5406#ifdef DEV_NETMAP 5407 struct sge_nm_rxq *nm_rxq; 5408#endif 5409#ifdef RSS 5410 int nbuckets = rss_getnumbuckets(); 5411#endif 5412 5413 /* 5414 * Setup interrupts. 5415 */ 5416 irq = &sc->irq[0]; 5417 rid = sc->intr_type == INTR_INTX ? 0 : 1; 5418 if (forwarding_intr_to_fwq(sc)) 5419 return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all")); 5420 5421 /* Multiple interrupts. 
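 * Vector layout: on a PF, vector 0 is the error interrupt and vector 1
 * the firmware event queue; on a VF there is no error interrupt so the
 * fwq comes first.  Everything after that is one vector per rx queue
 * (NIC, netmap, and/or offload), described as "<port><vi><queue>" by
 * the snprintf calls below.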
*/ 5422 if (sc->flags & IS_VF) 5423 KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports, 5424 ("%s: too few intr.", __func__)); 5425 else 5426 KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports, 5427 ("%s: too few intr.", __func__)); 5428 5429 /* The first one is always error intr on PFs */ 5430 if (!(sc->flags & IS_VF)) { 5431 rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err"); 5432 if (rc != 0) 5433 return (rc); 5434 irq++; 5435 rid++; 5436 } 5437 5438 /* The second one is always the firmware event queue (first on VFs) */ 5439 rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt"); 5440 if (rc != 0) 5441 return (rc); 5442 irq++; 5443 rid++; 5444 5445 for_each_port(sc, p) { 5446 pi = sc->port[p]; 5447 for_each_vi(pi, v, vi) { 5448 vi->first_intr = rid - 1; 5449 5450 if (vi->nnmrxq > 0) { 5451 int n = max(vi->nrxq, vi->nnmrxq); 5452 5453 rxq = &sge->rxq[vi->first_rxq]; 5454#ifdef DEV_NETMAP 5455 nm_rxq = &sge->nm_rxq[vi->first_nm_rxq]; 5456#endif 5457 for (q = 0; q < n; q++) { 5458 snprintf(s, sizeof(s), "%x%c%x", p, 5459 'a' + v, q); 5460 if (q < vi->nrxq) 5461 irq->rxq = rxq++; 5462#ifdef DEV_NETMAP 5463 if (q < vi->nnmrxq) 5464 irq->nm_rxq = nm_rxq++; 5465 5466 if (irq->nm_rxq != NULL && 5467 irq->rxq == NULL) { 5468 /* Netmap rx only */ 5469 rc = t4_alloc_irq(sc, irq, rid, 5470 t4_nm_intr, irq->nm_rxq, s); 5471 } 5472 if (irq->nm_rxq != NULL && 5473 irq->rxq != NULL) { 5474 /* NIC and Netmap rx */ 5475 rc = t4_alloc_irq(sc, irq, rid, 5476 t4_vi_intr, irq, s); 5477 } 5478#endif 5479 if (irq->rxq != NULL && 5480 irq->nm_rxq == NULL) { 5481 /* NIC rx only */ 5482 rc = t4_alloc_irq(sc, irq, rid, 5483 t4_intr, irq->rxq, s); 5484 } 5485 if (rc != 0) 5486 return (rc); 5487#ifdef RSS 5488 if (q < vi->nrxq) { 5489 bus_bind_intr(sc->dev, irq->res, 5490 rss_getcpu(q % nbuckets)); 5491 } 5492#endif 5493 irq++; 5494 rid++; 5495 vi->nintr++; 5496 } 5497 } else { 5498 for_each_rxq(vi, q, rxq) { 5499 snprintf(s, sizeof(s), "%x%c%x", p, 5500 'a' + v, q); 5501 rc = t4_alloc_irq(sc, irq, rid, 5502 t4_intr, rxq, s); 5503 if (rc != 0) 5504 return (rc); 5505#ifdef RSS 5506 bus_bind_intr(sc->dev, irq->res, 5507 rss_getcpu(q % nbuckets)); 5508#endif 5509 irq++; 5510 rid++; 5511 vi->nintr++; 5512 } 5513 } 5514#ifdef TCP_OFFLOAD 5515 for_each_ofld_rxq(vi, q, ofld_rxq) { 5516 snprintf(s, sizeof(s), "%x%c%x", p, 'A' + v, q); 5517 rc = t4_alloc_irq(sc, irq, rid, t4_intr, 5518 ofld_rxq, s); 5519 if (rc != 0) 5520 return (rc); 5521 irq++; 5522 rid++; 5523 vi->nintr++; 5524 } 5525#endif 5526 } 5527 } 5528 MPASS(irq == &sc->irq[sc->intr_count]); 5529 5530 return (0); 5531} 5532 5533int 5534adapter_full_init(struct adapter *sc) 5535{ 5536 int rc, i; 5537#ifdef RSS 5538 uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 5539 uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)]; 5540#endif 5541 5542 ASSERT_SYNCHRONIZED_OP(sc); 5543 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 5544 KASSERT((sc->flags & FULL_INIT_DONE) == 0, 5545 ("%s: FULL_INIT_DONE already", __func__)); 5546 5547 /* 5548 * queues that belong to the adapter (not any particular port). 
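 * That is, the firmware event queue and the control queues; the per-VI
 * tx/rx queues are set up later, in vi_full_init().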
5549 */ 5550 rc = t4_setup_adapter_queues(sc); 5551 if (rc != 0) 5552 goto done; 5553 5554 for (i = 0; i < nitems(sc->tq); i++) { 5555 sc->tq[i] = taskqueue_create("t4 taskq", M_NOWAIT, 5556 taskqueue_thread_enqueue, &sc->tq[i]); 5557 if (sc->tq[i] == NULL) { 5558 device_printf(sc->dev, 5559 "failed to allocate task queue %d\n", i); 5560 rc = ENOMEM; 5561 goto done; 5562 } 5563 taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d", 5564 device_get_nameunit(sc->dev), i); 5565 } 5566#ifdef RSS 5567 MPASS(RSS_KEYSIZE == 40); 5568 rss_getkey((void *)&raw_rss_key[0]); 5569 for (i = 0; i < nitems(rss_key); i++) { 5570 rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]); 5571 } 5572 t4_write_rss_key(sc, &rss_key[0], -1, 1); 5573#endif 5574 5575 if (!(sc->flags & IS_VF)) 5576 t4_intr_enable(sc); 5577 sc->flags |= FULL_INIT_DONE; 5578done: 5579 if (rc != 0) 5580 adapter_full_uninit(sc); 5581 5582 return (rc); 5583} 5584 5585int 5586adapter_full_uninit(struct adapter *sc) 5587{ 5588 int i; 5589 5590 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 5591 5592 t4_teardown_adapter_queues(sc); 5593 5594 for (i = 0; i < nitems(sc->tq) && sc->tq[i]; i++) { 5595 taskqueue_free(sc->tq[i]); 5596 sc->tq[i] = NULL; 5597 } 5598 5599 sc->flags &= ~FULL_INIT_DONE; 5600 5601 return (0); 5602} 5603 5604#ifdef RSS 5605#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \ 5606 RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \ 5607 RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \ 5608 RSS_HASHTYPE_RSS_UDP_IPV6) 5609 5610/* Translates kernel hash types to hardware. */ 5611static int 5612hashconfig_to_hashen(int hashconfig) 5613{ 5614 int hashen = 0; 5615 5616 if (hashconfig & RSS_HASHTYPE_RSS_IPV4) 5617 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN; 5618 if (hashconfig & RSS_HASHTYPE_RSS_IPV6) 5619 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN; 5620 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) { 5621 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 5622 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 5623 } 5624 if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) { 5625 hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN | 5626 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 5627 } 5628 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4) 5629 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN; 5630 if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6) 5631 hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN; 5632 5633 return (hashen); 5634} 5635 5636/* Translates hardware hash types to kernel. */ 5637static int 5638hashen_to_hashconfig(int hashen) 5639{ 5640 int hashconfig = 0; 5641 5642 if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) { 5643 /* 5644 * If UDP hashing was enabled it must have been enabled for 5645 * either IPv4 or IPv6 (inclusive or). Enabling UDP without 5646 * enabling any 4-tuple hash is nonsense configuration. 
5647 */ 5648 MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 5649 F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)); 5650 5651 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 5652 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4; 5653 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 5654 hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6; 5655 } 5656 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 5657 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4; 5658 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 5659 hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6; 5660 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 5661 hashconfig |= RSS_HASHTYPE_RSS_IPV4; 5662 if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 5663 hashconfig |= RSS_HASHTYPE_RSS_IPV6; 5664 5665 return (hashconfig); 5666} 5667#endif 5668 5669int 5670vi_full_init(struct vi_info *vi) 5671{ 5672 struct adapter *sc = vi->adapter; 5673 struct ifnet *ifp = vi->ifp; 5674 uint16_t *rss; 5675 struct sge_rxq *rxq; 5676 int rc, i, j; 5677#ifdef RSS 5678 int nbuckets = rss_getnumbuckets(); 5679 int hashconfig = rss_gethashconfig(); 5680 int extra; 5681#endif 5682 5683 ASSERT_SYNCHRONIZED_OP(sc); 5684 KASSERT((vi->flags & VI_INIT_DONE) == 0, 5685 ("%s: VI_INIT_DONE already", __func__)); 5686 5687 sysctl_ctx_init(&vi->ctx); 5688 vi->flags |= VI_SYSCTL_CTX; 5689 5690 /* 5691 * Allocate tx/rx/fl queues for this VI. 5692 */ 5693 rc = t4_setup_vi_queues(vi); 5694 if (rc != 0) 5695 goto done; /* error message displayed already */ 5696 5697 /* 5698 * Setup RSS for this VI. Save a copy of the RSS table for later use. 5699 */ 5700 if (vi->nrxq > vi->rss_size) { 5701 if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); " 5702 "some queues will never receive traffic.\n", vi->nrxq, 5703 vi->rss_size); 5704 } else if (vi->rss_size % vi->nrxq) { 5705 if_printf(ifp, "nrxq (%d), hw RSS table size (%d); " 5706 "expect uneven traffic distribution.\n", vi->nrxq, 5707 vi->rss_size); 5708 } 5709#ifdef RSS 5710 if (vi->nrxq != nbuckets) { 5711 if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);" 5712 "performance will be impacted.\n", vi->nrxq, nbuckets); 5713 } 5714#endif 5715 rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK); 5716 for (i = 0; i < vi->rss_size;) { 5717#ifdef RSS 5718 j = rss_get_indirection_to_bucket(i); 5719 j %= vi->nrxq; 5720 rxq = &sc->sge.rxq[vi->first_rxq + j]; 5721 rss[i++] = rxq->iq.abs_id; 5722#else 5723 for_each_rxq(vi, j, rxq) { 5724 rss[i++] = rxq->iq.abs_id; 5725 if (i == vi->rss_size) 5726 break; 5727 } 5728#endif 5729 } 5730 5731 rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 5732 vi->rss_size); 5733 if (rc != 0) { 5734 free(rss, M_CXGBE); 5735 if_printf(ifp, "rss_config failed: %d\n", rc); 5736 goto done; 5737 } 5738 5739#ifdef RSS 5740 vi->hashen = hashconfig_to_hashen(hashconfig); 5741 5742 /* 5743 * We may have had to enable some hashes even though the global config 5744 * wants them disabled. This is a potential problem that must be 5745 * reported to the user. 5746 */ 5747 extra = hashen_to_hashconfig(vi->hashen) ^ hashconfig; 5748 5749 /* 5750 * If we consider only the supported hash types, then the enabled hashes 5751 * are a superset of the requested hashes. In other words, there cannot 5752 * be any supported hash that was requested but not enabled, but there 5753 * can be hashes that were not requested but had to be enabled. 
5754 */ 5755 extra &= SUPPORTED_RSS_HASHTYPES; 5756 MPASS((extra & hashconfig) == 0); 5757 5758 if (extra) { 5759 if_printf(ifp, 5760 "global RSS config (0x%x) cannot be accommodated.\n", 5761 hashconfig); 5762 } 5763 if (extra & RSS_HASHTYPE_RSS_IPV4) 5764 if_printf(ifp, "IPv4 2-tuple hashing forced on.\n"); 5765 if (extra & RSS_HASHTYPE_RSS_TCP_IPV4) 5766 if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n"); 5767 if (extra & RSS_HASHTYPE_RSS_IPV6) 5768 if_printf(ifp, "IPv6 2-tuple hashing forced on.\n"); 5769 if (extra & RSS_HASHTYPE_RSS_TCP_IPV6) 5770 if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n"); 5771 if (extra & RSS_HASHTYPE_RSS_UDP_IPV4) 5772 if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n"); 5773 if (extra & RSS_HASHTYPE_RSS_UDP_IPV6) 5774 if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n"); 5775#else 5776 vi->hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN | 5777 F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN | 5778 F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN | 5779 F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN; 5780#endif 5781 rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, rss[0], 0, 0); 5782 if (rc != 0) { 5783 free(rss, M_CXGBE); 5784 if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc); 5785 goto done; 5786 } 5787 5788 vi->rss = rss; 5789 vi->flags |= VI_INIT_DONE; 5790done: 5791 if (rc != 0) 5792 vi_full_uninit(vi); 5793 5794 return (rc); 5795} 5796 5797/* 5798 * Idempotent. 5799 */ 5800int 5801vi_full_uninit(struct vi_info *vi) 5802{ 5803 struct port_info *pi = vi->pi; 5804 struct adapter *sc = pi->adapter; 5805 int i; 5806 struct sge_rxq *rxq; 5807 struct sge_txq *txq; 5808#ifdef TCP_OFFLOAD 5809 struct sge_ofld_rxq *ofld_rxq; 5810#endif 5811#if defined(TCP_OFFLOAD) || defined(RATELIMIT) 5812 struct sge_wrq *ofld_txq; 5813#endif 5814 5815 if (vi->flags & VI_INIT_DONE) { 5816 5817 /* Need to quiesce queues. */ 5818 5819 /* XXX: Only for the first VI? */ 5820 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) 5821 quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 5822 5823 for_each_txq(vi, i, txq) { 5824 quiesce_txq(sc, txq); 5825 } 5826 5827#if defined(TCP_OFFLOAD) || defined(RATELIMIT) 5828 for_each_ofld_txq(vi, i, ofld_txq) { 5829 quiesce_wrq(sc, ofld_txq); 5830 } 5831#endif 5832 5833 for_each_rxq(vi, i, rxq) { 5834 quiesce_iq(sc, &rxq->iq); 5835 quiesce_fl(sc, &rxq->fl); 5836 } 5837 5838#ifdef TCP_OFFLOAD 5839 for_each_ofld_rxq(vi, i, ofld_rxq) { 5840 quiesce_iq(sc, &ofld_rxq->iq); 5841 quiesce_fl(sc, &ofld_rxq->fl); 5842 } 5843#endif 5844 free(vi->rss, M_CXGBE); 5845 free(vi->nm_rss, M_CXGBE); 5846 } 5847 5848 t4_teardown_vi_queues(vi); 5849 vi->flags &= ~VI_INIT_DONE; 5850 5851 return (0); 5852} 5853 5854static void 5855quiesce_txq(struct adapter *sc, struct sge_txq *txq) 5856{ 5857 struct sge_eq *eq = &txq->eq; 5858 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 5859 5860 (void) sc; /* unused */ 5861 5862#ifdef INVARIANTS 5863 TXQ_LOCK(txq); 5864 MPASS((eq->flags & EQ_ENABLED) == 0); 5865 TXQ_UNLOCK(txq); 5866#endif 5867 5868 /* Wait for the mp_ring to empty. */ 5869 while (!mp_ring_is_idle(txq->r)) { 5870 mp_ring_check_drainage(txq->r, 4096); 5871 pause("rquiesce", 1); 5872 } 5873 5874 /* Then wait for the hardware to finish. */ 5875 while (spg->cidx != htobe16(eq->pidx)) 5876 pause("equiesce", 1); 5877 5878 /* Finally, wait for the driver to reclaim all descriptors. 
*/ 5879 while (eq->cidx != eq->pidx) 5880 pause("dquiesce", 1); 5881} 5882 5883static void 5884quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq) 5885{ 5886 5887 /* XXXTX */ 5888} 5889 5890static void 5891quiesce_iq(struct adapter *sc, struct sge_iq *iq) 5892{ 5893 (void) sc; /* unused */ 5894 5895 /* Synchronize with the interrupt handler */ 5896 while (!atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_DISABLED)) 5897 pause("iqfree", 1); 5898} 5899 5900static void 5901quiesce_fl(struct adapter *sc, struct sge_fl *fl) 5902{ 5903 mtx_lock(&sc->sfl_lock); 5904 FL_LOCK(fl); 5905 fl->flags |= FL_DOOMED; 5906 FL_UNLOCK(fl); 5907 callout_stop(&sc->sfl_callout); 5908 mtx_unlock(&sc->sfl_lock); 5909 5910 KASSERT((fl->flags & FL_STARVING) == 0, 5911 ("%s: still starving", __func__)); 5912} 5913 5914static int 5915t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid, 5916 driver_intr_t *handler, void *arg, char *name) 5917{ 5918 int rc; 5919 5920 irq->rid = rid; 5921 irq->res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->rid, 5922 RF_SHAREABLE | RF_ACTIVE); 5923 if (irq->res == NULL) { 5924 device_printf(sc->dev, 5925 "failed to allocate IRQ for rid %d, name %s.\n", rid, name); 5926 return (ENOMEM); 5927 } 5928 5929 rc = bus_setup_intr(sc->dev, irq->res, INTR_MPSAFE | INTR_TYPE_NET, 5930 NULL, handler, arg, &irq->tag); 5931 if (rc != 0) { 5932 device_printf(sc->dev, 5933 "failed to setup interrupt for rid %d, name %s: %d\n", 5934 rid, name, rc); 5935 } else if (name) 5936 bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name); 5937 5938 return (rc); 5939} 5940 5941static int 5942t4_free_irq(struct adapter *sc, struct irq *irq) 5943{ 5944 if (irq->tag) 5945 bus_teardown_intr(sc->dev, irq->res, irq->tag); 5946 if (irq->res) 5947 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->rid, irq->res); 5948 5949 bzero(irq, sizeof(*irq)); 5950 5951 return (0); 5952} 5953 5954static void 5955get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf) 5956{ 5957 5958 regs->version = chip_id(sc) | chip_rev(sc) << 10; 5959 t4_get_regs(sc, buf, regs->len); 5960} 5961 5962#define A_PL_INDIR_CMD 0x1f8 5963 5964#define S_PL_AUTOINC 31 5965#define M_PL_AUTOINC 0x1U 5966#define V_PL_AUTOINC(x) ((x) << S_PL_AUTOINC) 5967#define G_PL_AUTOINC(x) (((x) >> S_PL_AUTOINC) & M_PL_AUTOINC) 5968 5969#define S_PL_VFID 20 5970#define M_PL_VFID 0xffU 5971#define V_PL_VFID(x) ((x) << S_PL_VFID) 5972#define G_PL_VFID(x) (((x) >> S_PL_VFID) & M_PL_VFID) 5973 5974#define S_PL_ADDR 0 5975#define M_PL_ADDR 0xfffffU 5976#define V_PL_ADDR(x) ((x) << S_PL_ADDR) 5977#define G_PL_ADDR(x) (((x) >> S_PL_ADDR) & M_PL_ADDR) 5978 5979#define A_PL_INDIR_DATA 0x1fc 5980 5981static uint64_t 5982read_vf_stat(struct adapter *sc, u_int vin, int reg) 5983{ 5984 u32 stats[2]; 5985 5986 mtx_assert(&sc->reg_lock, MA_OWNED); 5987 if (sc->flags & IS_VF) { 5988 stats[0] = t4_read_reg(sc, VF_MPS_REG(reg)); 5989 stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4)); 5990 } else { 5991 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | 5992 V_PL_VFID(vin) | V_PL_ADDR(VF_MPS_REG(reg))); 5993 stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA); 5994 stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA); 5995 } 5996 return (((uint64_t)stats[1]) << 32 | stats[0]); 5997} 5998 5999static void 6000t4_get_vi_stats(struct adapter *sc, u_int vin, struct fw_vi_stats_vf *stats) 6001{ 6002 6003#define GET_STAT(name) \ 6004 read_vf_stat(sc, vin, A_MPS_VF_STAT_##name##_L) 6005 6006 stats->tx_bcast_bytes = GET_STAT(TX_VF_BCAST_BYTES); 6007 stats->tx_bcast_frames = 
GET_STAT(TX_VF_BCAST_FRAMES); 6008 stats->tx_mcast_bytes = GET_STAT(TX_VF_MCAST_BYTES); 6009 stats->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES); 6010 stats->tx_ucast_bytes = GET_STAT(TX_VF_UCAST_BYTES); 6011 stats->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES); 6012 stats->tx_drop_frames = GET_STAT(TX_VF_DROP_FRAMES); 6013 stats->tx_offload_bytes = GET_STAT(TX_VF_OFFLOAD_BYTES); 6014 stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES); 6015 stats->rx_bcast_bytes = GET_STAT(RX_VF_BCAST_BYTES); 6016 stats->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES); 6017 stats->rx_mcast_bytes = GET_STAT(RX_VF_MCAST_BYTES); 6018 stats->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES); 6019 stats->rx_ucast_bytes = GET_STAT(RX_VF_UCAST_BYTES); 6020 stats->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES); 6021 stats->rx_err_frames = GET_STAT(RX_VF_ERR_FRAMES); 6022 6023#undef GET_STAT 6024} 6025 6026static void 6027t4_clr_vi_stats(struct adapter *sc, u_int vin) 6028{ 6029 int reg; 6030 6031 t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) | V_PL_VFID(vin) | 6032 V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L))); 6033 for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L; 6034 reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4) 6035 t4_write_reg(sc, A_PL_INDIR_DATA, 0); 6036} 6037 6038static void 6039vi_refresh_stats(struct adapter *sc, struct vi_info *vi) 6040{ 6041 struct timeval tv; 6042 const struct timeval interval = {0, 250000}; /* 250ms */ 6043 6044 if (!(vi->flags & VI_INIT_DONE)) 6045 return; 6046 6047 getmicrotime(&tv); 6048 timevalsub(&tv, &interval); 6049 if (timevalcmp(&tv, &vi->last_refreshed, <)) 6050 return; 6051 6052 mtx_lock(&sc->reg_lock); 6053 t4_get_vi_stats(sc, vi->vin, &vi->stats); 6054 getmicrotime(&vi->last_refreshed); 6055 mtx_unlock(&sc->reg_lock); 6056} 6057 6058static void 6059cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi) 6060{ 6061 u_int i, v, tnl_cong_drops, chan_map; 6062 struct timeval tv; 6063 const struct timeval interval = {0, 250000}; /* 250ms */ 6064 6065 getmicrotime(&tv); 6066 timevalsub(&tv, &interval); 6067 if (timevalcmp(&tv, &pi->last_refreshed, <)) 6068 return; 6069 6070 tnl_cong_drops = 0; 6071 t4_get_port_stats(sc, pi->tx_chan, &pi->stats); 6072 chan_map = pi->rx_e_chan_map; 6073 while (chan_map) { 6074 i = ffs(chan_map) - 1; 6075 mtx_lock(&sc->reg_lock); 6076 t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 1, 6077 A_TP_MIB_TNL_CNG_DROP_0 + i); 6078 mtx_unlock(&sc->reg_lock); 6079 tnl_cong_drops += v; 6080 chan_map &= ~(1 << i); 6081 } 6082 pi->tnl_cong_drops = tnl_cong_drops; 6083 getmicrotime(&pi->last_refreshed); 6084} 6085 6086static void 6087cxgbe_tick(void *arg) 6088{ 6089 struct port_info *pi = arg; 6090 struct adapter *sc = pi->adapter; 6091 6092 PORT_LOCK_ASSERT_OWNED(pi); 6093 cxgbe_refresh_stats(sc, pi); 6094 6095 callout_schedule(&pi->tick, hz); 6096} 6097 6098void 6099vi_tick(void *arg) 6100{ 6101 struct vi_info *vi = arg; 6102 struct adapter *sc = vi->adapter; 6103 6104 vi_refresh_stats(sc, vi); 6105 6106 callout_schedule(&vi->tick, hz); 6107} 6108 6109/* 6110 * Should match fw_caps_config_<foo> enums in t4fw_interface.h 6111 */ 6112static char *caps_decoder[] = { 6113 "\20\001IPMI\002NCSI", /* 0: NBM */ 6114 "\20\001PPP\002QFC\003DCBX", /* 1: link */ 6115 "\20\001INGRESS\002EGRESS", /* 2: switch */ 6116 "\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */ 6117 "\006HASHFILTER\007ETHOFLD", 6118 "\20\001TOE", /* 4: TOE */ 6119 "\20\001RDDP\002RDMAC", /* 5: RDMA */ 6120 "\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */ 
6121 "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD" 6122 "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD" 6123 "\007T10DIF" 6124 "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD", 6125 "\20\001LOOKASIDE\002TLSKEYS", /* 7: Crypto */ 6126 "\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */ 6127 "\004PO_INITIATOR\005PO_TARGET", 6128}; 6129 6130void 6131t4_sysctls(struct adapter *sc) 6132{ 6133 struct sysctl_ctx_list *ctx; 6134 struct sysctl_oid *oid; 6135 struct sysctl_oid_list *children, *c0; 6136 static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"}; 6137 6138 ctx = device_get_sysctl_ctx(sc->dev); 6139 6140 /* 6141 * dev.t4nex.X. 6142 */ 6143 oid = device_get_sysctl_tree(sc->dev); 6144 c0 = children = SYSCTL_CHILDREN(oid); 6145 6146 sc->sc_do_rxcopy = 1; 6147 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW, 6148 &sc->sc_do_rxcopy, 1, "Do RX copy of small frames"); 6149 6150 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL, 6151 sc->params.nports, "# of ports"); 6152 6153 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells", 6154 CTLTYPE_STRING | CTLFLAG_RD, doorbells, (uintptr_t)&sc->doorbells, 6155 sysctl_bitfield_8b, "A", "available doorbells"); 6156 6157 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL, 6158 sc->params.vpd.cclk, "core clock frequency (in KHz)"); 6159 6160 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers", 6161 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val, 6162 sizeof(sc->params.sge.timer_val), sysctl_int_array, "A", 6163 "interrupt holdoff timer values (us)"); 6164 6165 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts", 6166 CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val, 6167 sizeof(sc->params.sge.counter_val), sysctl_int_array, "A", 6168 "interrupt holdoff packet counter values"); 6169 6170 t4_sge_sysctls(sc, ctx, children); 6171 6172 sc->lro_timeout = 100; 6173 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW, 6174 &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)"); 6175 6176 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW, 6177 &sc->debug_flags, 0, "flags to enable runtime debugging"); 6178 6179 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version", 6180 CTLFLAG_RD, sc->tp_version, 0, "TP microcode version"); 6181 6182 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", 6183 CTLFLAG_RD, sc->fw_version, 0, "firmware version"); 6184 6185 if (sc->flags & IS_VF) 6186 return; 6187 6188 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD, 6189 NULL, chip_rev(sc), "chip hardware revision"); 6190 6191 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn", 6192 CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number"); 6193 6194 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn", 6195 CTLFLAG_RD, sc->params.vpd.pn, 0, "part number"); 6196 6197 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec", 6198 CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change"); 6199 6200 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "md_version", 6201 CTLFLAG_RD, sc->params.vpd.md, 0, "manufacturing diags version"); 6202 6203 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na", 6204 CTLFLAG_RD, sc->params.vpd.na, 0, "network address"); 6205 6206 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD, 6207 sc->er_version, 0, "expansion ROM version"); 6208 6209 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD, 6210 sc->bs_version, 0, "bootstrap firmware version"); 6211 6212 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD, 6213 
NULL, sc->params.scfg_vers, "serial config version"); 6214 6215 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD, 6216 NULL, sc->params.vpd_vers, "VPD version"); 6217 6218 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf", 6219 CTLFLAG_RD, sc->cfg_file, 0, "configuration file"); 6220 6221 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL, 6222 sc->cfcsum, "config file checksum"); 6223 6224#define SYSCTL_CAP(name, n, text) \ 6225 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \ 6226 CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], (uintptr_t)&sc->name, \ 6227 sysctl_bitfield_16b, "A", "available " text " capabilities") 6228 6229 SYSCTL_CAP(nbmcaps, 0, "NBM"); 6230 SYSCTL_CAP(linkcaps, 1, "link"); 6231 SYSCTL_CAP(switchcaps, 2, "switch"); 6232 SYSCTL_CAP(niccaps, 3, "NIC"); 6233 SYSCTL_CAP(toecaps, 4, "TCP offload"); 6234 SYSCTL_CAP(rdmacaps, 5, "RDMA"); 6235 SYSCTL_CAP(iscsicaps, 6, "iSCSI"); 6236 SYSCTL_CAP(cryptocaps, 7, "crypto"); 6237 SYSCTL_CAP(fcoecaps, 8, "FCoE"); 6238#undef SYSCTL_CAP 6239 6240 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD, 6241 NULL, sc->tids.nftids, "number of filters"); 6242 6243 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT | 6244 CTLFLAG_RD, sc, 0, sysctl_temperature, "I", 6245 "chip temperature (in Celsius)"); 6246 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reset_sensor", CTLTYPE_INT | 6247 CTLFLAG_RW, sc, 0, sysctl_reset_sensor, "I", 6248 "reset the chip's temperature sensor."); 6249 6250 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg", CTLTYPE_STRING | 6251 CTLFLAG_RD, sc, 0, sysctl_loadavg, "A", 6252 "microprocessor load averages (debug firmwares only)"); 6253 6254 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd", CTLTYPE_INT | 6255 CTLFLAG_RD, sc, 0, sysctl_vdd, "I", "core Vdd (in mV)"); 6256 6257 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "local_cpus", 6258 CTLTYPE_STRING | CTLFLAG_RD, sc, LOCAL_CPUS, 6259 sysctl_cpus, "A", "local CPUs"); 6260 6261 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_cpus", 6262 CTLTYPE_STRING | CTLFLAG_RD, sc, INTR_CPUS, 6263 sysctl_cpus, "A", "preferred CPUs for interrupts"); 6264 6265 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "swintr", CTLFLAG_RW, 6266 &sc->swintr, 0, "software triggered interrupts"); 6267 6268 /* 6269 * dev.t4nex.X.misc. Marked CTLFLAG_SKIP to avoid information overload. 
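	 * These OIDs are skipped in a plain sysctl listing; query them by
	 * name, e.g. "sysctl dev.t4nex.0.misc.devlog".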
6270 */ 6271 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "misc", 6272 CTLFLAG_RD | CTLFLAG_SKIP, NULL, 6273 "logs and miscellaneous information"); 6274 children = SYSCTL_CHILDREN(oid); 6275 6276 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cctrl", 6277 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6278 sysctl_cctrl, "A", "congestion control"); 6279 6280 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0", 6281 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6282 sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)"); 6283 6284 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1", 6285 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, 6286 sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)"); 6287 6288 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp", 6289 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, 6290 sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)"); 6291 6292 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0", 6293 CTLTYPE_STRING | CTLFLAG_RD, sc, 3, 6294 sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)"); 6295 6296 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1", 6297 CTLTYPE_STRING | CTLFLAG_RD, sc, 4, 6298 sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)"); 6299 6300 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi", 6301 CTLTYPE_STRING | CTLFLAG_RD, sc, 5, 6302 sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)"); 6303 6304 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la", 6305 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_cim_la, 6306 "A", "CIM logic analyzer"); 6307 6308 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la", 6309 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6310 sysctl_cim_ma_la, "A", "CIM MA logic analyzer"); 6311 6312 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0", 6313 CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ, 6314 sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)"); 6315 6316 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1", 6317 CTLTYPE_STRING | CTLFLAG_RD, sc, 1 + CIM_NUM_IBQ, 6318 sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)"); 6319 6320 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2", 6321 CTLTYPE_STRING | CTLFLAG_RD, sc, 2 + CIM_NUM_IBQ, 6322 sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)"); 6323 6324 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3", 6325 CTLTYPE_STRING | CTLFLAG_RD, sc, 3 + CIM_NUM_IBQ, 6326 sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)"); 6327 6328 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge", 6329 CTLTYPE_STRING | CTLFLAG_RD, sc, 4 + CIM_NUM_IBQ, 6330 sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)"); 6331 6332 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi", 6333 CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ, 6334 sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)"); 6335 6336 if (chip_id(sc) > CHELSIO_T4) { 6337 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx", 6338 CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ, 6339 sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)"); 6340 6341 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx", 6342 CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ, 6343 sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)"); 6344 } 6345 6346 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la", 6347 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6348 sysctl_cim_pif_la, "A", "CIM PIF logic analyzer"); 6349 6350 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg", 6351 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6352 sysctl_cim_qcfg, "A", "CIM queue configuration"); 6353 6354 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats", 6355 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6356 sysctl_cpl_stats, "A", "CPL statistics"); 6357 6358 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, 
"ddp_stats", 6359 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6360 sysctl_ddp_stats, "A", "non-TCP DDP statistics"); 6361 6362 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog", 6363 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6364 sysctl_devlog, "A", "firmware's device log"); 6365 6366 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats", 6367 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6368 sysctl_fcoe_stats, "A", "FCoE statistics"); 6369 6370 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "hw_sched", 6371 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6372 sysctl_hw_sched, "A", "hardware scheduler "); 6373 6374 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "l2t", 6375 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6376 sysctl_l2t, "A", "hardware L2 table"); 6377 6378 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "smt", 6379 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6380 sysctl_smt, "A", "hardware source MAC table"); 6381 6382#ifdef INET6 6383 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "clip", 6384 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6385 sysctl_clip, "A", "active CLIP table entries"); 6386#endif 6387 6388 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "lb_stats", 6389 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6390 sysctl_lb_stats, "A", "loopback statistics"); 6391 6392 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "meminfo", 6393 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6394 sysctl_meminfo, "A", "memory regions"); 6395 6396 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam", 6397 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6398 chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6, 6399 "A", "MPS TCAM entries"); 6400 6401 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus", 6402 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6403 sysctl_path_mtus, "A", "path MTUs"); 6404 6405 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pm_stats", 6406 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6407 sysctl_pm_stats, "A", "PM statistics"); 6408 6409 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_stats", 6410 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6411 sysctl_rdma_stats, "A", "RDMA statistics"); 6412 6413 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tcp_stats", 6414 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6415 sysctl_tcp_stats, "A", "TCP statistics"); 6416 6417 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tids", 6418 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6419 sysctl_tids, "A", "TID information"); 6420 6421 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_err_stats", 6422 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6423 sysctl_tp_err_stats, "A", "TP error statistics"); 6424 6425 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask", 6426 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I", 6427 "TP logic analyzer event capture mask"); 6428 6429 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la", 6430 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6431 sysctl_tp_la, "A", "TP logic analyzer"); 6432 6433 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate", 6434 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6435 sysctl_tx_rate, "A", "Tx rate"); 6436 6437 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la", 6438 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6439 sysctl_ulprx_la, "A", "ULPRX logic analyzer"); 6440 6441 if (chip_id(sc) >= CHELSIO_T5) { 6442 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats", 6443 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, 6444 sysctl_wcwr_stats, "A", "write combined work requests"); 6445 } 6446 6447#ifdef TCP_OFFLOAD 6448 if (is_offload(sc)) { 6449 int i; 6450 char s[4]; 6451 6452 /* 6453 * dev.t4nex.X.toe. 
6454 */ 6455 oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "toe", CTLFLAG_RD, 6456 NULL, "TOE parameters"); 6457 children = SYSCTL_CHILDREN(oid); 6458 6459 sc->tt.cong_algorithm = -1; 6460 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_algorithm", 6461 CTLFLAG_RW, &sc->tt.cong_algorithm, 0, "congestion control " 6462 "(-1 = default, 0 = reno, 1 = tahoe, 2 = newreno, " 6463 "3 = highspeed)"); 6464 6465 sc->tt.sndbuf = -1; 6466 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sndbuf", CTLFLAG_RW, 6467 &sc->tt.sndbuf, 0, "hardware send buffer"); 6468 6469 sc->tt.ddp = 0; 6470 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp", CTLFLAG_RW, 6471 &sc->tt.ddp, 0, "DDP allowed"); 6472 6473 sc->tt.rx_coalesce = -1; 6474 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce", 6475 CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing"); 6476 6477 sc->tt.tls = 0; 6478 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls", CTLTYPE_INT | 6479 CTLFLAG_RW, sc, 0, sysctl_tls, "I", "Inline TLS allowed"); 6480 6481 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls_rx_ports", 6482 CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tls_rx_ports, 6483 "I", "TCP ports that use inline TLS+TOE RX"); 6484 6485 sc->tt.tx_align = -1; 6486 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align", 6487 CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload"); 6488 6489 sc->tt.tx_zcopy = 0; 6490 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy", 6491 CTLFLAG_RW, &sc->tt.tx_zcopy, 0, 6492 "Enable zero-copy aio_write(2)"); 6493 6494 sc->tt.cop_managed_offloading = !!t4_cop_managed_offloading; 6495 SYSCTL_ADD_INT(ctx, children, OID_AUTO, 6496 "cop_managed_offloading", CTLFLAG_RW, 6497 &sc->tt.cop_managed_offloading, 0, 6498 "COP (Connection Offload Policy) controls all TOE offload"); 6499 6500 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick", 6501 CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A", 6502 "TP timer tick (us)"); 6503 6504 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick", 6505 CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A", 6506 "TCP timestamp tick (us)"); 6507 6508 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick", 6509 CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A", 6510 "DACK tick (us)"); 6511 6512 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer", 6513 CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer, 6514 "IU", "DACK timer (us)"); 6515 6516 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min", 6517 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN, 6518 sysctl_tp_timer, "LU", "Minimum retransmit interval (us)"); 6519 6520 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max", 6521 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX, 6522 sysctl_tp_timer, "LU", "Maximum retransmit interval (us)"); 6523 6524 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min", 6525 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN, 6526 sysctl_tp_timer, "LU", "Persist timer min (us)"); 6527 6528 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max", 6529 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX, 6530 sysctl_tp_timer, "LU", "Persist timer max (us)"); 6531 6532 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle", 6533 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE, 6534 sysctl_tp_timer, "LU", "Keepalive idle timer (us)"); 6535 6536 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_interval", 6537 CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL, 6538 sysctl_tp_timer, "LU", "Keepalive interval timer (us)"); 6539 6540 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt", 6541 CTLTYPE_ULONG | CTLFLAG_RD, sc, 
A_TP_INIT_SRTT,
6542 		    sysctl_tp_timer, "LU", "Initial SRTT (us)");
6543 
6544 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
6545 		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
6546 		    sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");
6547 
6548 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "syn_rexmt_count",
6549 		    CTLTYPE_UINT | CTLFLAG_RD, sc, S_SYNSHIFTMAX,
6550 		    sysctl_tp_shift_cnt, "IU",
6551 		    "Number of SYN retransmissions before abort");
6552 
6553 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_count",
6554 		    CTLTYPE_UINT | CTLFLAG_RD, sc, S_RXTSHIFTMAXR2,
6555 		    sysctl_tp_shift_cnt, "IU",
6556 		    "Number of retransmissions before abort");
6557 
6558 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_count",
6559 		    CTLTYPE_UINT | CTLFLAG_RD, sc, S_KEEPALIVEMAXR2,
6560 		    sysctl_tp_shift_cnt, "IU",
6561 		    "Number of keepalive probes before abort");
6562 
6563 		oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rexmt_backoff",
6564 		    CTLFLAG_RD, NULL, "TOE retransmit backoffs");
6565 		children = SYSCTL_CHILDREN(oid);
6566 		for (i = 0; i < 16; i++) {
6567 			snprintf(s, sizeof(s), "%u", i);
6568 			SYSCTL_ADD_PROC(ctx, children, OID_AUTO, s,
6569 			    CTLTYPE_UINT | CTLFLAG_RD, sc, i, sysctl_tp_backoff,
6570 			    "IU", "TOE retransmit backoff");
6571 		}
6572 	}
6573 #endif
6574 }
6575 
6576 void
6577 vi_sysctls(struct vi_info *vi)
6578 {
6579 	struct sysctl_ctx_list *ctx;
6580 	struct sysctl_oid *oid;
6581 	struct sysctl_oid_list *children;
6582 
6583 	ctx = device_get_sysctl_ctx(vi->dev);
6584 
6585 	/*
6586 	 * dev.v?(cxgbe|cxl).X.
6587 	 */
6588 	oid = device_get_sysctl_tree(vi->dev);
6589 	children = SYSCTL_CHILDREN(oid);
6590 
6591 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
6592 	    vi->viid, "VI identifier");
6593 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
6594 	    &vi->nrxq, 0, "# of rx queues");
6595 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
6596 	    &vi->ntxq, 0, "# of tx queues");
6597 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
6598 	    &vi->first_rxq, 0, "index of first rx queue");
6599 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
6600 	    &vi->first_txq, 0, "index of first tx queue");
6601 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_base", CTLFLAG_RD, NULL,
6602 	    vi->rss_base, "start of RSS indirection table");
6603 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
6604 	    vi->rss_size, "size of RSS indirection table");
6605 
6606 	if (IS_MAIN_VI(vi)) {
6607 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
6608 		    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
6609 		    "Reserve queue 0 for non-flowid packets");
6610 	}
6611 
6612 	if (vi->adapter->flags & IS_VF) {
6613 		MPASS(vi->flags & TX_USES_VM_WR);
6614 		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_vm_wr", CTLFLAG_RD,
6615 		    NULL, 1, "use VM work requests for transmit");
6616 	} else {
6617 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_vm_wr",
6618 		    CTLTYPE_INT | CTLFLAG_RW, vi, 0,
6619 		    sysctl_tx_vm_wr, "I", "use VM work requests for transmit");
6620 	}
6621 
6622 #ifdef TCP_OFFLOAD
6623 	if (vi->nofldrxq != 0) {
6624 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
6625 		    &vi->nofldrxq, 0,
6626 		    "# of rx queues for offloaded TCP connections");
6627 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
6628 		    CTLFLAG_RD, &vi->first_ofld_rxq, 0,
6629 		    "index of first TOE rx queue");
6630 		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx_ofld",
6631 		    CTLTYPE_INT | CTLFLAG_RW, vi, 0,
6632 		    sysctl_holdoff_tmr_idx_ofld, "I",
6633 		    "holdoff timer index for TOE queues");
6634
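		/*
		 * The tmr/pktc indices select entries from the holdoff_timers
		 * and holdoff_pkt_counts lists published by the t4nex device.
		 */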
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx_ofld", 6635 CTLTYPE_INT | CTLFLAG_RW, vi, 0, 6636 sysctl_holdoff_pktc_idx_ofld, "I", 6637 "holdoff packet counter index for TOE queues"); 6638 } 6639#endif 6640#if defined(TCP_OFFLOAD) || defined(RATELIMIT) 6641 if (vi->nofldtxq != 0) { 6642 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD, 6643 &vi->nofldtxq, 0, 6644 "# of tx queues for TOE/ETHOFLD"); 6645 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq", 6646 CTLFLAG_RD, &vi->first_ofld_txq, 0, 6647 "index of first TOE/ETHOFLD tx queue"); 6648 } 6649#endif 6650#ifdef DEV_NETMAP 6651 if (vi->nnmrxq != 0) { 6652 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD, 6653 &vi->nnmrxq, 0, "# of netmap rx queues"); 6654 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD, 6655 &vi->nnmtxq, 0, "# of netmap tx queues"); 6656 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq", 6657 CTLFLAG_RD, &vi->first_nm_rxq, 0, 6658 "index of first netmap rx queue"); 6659 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq", 6660 CTLFLAG_RD, &vi->first_nm_txq, 0, 6661 "index of first netmap tx queue"); 6662 } 6663#endif 6664 6665 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx", 6666 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I", 6667 "holdoff timer index"); 6668 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx", 6669 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I", 6670 "holdoff packet counter index"); 6671 6672 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq", 6673 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I", 6674 "rx queue size"); 6675 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq", 6676 CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I", 6677 "tx queue size"); 6678} 6679 6680static void 6681cxgbe_sysctls(struct port_info *pi) 6682{ 6683 struct sysctl_ctx_list *ctx; 6684 struct sysctl_oid *oid; 6685 struct sysctl_oid_list *children, *children2; 6686 struct adapter *sc = pi->adapter; 6687 int i; 6688 char name[16]; 6689 static char *tc_flags = {"\20\1USER\2SYNC\3ASYNC\4ERR"}; 6690 6691 ctx = device_get_sysctl_ctx(pi->dev); 6692 6693 /* 6694 * dev.cxgbe.X. 
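	 * (dev.cxl.X on T5 ports).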
6695 */ 6696 oid = device_get_sysctl_tree(pi->dev); 6697 children = SYSCTL_CHILDREN(oid); 6698 6699 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING | 6700 CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down"); 6701 if (pi->port_type == FW_PORT_TYPE_BT_XAUI) { 6702 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", 6703 CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I", 6704 "PHY temperature (in Celsius)"); 6705 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version", 6706 CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I", 6707 "PHY firmware version"); 6708 } 6709 6710 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings", 6711 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A", 6712 "PAUSE settings (bit 0 = rx_pause, 1 = tx_pause, 2 = pause_autoneg)"); 6713 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec", 6714 CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A", 6715 "FECs to use (bit 0 = RS, 1 = FC, 2 = none, 5 = auto, 6 = module)"); 6716 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "module_fec", 6717 CTLTYPE_STRING, pi, 0, sysctl_module_fec, "A", 6718 "FEC recommended by the cable/transceiver"); 6719 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg", 6720 CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I", 6721 "autonegotiation (-1 = not supported)"); 6722 6723 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcaps", CTLFLAG_RD, 6724 &pi->link_cfg.pcaps, 0, "port capabilities"); 6725 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "acaps", CTLFLAG_RD, 6726 &pi->link_cfg.acaps, 0, "advertised capabilities"); 6727 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpacaps", CTLFLAG_RD, 6728 &pi->link_cfg.lpacaps, 0, "link partner advertised capabilities"); 6729 6730 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL, 6731 port_top_speed(pi), "max speed (in Gbps)"); 6732 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "mps_bg_map", CTLFLAG_RD, NULL, 6733 pi->mps_bg_map, "MPS buffer group map"); 6734 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_e_chan_map", CTLFLAG_RD, 6735 NULL, pi->rx_e_chan_map, "TP rx e-channel map"); 6736 6737 if (sc->flags & IS_VF) 6738 return; 6739 6740 /* 6741 * dev.(cxgbe|cxl).X.tc. 6742 */ 6743 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL, 6744 "Tx scheduler traffic classes (cl_rl)"); 6745 children2 = SYSCTL_CHILDREN(oid); 6746 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "pktsize", 6747 CTLFLAG_RW, &pi->sched_params->pktsize, 0, 6748 "pktsize for per-flow cl-rl (0 means up to the driver )"); 6749 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "burstsize", 6750 CTLFLAG_RW, &pi->sched_params->burstsize, 0, 6751 "burstsize for per-flow cl-rl (0 means up to the driver)"); 6752 for (i = 0; i < sc->chip_params->nsched_cls; i++) { 6753 struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i]; 6754 6755 snprintf(name, sizeof(name), "%d", i); 6756 children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx, 6757 SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL, 6758 "traffic class")); 6759 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "flags", 6760 CTLTYPE_STRING | CTLFLAG_RD, tc_flags, (uintptr_t)&tc->flags, 6761 sysctl_bitfield_8b, "A", "flags"); 6762 SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount", 6763 CTLFLAG_RD, &tc->refcount, 0, "references to this class"); 6764 SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params", 6765 CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i, 6766 sysctl_tc_params, "A", "traffic class parameters"); 6767 } 6768 6769 /* 6770 * dev.cxgbe.X.stats. 
6771 */ 6772 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 6773 NULL, "port statistics"); 6774 children = SYSCTL_CHILDREN(oid); 6775 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD, 6776 &pi->tx_parse_error, 0, 6777 "# of tx packets with invalid length or # of segments"); 6778 6779#define T4_REGSTAT(name, stat, desc) \ 6780 SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \ 6781 CTLTYPE_U64 | CTLFLAG_RD, sc, \ 6782 (is_t4(sc) ? PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##stat##_L) : \ 6783 T5_PORT_REG(pi->tx_chan, A_MPS_PORT_STAT_##stat##_L)), \ 6784 sysctl_handle_t4_reg64, "QU", desc) 6785 6786/* We get these from port_stats and they may be stale by up to 1s */ 6787#define T4_PORTSTAT(name, desc) \ 6788 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \ 6789 &pi->stats.name, desc) 6790 6791 T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames"); 6792 T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames"); 6793 T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames"); 6794 T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames"); 6795 T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames"); 6796 T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames"); 6797 T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range"); 6798 T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range"); 6799 T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range"); 6800 T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range"); 6801 T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range"); 6802 T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range"); 6803 T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range"); 6804 T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames"); 6805 T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted"); 6806 T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted"); 6807 T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted"); 6808 T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted"); 6809 T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted"); 6810 T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted"); 6811 T4_REGSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted"); 6812 T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted"); 6813 T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted"); 6814 6815 T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames"); 6816 T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames"); 6817 T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames"); 6818 T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames"); 6819 T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames"); 6820 T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU"); 6821 T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames"); 6822 if (is_t6(sc)) { 6823 T4_PORTSTAT(rx_fcs_err, 6824 "# of frames received with bad FCS since last link up"); 6825 } else { 6826 T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR, 6827 "# of frames received with bad FCS"); 6828 } 6829 T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error"); 6830 T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors"); 6831 
T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received"); 6832 T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range"); 6833 T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range"); 6834 T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range"); 6835 T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range"); 6836 T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range"); 6837 T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range"); 6838 T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range"); 6839 T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received"); 6840 T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received"); 6841 T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received"); 6842 T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received"); 6843 T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received"); 6844 T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received"); 6845 T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received"); 6846 T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received"); 6847 T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received"); 6848 6849 T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows"); 6850 T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows"); 6851 T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows"); 6852 T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows"); 6853 T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets"); 6854 T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets"); 6855 T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets"); 6856 T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets"); 6857 6858#undef T4_REGSTAT 6859#undef T4_PORTSTAT 6860 6861 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_toe_tls_records", 6862 CTLFLAG_RD, &pi->tx_toe_tls_records, 6863 "# of TLS records transmitted"); 6864 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tx_toe_tls_octets", 6865 CTLFLAG_RD, &pi->tx_toe_tls_octets, 6866 "# of payload octets in transmitted TLS records"); 6867 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_toe_tls_records", 6868 CTLFLAG_RD, &pi->rx_toe_tls_records, 6869 "# of TLS records received"); 6870 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "rx_toe_tls_octets", 6871 CTLFLAG_RD, &pi->rx_toe_tls_octets, 6872 "# of payload octets in received TLS records"); 6873} 6874 6875static int 6876sysctl_int_array(SYSCTL_HANDLER_ARGS) 6877{ 6878 int rc, *i, space = 0; 6879 struct sbuf sb; 6880 6881 sbuf_new_for_sysctl(&sb, NULL, 64, req); 6882 for (i = arg1; arg2; arg2 -= sizeof(int), i++) { 6883 if (space) 6884 sbuf_printf(&sb, " "); 6885 sbuf_printf(&sb, "%d", *i); 6886 space = 1; 6887 } 6888 rc = sbuf_finish(&sb); 6889 sbuf_delete(&sb); 6890 return (rc); 6891} 6892 6893static int 6894sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS) 6895{ 6896 int rc; 6897 struct sbuf *sb; 6898 6899 rc = sysctl_wire_old_buffer(req, 0); 6900 if (rc != 0) 6901 return(rc); 6902 6903 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 6904 if (sb == NULL) 6905 return (ENOMEM); 6906 6907 sbuf_printf(sb, "%b", *(uint8_t *)(uintptr_t)arg2, (char *)arg1); 6908 rc = sbuf_finish(sb); 6909 sbuf_delete(sb); 6910 6911 return (rc); 6912} 6913 6914static int 6915sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS) 6916{ 6917 int rc; 6918 struct sbuf *sb; 6919 
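	/* arg1 is a "%b" decoder string, arg2 points at the 16-bit value. */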
6920 rc = sysctl_wire_old_buffer(req, 0); 6921 if (rc != 0) 6922 return(rc); 6923 6924 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 6925 if (sb == NULL) 6926 return (ENOMEM); 6927 6928 sbuf_printf(sb, "%b", *(uint16_t *)(uintptr_t)arg2, (char *)arg1); 6929 rc = sbuf_finish(sb); 6930 sbuf_delete(sb); 6931 6932 return (rc); 6933} 6934 6935static int 6936sysctl_btphy(SYSCTL_HANDLER_ARGS) 6937{ 6938 struct port_info *pi = arg1; 6939 int op = arg2; 6940 struct adapter *sc = pi->adapter; 6941 u_int v; 6942 int rc; 6943 6944 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt"); 6945 if (rc) 6946 return (rc); 6947 /* XXX: magic numbers */ 6948 rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820, 6949 &v); 6950 end_synchronized_op(sc, 0); 6951 if (rc) 6952 return (rc); 6953 if (op == 0) 6954 v /= 256; 6955 6956 rc = sysctl_handle_int(oidp, &v, 0, req); 6957 return (rc); 6958} 6959 6960static int 6961sysctl_noflowq(SYSCTL_HANDLER_ARGS) 6962{ 6963 struct vi_info *vi = arg1; 6964 int rc, val; 6965 6966 val = vi->rsrv_noflowq; 6967 rc = sysctl_handle_int(oidp, &val, 0, req); 6968 if (rc != 0 || req->newptr == NULL) 6969 return (rc); 6970 6971 if ((val >= 1) && (vi->ntxq > 1)) 6972 vi->rsrv_noflowq = 1; 6973 else 6974 vi->rsrv_noflowq = 0; 6975 6976 return (rc); 6977} 6978 6979static int 6980sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS) 6981{ 6982 struct vi_info *vi = arg1; 6983 struct adapter *sc = vi->adapter; 6984 int rc, val, i; 6985 6986 MPASS(!(sc->flags & IS_VF)); 6987 6988 val = vi->flags & TX_USES_VM_WR ? 1 : 0; 6989 rc = sysctl_handle_int(oidp, &val, 0, req); 6990 if (rc != 0 || req->newptr == NULL) 6991 return (rc); 6992 6993 if (val != 0 && val != 1) 6994 return (EINVAL); 6995 6996 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 6997 "t4txvm"); 6998 if (rc) 6999 return (rc); 7000 if (vi->ifp->if_drv_flags & IFF_DRV_RUNNING) { 7001 /* 7002 * We don't want parse_pkt to run with one setting (VF or PF) 7003 * and then eth_tx to see a different setting but still use 7004 * stale information calculated by parse_pkt. 
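		 * Refuse to change the setting while the interface is running
		 * instead.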
7005 */ 7006 rc = EBUSY; 7007 } else { 7008 struct port_info *pi = vi->pi; 7009 struct sge_txq *txq; 7010 uint32_t ctrl0; 7011 uint8_t npkt = sc->params.max_pkts_per_eth_tx_pkts_wr; 7012 7013 if (val) { 7014 vi->flags |= TX_USES_VM_WR; 7015 vi->ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_VM_TSO; 7016 ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 7017 V_TXPKT_INTF(pi->tx_chan)); 7018 if (!(sc->flags & IS_VF)) 7019 npkt--; 7020 } else { 7021 vi->flags &= ~TX_USES_VM_WR; 7022 vi->ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS_TSO; 7023 ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 7024 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 7025 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 7026 } 7027 for_each_txq(vi, i, txq) { 7028 txq->cpl_ctrl0 = ctrl0; 7029 txq->txp.max_npkt = npkt; 7030 } 7031 } 7032 end_synchronized_op(sc, LOCK_HELD); 7033 return (rc); 7034} 7035 7036static int 7037sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS) 7038{ 7039 struct vi_info *vi = arg1; 7040 struct adapter *sc = vi->adapter; 7041 int idx, rc, i; 7042 struct sge_rxq *rxq; 7043 uint8_t v; 7044 7045 idx = vi->tmr_idx; 7046 7047 rc = sysctl_handle_int(oidp, &idx, 0, req); 7048 if (rc != 0 || req->newptr == NULL) 7049 return (rc); 7050 7051 if (idx < 0 || idx >= SGE_NTIMERS) 7052 return (EINVAL); 7053 7054 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 7055 "t4tmr"); 7056 if (rc) 7057 return (rc); 7058 7059 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1); 7060 for_each_rxq(vi, i, rxq) { 7061#ifdef atomic_store_rel_8 7062 atomic_store_rel_8(&rxq->iq.intr_params, v); 7063#else 7064 rxq->iq.intr_params = v; 7065#endif 7066 } 7067 vi->tmr_idx = idx; 7068 7069 end_synchronized_op(sc, LOCK_HELD); 7070 return (0); 7071} 7072 7073static int 7074sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS) 7075{ 7076 struct vi_info *vi = arg1; 7077 struct adapter *sc = vi->adapter; 7078 int idx, rc; 7079 7080 idx = vi->pktc_idx; 7081 7082 rc = sysctl_handle_int(oidp, &idx, 0, req); 7083 if (rc != 0 || req->newptr == NULL) 7084 return (rc); 7085 7086 if (idx < -1 || idx >= SGE_NCOUNTERS) 7087 return (EINVAL); 7088 7089 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 7090 "t4pktc"); 7091 if (rc) 7092 return (rc); 7093 7094 if (vi->flags & VI_INIT_DONE) 7095 rc = EBUSY; /* cannot be changed once the queues are created */ 7096 else 7097 vi->pktc_idx = idx; 7098 7099 end_synchronized_op(sc, LOCK_HELD); 7100 return (rc); 7101} 7102 7103static int 7104sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS) 7105{ 7106 struct vi_info *vi = arg1; 7107 struct adapter *sc = vi->adapter; 7108 int qsize, rc; 7109 7110 qsize = vi->qsize_rxq; 7111 7112 rc = sysctl_handle_int(oidp, &qsize, 0, req); 7113 if (rc != 0 || req->newptr == NULL) 7114 return (rc); 7115 7116 if (qsize < 128 || (qsize & 7)) 7117 return (EINVAL); 7118 7119 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 7120 "t4rxqs"); 7121 if (rc) 7122 return (rc); 7123 7124 if (vi->flags & VI_INIT_DONE) 7125 rc = EBUSY; /* cannot be changed once the queues are created */ 7126 else 7127 vi->qsize_rxq = qsize; 7128 7129 end_synchronized_op(sc, LOCK_HELD); 7130 return (rc); 7131} 7132 7133static int 7134sysctl_qsize_txq(SYSCTL_HANDLER_ARGS) 7135{ 7136 struct vi_info *vi = arg1; 7137 struct adapter *sc = vi->adapter; 7138 int qsize, rc; 7139 7140 qsize = vi->qsize_txq; 7141 7142 rc = sysctl_handle_int(oidp, &qsize, 0, req); 7143 if (rc != 0 || req->newptr == NULL) 7144 return (rc); 7145 7146 if (qsize < 128 || qsize > 65536) 7147 return (EINVAL); 7148 7149 
rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 7150 "t4txqs"); 7151 if (rc) 7152 return (rc); 7153 7154 if (vi->flags & VI_INIT_DONE) 7155 rc = EBUSY; /* cannot be changed once the queues are created */ 7156 else 7157 vi->qsize_txq = qsize; 7158 7159 end_synchronized_op(sc, LOCK_HELD); 7160 return (rc); 7161} 7162 7163static int 7164sysctl_pause_settings(SYSCTL_HANDLER_ARGS) 7165{ 7166 struct port_info *pi = arg1; 7167 struct adapter *sc = pi->adapter; 7168 struct link_config *lc = &pi->link_cfg; 7169 int rc; 7170 7171 if (req->newptr == NULL) { 7172 struct sbuf *sb; 7173 static char *bits = "\20\1RX\2TX\3AUTO"; 7174 7175 rc = sysctl_wire_old_buffer(req, 0); 7176 if (rc != 0) 7177 return(rc); 7178 7179 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 7180 if (sb == NULL) 7181 return (ENOMEM); 7182 7183 if (lc->link_ok) { 7184 sbuf_printf(sb, "%b", (lc->fc & (PAUSE_TX | PAUSE_RX)) | 7185 (lc->requested_fc & PAUSE_AUTONEG), bits); 7186 } else { 7187 sbuf_printf(sb, "%b", lc->requested_fc & (PAUSE_TX | 7188 PAUSE_RX | PAUSE_AUTONEG), bits); 7189 } 7190 rc = sbuf_finish(sb); 7191 sbuf_delete(sb); 7192 } else { 7193 char s[2]; 7194 int n; 7195 7196 s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX | 7197 PAUSE_AUTONEG)); 7198 s[1] = 0; 7199 7200 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 7201 if (rc != 0) 7202 return(rc); 7203 7204 if (s[1] != 0) 7205 return (EINVAL); 7206 if (s[0] < '0' || s[0] > '9') 7207 return (EINVAL); /* not a number */ 7208 n = s[0] - '0'; 7209 if (n & ~(PAUSE_TX | PAUSE_RX | PAUSE_AUTONEG)) 7210 return (EINVAL); /* some other bit is set too */ 7211 7212 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 7213 "t4PAUSE"); 7214 if (rc) 7215 return (rc); 7216 PORT_LOCK(pi); 7217 lc->requested_fc = n; 7218 fixup_link_config(pi); 7219 if (pi->up_vis > 0) 7220 rc = apply_link_config(pi); 7221 set_current_media(pi); 7222 PORT_UNLOCK(pi); 7223 end_synchronized_op(sc, 0); 7224 } 7225 7226 return (rc); 7227} 7228 7229static int 7230sysctl_fec(SYSCTL_HANDLER_ARGS) 7231{ 7232 struct port_info *pi = arg1; 7233 struct adapter *sc = pi->adapter; 7234 struct link_config *lc = &pi->link_cfg; 7235 int rc; 7236 int8_t old; 7237 7238 if (req->newptr == NULL) { 7239 struct sbuf *sb; 7240 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2" 7241 "\5RSVD3\6auto\7module"; 7242 7243 rc = sysctl_wire_old_buffer(req, 0); 7244 if (rc != 0) 7245 return(rc); 7246 7247 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 7248 if (sb == NULL) 7249 return (ENOMEM); 7250 7251 /* 7252 * Display the requested_fec when the link is down -- the actual 7253 * FEC makes sense only when the link is up. 7254 */ 7255 if (lc->link_ok) { 7256 sbuf_printf(sb, "%b", (lc->fec & M_FW_PORT_CAP32_FEC) | 7257 (lc->requested_fec & (FEC_AUTO | FEC_MODULE)), 7258 bits); 7259 } else { 7260 sbuf_printf(sb, "%b", lc->requested_fec, bits); 7261 } 7262 rc = sbuf_finish(sb); 7263 sbuf_delete(sb); 7264 } else { 7265 char s[8]; 7266 int n; 7267 7268 snprintf(s, sizeof(s), "%d", 7269 lc->requested_fec == FEC_AUTO ? 
-1 : 7270 lc->requested_fec & (M_FW_PORT_CAP32_FEC | FEC_MODULE)); 7271 7272 rc = sysctl_handle_string(oidp, s, sizeof(s), req); 7273 if (rc != 0) 7274 return(rc); 7275 7276 n = strtol(&s[0], NULL, 0); 7277 if (n < 0 || n & FEC_AUTO) 7278 n = FEC_AUTO; 7279 else if (n & ~(M_FW_PORT_CAP32_FEC | FEC_MODULE)) 7280 return (EINVAL);/* some other bit is set too */ 7281 7282 rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, 7283 "t4fec"); 7284 if (rc) 7285 return (rc); 7286 PORT_LOCK(pi); 7287 old = lc->requested_fec; 7288 if (n == FEC_AUTO) 7289 lc->requested_fec = FEC_AUTO; 7290 else if (n == 0 || n == FEC_NONE) 7291 lc->requested_fec = FEC_NONE; 7292 else { 7293 if ((lc->pcaps | 7294 V_FW_PORT_CAP32_FEC(n & M_FW_PORT_CAP32_FEC)) != 7295 lc->pcaps) { 7296 rc = ENOTSUP; 7297 goto done; 7298 } 7299 lc->requested_fec = n & (M_FW_PORT_CAP32_FEC | 7300 FEC_MODULE); 7301 } 7302 fixup_link_config(pi); 7303 if (pi->up_vis > 0) { 7304 rc = apply_link_config(pi); 7305 if (rc != 0) { 7306 lc->requested_fec = old; 7307 if (rc == FW_EPROTO) 7308 rc = ENOTSUP; 7309 } 7310 } 7311done: 7312 PORT_UNLOCK(pi); 7313 end_synchronized_op(sc, 0); 7314 } 7315 7316 return (rc); 7317} 7318 7319static int 7320sysctl_module_fec(SYSCTL_HANDLER_ARGS) 7321{ 7322 struct port_info *pi = arg1; 7323 struct adapter *sc = pi->adapter; 7324 struct link_config *lc = &pi->link_cfg; 7325 int rc; 7326 int8_t fec; 7327 struct sbuf *sb; 7328 static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3"; 7329 7330 rc = sysctl_wire_old_buffer(req, 0); 7331 if (rc != 0) 7332 return (rc); 7333 7334 sb = sbuf_new_for_sysctl(NULL, NULL, 128, req); 7335 if (sb == NULL) 7336 return (ENOMEM); 7337 7338 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4mfec") != 0) 7339 return (EBUSY); 7340 PORT_LOCK(pi); 7341 if (pi->up_vis == 0) { 7342 /* 7343 * If all the interfaces are administratively down the firmware 7344 * does not report transceiver changes. Refresh port info here. 7345 * This is the only reason we have a synchronized op in this 7346 * function. Just PORT_LOCK would have been enough otherwise. 7347 */ 7348 t4_update_port_info(pi); 7349 } 7350 7351 fec = lc->fec_hint; 7352 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE || 7353 !fec_supported(lc->pcaps)) { 7354 sbuf_printf(sb, "n/a"); 7355 } else { 7356 if (fec == 0) 7357 fec = FEC_NONE; 7358 sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits); 7359 } 7360 rc = sbuf_finish(sb); 7361 sbuf_delete(sb); 7362 7363 PORT_UNLOCK(pi); 7364 end_synchronized_op(sc, 0); 7365 7366 return (rc); 7367} 7368 7369static int 7370sysctl_autoneg(SYSCTL_HANDLER_ARGS) 7371{ 7372 struct port_info *pi = arg1; 7373 struct adapter *sc = pi->adapter; 7374 struct link_config *lc = &pi->link_cfg; 7375 int rc, val; 7376 7377 if (lc->pcaps & FW_PORT_CAP32_ANEG) 7378 val = lc->requested_aneg == AUTONEG_DISABLE ? 
0 : 1;
7379 	else
7380 		val = -1;
7381 	rc = sysctl_handle_int(oidp, &val, 0, req);
7382 	if (rc != 0 || req->newptr == NULL)
7383 		return (rc);
7384 	if (val == 0)
7385 		val = AUTONEG_DISABLE;
7386 	else if (val == 1)
7387 		val = AUTONEG_ENABLE;
7388 	else
7389 		val = AUTONEG_AUTO;
7390 
7391 	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
7392 	    "t4aneg");
7393 	if (rc)
7394 		return (rc);
7395 	PORT_LOCK(pi);
7396 	if (val == AUTONEG_ENABLE && !(lc->pcaps & FW_PORT_CAP32_ANEG)) {
7397 		rc = ENOTSUP;
7398 		goto done;
7399 	}
7400 	lc->requested_aneg = val;
7401 	fixup_link_config(pi);
7402 	if (pi->up_vis > 0)
7403 		rc = apply_link_config(pi);
7404 	set_current_media(pi);
7405 done:
7406 	PORT_UNLOCK(pi);
7407 	end_synchronized_op(sc, 0);
7408 	return (rc);
7409 }
7410 
7411 static int
7412 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
7413 {
7414 	struct adapter *sc = arg1;
7415 	int reg = arg2;
7416 	uint64_t val;
7417 
7418 	val = t4_read_reg64(sc, reg);
7419 
7420 	return (sysctl_handle_64(oidp, &val, 0, req));
7421 }
7422 
7423 static int
7424 sysctl_temperature(SYSCTL_HANDLER_ARGS)
7425 {
7426 	struct adapter *sc = arg1;
7427 	int rc, t;
7428 	uint32_t param, val;
7429 
7430 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
7431 	if (rc)
7432 		return (rc);
7433 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7434 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
7435 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
7436 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7437 	end_synchronized_op(sc, 0);
7438 	if (rc)
7439 		return (rc);
7440 
7441 	/* unknown is returned as 0 but we display -1 in that case */
7442 	t = val == 0 ? -1 : val;
7443 
7444 	rc = sysctl_handle_int(oidp, &t, 0, req);
7445 	return (rc);
7446 }
7447 
7448 static int
7449 sysctl_vdd(SYSCTL_HANDLER_ARGS)
7450 {
7451 	struct adapter *sc = arg1;
7452 	int rc;
7453 	uint32_t param, val;
7454 
7455 	if (sc->params.core_vdd == 0) {
7456 		rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
7457 		    "t4vdd");
7458 		if (rc)
7459 			return (rc);
7460 		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7461 		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
7462 		    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_VDD);
7463 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7464 		end_synchronized_op(sc, 0);
7465 		if (rc)
7466 			return (rc);
7467 		sc->params.core_vdd = val;
7468 	}
7469 
7470 	return (sysctl_handle_int(oidp, &sc->params.core_vdd, 0, req));
7471 }
7472 
7473 static int
7474 sysctl_reset_sensor(SYSCTL_HANDLER_ARGS)
7475 {
7476 	struct adapter *sc = arg1;
7477 	int rc, v;
7478 	uint32_t param, val;
7479 
7480 	v = sc->sensor_resets;
7481 	rc = sysctl_handle_int(oidp, &v, 0, req);
7482 	if (rc != 0 || req->newptr == NULL || v <= 0)
7483 		return (rc);
7484 
7485 	if (sc->params.fw_vers < FW_VERSION32(1, 24, 7, 0) ||
7486 	    chip_id(sc) < CHELSIO_T5)
7487 		return (ENOTSUP);
7488 
7489 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst");
7490 	if (rc)
7491 		return (rc);
7492 	param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7493 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
7494 	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_RESET_TMP_SENSOR));
7495 	val = 1;
7496 	rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7497 	end_synchronized_op(sc, 0);
7498 	if (rc == 0)
7499 		sc->sensor_resets++;
7500 	return (rc);
7501 }
7502 
7503 static int
7504 sysctl_loadavg(SYSCTL_HANDLER_ARGS)
7505 {
7506 	struct adapter *sc = arg1;
7507 	struct sbuf *sb;
7508 	int rc;
7509 	uint32_t param, val;
7510 
7511 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
7512 	if (rc)
7513 		return (rc);
7514
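	/*
	 * The firmware returns three load averages packed one per byte;
	 * 0xffffffff means it does not keep load statistics.
	 */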
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
7515 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
7516 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
7517 	end_synchronized_op(sc, 0);
7518 	if (rc)
7519 		return (rc);
7520 
7521 	rc = sysctl_wire_old_buffer(req, 0);
7522 	if (rc != 0)
7523 		return (rc);
7524 
7525 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7526 	if (sb == NULL)
7527 		return (ENOMEM);
7528 
7529 	if (val == 0xffffffff) {
7530 		/* Only debug and custom firmwares report load averages. */
7531 		sbuf_printf(sb, "not available");
7532 	} else {
7533 		sbuf_printf(sb, "%d %d %d", val & 0xff, (val >> 8) & 0xff,
7534 		    (val >> 16) & 0xff);
7535 	}
7536 	rc = sbuf_finish(sb);
7537 	sbuf_delete(sb);
7538 
7539 	return (rc);
7540 }
7541 
7542 static int
7543 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
7544 {
7545 	struct adapter *sc = arg1;
7546 	struct sbuf *sb;
7547 	int rc, i;
7548 	uint16_t incr[NMTUS][NCCTRL_WIN];
7549 	static const char *dec_fac[] = {
7550 		"0.5", "0.5625", "0.625", "0.6875", "0.75", "0.8125", "0.875",
7551 		"0.9375"
7552 	};
7553 
7554 	rc = sysctl_wire_old_buffer(req, 0);
7555 	if (rc != 0)
7556 		return (rc);
7557 
7558 	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
7559 	if (sb == NULL)
7560 		return (ENOMEM);
7561 
7562 	t4_read_cong_tbl(sc, incr);
7563 
7564 	for (i = 0; i < NCCTRL_WIN; ++i) {
7565 		sbuf_printf(sb, "%2d: %4u %4u %4u %4u %4u %4u %4u %4u\n", i,
7566 		    incr[0][i], incr[1][i], incr[2][i], incr[3][i], incr[4][i],
7567 		    incr[5][i], incr[6][i], incr[7][i]);
7568 		sbuf_printf(sb, "%8u %4u %4u %4u %4u %4u %4u %4u %5u %s\n",
7569 		    incr[8][i], incr[9][i], incr[10][i], incr[11][i],
7570 		    incr[12][i], incr[13][i], incr[14][i], incr[15][i],
7571 		    sc->params.a_wnd[i], dec_fac[sc->params.b_wnd[i]]);
7572 	}
7573 
7574 	rc = sbuf_finish(sb);
7575 	sbuf_delete(sb);
7576 
7577 	return (rc);
7578 }
7579 
7580 static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
7581 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
7582 	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
7583 	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
7584 };
7585 
7586 static int
7587 sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
7588 {
7589 	struct adapter *sc = arg1;
7590 	struct sbuf *sb;
7591 	int rc, i, n, qid = arg2;
7592 	uint32_t *buf, *p;
7593 	char *qtype;
7594 	u_int cim_num_obq = sc->chip_params->cim_num_obq;
7595 
7596 	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
7597 	    ("%s: bad qid %d\n", __func__, qid));
7598 
7599 	if (qid < CIM_NUM_IBQ) {
7600 		/* inbound queue */
7601 		qtype = "IBQ";
7602 		n = 4 * CIM_IBQ_SIZE;
7603 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
7604 		rc = t4_read_cim_ibq(sc, qid, buf, n);
7605 	} else {
7606 		/* outbound queue */
7607 		qtype = "OBQ";
7608 		qid -= CIM_NUM_IBQ;
7609 		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
7610 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
7611 		rc = t4_read_cim_obq(sc, qid, buf, n);
7612 	}
7613 
7614 	if (rc < 0) {
7615 		rc = -rc;
7616 		goto done;
7617 	}
7618 	n = rc * sizeof(uint32_t);	/* rc has # of words actually read */
7619 
7620 	rc = sysctl_wire_old_buffer(req, 0);
7621 	if (rc != 0)
7622 		goto done;
7623 
7624 	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
7625 	if (sb == NULL) {
7626 		rc = ENOMEM;
7627 		goto done;
7628 	}
7629 
7630 	sbuf_printf(sb, "%s%d %s", qtype, qid, qname[arg2]);
7631 	for (i = 0, p = buf; i < n; i += 16, p += 4)
7632 		sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
7633 		    p[2], p[3]);
7634 
7635 	rc = sbuf_finish(sb);
7636 	sbuf_delete(sb);
7637 done:
7638 	free(buf, M_CXGBE);
7639 	return (rc);
7640 }
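/*
 * Decoders for the CIM logic analyzer capture.  T4/T5 log eight words per
 * entry (sbuf_cim_la4); T6 logs ten words per entry (sbuf_cim_la6).
 */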
7641 7642static void 7643sbuf_cim_la4(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg) 7644{ 7645 uint32_t *p; 7646 7647 sbuf_printf(sb, "Status Data PC%s", 7648 cfg & F_UPDBGLACAPTPCONLY ? "" : 7649 " LS0Stat LS0Addr LS0Data"); 7650 7651 for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) { 7652 if (cfg & F_UPDBGLACAPTPCONLY) { 7653 sbuf_printf(sb, "\n %02x %08x %08x", p[5] & 0xff, 7654 p[6], p[7]); 7655 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x", 7656 (p[3] >> 8) & 0xff, p[3] & 0xff, p[4] >> 8, 7657 p[4] & 0xff, p[5] >> 8); 7658 sbuf_printf(sb, "\n %02x %x%07x %x%07x", 7659 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 7660 p[1] & 0xf, p[2] >> 4); 7661 } else { 7662 sbuf_printf(sb, 7663 "\n %02x %x%07x %x%07x %08x %08x " 7664 "%08x%08x%08x%08x", 7665 (p[0] >> 4) & 0xff, p[0] & 0xf, p[1] >> 4, 7666 p[1] & 0xf, p[2] >> 4, p[2] & 0xf, p[3], p[4], p[5], 7667 p[6], p[7]); 7668 } 7669 } 7670} 7671 7672static void 7673sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg) 7674{ 7675 uint32_t *p; 7676 7677 sbuf_printf(sb, "Status Inst Data PC%s", 7678 cfg & F_UPDBGLACAPTPCONLY ? "" : 7679 " LS0Stat LS0Addr LS0Data LS1Stat LS1Addr LS1Data"); 7680 7681 for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) { 7682 if (cfg & F_UPDBGLACAPTPCONLY) { 7683 sbuf_printf(sb, "\n %02x %08x %08x %08x", 7684 p[3] & 0xff, p[2], p[1], p[0]); 7685 sbuf_printf(sb, "\n %02x %02x%06x %02x%06x %02x%06x", 7686 (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8, 7687 p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8); 7688 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x", 7689 (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16, 7690 p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff, 7691 p[6] >> 16); 7692 } else { 7693 sbuf_printf(sb, "\n %02x %04x%04x %04x%04x %04x%04x " 7694 "%08x %08x %08x %08x %08x %08x", 7695 (p[9] >> 16) & 0xff, 7696 p[9] & 0xffff, p[8] >> 16, 7697 p[8] & 0xffff, p[7] >> 16, 7698 p[7] & 0xffff, p[6] >> 16, 7699 p[2], p[1], p[0], p[5], p[4], p[3]); 7700 } 7701 } 7702} 7703 7704static int 7705sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags) 7706{ 7707 uint32_t cfg, *buf; 7708 int rc; 7709 7710 rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg); 7711 if (rc != 0) 7712 return (rc); 7713 7714 MPASS(flags == M_WAITOK || flags == M_NOWAIT); 7715 buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE, 7716 M_ZERO | flags); 7717 if (buf == NULL) 7718 return (ENOMEM); 7719 7720 rc = -t4_cim_read_la(sc, buf, NULL); 7721 if (rc != 0) 7722 goto done; 7723 if (chip_id(sc) < CHELSIO_T6) 7724 sbuf_cim_la4(sc, sb, buf, cfg); 7725 else 7726 sbuf_cim_la6(sc, sb, buf, cfg); 7727 7728done: 7729 free(buf, M_CXGBE); 7730 return (rc); 7731} 7732 7733static int 7734sysctl_cim_la(SYSCTL_HANDLER_ARGS) 7735{ 7736 struct adapter *sc = arg1; 7737 struct sbuf *sb; 7738 int rc; 7739 7740 rc = sysctl_wire_old_buffer(req, 0); 7741 if (rc != 0) 7742 return (rc); 7743 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7744 if (sb == NULL) 7745 return (ENOMEM); 7746 7747 rc = sbuf_cim_la(sc, sb, M_WAITOK); 7748 if (rc == 0) 7749 rc = sbuf_finish(sb); 7750 sbuf_delete(sb); 7751 return (rc); 7752} 7753 7754bool 7755t4_os_dump_cimla(struct adapter *sc, int arg, bool verbose) 7756{ 7757 struct sbuf sb; 7758 int rc; 7759 7760 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) 7761 return (false); 7762 rc = sbuf_cim_la(sc, &sb, M_NOWAIT); 7763 if (rc == 0) { 7764 rc = sbuf_finish(&sb); 7765 if (rc == 0) { 7766 log(LOG_DEBUG, "%s: CIM LA dump follows.\n%s", 7767 
device_get_nameunit(sc->dev), sbuf_data(&sb)); 7768 } 7769 } 7770 sbuf_delete(&sb); 7771 return (false); 7772} 7773 7774static int 7775sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS) 7776{ 7777 struct adapter *sc = arg1; 7778 u_int i; 7779 struct sbuf *sb; 7780 uint32_t *buf, *p; 7781 int rc; 7782 7783 rc = sysctl_wire_old_buffer(req, 0); 7784 if (rc != 0) 7785 return (rc); 7786 7787 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7788 if (sb == NULL) 7789 return (ENOMEM); 7790 7791 buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE, 7792 M_ZERO | M_WAITOK); 7793 7794 t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE); 7795 p = buf; 7796 7797 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 7798 sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2], 7799 p[1], p[0]); 7800 } 7801 7802 sbuf_printf(sb, "\n\nCnt ID Tag UE Data RDY VLD"); 7803 for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) { 7804 sbuf_printf(sb, "\n%3u %2u %x %u %08x%08x %u %u", 7805 (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, 7806 (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, 7807 (p[1] >> 2) | ((p[2] & 3) << 30), 7808 (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, 7809 p[0] & 1); 7810 } 7811 7812 rc = sbuf_finish(sb); 7813 sbuf_delete(sb); 7814 free(buf, M_CXGBE); 7815 return (rc); 7816} 7817 7818static int 7819sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS) 7820{ 7821 struct adapter *sc = arg1; 7822 u_int i; 7823 struct sbuf *sb; 7824 uint32_t *buf, *p; 7825 int rc; 7826 7827 rc = sysctl_wire_old_buffer(req, 0); 7828 if (rc != 0) 7829 return (rc); 7830 7831 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 7832 if (sb == NULL) 7833 return (ENOMEM); 7834 7835 buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE, 7836 M_ZERO | M_WAITOK); 7837 7838 t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL); 7839 p = buf; 7840 7841 sbuf_printf(sb, "Cntl ID DataBE Addr Data"); 7842 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 7843 sbuf_printf(sb, "\n %02x %02x %04x %08x %08x%08x%08x%08x", 7844 (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff, 7845 p[4], p[3], p[2], p[1], p[0]); 7846 } 7847 7848 sbuf_printf(sb, "\n\nCntl ID Data"); 7849 for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) { 7850 sbuf_printf(sb, "\n %02x %02x %08x%08x%08x%08x", 7851 (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]); 7852 } 7853 7854 rc = sbuf_finish(sb); 7855 sbuf_delete(sb); 7856 free(buf, M_CXGBE); 7857 return (rc); 7858} 7859 7860static int 7861sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS) 7862{ 7863 struct adapter *sc = arg1; 7864 struct sbuf *sb; 7865 int rc, i; 7866 uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 7867 uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; 7868 uint16_t thres[CIM_NUM_IBQ]; 7869 uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr; 7870 uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat; 7871 u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq; 7872 7873 cim_num_obq = sc->chip_params->cim_num_obq; 7874 if (is_t4(sc)) { 7875 ibq_rdaddr = A_UP_IBQ_0_RDADDR; 7876 obq_rdaddr = A_UP_OBQ_0_REALADDR; 7877 } else { 7878 ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR; 7879 obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR; 7880 } 7881 nq = CIM_NUM_IBQ + cim_num_obq; 7882 7883 rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat); 7884 if (rc == 0) 7885 rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr); 7886 if (rc != 0) 7887 return (rc); 7888 7889 t4_read_cimq_cfg(sc, base, size, thres); 7890 7891 rc = sysctl_wire_old_buffer(req, 0); 7892 if (rc != 0) 7893 return (rc); 7894 7895 sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req); 7896 if (sb == 
NULL) 7897 return (ENOMEM); 7898 7899 sbuf_printf(sb, 7900 " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail"); 7901 7902 for (i = 0; i < CIM_NUM_IBQ; i++, p += 4) 7903 sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u", 7904 qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]), 7905 G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 7906 G_QUEREMFLITS(p[2]) * 16); 7907 for ( ; i < nq; i++, p += 4, wr += 2) 7908 sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u", qname[i], 7909 base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff, 7910 wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]), 7911 G_QUEREMFLITS(p[2]) * 16); 7912 7913 rc = sbuf_finish(sb); 7914 sbuf_delete(sb); 7915 7916 return (rc); 7917} 7918 7919static int 7920sysctl_cpl_stats(SYSCTL_HANDLER_ARGS) 7921{ 7922 struct adapter *sc = arg1; 7923 struct sbuf *sb; 7924 int rc; 7925 struct tp_cpl_stats stats; 7926 7927 rc = sysctl_wire_old_buffer(req, 0); 7928 if (rc != 0) 7929 return (rc); 7930 7931 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7932 if (sb == NULL) 7933 return (ENOMEM); 7934 7935 mtx_lock(&sc->reg_lock); 7936 t4_tp_get_cpl_stats(sc, &stats, 0); 7937 mtx_unlock(&sc->reg_lock); 7938 7939 if (sc->chip_params->nchan > 2) { 7940 sbuf_printf(sb, " channel 0 channel 1" 7941 " channel 2 channel 3"); 7942 sbuf_printf(sb, "\nCPL requests: %10u %10u %10u %10u", 7943 stats.req[0], stats.req[1], stats.req[2], stats.req[3]); 7944 sbuf_printf(sb, "\nCPL responses: %10u %10u %10u %10u", 7945 stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]); 7946 } else { 7947 sbuf_printf(sb, " channel 0 channel 1"); 7948 sbuf_printf(sb, "\nCPL requests: %10u %10u", 7949 stats.req[0], stats.req[1]); 7950 sbuf_printf(sb, "\nCPL responses: %10u %10u", 7951 stats.rsp[0], stats.rsp[1]); 7952 } 7953 7954 rc = sbuf_finish(sb); 7955 sbuf_delete(sb); 7956 7957 return (rc); 7958} 7959 7960static int 7961sysctl_ddp_stats(SYSCTL_HANDLER_ARGS) 7962{ 7963 struct adapter *sc = arg1; 7964 struct sbuf *sb; 7965 int rc; 7966 struct tp_usm_stats stats; 7967 7968 rc = sysctl_wire_old_buffer(req, 0); 7969 if (rc != 0) 7970 return(rc); 7971 7972 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 7973 if (sb == NULL) 7974 return (ENOMEM); 7975 7976 t4_get_usm_stats(sc, &stats, 1); 7977 7978 sbuf_printf(sb, "Frames: %u\n", stats.frames); 7979 sbuf_printf(sb, "Octets: %ju\n", stats.octets); 7980 sbuf_printf(sb, "Drops: %u", stats.drops); 7981 7982 rc = sbuf_finish(sb); 7983 sbuf_delete(sb); 7984 7985 return (rc); 7986} 7987 7988static const char * const devlog_level_strings[] = { 7989 [FW_DEVLOG_LEVEL_EMERG] = "EMERG", 7990 [FW_DEVLOG_LEVEL_CRIT] = "CRIT", 7991 [FW_DEVLOG_LEVEL_ERR] = "ERR", 7992 [FW_DEVLOG_LEVEL_NOTICE] = "NOTICE", 7993 [FW_DEVLOG_LEVEL_INFO] = "INFO", 7994 [FW_DEVLOG_LEVEL_DEBUG] = "DEBUG" 7995}; 7996 7997static const char * const devlog_facility_strings[] = { 7998 [FW_DEVLOG_FACILITY_CORE] = "CORE", 7999 [FW_DEVLOG_FACILITY_CF] = "CF", 8000 [FW_DEVLOG_FACILITY_SCHED] = "SCHED", 8001 [FW_DEVLOG_FACILITY_TIMER] = "TIMER", 8002 [FW_DEVLOG_FACILITY_RES] = "RES", 8003 [FW_DEVLOG_FACILITY_HW] = "HW", 8004 [FW_DEVLOG_FACILITY_FLR] = "FLR", 8005 [FW_DEVLOG_FACILITY_DMAQ] = "DMAQ", 8006 [FW_DEVLOG_FACILITY_PHY] = "PHY", 8007 [FW_DEVLOG_FACILITY_MAC] = "MAC", 8008 [FW_DEVLOG_FACILITY_PORT] = "PORT", 8009 [FW_DEVLOG_FACILITY_VI] = "VI", 8010 [FW_DEVLOG_FACILITY_FILTER] = "FILTER", 8011 [FW_DEVLOG_FACILITY_ACL] = "ACL", 8012 [FW_DEVLOG_FACILITY_TM] = "TM", 8013 [FW_DEVLOG_FACILITY_QFC] = "QFC", 8014 [FW_DEVLOG_FACILITY_DCB] = "DCB", 8015 
[FW_DEVLOG_FACILITY_ETH] = "ETH", 8016 [FW_DEVLOG_FACILITY_OFLD] = "OFLD", 8017 [FW_DEVLOG_FACILITY_RI] = "RI", 8018 [FW_DEVLOG_FACILITY_ISCSI] = "ISCSI", 8019 [FW_DEVLOG_FACILITY_FCOE] = "FCOE", 8020 [FW_DEVLOG_FACILITY_FOISCSI] = "FOISCSI", 8021 [FW_DEVLOG_FACILITY_FOFCOE] = "FOFCOE", 8022 [FW_DEVLOG_FACILITY_CHNET] = "CHNET", 8023}; 8024 8025static int 8026sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags) 8027{ 8028 int i, j, rc, nentries, first = 0; 8029 struct devlog_params *dparams = &sc->params.devlog; 8030 struct fw_devlog_e *buf, *e; 8031 uint64_t ftstamp = UINT64_MAX; 8032 8033 if (dparams->addr == 0) 8034 return (ENXIO); 8035 8036 MPASS(flags == M_WAITOK || flags == M_NOWAIT); 8037 buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags); 8038 if (buf == NULL) 8039 return (ENOMEM); 8040 8041 rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size); 8042 if (rc != 0) 8043 goto done; 8044 8045 nentries = dparams->size / sizeof(struct fw_devlog_e); 8046 for (i = 0; i < nentries; i++) { 8047 e = &buf[i]; 8048 8049 if (e->timestamp == 0) 8050 break; /* end */ 8051 8052 e->timestamp = be64toh(e->timestamp); 8053 e->seqno = be32toh(e->seqno); 8054 for (j = 0; j < 8; j++) 8055 e->params[j] = be32toh(e->params[j]); 8056 8057 if (e->timestamp < ftstamp) { 8058 ftstamp = e->timestamp; 8059 first = i; 8060 } 8061 } 8062 8063 if (buf[first].timestamp == 0) 8064 goto done; /* nothing in the log */ 8065 8066 sbuf_printf(sb, "%10s %15s %8s %8s %s\n", 8067 "Seq#", "Tstamp", "Level", "Facility", "Message"); 8068 8069 i = first; 8070 do { 8071 e = &buf[i]; 8072 if (e->timestamp == 0) 8073 break; /* end */ 8074 8075 sbuf_printf(sb, "%10d %15ju %8s %8s ", 8076 e->seqno, e->timestamp, 8077 (e->level < nitems(devlog_level_strings) ? 8078 devlog_level_strings[e->level] : "UNKNOWN"), 8079 (e->facility < nitems(devlog_facility_strings) ? 
8080 devlog_facility_strings[e->facility] : "UNKNOWN")); 8081 sbuf_printf(sb, e->fmt, e->params[0], e->params[1], 8082 e->params[2], e->params[3], e->params[4], 8083 e->params[5], e->params[6], e->params[7]); 8084 8085 if (++i == nentries) 8086 i = 0; 8087 } while (i != first); 8088done: 8089 free(buf, M_CXGBE); 8090 return (rc); 8091} 8092 8093static int 8094sysctl_devlog(SYSCTL_HANDLER_ARGS) 8095{ 8096 struct adapter *sc = arg1; 8097 int rc; 8098 struct sbuf *sb; 8099 8100 rc = sysctl_wire_old_buffer(req, 0); 8101 if (rc != 0) 8102 return (rc); 8103 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8104 if (sb == NULL) 8105 return (ENOMEM); 8106 8107 rc = sbuf_devlog(sc, sb, M_WAITOK); 8108 if (rc == 0) 8109 rc = sbuf_finish(sb); 8110 sbuf_delete(sb); 8111 return (rc); 8112} 8113 8114void 8115t4_os_dump_devlog(struct adapter *sc) 8116{ 8117 int rc; 8118 struct sbuf sb; 8119 8120 if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) 8121 return; 8122 rc = sbuf_devlog(sc, &sb, M_NOWAIT); 8123 if (rc == 0) { 8124 rc = sbuf_finish(&sb); 8125 if (rc == 0) { 8126 log(LOG_DEBUG, "%s: device log follows.\n%s", 8127 device_get_nameunit(sc->dev), sbuf_data(&sb)); 8128 } 8129 } 8130 sbuf_delete(&sb); 8131} 8132 8133static int 8134sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS) 8135{ 8136 struct adapter *sc = arg1; 8137 struct sbuf *sb; 8138 int rc; 8139 struct tp_fcoe_stats stats[MAX_NCHAN]; 8140 int i, nchan = sc->chip_params->nchan; 8141 8142 rc = sysctl_wire_old_buffer(req, 0); 8143 if (rc != 0) 8144 return (rc); 8145 8146 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 8147 if (sb == NULL) 8148 return (ENOMEM); 8149 8150 for (i = 0; i < nchan; i++) 8151 t4_get_fcoe_stats(sc, i, &stats[i], 1); 8152 8153 if (nchan > 2) { 8154 sbuf_printf(sb, " channel 0 channel 1" 8155 " channel 2 channel 3"); 8156 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju %16ju %16ju", 8157 stats[0].octets_ddp, stats[1].octets_ddp, 8158 stats[2].octets_ddp, stats[3].octets_ddp); 8159 sbuf_printf(sb, "\nframesDDP: %16u %16u %16u %16u", 8160 stats[0].frames_ddp, stats[1].frames_ddp, 8161 stats[2].frames_ddp, stats[3].frames_ddp); 8162 sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u", 8163 stats[0].frames_drop, stats[1].frames_drop, 8164 stats[2].frames_drop, stats[3].frames_drop); 8165 } else { 8166 sbuf_printf(sb, " channel 0 channel 1"); 8167 sbuf_printf(sb, "\noctetsDDP: %16ju %16ju", 8168 stats[0].octets_ddp, stats[1].octets_ddp); 8169 sbuf_printf(sb, "\nframesDDP: %16u %16u", 8170 stats[0].frames_ddp, stats[1].frames_ddp); 8171 sbuf_printf(sb, "\nframesDrop: %16u %16u", 8172 stats[0].frames_drop, stats[1].frames_drop); 8173 } 8174 8175 rc = sbuf_finish(sb); 8176 sbuf_delete(sb); 8177 8178 return (rc); 8179} 8180 8181static int 8182sysctl_hw_sched(SYSCTL_HANDLER_ARGS) 8183{ 8184 struct adapter *sc = arg1; 8185 struct sbuf *sb; 8186 int rc, i; 8187 unsigned int map, kbps, ipg, mode; 8188 unsigned int pace_tab[NTX_SCHED]; 8189 8190 rc = sysctl_wire_old_buffer(req, 0); 8191 if (rc != 0) 8192 return (rc); 8193 8194 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 8195 if (sb == NULL) 8196 return (ENOMEM); 8197 8198 map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP); 8199 mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG)); 8200 t4_read_pace_tbl(sc, pace_tab); 8201 8202 sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) " 8203 "Class IPG (0.1 ns) Flow IPG (us)"); 8204 8205 for (i = 0; i < NTX_SCHED; ++i, map >>= 2) { 8206 t4_get_tx_sched(sc, i, &kbps, &ipg, 1); 8207 sbuf_printf(sb, "\n %u %-5s %u ", i, 8208 (mode & (1 << i)) ? 
"flow" : "class", map & 3); 8209 if (kbps) 8210 sbuf_printf(sb, "%9u ", kbps); 8211 else 8212 sbuf_printf(sb, " disabled "); 8213 8214 if (ipg) 8215 sbuf_printf(sb, "%13u ", ipg); 8216 else 8217 sbuf_printf(sb, " disabled "); 8218 8219 if (pace_tab[i]) 8220 sbuf_printf(sb, "%10u", pace_tab[i]); 8221 else 8222 sbuf_printf(sb, " disabled"); 8223 } 8224 8225 rc = sbuf_finish(sb); 8226 sbuf_delete(sb); 8227 8228 return (rc); 8229} 8230 8231static int 8232sysctl_lb_stats(SYSCTL_HANDLER_ARGS) 8233{ 8234 struct adapter *sc = arg1; 8235 struct sbuf *sb; 8236 int rc, i, j; 8237 uint64_t *p0, *p1; 8238 struct lb_port_stats s[2]; 8239 static const char *stat_name[] = { 8240 "OctetsOK:", "FramesOK:", "BcastFrames:", "McastFrames:", 8241 "UcastFrames:", "ErrorFrames:", "Frames64:", "Frames65To127:", 8242 "Frames128To255:", "Frames256To511:", "Frames512To1023:", 8243 "Frames1024To1518:", "Frames1519ToMax:", "FramesDropped:", 8244 "BG0FramesDropped:", "BG1FramesDropped:", "BG2FramesDropped:", 8245 "BG3FramesDropped:", "BG0FramesTrunc:", "BG1FramesTrunc:", 8246 "BG2FramesTrunc:", "BG3FramesTrunc:" 8247 }; 8248 8249 rc = sysctl_wire_old_buffer(req, 0); 8250 if (rc != 0) 8251 return (rc); 8252 8253 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8254 if (sb == NULL) 8255 return (ENOMEM); 8256 8257 memset(s, 0, sizeof(s)); 8258 8259 for (i = 0; i < sc->chip_params->nchan; i += 2) { 8260 t4_get_lb_stats(sc, i, &s[0]); 8261 t4_get_lb_stats(sc, i + 1, &s[1]); 8262 8263 p0 = &s[0].octets; 8264 p1 = &s[1].octets; 8265 sbuf_printf(sb, "%s Loopback %u" 8266 " Loopback %u", i == 0 ? "" : "\n", i, i + 1); 8267 8268 for (j = 0; j < nitems(stat_name); j++) 8269 sbuf_printf(sb, "\n%-17s %20ju %20ju", stat_name[j], 8270 *p0++, *p1++); 8271 } 8272 8273 rc = sbuf_finish(sb); 8274 sbuf_delete(sb); 8275 8276 return (rc); 8277} 8278 8279static int 8280sysctl_linkdnrc(SYSCTL_HANDLER_ARGS) 8281{ 8282 int rc = 0; 8283 struct port_info *pi = arg1; 8284 struct link_config *lc = &pi->link_cfg; 8285 struct sbuf *sb; 8286 8287 rc = sysctl_wire_old_buffer(req, 0); 8288 if (rc != 0) 8289 return(rc); 8290 sb = sbuf_new_for_sysctl(NULL, NULL, 64, req); 8291 if (sb == NULL) 8292 return (ENOMEM); 8293 8294 if (lc->link_ok || lc->link_down_rc == 255) 8295 sbuf_printf(sb, "n/a"); 8296 else 8297 sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc)); 8298 8299 rc = sbuf_finish(sb); 8300 sbuf_delete(sb); 8301 8302 return (rc); 8303} 8304 8305struct mem_desc { 8306 unsigned int base; 8307 unsigned int limit; 8308 unsigned int idx; 8309}; 8310 8311static int 8312mem_desc_cmp(const void *a, const void *b) 8313{ 8314 return ((const struct mem_desc *)a)->base - 8315 ((const struct mem_desc *)b)->base; 8316} 8317 8318static void 8319mem_region_show(struct sbuf *sb, const char *name, unsigned int from, 8320 unsigned int to) 8321{ 8322 unsigned int size; 8323 8324 if (from == to) 8325 return; 8326 8327 size = to - from + 1; 8328 if (size == 0) 8329 return; 8330 8331 /* XXX: need humanize_number(3) in libkern for a more readable 'size' */ 8332 sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size); 8333} 8334 8335static int 8336sysctl_meminfo(SYSCTL_HANDLER_ARGS) 8337{ 8338 struct adapter *sc = arg1; 8339 struct sbuf *sb; 8340 int rc, i, n; 8341 uint32_t lo, hi, used, alloc; 8342 static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"}; 8343 static const char *region[] = { 8344 "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:", 8345 "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:", 8346 "Tx payload:", "Rx 
payload:", "LE hash:", "iSCSI region:", 8347 "TDDP region:", "TPT region:", "STAG region:", "RQ region:", 8348 "RQUDP region:", "PBL region:", "TXPBL region:", 8349 "DBVFIFO region:", "ULPRX state:", "ULPTX state:", 8350 "On-chip queues:", "TLS keys:", 8351 }; 8352 struct mem_desc avail[4]; 8353 struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */ 8354 struct mem_desc *md = mem; 8355 8356 rc = sysctl_wire_old_buffer(req, 0); 8357 if (rc != 0) 8358 return (rc); 8359 8360 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8361 if (sb == NULL) 8362 return (ENOMEM); 8363 8364 for (i = 0; i < nitems(mem); i++) { 8365 mem[i].limit = 0; 8366 mem[i].idx = i; 8367 } 8368 8369 /* Find and sort the populated memory ranges */ 8370 i = 0; 8371 lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 8372 if (lo & F_EDRAM0_ENABLE) { 8373 hi = t4_read_reg(sc, A_MA_EDRAM0_BAR); 8374 avail[i].base = G_EDRAM0_BASE(hi) << 20; 8375 avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20); 8376 avail[i].idx = 0; 8377 i++; 8378 } 8379 if (lo & F_EDRAM1_ENABLE) { 8380 hi = t4_read_reg(sc, A_MA_EDRAM1_BAR); 8381 avail[i].base = G_EDRAM1_BASE(hi) << 20; 8382 avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20); 8383 avail[i].idx = 1; 8384 i++; 8385 } 8386 if (lo & F_EXT_MEM_ENABLE) { 8387 hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 8388 avail[i].base = G_EXT_MEM_BASE(hi) << 20; 8389 avail[i].limit = avail[i].base + 8390 (G_EXT_MEM_SIZE(hi) << 20); 8391 avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */ 8392 i++; 8393 } 8394 if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) { 8395 hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 8396 avail[i].base = G_EXT_MEM1_BASE(hi) << 20; 8397 avail[i].limit = avail[i].base + 8398 (G_EXT_MEM1_SIZE(hi) << 20); 8399 avail[i].idx = 4; 8400 i++; 8401 } 8402 if (!i) /* no memory available */ 8403 return 0; 8404 qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp); 8405 8406 (md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR); 8407 (md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR); 8408 (md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR); 8409 (md++)->base = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 8410 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_BASE); 8411 (md++)->base = t4_read_reg(sc, A_TP_CMM_TIMER_BASE); 8412 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_RX_FLST_BASE); 8413 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_TX_FLST_BASE); 8414 (md++)->base = t4_read_reg(sc, A_TP_CMM_MM_PS_FLST_BASE); 8415 8416 /* the next few have explicit upper bounds */ 8417 md->base = t4_read_reg(sc, A_TP_PMM_TX_BASE); 8418 md->limit = md->base - 1 + 8419 t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE) * 8420 G_PMTXMAXPAGE(t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE)); 8421 md++; 8422 8423 md->base = t4_read_reg(sc, A_TP_PMM_RX_BASE); 8424 md->limit = md->base - 1 + 8425 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) * 8426 G_PMRXMAXPAGE(t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE)); 8427 md++; 8428 8429 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 8430 if (chip_id(sc) <= CHELSIO_T5) 8431 md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE); 8432 else 8433 md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR); 8434 md->limit = 0; 8435 } else { 8436 md->base = 0; 8437 md->idx = nitems(region); /* hide it */ 8438 } 8439 md++; 8440 8441#define ulp_region(reg) \ 8442 md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\ 8443 (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) 8444 8445 ulp_region(RX_ISCSI); 8446 ulp_region(RX_TDDP); 8447 ulp_region(TX_TPT); 8448 ulp_region(RX_STAG); 8449 ulp_region(RX_RQ); 8450 
ulp_region(RX_RQUDP); 8451 ulp_region(RX_PBL); 8452 ulp_region(TX_PBL); 8453#undef ulp_region 8454 8455 md->base = 0; 8456 md->idx = nitems(region); 8457 if (!is_t4(sc)) { 8458 uint32_t size = 0; 8459 uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2); 8460 uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE); 8461 8462 if (is_t5(sc)) { 8463 if (sge_ctrl & F_VFIFO_ENABLE) 8464 size = G_DBVFIFO_SIZE(fifo_size); 8465 } else 8466 size = G_T6_DBVFIFO_SIZE(fifo_size); 8467 8468 if (size) { 8469 md->base = G_BASEADDR(t4_read_reg(sc, 8470 A_SGE_DBVFIFO_BADDR)); 8471 md->limit = md->base + (size << 2) - 1; 8472 } 8473 } 8474 md++; 8475 8476 md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE); 8477 md->limit = 0; 8478 md++; 8479 md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE); 8480 md->limit = 0; 8481 md++; 8482 8483 md->base = sc->vres.ocq.start; 8484 if (sc->vres.ocq.size) 8485 md->limit = md->base + sc->vres.ocq.size - 1; 8486 else 8487 md->idx = nitems(region); /* hide it */ 8488 md++; 8489 8490 md->base = sc->vres.key.start; 8491 if (sc->vres.key.size) 8492 md->limit = md->base + sc->vres.key.size - 1; 8493 else 8494 md->idx = nitems(region); /* hide it */ 8495 md++; 8496 8497 /* add any address-space holes, there can be up to 3 */ 8498 for (n = 0; n < i - 1; n++) 8499 if (avail[n].limit < avail[n + 1].base) 8500 (md++)->base = avail[n].limit; 8501 if (avail[n].limit) 8502 (md++)->base = avail[n].limit; 8503 8504 n = md - mem; 8505 qsort(mem, n, sizeof(struct mem_desc), mem_desc_cmp); 8506 8507 for (lo = 0; lo < i; lo++) 8508 mem_region_show(sb, memory[avail[lo].idx], avail[lo].base, 8509 avail[lo].limit - 1); 8510 8511 sbuf_printf(sb, "\n"); 8512 for (i = 0; i < n; i++) { 8513 if (mem[i].idx >= nitems(region)) 8514 continue; /* skip holes */ 8515 if (!mem[i].limit) 8516 mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0; 8517 mem_region_show(sb, region[mem[i].idx], mem[i].base, 8518 mem[i].limit); 8519 } 8520 8521 sbuf_printf(sb, "\n"); 8522 lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR); 8523 hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1; 8524 mem_region_show(sb, "uP RAM:", lo, hi); 8525 8526 lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR); 8527 hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1; 8528 mem_region_show(sb, "uP Extmem2:", lo, hi); 8529 8530 lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE); 8531 sbuf_printf(sb, "\n%u Rx pages of size %uKiB for %u channels\n", 8532 G_PMRXMAXPAGE(lo), 8533 t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, 8534 (lo & F_PMRXNUMCHN) ? 2 : 1); 8535 8536 lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE); 8537 hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE); 8538 sbuf_printf(sb, "%u Tx pages of size %u%ciB for %u channels\n", 8539 G_PMTXMAXPAGE(lo), 8540 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10), 8541 hi >= (1 << 20) ? 
'M' : 'K', 1 << G_PMTXNUMCHN(lo)); 8542 sbuf_printf(sb, "%u p-structs\n", 8543 t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT)); 8544 8545 for (i = 0; i < 4; i++) { 8546 if (chip_id(sc) > CHELSIO_T5) 8547 lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4); 8548 else 8549 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4); 8550 if (is_t5(sc)) { 8551 used = G_T5_USED(lo); 8552 alloc = G_T5_ALLOC(lo); 8553 } else { 8554 used = G_USED(lo); 8555 alloc = G_ALLOC(lo); 8556 } 8557 /* For T6 these are MAC buffer groups */ 8558 sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated", 8559 i, used, alloc); 8560 } 8561 for (i = 0; i < sc->chip_params->nchan; i++) { 8562 if (chip_id(sc) > CHELSIO_T5) 8563 lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4); 8564 else 8565 lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4); 8566 if (is_t5(sc)) { 8567 used = G_T5_USED(lo); 8568 alloc = G_T5_ALLOC(lo); 8569 } else { 8570 used = G_USED(lo); 8571 alloc = G_ALLOC(lo); 8572 } 8573 /* For T6 these are MAC buffer groups */ 8574 sbuf_printf(sb, 8575 "\nLoopback %d using %u pages out of %u allocated", 8576 i, used, alloc); 8577 } 8578 8579 rc = sbuf_finish(sb); 8580 sbuf_delete(sb); 8581 8582 return (rc); 8583} 8584 8585static inline void 8586tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask) 8587{ 8588 *mask = x | y; 8589 y = htobe64(y); 8590 memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN); 8591} 8592 8593static int 8594sysctl_mps_tcam(SYSCTL_HANDLER_ARGS) 8595{ 8596 struct adapter *sc = arg1; 8597 struct sbuf *sb; 8598 int rc, i; 8599 8600 MPASS(chip_id(sc) <= CHELSIO_T5); 8601 8602 rc = sysctl_wire_old_buffer(req, 0); 8603 if (rc != 0) 8604 return (rc); 8605 8606 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8607 if (sb == NULL) 8608 return (ENOMEM); 8609 8610 sbuf_printf(sb, 8611 "Idx Ethernet address Mask Vld Ports PF" 8612 " VF Replication P0 P1 P2 P3 ML"); 8613 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 8614 uint64_t tcamx, tcamy, mask; 8615 uint32_t cls_lo, cls_hi; 8616 uint8_t addr[ETHER_ADDR_LEN]; 8617 8618 tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i)); 8619 tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i)); 8620 if (tcamx & tcamy) 8621 continue; 8622 tcamxy2valmask(tcamx, tcamy, addr, &mask); 8623 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 8624 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 8625 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx" 8626 " %c %#x%4u%4d", i, addr[0], addr[1], addr[2], 8627 addr[3], addr[4], addr[5], (uintmax_t)mask, 8628 (cls_lo & F_SRAM_VLD) ? 'Y' : 'N', 8629 G_PORTMAP(cls_hi), G_PF(cls_lo), 8630 (cls_lo & F_VF_VALID) ? 
G_VF(cls_lo) : -1); 8631 8632 if (cls_lo & F_REPLICATE) { 8633 struct fw_ldst_cmd ldst_cmd; 8634 8635 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 8636 ldst_cmd.op_to_addrspace = 8637 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 8638 F_FW_CMD_REQUEST | F_FW_CMD_READ | 8639 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 8640 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 8641 ldst_cmd.u.mps.rplc.fid_idx = 8642 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 8643 V_FW_LDST_CMD_IDX(i)); 8644 8645 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 8646 "t4mps"); 8647 if (rc) 8648 break; 8649 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 8650 sizeof(ldst_cmd), &ldst_cmd); 8651 end_synchronized_op(sc, 0); 8652 8653 if (rc != 0) { 8654 sbuf_printf(sb, "%36d", rc); 8655 rc = 0; 8656 } else { 8657 sbuf_printf(sb, " %08x %08x %08x %08x", 8658 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 8659 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 8660 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 8661 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 8662 } 8663 } else 8664 sbuf_printf(sb, "%36s", ""); 8665 8666 sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo), 8667 G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo), 8668 G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf); 8669 } 8670 8671 if (rc) 8672 (void) sbuf_finish(sb); 8673 else 8674 rc = sbuf_finish(sb); 8675 sbuf_delete(sb); 8676 8677 return (rc); 8678} 8679 8680static int 8681sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS) 8682{ 8683 struct adapter *sc = arg1; 8684 struct sbuf *sb; 8685 int rc, i; 8686 8687 MPASS(chip_id(sc) > CHELSIO_T5); 8688 8689 rc = sysctl_wire_old_buffer(req, 0); 8690 if (rc != 0) 8691 return (rc); 8692 8693 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 8694 if (sb == NULL) 8695 return (ENOMEM); 8696 8697 sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask" 8698 " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF" 8699 " Replication" 8700 " P0 P1 P2 P3 ML\n"); 8701 8702 for (i = 0; i < sc->chip_params->mps_tcam_size; i++) { 8703 uint8_t dip_hit, vlan_vld, lookup_type, port_num; 8704 uint16_t ivlan; 8705 uint64_t tcamx, tcamy, val, mask; 8706 uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy; 8707 uint8_t addr[ETHER_ADDR_LEN]; 8708 8709 ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0); 8710 if (i < 256) 8711 ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0); 8712 else 8713 ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1); 8714 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 8715 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 8716 tcamy = G_DMACH(val) << 32; 8717 tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 8718 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 8719 lookup_type = G_DATALKPTYPE(data2); 8720 port_num = G_DATAPORTNUM(data2); 8721 if (lookup_type && lookup_type != M_DATALKPTYPE) { 8722 /* Inner header VNI */ 8723 vniy = ((data2 & F_DATAVIDH2) << 23) | 8724 (G_DATAVIDH1(data2) << 16) | G_VIDL(val); 8725 dip_hit = data2 & F_DATADIPHIT; 8726 vlan_vld = 0; 8727 } else { 8728 vniy = 0; 8729 dip_hit = 0; 8730 vlan_vld = data2 & F_DATAVIDH2; 8731 ivlan = G_VIDL(val); 8732 } 8733 8734 ctl |= V_CTLXYBITSEL(1); 8735 t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl); 8736 val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1); 8737 tcamx = G_DMACH(val) << 32; 8738 tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1); 8739 data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1); 8740 if (lookup_type && lookup_type != M_DATALKPTYPE) { 8741 /* Inner header VNI mask */ 8742 vnix = ((data2 & F_DATAVIDH2) << 23) | 8743 (G_DATAVIDH1(data2) << 
16) | G_VIDL(val); 8744 } else 8745 vnix = 0; 8746 8747 if (tcamx & tcamy) 8748 continue; 8749 tcamxy2valmask(tcamx, tcamy, addr, &mask); 8750 8751 cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i)); 8752 cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i)); 8753 8754 if (lookup_type && lookup_type != M_DATALKPTYPE) { 8755 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 8756 "%012jx %06x %06x - - %3c" 8757 " 'I' %4x %3c %#x%4u%4d", i, addr[0], 8758 addr[1], addr[2], addr[3], addr[4], addr[5], 8759 (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N', 8760 port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 8761 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 8762 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 8763 } else { 8764 sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x " 8765 "%012jx - - ", i, addr[0], addr[1], 8766 addr[2], addr[3], addr[4], addr[5], 8767 (uintmax_t)mask); 8768 8769 if (vlan_vld) 8770 sbuf_printf(sb, "%4u Y ", ivlan); 8771 else 8772 sbuf_printf(sb, " - N "); 8773 8774 sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d", 8775 lookup_type ? 'I' : 'O', port_num, 8776 cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N', 8777 G_PORTMAP(cls_hi), G_T6_PF(cls_lo), 8778 cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1); 8779 } 8780 8781 8782 if (cls_lo & F_T6_REPLICATE) { 8783 struct fw_ldst_cmd ldst_cmd; 8784 8785 memset(&ldst_cmd, 0, sizeof(ldst_cmd)); 8786 ldst_cmd.op_to_addrspace = 8787 htobe32(V_FW_CMD_OP(FW_LDST_CMD) | 8788 F_FW_CMD_REQUEST | F_FW_CMD_READ | 8789 V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS)); 8790 ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd)); 8791 ldst_cmd.u.mps.rplc.fid_idx = 8792 htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) | 8793 V_FW_LDST_CMD_IDX(i)); 8794 8795 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, 8796 "t6mps"); 8797 if (rc) 8798 break; 8799 rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd, 8800 sizeof(ldst_cmd), &ldst_cmd); 8801 end_synchronized_op(sc, 0); 8802 8803 if (rc != 0) { 8804 sbuf_printf(sb, "%72d", rc); 8805 rc = 0; 8806 } else { 8807 sbuf_printf(sb, " %08x %08x %08x %08x" 8808 " %08x %08x %08x %08x", 8809 be32toh(ldst_cmd.u.mps.rplc.rplc255_224), 8810 be32toh(ldst_cmd.u.mps.rplc.rplc223_192), 8811 be32toh(ldst_cmd.u.mps.rplc.rplc191_160), 8812 be32toh(ldst_cmd.u.mps.rplc.rplc159_128), 8813 be32toh(ldst_cmd.u.mps.rplc.rplc127_96), 8814 be32toh(ldst_cmd.u.mps.rplc.rplc95_64), 8815 be32toh(ldst_cmd.u.mps.rplc.rplc63_32), 8816 be32toh(ldst_cmd.u.mps.rplc.rplc31_0)); 8817 } 8818 } else 8819 sbuf_printf(sb, "%72s", ""); 8820 8821 sbuf_printf(sb, "%4u%3u%3u%3u %#x", 8822 G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo), 8823 G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo), 8824 (cls_lo >> S_T6_MULTILISTEN0) & 0xf); 8825 } 8826 8827 if (rc) 8828 (void) sbuf_finish(sb); 8829 else 8830 rc = sbuf_finish(sb); 8831 sbuf_delete(sb); 8832 8833 return (rc); 8834} 8835 8836static int 8837sysctl_path_mtus(SYSCTL_HANDLER_ARGS) 8838{ 8839 struct adapter *sc = arg1; 8840 struct sbuf *sb; 8841 int rc; 8842 uint16_t mtus[NMTUS]; 8843 8844 rc = sysctl_wire_old_buffer(req, 0); 8845 if (rc != 0) 8846 return (rc); 8847 8848 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 8849 if (sb == NULL) 8850 return (ENOMEM); 8851 8852 t4_read_mtu_tbl(sc, mtus, NULL); 8853 8854 sbuf_printf(sb, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", 8855 mtus[0], mtus[1], mtus[2], mtus[3], mtus[4], mtus[5], mtus[6], 8856 mtus[7], mtus[8], mtus[9], mtus[10], mtus[11], mtus[12], mtus[13], 8857 mtus[14], mtus[15]); 8858 8859 rc = sbuf_finish(sb); 8860 sbuf_delete(sb); 8861 8862 return (rc); 8863} 8864 8865static int 
8866sysctl_pm_stats(SYSCTL_HANDLER_ARGS) 8867{ 8868 struct adapter *sc = arg1; 8869 struct sbuf *sb; 8870 int rc, i; 8871 uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS]; 8872 uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS]; 8873 static const char *tx_stats[MAX_PM_NSTATS] = { 8874 "Read:", "Write bypass:", "Write mem:", "Bypass + mem:", 8875 "Tx FIFO wait", NULL, "Tx latency" 8876 }; 8877 static const char *rx_stats[MAX_PM_NSTATS] = { 8878 "Read:", "Write bypass:", "Write mem:", "Flush:", 8879 "Rx FIFO wait", NULL, "Rx latency" 8880 }; 8881 8882 rc = sysctl_wire_old_buffer(req, 0); 8883 if (rc != 0) 8884 return (rc); 8885 8886 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 8887 if (sb == NULL) 8888 return (ENOMEM); 8889 8890 t4_pmtx_get_stats(sc, tx_cnt, tx_cyc); 8891 t4_pmrx_get_stats(sc, rx_cnt, rx_cyc); 8892 8893 sbuf_printf(sb, " Tx pcmds Tx bytes"); 8894 for (i = 0; i < 4; i++) { 8895 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 8896 tx_cyc[i]); 8897 } 8898 8899 sbuf_printf(sb, "\n Rx pcmds Rx bytes"); 8900 for (i = 0; i < 4; i++) { 8901 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 8902 rx_cyc[i]); 8903 } 8904 8905 if (chip_id(sc) > CHELSIO_T5) { 8906 sbuf_printf(sb, 8907 "\n Total wait Total occupancy"); 8908 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 8909 tx_cyc[i]); 8910 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 8911 rx_cyc[i]); 8912 8913 i += 2; 8914 MPASS(i < nitems(tx_stats)); 8915 8916 sbuf_printf(sb, 8917 "\n Reads Total wait"); 8918 sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i], 8919 tx_cyc[i]); 8920 sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i], 8921 rx_cyc[i]); 8922 } 8923 8924 rc = sbuf_finish(sb); 8925 sbuf_delete(sb); 8926 8927 return (rc); 8928} 8929 8930static int 8931sysctl_rdma_stats(SYSCTL_HANDLER_ARGS) 8932{ 8933 struct adapter *sc = arg1; 8934 struct sbuf *sb; 8935 int rc; 8936 struct tp_rdma_stats stats; 8937 8938 rc = sysctl_wire_old_buffer(req, 0); 8939 if (rc != 0) 8940 return (rc); 8941 8942 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 8943 if (sb == NULL) 8944 return (ENOMEM); 8945 8946 mtx_lock(&sc->reg_lock); 8947 t4_tp_get_rdma_stats(sc, &stats, 0); 8948 mtx_unlock(&sc->reg_lock); 8949 8950 sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod); 8951 sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt); 8952 8953 rc = sbuf_finish(sb); 8954 sbuf_delete(sb); 8955 8956 return (rc); 8957} 8958 8959static int 8960sysctl_tcp_stats(SYSCTL_HANDLER_ARGS) 8961{ 8962 struct adapter *sc = arg1; 8963 struct sbuf *sb; 8964 int rc; 8965 struct tp_tcp_stats v4, v6; 8966 8967 rc = sysctl_wire_old_buffer(req, 0); 8968 if (rc != 0) 8969 return (rc); 8970 8971 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 8972 if (sb == NULL) 8973 return (ENOMEM); 8974 8975 mtx_lock(&sc->reg_lock); 8976 t4_tp_get_tcp_stats(sc, &v4, &v6, 0); 8977 mtx_unlock(&sc->reg_lock); 8978 8979 sbuf_printf(sb, 8980 " IP IPv6\n"); 8981 sbuf_printf(sb, "OutRsts: %20u %20u\n", 8982 v4.tcp_out_rsts, v6.tcp_out_rsts); 8983 sbuf_printf(sb, "InSegs: %20ju %20ju\n", 8984 v4.tcp_in_segs, v6.tcp_in_segs); 8985 sbuf_printf(sb, "OutSegs: %20ju %20ju\n", 8986 v4.tcp_out_segs, v6.tcp_out_segs); 8987 sbuf_printf(sb, "RetransSegs: %20ju %20ju", 8988 v4.tcp_retrans_segs, v6.tcp_retrans_segs); 8989 8990 rc = sbuf_finish(sb); 8991 sbuf_delete(sb); 8992 8993 return (rc); 8994} 8995 8996static int 8997sysctl_tids(SYSCTL_HANDLER_ARGS) 8998{ 8999 struct adapter *sc = arg1; 9000 struct sbuf *sb; 
9001 int rc; 9002 struct tid_info *t = &sc->tids; 9003 9004 rc = sysctl_wire_old_buffer(req, 0); 9005 if (rc != 0) 9006 return (rc); 9007 9008 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 9009 if (sb == NULL) 9010 return (ENOMEM); 9011 9012 if (t->natids) { 9013 sbuf_printf(sb, "ATID range: 0-%u, in use: %u\n", t->natids - 1, 9014 t->atids_in_use); 9015 } 9016 9017 if (t->nhpftids) { 9018 sbuf_printf(sb, "HPFTID range: %u-%u, in use: %u\n", 9019 t->hpftid_base, t->hpftid_end, t->hpftids_in_use); 9020 } 9021 9022 if (t->ntids) { 9023 sbuf_printf(sb, "TID range: "); 9024 if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) { 9025 uint32_t b, hb; 9026 9027 if (chip_id(sc) <= CHELSIO_T5) { 9028 b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4; 9029 hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4; 9030 } else { 9031 b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX); 9032 hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE); 9033 } 9034 9035 if (b) 9036 sbuf_printf(sb, "%u-%u, ", t->tid_base, b - 1); 9037 sbuf_printf(sb, "%u-%u", hb, t->ntids - 1); 9038 } else { 9039 sbuf_printf(sb, "%u-%u", t->tid_base, t->tid_base + 9040 t->ntids - 1); 9041 } 9042 sbuf_printf(sb, ", in use: %u\n", 9043 atomic_load_acq_int(&t->tids_in_use)); 9044 } 9045 9046 if (t->nstids) { 9047 sbuf_printf(sb, "STID range: %u-%u, in use: %u\n", t->stid_base, 9048 t->stid_base + t->nstids - 1, t->stids_in_use); 9049 } 9050 9051 if (t->nftids) { 9052 sbuf_printf(sb, "FTID range: %u-%u, in use: %u\n", t->ftid_base, 9053 t->ftid_end, t->ftids_in_use); 9054 } 9055 9056 if (t->netids) { 9057 sbuf_printf(sb, "ETID range: %u-%u, in use: %u\n", t->etid_base, 9058 t->etid_base + t->netids - 1, t->etids_in_use); 9059 } 9060 9061 sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users", 9062 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4), 9063 t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6)); 9064 9065 rc = sbuf_finish(sb); 9066 sbuf_delete(sb); 9067 9068 return (rc); 9069} 9070 9071static int 9072sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS) 9073{ 9074 struct adapter *sc = arg1; 9075 struct sbuf *sb; 9076 int rc; 9077 struct tp_err_stats stats; 9078 9079 rc = sysctl_wire_old_buffer(req, 0); 9080 if (rc != 0) 9081 return (rc); 9082 9083 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 9084 if (sb == NULL) 9085 return (ENOMEM); 9086 9087 mtx_lock(&sc->reg_lock); 9088 t4_tp_get_err_stats(sc, &stats, 0); 9089 mtx_unlock(&sc->reg_lock); 9090 9091 if (sc->chip_params->nchan > 2) { 9092 sbuf_printf(sb, " channel 0 channel 1" 9093 " channel 2 channel 3\n"); 9094 sbuf_printf(sb, "macInErrs: %10u %10u %10u %10u\n", 9095 stats.mac_in_errs[0], stats.mac_in_errs[1], 9096 stats.mac_in_errs[2], stats.mac_in_errs[3]); 9097 sbuf_printf(sb, "hdrInErrs: %10u %10u %10u %10u\n", 9098 stats.hdr_in_errs[0], stats.hdr_in_errs[1], 9099 stats.hdr_in_errs[2], stats.hdr_in_errs[3]); 9100 sbuf_printf(sb, "tcpInErrs: %10u %10u %10u %10u\n", 9101 stats.tcp_in_errs[0], stats.tcp_in_errs[1], 9102 stats.tcp_in_errs[2], stats.tcp_in_errs[3]); 9103 sbuf_printf(sb, "tcp6InErrs: %10u %10u %10u %10u\n", 9104 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1], 9105 stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]); 9106 sbuf_printf(sb, "tnlCongDrops: %10u %10u %10u %10u\n", 9107 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1], 9108 stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]); 9109 sbuf_printf(sb, "tnlTxDrops: %10u %10u %10u %10u\n", 9110 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1], 9111 stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]); 9112 sbuf_printf(sb, "ofldVlanDrops: %10u %10u %10u %10u\n", 9113 
stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1], 9114 stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]); 9115 sbuf_printf(sb, "ofldChanDrops: %10u %10u %10u %10u\n\n", 9116 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1], 9117 stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]); 9118 } else { 9119 sbuf_printf(sb, " channel 0 channel 1\n"); 9120 sbuf_printf(sb, "macInErrs: %10u %10u\n", 9121 stats.mac_in_errs[0], stats.mac_in_errs[1]); 9122 sbuf_printf(sb, "hdrInErrs: %10u %10u\n", 9123 stats.hdr_in_errs[0], stats.hdr_in_errs[1]); 9124 sbuf_printf(sb, "tcpInErrs: %10u %10u\n", 9125 stats.tcp_in_errs[0], stats.tcp_in_errs[1]); 9126 sbuf_printf(sb, "tcp6InErrs: %10u %10u\n", 9127 stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]); 9128 sbuf_printf(sb, "tnlCongDrops: %10u %10u\n", 9129 stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]); 9130 sbuf_printf(sb, "tnlTxDrops: %10u %10u\n", 9131 stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]); 9132 sbuf_printf(sb, "ofldVlanDrops: %10u %10u\n", 9133 stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]); 9134 sbuf_printf(sb, "ofldChanDrops: %10u %10u\n\n", 9135 stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]); 9136 } 9137 9138 sbuf_printf(sb, "ofldNoNeigh: %u\nofldCongDefer: %u", 9139 stats.ofld_no_neigh, stats.ofld_cong_defer); 9140 9141 rc = sbuf_finish(sb); 9142 sbuf_delete(sb); 9143 9144 return (rc); 9145} 9146 9147static int 9148sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS) 9149{ 9150 struct adapter *sc = arg1; 9151 struct tp_params *tpp = &sc->params.tp; 9152 u_int mask; 9153 int rc; 9154 9155 mask = tpp->la_mask >> 16; 9156 rc = sysctl_handle_int(oidp, &mask, 0, req); 9157 if (rc != 0 || req->newptr == NULL) 9158 return (rc); 9159 if (mask > 0xffff) 9160 return (EINVAL); 9161 tpp->la_mask = mask << 16; 9162 t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask); 9163 9164 return (0); 9165} 9166 9167struct field_desc { 9168 const char *name; 9169 u_int start; 9170 u_int width; 9171}; 9172 9173static void 9174field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f) 9175{ 9176 char buf[32]; 9177 int line_size = 0; 9178 9179 while (f->name) { 9180 uint64_t mask = (1ULL << f->width) - 1; 9181 int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name, 9182 ((uintmax_t)v >> f->start) & mask); 9183 9184 if (line_size + len >= 79) { 9185 line_size = 8; 9186 sbuf_printf(sb, "\n "); 9187 } 9188 sbuf_printf(sb, "%s ", buf); 9189 line_size += len + 1; 9190 f++; 9191 } 9192 sbuf_printf(sb, "\n"); 9193} 9194 9195static const struct field_desc tp_la0[] = { 9196 { "RcfOpCodeOut", 60, 4 }, 9197 { "State", 56, 4 }, 9198 { "WcfState", 52, 4 }, 9199 { "RcfOpcSrcOut", 50, 2 }, 9200 { "CRxError", 49, 1 }, 9201 { "ERxError", 48, 1 }, 9202 { "SanityFailed", 47, 1 }, 9203 { "SpuriousMsg", 46, 1 }, 9204 { "FlushInputMsg", 45, 1 }, 9205 { "FlushInputCpl", 44, 1 }, 9206 { "RssUpBit", 43, 1 }, 9207 { "RssFilterHit", 42, 1 }, 9208 { "Tid", 32, 10 }, 9209 { "InitTcb", 31, 1 }, 9210 { "LineNumber", 24, 7 }, 9211 { "Emsg", 23, 1 }, 9212 { "EdataOut", 22, 1 }, 9213 { "Cmsg", 21, 1 }, 9214 { "CdataOut", 20, 1 }, 9215 { "EreadPdu", 19, 1 }, 9216 { "CreadPdu", 18, 1 }, 9217 { "TunnelPkt", 17, 1 }, 9218 { "RcfPeerFin", 16, 1 }, 9219 { "RcfReasonOut", 12, 4 }, 9220 { "TxCchannel", 10, 2 }, 9221 { "RcfTxChannel", 8, 2 }, 9222 { "RxEchannel", 6, 2 }, 9223 { "RcfRxChannel", 5, 1 }, 9224 { "RcfDataOutSrdy", 4, 1 }, 9225 { "RxDvld", 3, 1 }, 9226 { "RxOoDvld", 2, 1 }, 9227 { "RxCongestion", 1, 1 }, 9228 { "TxCongestion", 0, 1 }, 9229 { NULL } 9230}; 9231 9232static 
const struct field_desc tp_la1[] = { 9233 { "CplCmdIn", 56, 8 }, 9234 { "CplCmdOut", 48, 8 }, 9235 { "ESynOut", 47, 1 }, 9236 { "EAckOut", 46, 1 }, 9237 { "EFinOut", 45, 1 }, 9238 { "ERstOut", 44, 1 }, 9239 { "SynIn", 43, 1 }, 9240 { "AckIn", 42, 1 }, 9241 { "FinIn", 41, 1 }, 9242 { "RstIn", 40, 1 }, 9243 { "DataIn", 39, 1 }, 9244 { "DataInVld", 38, 1 }, 9245 { "PadIn", 37, 1 }, 9246 { "RxBufEmpty", 36, 1 }, 9247 { "RxDdp", 35, 1 }, 9248 { "RxFbCongestion", 34, 1 }, 9249 { "TxFbCongestion", 33, 1 }, 9250 { "TxPktSumSrdy", 32, 1 }, 9251 { "RcfUlpType", 28, 4 }, 9252 { "Eread", 27, 1 }, 9253 { "Ebypass", 26, 1 }, 9254 { "Esave", 25, 1 }, 9255 { "Static0", 24, 1 }, 9256 { "Cread", 23, 1 }, 9257 { "Cbypass", 22, 1 }, 9258 { "Csave", 21, 1 }, 9259 { "CPktOut", 20, 1 }, 9260 { "RxPagePoolFull", 18, 2 }, 9261 { "RxLpbkPkt", 17, 1 }, 9262 { "TxLpbkPkt", 16, 1 }, 9263 { "RxVfValid", 15, 1 }, 9264 { "SynLearned", 14, 1 }, 9265 { "SetDelEntry", 13, 1 }, 9266 { "SetInvEntry", 12, 1 }, 9267 { "CpcmdDvld", 11, 1 }, 9268 { "CpcmdSave", 10, 1 }, 9269 { "RxPstructsFull", 8, 2 }, 9270 { "EpcmdDvld", 7, 1 }, 9271 { "EpcmdFlush", 6, 1 }, 9272 { "EpcmdTrimPrefix", 5, 1 }, 9273 { "EpcmdTrimPostfix", 4, 1 }, 9274 { "ERssIp4Pkt", 3, 1 }, 9275 { "ERssIp6Pkt", 2, 1 }, 9276 { "ERssTcpUdpPkt", 1, 1 }, 9277 { "ERssFceFipPkt", 0, 1 }, 9278 { NULL } 9279}; 9280 9281static const struct field_desc tp_la2[] = { 9282 { "CplCmdIn", 56, 8 }, 9283 { "MpsVfVld", 55, 1 }, 9284 { "MpsPf", 52, 3 }, 9285 { "MpsVf", 44, 8 }, 9286 { "SynIn", 43, 1 }, 9287 { "AckIn", 42, 1 }, 9288 { "FinIn", 41, 1 }, 9289 { "RstIn", 40, 1 }, 9290 { "DataIn", 39, 1 }, 9291 { "DataInVld", 38, 1 }, 9292 { "PadIn", 37, 1 }, 9293 { "RxBufEmpty", 36, 1 }, 9294 { "RxDdp", 35, 1 }, 9295 { "RxFbCongestion", 34, 1 }, 9296 { "TxFbCongestion", 33, 1 }, 9297 { "TxPktSumSrdy", 32, 1 }, 9298 { "RcfUlpType", 28, 4 }, 9299 { "Eread", 27, 1 }, 9300 { "Ebypass", 26, 1 }, 9301 { "Esave", 25, 1 }, 9302 { "Static0", 24, 1 }, 9303 { "Cread", 23, 1 }, 9304 { "Cbypass", 22, 1 }, 9305 { "Csave", 21, 1 }, 9306 { "CPktOut", 20, 1 }, 9307 { "RxPagePoolFull", 18, 2 }, 9308 { "RxLpbkPkt", 17, 1 }, 9309 { "TxLpbkPkt", 16, 1 }, 9310 { "RxVfValid", 15, 1 }, 9311 { "SynLearned", 14, 1 }, 9312 { "SetDelEntry", 13, 1 }, 9313 { "SetInvEntry", 12, 1 }, 9314 { "CpcmdDvld", 11, 1 }, 9315 { "CpcmdSave", 10, 1 }, 9316 { "RxPstructsFull", 8, 2 }, 9317 { "EpcmdDvld", 7, 1 }, 9318 { "EpcmdFlush", 6, 1 }, 9319 { "EpcmdTrimPrefix", 5, 1 }, 9320 { "EpcmdTrimPostfix", 4, 1 }, 9321 { "ERssIp4Pkt", 3, 1 }, 9322 { "ERssIp6Pkt", 2, 1 }, 9323 { "ERssTcpUdpPkt", 1, 1 }, 9324 { "ERssFceFipPkt", 0, 1 }, 9325 { NULL } 9326}; 9327 9328static void 9329tp_la_show(struct sbuf *sb, uint64_t *p, int idx) 9330{ 9331 9332 field_desc_show(sb, *p, tp_la0); 9333} 9334 9335static void 9336tp_la_show2(struct sbuf *sb, uint64_t *p, int idx) 9337{ 9338 9339 if (idx) 9340 sbuf_printf(sb, "\n"); 9341 field_desc_show(sb, p[0], tp_la0); 9342 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 9343 field_desc_show(sb, p[1], tp_la0); 9344} 9345 9346static void 9347tp_la_show3(struct sbuf *sb, uint64_t *p, int idx) 9348{ 9349 9350 if (idx) 9351 sbuf_printf(sb, "\n"); 9352 field_desc_show(sb, p[0], tp_la0); 9353 if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL) 9354 field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? 
tp_la2 : tp_la1); 9355} 9356 9357static int 9358sysctl_tp_la(SYSCTL_HANDLER_ARGS) 9359{ 9360 struct adapter *sc = arg1; 9361 struct sbuf *sb; 9362 uint64_t *buf, *p; 9363 int rc; 9364 u_int i, inc; 9365 void (*show_func)(struct sbuf *, uint64_t *, int); 9366 9367 rc = sysctl_wire_old_buffer(req, 0); 9368 if (rc != 0) 9369 return (rc); 9370 9371 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 9372 if (sb == NULL) 9373 return (ENOMEM); 9374 9375 buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK); 9376 9377 t4_tp_read_la(sc, buf, NULL); 9378 p = buf; 9379 9380 switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) { 9381 case 2: 9382 inc = 2; 9383 show_func = tp_la_show2; 9384 break; 9385 case 3: 9386 inc = 2; 9387 show_func = tp_la_show3; 9388 break; 9389 default: 9390 inc = 1; 9391 show_func = tp_la_show; 9392 } 9393 9394 for (i = 0; i < TPLA_SIZE / inc; i++, p += inc) 9395 (*show_func)(sb, p, i); 9396 9397 rc = sbuf_finish(sb); 9398 sbuf_delete(sb); 9399 free(buf, M_CXGBE); 9400 return (rc); 9401} 9402 9403static int 9404sysctl_tx_rate(SYSCTL_HANDLER_ARGS) 9405{ 9406 struct adapter *sc = arg1; 9407 struct sbuf *sb; 9408 int rc; 9409 u64 nrate[MAX_NCHAN], orate[MAX_NCHAN]; 9410 9411 rc = sysctl_wire_old_buffer(req, 0); 9412 if (rc != 0) 9413 return (rc); 9414 9415 sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); 9416 if (sb == NULL) 9417 return (ENOMEM); 9418 9419 t4_get_chan_txrate(sc, nrate, orate); 9420 9421 if (sc->chip_params->nchan > 2) { 9422 sbuf_printf(sb, " channel 0 channel 1" 9423 " channel 2 channel 3\n"); 9424 sbuf_printf(sb, "NIC B/s: %10ju %10ju %10ju %10ju\n", 9425 nrate[0], nrate[1], nrate[2], nrate[3]); 9426 sbuf_printf(sb, "Offload B/s: %10ju %10ju %10ju %10ju", 9427 orate[0], orate[1], orate[2], orate[3]); 9428 } else { 9429 sbuf_printf(sb, " channel 0 channel 1\n"); 9430 sbuf_printf(sb, "NIC B/s: %10ju %10ju\n", 9431 nrate[0], nrate[1]); 9432 sbuf_printf(sb, "Offload B/s: %10ju %10ju", 9433 orate[0], orate[1]); 9434 } 9435 9436 rc = sbuf_finish(sb); 9437 sbuf_delete(sb); 9438 9439 return (rc); 9440} 9441 9442static int 9443sysctl_ulprx_la(SYSCTL_HANDLER_ARGS) 9444{ 9445 struct adapter *sc = arg1; 9446 struct sbuf *sb; 9447 uint32_t *buf, *p; 9448 int rc, i; 9449 9450 rc = sysctl_wire_old_buffer(req, 0); 9451 if (rc != 0) 9452 return (rc); 9453 9454 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 9455 if (sb == NULL) 9456 return (ENOMEM); 9457 9458 buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE, 9459 M_ZERO | M_WAITOK); 9460 9461 t4_ulprx_read_la(sc, buf); 9462 p = buf; 9463 9464 sbuf_printf(sb, " Pcmd Type Message" 9465 " Data"); 9466 for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) { 9467 sbuf_printf(sb, "\n%08x%08x %4x %08x %08x%08x%08x%08x", 9468 p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]); 9469 } 9470 9471 rc = sbuf_finish(sb); 9472 sbuf_delete(sb); 9473 free(buf, M_CXGBE); 9474 return (rc); 9475} 9476 9477static int 9478sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS) 9479{ 9480 struct adapter *sc = arg1; 9481 struct sbuf *sb; 9482 int rc, v; 9483 9484 MPASS(chip_id(sc) >= CHELSIO_T5); 9485 9486 rc = sysctl_wire_old_buffer(req, 0); 9487 if (rc != 0) 9488 return (rc); 9489 9490 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 9491 if (sb == NULL) 9492 return (ENOMEM); 9493 9494 v = t4_read_reg(sc, A_SGE_STAT_CFG); 9495 if (G_STATSOURCE_T5(v) == 7) { 9496 int mode; 9497 9498 mode = is_t5(sc) ? 
G_STATMODE(v) : G_T6_STATMODE(v); 9499 if (mode == 0) { 9500 sbuf_printf(sb, "total %d, incomplete %d", 9501 t4_read_reg(sc, A_SGE_STAT_TOTAL), 9502 t4_read_reg(sc, A_SGE_STAT_MATCH)); 9503 } else if (mode == 1) { 9504 sbuf_printf(sb, "total %d, data overflow %d", 9505 t4_read_reg(sc, A_SGE_STAT_TOTAL), 9506 t4_read_reg(sc, A_SGE_STAT_MATCH)); 9507 } else { 9508 sbuf_printf(sb, "unknown mode %d", mode); 9509 } 9510 } 9511 rc = sbuf_finish(sb); 9512 sbuf_delete(sb); 9513 9514 return (rc); 9515} 9516 9517static int 9518sysctl_cpus(SYSCTL_HANDLER_ARGS) 9519{ 9520 struct adapter *sc = arg1; 9521 enum cpu_sets op = arg2; 9522 cpuset_t cpuset; 9523 struct sbuf *sb; 9524 int i, rc; 9525 9526 MPASS(op == LOCAL_CPUS || op == INTR_CPUS); 9527 9528 CPU_ZERO(&cpuset); 9529 rc = bus_get_cpus(sc->dev, op, sizeof(cpuset), &cpuset); 9530 if (rc != 0) 9531 return (rc); 9532 9533 rc = sysctl_wire_old_buffer(req, 0); 9534 if (rc != 0) 9535 return (rc); 9536 9537 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req); 9538 if (sb == NULL) 9539 return (ENOMEM); 9540 9541 CPU_FOREACH(i) 9542 sbuf_printf(sb, "%d ", i); 9543 rc = sbuf_finish(sb); 9544 sbuf_delete(sb); 9545 9546 return (rc); 9547} 9548 9549#ifdef TCP_OFFLOAD 9550static int 9551sysctl_tls(SYSCTL_HANDLER_ARGS) 9552{ 9553 struct adapter *sc = arg1; 9554 int i, j, v, rc; 9555 struct vi_info *vi; 9556 9557 v = sc->tt.tls; 9558 rc = sysctl_handle_int(oidp, &v, 0, req); 9559 if (rc != 0 || req->newptr == NULL) 9560 return (rc); 9561 9562 if (v != 0 && !(sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS)) 9563 return (ENOTSUP); 9564 9565 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4stls"); 9566 if (rc) 9567 return (rc); 9568 sc->tt.tls = !!v; 9569 for_each_port(sc, i) { 9570 for_each_vi(sc->port[i], j, vi) { 9571 if (vi->flags & VI_INIT_DONE) 9572 t4_update_fl_bufsize(vi->ifp); 9573 } 9574 } 9575 end_synchronized_op(sc, 0); 9576 9577 return (0); 9578 9579} 9580 9581static int 9582sysctl_tls_rx_ports(SYSCTL_HANDLER_ARGS) 9583{ 9584 struct adapter *sc = arg1; 9585 int *old_ports, *new_ports; 9586 int i, new_count, rc; 9587 9588 if (req->newptr == NULL && req->oldptr == NULL) 9589 return (SYSCTL_OUT(req, NULL, imax(sc->tt.num_tls_rx_ports, 1) * 9590 sizeof(sc->tt.tls_rx_ports[0]))); 9591 9592 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tlsrx"); 9593 if (rc) 9594 return (rc); 9595 9596 if (sc->tt.num_tls_rx_ports == 0) { 9597 i = -1; 9598 rc = SYSCTL_OUT(req, &i, sizeof(i)); 9599 } else 9600 rc = SYSCTL_OUT(req, sc->tt.tls_rx_ports, 9601 sc->tt.num_tls_rx_ports * sizeof(sc->tt.tls_rx_ports[0])); 9602 if (rc == 0 && req->newptr != NULL) { 9603 new_count = req->newlen / sizeof(new_ports[0]); 9604 new_ports = malloc(new_count * sizeof(new_ports[0]), M_CXGBE, 9605 M_WAITOK); 9606 rc = SYSCTL_IN(req, new_ports, new_count * 9607 sizeof(new_ports[0])); 9608 if (rc) 9609 goto err; 9610 9611 /* Allow setting to a single '-1' to clear the list. 
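		 * An empty list reads back as a single -1 as well.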
*/ 9612 if (new_count == 1 && new_ports[0] == -1) { 9613 ADAPTER_LOCK(sc); 9614 old_ports = sc->tt.tls_rx_ports; 9615 sc->tt.tls_rx_ports = NULL; 9616 sc->tt.num_tls_rx_ports = 0; 9617 ADAPTER_UNLOCK(sc); 9618 free(old_ports, M_CXGBE); 9619 } else { 9620 for (i = 0; i < new_count; i++) { 9621 if (new_ports[i] < 1 || 9622 new_ports[i] > IPPORT_MAX) { 9623 rc = EINVAL; 9624 goto err; 9625 } 9626 } 9627 9628 ADAPTER_LOCK(sc); 9629 old_ports = sc->tt.tls_rx_ports; 9630 sc->tt.tls_rx_ports = new_ports; 9631 sc->tt.num_tls_rx_ports = new_count; 9632 ADAPTER_UNLOCK(sc); 9633 free(old_ports, M_CXGBE); 9634 new_ports = NULL; 9635 } 9636 err: 9637 free(new_ports, M_CXGBE); 9638 } 9639 end_synchronized_op(sc, 0); 9640 return (rc); 9641} 9642 9643static void 9644unit_conv(char *buf, size_t len, u_int val, u_int factor) 9645{ 9646 u_int rem = val % factor; 9647 9648 if (rem == 0) 9649 snprintf(buf, len, "%u", val / factor); 9650 else { 9651 while (rem % 10 == 0) 9652 rem /= 10; 9653 snprintf(buf, len, "%u.%u", val / factor, rem); 9654 } 9655} 9656 9657static int 9658sysctl_tp_tick(SYSCTL_HANDLER_ARGS) 9659{ 9660 struct adapter *sc = arg1; 9661 char buf[16]; 9662 u_int res, re; 9663 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 9664 9665 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 9666 switch (arg2) { 9667 case 0: 9668 /* timer_tick */ 9669 re = G_TIMERRESOLUTION(res); 9670 break; 9671 case 1: 9672 /* TCP timestamp tick */ 9673 re = G_TIMESTAMPRESOLUTION(res); 9674 break; 9675 case 2: 9676 /* DACK tick */ 9677 re = G_DELAYEDACKRESOLUTION(res); 9678 break; 9679 default: 9680 return (EDOOFUS); 9681 } 9682 9683 unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000); 9684 9685 return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); 9686} 9687 9688static int 9689sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS) 9690{ 9691 struct adapter *sc = arg1; 9692 u_int res, dack_re, v; 9693 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 9694 9695 res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 9696 dack_re = G_DELAYEDACKRESOLUTION(res); 9697 v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER); 9698 9699 return (sysctl_handle_int(oidp, &v, 0, req)); 9700} 9701 9702static int 9703sysctl_tp_timer(SYSCTL_HANDLER_ARGS) 9704{ 9705 struct adapter *sc = arg1; 9706 int reg = arg2; 9707 u_int tre; 9708 u_long tp_tick_us, v; 9709 u_int cclk_ps = 1000000000 / sc->params.vpd.cclk; 9710 9711 MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX || 9712 reg == A_TP_PERS_MIN || reg == A_TP_PERS_MAX || 9713 reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL || 9714 reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER); 9715 9716 tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION)); 9717 tp_tick_us = (cclk_ps << tre) / 1000000; 9718 9719 if (reg == A_TP_INIT_SRTT) 9720 v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg)); 9721 else 9722 v = tp_tick_us * t4_read_reg(sc, reg); 9723 9724 return (sysctl_handle_long(oidp, &v, 0, req)); 9725} 9726 9727/* 9728 * All fields in TP_SHIFT_CNT are 4b and the starting location of the field is 9729 * passed to this function. 
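 * The handler simply shifts A_TP_SHIFT_CNT right by that offset and masks
 * off the low 4 bits.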
9730 */ 9731static int 9732sysctl_tp_shift_cnt(SYSCTL_HANDLER_ARGS) 9733{ 9734 struct adapter *sc = arg1; 9735 int idx = arg2; 9736 u_int v; 9737 9738 MPASS(idx >= 0 && idx <= 24); 9739 9740 v = (t4_read_reg(sc, A_TP_SHIFT_CNT) >> idx) & 0xf; 9741 9742 return (sysctl_handle_int(oidp, &v, 0, req)); 9743} 9744 9745static int 9746sysctl_tp_backoff(SYSCTL_HANDLER_ARGS) 9747{ 9748 struct adapter *sc = arg1; 9749 int idx = arg2; 9750 u_int shift, v, r; 9751 9752 MPASS(idx >= 0 && idx < 16); 9753 9754 r = A_TP_TCP_BACKOFF_REG0 + (idx & ~3); 9755 shift = (idx & 3) << 3; 9756 v = (t4_read_reg(sc, r) >> shift) & M_TIMERBACKOFFINDEX0; 9757 9758 return (sysctl_handle_int(oidp, &v, 0, req)); 9759} 9760 9761static int 9762sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS) 9763{ 9764 struct vi_info *vi = arg1; 9765 struct adapter *sc = vi->adapter; 9766 int idx, rc, i; 9767 struct sge_ofld_rxq *ofld_rxq; 9768 uint8_t v; 9769 9770 idx = vi->ofld_tmr_idx; 9771 9772 rc = sysctl_handle_int(oidp, &idx, 0, req); 9773 if (rc != 0 || req->newptr == NULL) 9774 return (rc); 9775 9776 if (idx < 0 || idx >= SGE_NTIMERS) 9777 return (EINVAL); 9778 9779 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 9780 "t4otmr"); 9781 if (rc) 9782 return (rc); 9783 9784 v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->ofld_pktc_idx != -1); 9785 for_each_ofld_rxq(vi, i, ofld_rxq) { 9786#ifdef atomic_store_rel_8 9787 atomic_store_rel_8(&ofld_rxq->iq.intr_params, v); 9788#else 9789 ofld_rxq->iq.intr_params = v; 9790#endif 9791 } 9792 vi->ofld_tmr_idx = idx; 9793 9794 end_synchronized_op(sc, LOCK_HELD); 9795 return (0); 9796} 9797 9798static int 9799sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS) 9800{ 9801 struct vi_info *vi = arg1; 9802 struct adapter *sc = vi->adapter; 9803 int idx, rc; 9804 9805 idx = vi->ofld_pktc_idx; 9806 9807 rc = sysctl_handle_int(oidp, &idx, 0, req); 9808 if (rc != 0 || req->newptr == NULL) 9809 return (rc); 9810 9811 if (idx < -1 || idx >= SGE_NCOUNTERS) 9812 return (EINVAL); 9813 9814 rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK, 9815 "t4opktc"); 9816 if (rc) 9817 return (rc); 9818 9819 if (vi->flags & VI_INIT_DONE) 9820 rc = EBUSY; /* cannot be changed once the queues are created */ 9821 else 9822 vi->ofld_pktc_idx = idx; 9823 9824 end_synchronized_op(sc, LOCK_HELD); 9825 return (rc); 9826} 9827#endif 9828 9829static int 9830get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt) 9831{ 9832 int rc; 9833 9834 if (cntxt->cid > M_CTXTQID) 9835 return (EINVAL); 9836 9837 if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS && 9838 cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM) 9839 return (EINVAL); 9840 9841 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt"); 9842 if (rc) 9843 return (rc); 9844 9845 if (sc->flags & FW_OK) { 9846 rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id, 9847 &cntxt->data[0]); 9848 if (rc == 0) 9849 goto done; 9850 } 9851 9852 /* 9853 * Read via firmware failed or wasn't even attempted. Read directly via 9854 * the backdoor. 
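 * The backdoor read goes through hardware registers rather than a
 * firmware mailbox command, which is why it remains usable when FW_OK
 * is not set.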
9855 */ 9856 rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]); 9857done: 9858 end_synchronized_op(sc, 0); 9859 return (rc); 9860} 9861 9862static int 9863load_fw(struct adapter *sc, struct t4_data *fw) 9864{ 9865 int rc; 9866 uint8_t *fw_data; 9867 9868 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldfw"); 9869 if (rc) 9870 return (rc); 9871 9872 /* 9873 * The firmware, with the sole exception of the memory parity error 9874 * handler, runs from memory and not flash. It is almost always safe to 9875 * install a new firmware on a running system. Just set bit 1 in 9876 * hw.cxgbe.dflags or dev.<nexus>.<n>.dflags first. 9877 */ 9878 if (sc->flags & FULL_INIT_DONE && 9879 (sc->debug_flags & DF_LOAD_FW_ANYTIME) == 0) { 9880 rc = EBUSY; 9881 goto done; 9882 } 9883 9884 fw_data = malloc(fw->len, M_CXGBE, M_WAITOK); 9885 9886 rc = copyin(fw->data, fw_data, fw->len); 9887 if (rc == 0) 9888 rc = -t4_load_fw(sc, fw_data, fw->len); 9889 9890 free(fw_data, M_CXGBE); 9891done: 9892 end_synchronized_op(sc, 0); 9893 return (rc); 9894} 9895 9896static int 9897load_cfg(struct adapter *sc, struct t4_data *cfg) 9898{ 9899 int rc; 9900 uint8_t *cfg_data = NULL; 9901 9902 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); 9903 if (rc) 9904 return (rc); 9905 9906 if (cfg->len == 0) { 9907 /* clear */ 9908 rc = -t4_load_cfg(sc, NULL, 0); 9909 goto done; 9910 } 9911 9912 cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK); 9913 9914 rc = copyin(cfg->data, cfg_data, cfg->len); 9915 if (rc == 0) 9916 rc = -t4_load_cfg(sc, cfg_data, cfg->len); 9917 9918 free(cfg_data, M_CXGBE); 9919done: 9920 end_synchronized_op(sc, 0); 9921 return (rc); 9922} 9923 9924static int 9925load_boot(struct adapter *sc, struct t4_bootrom *br) 9926{ 9927 int rc; 9928 uint8_t *br_data = NULL; 9929 u_int offset; 9930 9931 if (br->len > 1024 * 1024) 9932 return (EFBIG); 9933 9934 if (br->pf_offset == 0) { 9935 /* pfidx */ 9936 if (br->pfidx_addr > 7) 9937 return (EINVAL); 9938 offset = G_OFFSET(t4_read_reg(sc, PF_REG(br->pfidx_addr, 9939 A_PCIE_PF_EXPROM_OFST))); 9940 } else if (br->pf_offset == 1) { 9941 /* offset */ 9942 offset = G_OFFSET(br->pfidx_addr); 9943 } else { 9944 return (EINVAL); 9945 } 9946 9947 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldbr"); 9948 if (rc) 9949 return (rc); 9950 9951 if (br->len == 0) { 9952 /* clear */ 9953 rc = -t4_load_boot(sc, NULL, offset, 0); 9954 goto done; 9955 } 9956 9957 br_data = malloc(br->len, M_CXGBE, M_WAITOK); 9958 9959 rc = copyin(br->data, br_data, br->len); 9960 if (rc == 0) 9961 rc = -t4_load_boot(sc, br_data, offset, br->len); 9962 9963 free(br_data, M_CXGBE); 9964done: 9965 end_synchronized_op(sc, 0); 9966 return (rc); 9967} 9968 9969static int 9970load_bootcfg(struct adapter *sc, struct t4_data *bc) 9971{ 9972 int rc; 9973 uint8_t *bc_data = NULL; 9974 9975 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf"); 9976 if (rc) 9977 return (rc); 9978 9979 if (bc->len == 0) { 9980 /* clear */ 9981 rc = -t4_load_bootcfg(sc, NULL, 0); 9982 goto done; 9983 } 9984 9985 bc_data = malloc(bc->len, M_CXGBE, M_WAITOK); 9986 9987 rc = copyin(bc->data, bc_data, bc->len); 9988 if (rc == 0) 9989 rc = -t4_load_bootcfg(sc, bc_data, bc->len); 9990 9991 free(bc_data, M_CXGBE); 9992done: 9993 end_synchronized_op(sc, 0); 9994 return (rc); 9995} 9996 9997static int 9998cudbg_dump(struct adapter *sc, struct t4_cudbg_dump *dump) 9999{ 10000 int rc; 10001 struct cudbg_init *cudbg; 10002 void *handle, *buf; 10003 10004 /* buf is large, 
don't block if no memory is available */ 10005 buf = malloc(dump->len, M_CXGBE, M_NOWAIT | M_ZERO); 10006 if (buf == NULL) 10007 return (ENOMEM); 10008 10009 handle = cudbg_alloc_handle(); 10010 if (handle == NULL) { 10011 rc = ENOMEM; 10012 goto done; 10013 } 10014 10015 cudbg = cudbg_get_init(handle); 10016 cudbg->adap = sc; 10017 cudbg->print = (cudbg_print_cb)printf; 10018 10019#ifndef notyet 10020 device_printf(sc->dev, "%s: wr_flash %u, len %u, data %p.\n", 10021 __func__, dump->wr_flash, dump->len, dump->data); 10022#endif 10023 10024 if (dump->wr_flash) 10025 cudbg->use_flash = 1; 10026 MPASS(sizeof(cudbg->dbg_bitmap) == sizeof(dump->bitmap)); 10027 memcpy(cudbg->dbg_bitmap, dump->bitmap, sizeof(cudbg->dbg_bitmap)); 10028 10029 rc = cudbg_collect(handle, buf, &dump->len); 10030 if (rc != 0) 10031 goto done; 10032 10033 rc = copyout(buf, dump->data, dump->len); 10034done: 10035 cudbg_free_handle(handle); 10036 free(buf, M_CXGBE); 10037 return (rc); 10038} 10039 10040static void 10041free_offload_policy(struct t4_offload_policy *op) 10042{ 10043 struct offload_rule *r; 10044 int i; 10045 10046 if (op == NULL) 10047 return; 10048 10049 r = &op->rule[0]; 10050 for (i = 0; i < op->nrules; i++, r++) { 10051 free(r->bpf_prog.bf_insns, M_CXGBE); 10052 } 10053 free(op->rule, M_CXGBE); 10054 free(op, M_CXGBE); 10055} 10056 10057static int 10058set_offload_policy(struct adapter *sc, struct t4_offload_policy *uop) 10059{ 10060 int i, rc, len; 10061 struct t4_offload_policy *op, *old; 10062 struct bpf_program *bf; 10063 const struct offload_settings *s; 10064 struct offload_rule *r; 10065 void *u; 10066 10067 if (!is_offload(sc)) 10068 return (ENODEV); 10069 10070 if (uop->nrules == 0) { 10071 /* Delete installed policies. */ 10072 op = NULL; 10073 goto set_policy; 10074 } else if (uop->nrules > 256) { /* arbitrary */ 10075 return (E2BIG); 10076 } 10077 10078 /* Copy userspace offload policy to kernel */ 10079 op = malloc(sizeof(*op), M_CXGBE, M_ZERO | M_WAITOK); 10080 op->nrules = uop->nrules; 10081 len = op->nrules * sizeof(struct offload_rule); 10082 op->rule = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); 10083 rc = copyin(uop->rule, op->rule, len); 10084 if (rc) { 10085 free(op->rule, M_CXGBE); 10086 free(op, M_CXGBE); 10087 return (rc); 10088 } 10089 10090 r = &op->rule[0]; 10091 for (i = 0; i < op->nrules; i++, r++) { 10092 10093 /* Validate open_type */ 10094 if (r->open_type != OPEN_TYPE_LISTEN && 10095 r->open_type != OPEN_TYPE_ACTIVE && 10096 r->open_type != OPEN_TYPE_PASSIVE && 10097 r->open_type != OPEN_TYPE_DONTCARE) { rc = EINVAL; 10098error: 10099 /* 10100 * Rules 0 to i - 1 have malloc'd filters that need to 10101 * be freed. Rules from i onwards still hold userspace 10102 * pointers (or were cleaned up already) and must be left alone.
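 * Truncating op->nrules to i before calling free_offload_policy()
 * is what confines the cleanup to the rules whose filter programs
 * were actually copied into the kernel.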
10103 */ 10104 op->nrules = i; 10105 free_offload_policy(op); 10106 return (rc); 10107 } 10108 10109 /* Validate settings */ 10110 s = &r->settings; 10111 if ((s->offload != 0 && s->offload != 1) || 10112 s->cong_algo < -1 || s->cong_algo > CONG_ALG_HIGHSPEED || 10113 s->sched_class < -1 || 10114 s->sched_class >= sc->chip_params->nsched_cls) { 10115 rc = EINVAL; 10116 goto error; 10117 } 10118 10119 bf = &r->bpf_prog; 10120 u = bf->bf_insns; /* userspace ptr */ 10121 bf->bf_insns = NULL; 10122 if (bf->bf_len == 0) { 10123 /* legal, matches everything */ 10124 continue; 10125 } 10126 len = bf->bf_len * sizeof(*bf->bf_insns); 10127 bf->bf_insns = malloc(len, M_CXGBE, M_ZERO | M_WAITOK); 10128 rc = copyin(u, bf->bf_insns, len); 10129 if (rc != 0) { 10130 free(bf->bf_insns, M_CXGBE); bf->bf_insns = NULL; goto error; } 10131 10132 if (!bpf_validate(bf->bf_insns, bf->bf_len)) { 10133 rc = EINVAL; 10134 free(bf->bf_insns, M_CXGBE); bf->bf_insns = NULL; goto error; 10135 } 10136 } 10137set_policy: 10138 rw_wlock(&sc->policy_lock); 10139 old = sc->policy; 10140 sc->policy = op; 10141 rw_wunlock(&sc->policy_lock); 10142 free_offload_policy(old); 10143 10144 return (0); 10145} 10146 10147#define MAX_READ_BUF_SIZE (128 * 1024) 10148static int 10149read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr) 10150{ 10151 uint32_t addr, remaining, n; 10152 uint32_t *buf; 10153 int rc; 10154 uint8_t *dst; 10155 10156 rc = validate_mem_range(sc, mr->addr, mr->len); 10157 if (rc != 0) 10158 return (rc); 10159 10160 buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK); 10161 addr = mr->addr; 10162 remaining = mr->len; 10163 dst = (void *)mr->data; 10164 10165 while (remaining) { 10166 n = min(remaining, MAX_READ_BUF_SIZE); 10167 read_via_memwin(sc, 2, addr, buf, n); 10168 10169 rc = copyout(buf, dst, n); 10170 if (rc != 0) 10171 break; 10172 10173 dst += n; 10174 remaining -= n; 10175 addr += n; 10176 } 10177 10178 free(buf, M_CXGBE); 10179 return (rc); 10180} 10181#undef MAX_READ_BUF_SIZE 10182 10183static int 10184read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd) 10185{ 10186 int rc; 10187 10188 if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports) 10189 return (EINVAL); 10190 10191 if (i2cd->len > sizeof(i2cd->data)) 10192 return (EFBIG); 10193 10194 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd"); 10195 if (rc) 10196 return (rc); 10197 rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr, 10198 i2cd->offset, i2cd->len, &i2cd->data[0]); 10199 end_synchronized_op(sc, 0); 10200 10201 return (rc); 10202} 10203 10204static int 10205clear_stats(struct adapter *sc, u_int port_id) 10206{ 10207 int i, v, chan_map; 10208 struct port_info *pi; 10209 struct vi_info *vi; 10210 struct sge_rxq *rxq; 10211 struct sge_txq *txq; 10212 struct sge_wrq *wrq; 10213#ifdef TCP_OFFLOAD 10214 struct sge_ofld_rxq *ofld_rxq; 10215#endif 10216 10217 if (port_id >= sc->params.nports) 10218 return (EINVAL); 10219 pi = sc->port[port_id]; 10220 if (pi == NULL) 10221 return (EIO); 10222 10223 /* MAC stats */ 10224 t4_clr_port_stats(sc, pi->tx_chan); 10225 if (is_t6(sc)) { 10226 if (pi->fcs_reg != -1) 10227 pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg); 10228 else 10229 pi->stats.rx_fcs_err = 0; 10230 } 10231 pi->tx_parse_error = 0; 10232 pi->tnl_cong_drops = 0; 10233 mtx_lock(&sc->reg_lock); 10234 for_each_vi(pi, v, vi) { 10235 if (vi->flags & VI_INIT_DONE) 10236 t4_clr_vi_stats(sc, vi->vin); 10237 } 10238 chan_map = pi->rx_e_chan_map; 10239 v = 0; /* reuse */ 10240 while (chan_map) { 10241 i = ffs(chan_map) - 1; 10242 t4_write_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v, 10243 1,
A_TP_MIB_TNL_CNG_DROP_0 + i); 10244 chan_map &= ~(1 << i); 10245 } 10246 mtx_unlock(&sc->reg_lock); 10247 10248 /* 10249 * Since this command accepts a port, clear stats for 10250 * all VIs on this port. 10251 */ 10252 for_each_vi(pi, v, vi) { 10253 if (vi->flags & VI_INIT_DONE) { 10254 10255 for_each_rxq(vi, i, rxq) { 10256#if defined(INET) || defined(INET6) 10257 rxq->lro.lro_queued = 0; 10258 rxq->lro.lro_flushed = 0; 10259#endif 10260 rxq->rxcsum = 0; 10261 rxq->vlan_extraction = 0; 10262 rxq->vxlan_rxcsum = 0; 10263 10264 rxq->fl.cl_allocated = 0; 10265 rxq->fl.cl_recycled = 0; 10266 rxq->fl.cl_fast_recycled = 0; 10267 } 10268 10269 for_each_txq(vi, i, txq) { 10270 txq->txcsum = 0; 10271 txq->tso_wrs = 0; 10272 txq->vlan_insertion = 0; 10273 txq->imm_wrs = 0; 10274 txq->sgl_wrs = 0; 10275 txq->txpkt_wrs = 0; 10276 txq->txpkts0_wrs = 0; 10277 txq->txpkts1_wrs = 0; 10278 txq->txpkts0_pkts = 0; 10279 txq->txpkts1_pkts = 0; 10280 txq->txpkts_flush = 0; 10281 txq->raw_wrs = 0; 10282 txq->vxlan_tso_wrs = 0; 10283 txq->vxlan_txcsum = 0; 10284 mp_ring_reset_stats(txq->r); 10285 } 10286 10287#if defined(TCP_OFFLOAD) || defined(RATELIMIT) 10288 for_each_ofld_txq(vi, i, wrq) { 10289 wrq->tx_wrs_direct = 0; 10290 wrq->tx_wrs_copied = 0; 10291 } 10292#endif 10293#ifdef TCP_OFFLOAD 10294 for_each_ofld_rxq(vi, i, ofld_rxq) { 10295 ofld_rxq->fl.cl_allocated = 0; 10296 ofld_rxq->fl.cl_recycled = 0; 10297 ofld_rxq->fl.cl_fast_recycled = 0; 10298 } 10299#endif 10300 10301 if (IS_MAIN_VI(vi)) { 10302 wrq = &sc->sge.ctrlq[pi->port_id]; 10303 wrq->tx_wrs_direct = 0; 10304 wrq->tx_wrs_copied = 0; 10305 } 10306 } 10307 } 10308 10309 return (0); 10310} 10311 10312int 10313t4_os_find_pci_capability(struct adapter *sc, int cap) 10314{ 10315 int i; 10316 10317 return (pci_find_cap(sc->dev, cap, &i) == 0 ? 
i : 0); 10318} 10319 10320int 10321t4_os_pci_save_state(struct adapter *sc) 10322{ 10323 device_t dev; 10324 struct pci_devinfo *dinfo; 10325 10326 dev = sc->dev; 10327 dinfo = device_get_ivars(dev); 10328 10329 pci_cfg_save(dev, dinfo, 0); 10330 return (0); 10331} 10332 10333int 10334t4_os_pci_restore_state(struct adapter *sc) 10335{ 10336 device_t dev; 10337 struct pci_devinfo *dinfo; 10338 10339 dev = sc->dev; 10340 dinfo = device_get_ivars(dev); 10341 10342 pci_cfg_restore(dev, dinfo); 10343 return (0); 10344} 10345 10346void 10347t4_os_portmod_changed(struct port_info *pi) 10348{ 10349 struct adapter *sc = pi->adapter; 10350 struct vi_info *vi; 10351 struct ifnet *ifp; 10352 static const char *mod_str[] = { 10353 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 10354 }; 10355 10356 KASSERT((pi->flags & FIXED_IFMEDIA) == 0, 10357 ("%s: port_type %u", __func__, pi->port_type)); 10358 10359 vi = &pi->vi[0]; 10360 if (begin_synchronized_op(sc, vi, HOLD_LOCK, "t4mod") == 0) { 10361 PORT_LOCK(pi); 10362 build_medialist(pi); 10363 if (pi->mod_type != FW_PORT_MOD_TYPE_NONE) { 10364 fixup_link_config(pi); 10365 apply_link_config(pi); 10366 } 10367 PORT_UNLOCK(pi); 10368 end_synchronized_op(sc, LOCK_HELD); 10369 } 10370 10371 ifp = vi->ifp; 10372 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 10373 if_printf(ifp, "transceiver unplugged.\n"); 10374 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 10375 if_printf(ifp, "unknown transceiver inserted.\n"); 10376 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 10377 if_printf(ifp, "unsupported transceiver inserted.\n"); 10378 else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) { 10379 if_printf(ifp, "%dGbps %s transceiver inserted.\n", 10380 port_top_speed(pi), mod_str[pi->mod_type]); 10381 } else { 10382 if_printf(ifp, "transceiver (type %d) inserted.\n", 10383 pi->mod_type); 10384 } 10385} 10386 10387void 10388t4_os_link_changed(struct port_info *pi) 10389{ 10390 struct vi_info *vi; 10391 struct ifnet *ifp; 10392 struct link_config *lc = &pi->link_cfg; 10393 struct adapter *sc = pi->adapter; 10394 int v; 10395 10396 PORT_LOCK_ASSERT_OWNED(pi); 10397 10398 if (is_t6(sc)) { 10399 if (lc->link_ok) { 10400 if (lc->speed > 25000 || 10401 (lc->speed == 25000 && lc->fec == FEC_RS)) { 10402 pi->fcs_reg = T5_PORT_REG(pi->tx_chan, 10403 A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS); 10404 } else { 10405 pi->fcs_reg = T5_PORT_REG(pi->tx_chan, 10406 A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS); 10407 } 10408 pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg); 10409 pi->stats.rx_fcs_err = 0; 10410 } else { 10411 pi->fcs_reg = -1; 10412 } 10413 } else { 10414 MPASS(pi->fcs_reg != -1); 10415 MPASS(pi->fcs_base == 0); 10416 } 10417 10418 for_each_vi(pi, v, vi) { 10419 ifp = vi->ifp; 10420 if (ifp == NULL) 10421 continue; 10422 10423 if (lc->link_ok) { 10424 ifp->if_baudrate = IF_Mbps(lc->speed); 10425 if_link_state_change(ifp, LINK_STATE_UP); 10426 } else { 10427 if_link_state_change(ifp, LINK_STATE_DOWN); 10428 } 10429 } 10430} 10431 10432void 10433t4_iterate(void (*func)(struct adapter *, void *), void *arg) 10434{ 10435 struct adapter *sc; 10436 10437 sx_slock(&t4_list_lock); 10438 SLIST_FOREACH(sc, &t4_list, link) { 10439 /* 10440 * func should not make any assumptions about what state sc is 10441 * in - the only guarantee is that sc->sc_lock is a valid lock. 
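 * A minimal caller sketch (count_adapters is hypothetical, shown only
 * to illustrate the contract):
 *
 *	static void
 *	count_adapters(struct adapter *sc, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	t4_iterate(count_adapters, &n);
 *
 * The VXLAN event handlers later in this file use t4_iterate() in
 * exactly this way.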
10442 */ 10443 func(sc, arg); 10444 } 10445 sx_sunlock(&t4_list_lock); 10446} 10447 10448static int 10449t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, 10450 struct thread *td) 10451{ 10452 int rc; 10453 struct adapter *sc = dev->si_drv1; 10454 10455 rc = priv_check(td, PRIV_DRIVER); 10456 if (rc != 0) 10457 return (rc); 10458 10459 switch (cmd) { 10460 case CHELSIO_T4_GETREG: { 10461 struct t4_reg *edata = (struct t4_reg *)data; 10462 10463 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 10464 return (EFAULT); 10465 10466 if (edata->size == 4) 10467 edata->val = t4_read_reg(sc, edata->addr); 10468 else if (edata->size == 8) 10469 edata->val = t4_read_reg64(sc, edata->addr); 10470 else 10471 return (EINVAL); 10472 10473 break; 10474 } 10475 case CHELSIO_T4_SETREG: { 10476 struct t4_reg *edata = (struct t4_reg *)data; 10477 10478 if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 10479 return (EFAULT); 10480 10481 if (edata->size == 4) { 10482 if (edata->val & 0xffffffff00000000) 10483 return (EINVAL); 10484 t4_write_reg(sc, edata->addr, (uint32_t) edata->val); 10485 } else if (edata->size == 8) 10486 t4_write_reg64(sc, edata->addr, edata->val); 10487 else 10488 return (EINVAL); 10489 break; 10490 } 10491 case CHELSIO_T4_REGDUMP: { 10492 struct t4_regdump *regs = (struct t4_regdump *)data; 10493 int reglen = t4_get_regs_len(sc); 10494 uint8_t *buf; 10495 10496 if (regs->len < reglen) { 10497 regs->len = reglen; /* hint to the caller */ 10498 return (ENOBUFS); 10499 } 10500 10501 regs->len = reglen; 10502 buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO); 10503 get_regs(sc, regs, buf); 10504 rc = copyout(buf, regs->data, reglen); 10505 free(buf, M_CXGBE); 10506 break; 10507 } 10508 case CHELSIO_T4_GET_FILTER_MODE: 10509 rc = get_filter_mode(sc, (uint32_t *)data); 10510 break; 10511 case CHELSIO_T4_SET_FILTER_MODE: 10512 rc = set_filter_mode(sc, *(uint32_t *)data); 10513 break; 10514 case CHELSIO_T4_GET_FILTER: 10515 rc = get_filter(sc, (struct t4_filter *)data); 10516 break; 10517 case CHELSIO_T4_SET_FILTER: 10518 rc = set_filter(sc, (struct t4_filter *)data); 10519 break; 10520 case CHELSIO_T4_DEL_FILTER: 10521 rc = del_filter(sc, (struct t4_filter *)data); 10522 break; 10523 case CHELSIO_T4_GET_SGE_CONTEXT: 10524 rc = get_sge_context(sc, (struct t4_sge_context *)data); 10525 break; 10526 case CHELSIO_T4_LOAD_FW: 10527 rc = load_fw(sc, (struct t4_data *)data); 10528 break; 10529 case CHELSIO_T4_GET_MEM: 10530 rc = read_card_mem(sc, 2, (struct t4_mem_range *)data); 10531 break; 10532 case CHELSIO_T4_GET_I2C: 10533 rc = read_i2c(sc, (struct t4_i2c_data *)data); 10534 break; 10535 case CHELSIO_T4_CLEAR_STATS: 10536 rc = clear_stats(sc, *(uint32_t *)data); 10537 break; 10538 case CHELSIO_T4_SCHED_CLASS: 10539 rc = t4_set_sched_class(sc, (struct t4_sched_params *)data); 10540 break; 10541 case CHELSIO_T4_SCHED_QUEUE: 10542 rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data); 10543 break; 10544 case CHELSIO_T4_GET_TRACER: 10545 rc = t4_get_tracer(sc, (struct t4_tracer *)data); 10546 break; 10547 case CHELSIO_T4_SET_TRACER: 10548 rc = t4_set_tracer(sc, (struct t4_tracer *)data); 10549 break; 10550 case CHELSIO_T4_LOAD_CFG: 10551 rc = load_cfg(sc, (struct t4_data *)data); 10552 break; 10553 case CHELSIO_T4_LOAD_BOOT: 10554 rc = load_boot(sc, (struct t4_bootrom *)data); 10555 break; 10556 case CHELSIO_T4_LOAD_BOOTCFG: 10557 rc = load_bootcfg(sc, (struct t4_data *)data); 10558 break; 10559 case CHELSIO_T4_CUDBG_DUMP: 10560 rc = cudbg_dump(sc, 
(struct t4_cudbg_dump *)data); 10561 break; 10562 case CHELSIO_T4_SET_OFLD_POLICY: 10563 rc = set_offload_policy(sc, (struct t4_offload_policy *)data); 10564 break; 10565 default: 10566 rc = ENOTTY; 10567 } 10568 10569 return (rc); 10570} 10571 10572#ifdef TCP_OFFLOAD 10573static int 10574toe_capability(struct vi_info *vi, int enable) 10575{ 10576 int rc; 10577 struct port_info *pi = vi->pi; 10578 struct adapter *sc = pi->adapter; 10579 10580 ASSERT_SYNCHRONIZED_OP(sc); 10581 10582 if (!is_offload(sc)) 10583 return (ENODEV); 10584 10585 if (enable) { 10586 if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) { 10587 /* TOE is already enabled. */ 10588 return (0); 10589 } 10590 10591 /* 10592 * We need the port's queues around so that we're able to send 10593 * and receive CPLs to/from the TOE even if the ifnet for this 10594 * port has never been UP'd administratively. 10595 */ 10596 if (!(vi->flags & VI_INIT_DONE)) { 10597 rc = vi_full_init(vi); 10598 if (rc) 10599 return (rc); 10600 } 10601 if (!(pi->vi[0].flags & VI_INIT_DONE)) { 10602 rc = vi_full_init(&pi->vi[0]); 10603 if (rc) 10604 return (rc); 10605 } 10606 10607 if (isset(&sc->offload_map, pi->port_id)) { 10608 /* TOE is enabled on another VI of this port. */ 10609 pi->uld_vis++; 10610 return (0); 10611 } 10612 10613 if (!uld_active(sc, ULD_TOM)) { 10614 rc = t4_activate_uld(sc, ULD_TOM); 10615 if (rc == EAGAIN) { 10616 log(LOG_WARNING, 10617 "You must kldload t4_tom.ko before trying " 10618 "to enable TOE on a cxgbe interface.\n"); 10619 } 10620 if (rc != 0) 10621 return (rc); 10622 KASSERT(sc->tom_softc != NULL, 10623 ("%s: TOM activated but softc NULL", __func__)); 10624 KASSERT(uld_active(sc, ULD_TOM), 10625 ("%s: TOM activated but flag not set", __func__)); 10626 } 10627 10628 /* Activate iWARP and iSCSI too, if the modules are loaded. */ 10629 if (!uld_active(sc, ULD_IWARP)) 10630 (void) t4_activate_uld(sc, ULD_IWARP); 10631 if (!uld_active(sc, ULD_ISCSI)) 10632 (void) t4_activate_uld(sc, ULD_ISCSI); 10633 10634 pi->uld_vis++; 10635 setbit(&sc->offload_map, pi->port_id); 10636 } else { 10637 pi->uld_vis--; 10638 10639 if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0) 10640 return (0); 10641 10642 KASSERT(uld_active(sc, ULD_TOM), 10643 ("%s: TOM never initialized?", __func__)); 10644 clrbit(&sc->offload_map, pi->port_id); 10645 } 10646 10647 return (0); 10648} 10649 10650/* 10651 * Add an upper layer driver to the global list. 
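 * Returns EEXIST if a driver with the same uld_id is already
 * registered; otherwise the new entry is placed at the head of
 * t4_uld_list with its refcount reset to 0.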
10652 */ 10653int 10654t4_register_uld(struct uld_info *ui) 10655{ 10656 int rc = 0; 10657 struct uld_info *u; 10658 10659 sx_xlock(&t4_uld_list_lock); 10660 SLIST_FOREACH(u, &t4_uld_list, link) { 10661 if (u->uld_id == ui->uld_id) { 10662 rc = EEXIST; 10663 goto done; 10664 } 10665 } 10666 10667 SLIST_INSERT_HEAD(&t4_uld_list, ui, link); 10668 ui->refcount = 0; 10669done: 10670 sx_xunlock(&t4_uld_list_lock); 10671 return (rc); 10672} 10673 10674int 10675t4_unregister_uld(struct uld_info *ui) 10676{ 10677 int rc = EINVAL; 10678 struct uld_info *u; 10679 10680 sx_xlock(&t4_uld_list_lock); 10681 10682 SLIST_FOREACH(u, &t4_uld_list, link) { 10683 if (u == ui) { 10684 if (ui->refcount > 0) { 10685 rc = EBUSY; 10686 goto done; 10687 } 10688 10689 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); 10690 rc = 0; 10691 goto done; 10692 } 10693 } 10694done: 10695 sx_xunlock(&t4_uld_list_lock); 10696 return (rc); 10697} 10698 10699int 10700t4_activate_uld(struct adapter *sc, int id) 10701{ 10702 int rc; 10703 struct uld_info *ui; 10704 10705 ASSERT_SYNCHRONIZED_OP(sc); 10706 10707 if (id < 0 || id > ULD_MAX) 10708 return (EINVAL); 10709 rc = EAGAIN; /* kldload the module with this ULD and try again. */ 10710 10711 sx_slock(&t4_uld_list_lock); 10712 10713 SLIST_FOREACH(ui, &t4_uld_list, link) { 10714 if (ui->uld_id == id) { 10715 if (!(sc->flags & FULL_INIT_DONE)) { 10716 rc = adapter_full_init(sc); 10717 if (rc != 0) 10718 break; 10719 } 10720 10721 rc = ui->activate(sc); 10722 if (rc == 0) { 10723 setbit(&sc->active_ulds, id); 10724 ui->refcount++; 10725 } 10726 break; 10727 } 10728 } 10729 10730 sx_sunlock(&t4_uld_list_lock); 10731 10732 return (rc); 10733} 10734 10735int 10736t4_deactivate_uld(struct adapter *sc, int id) 10737{ 10738 int rc; 10739 struct uld_info *ui; 10740 10741 ASSERT_SYNCHRONIZED_OP(sc); 10742 10743 if (id < 0 || id > ULD_MAX) 10744 return (EINVAL); 10745 rc = ENXIO; 10746 10747 sx_slock(&t4_uld_list_lock); 10748 10749 SLIST_FOREACH(ui, &t4_uld_list, link) { 10750 if (ui->uld_id == id) { 10751 rc = ui->deactivate(sc); 10752 if (rc == 0) { 10753 clrbit(&sc->active_ulds, id); 10754 ui->refcount--; 10755 } 10756 break; 10757 } 10758 } 10759 10760 sx_sunlock(&t4_uld_list_lock); 10761 10762 return (rc); 10763} 10764 10765int 10766uld_active(struct adapter *sc, int uld_id) 10767{ 10768 10769 MPASS(uld_id >= 0 && uld_id <= ULD_MAX); 10770 10771 return (isset(&sc->active_ulds, uld_id)); 10772} 10773#endif 10774 10775/* 10776 * t = ptr to tunable. 10777 * nc = number of CPUs. 10778 * c = compiled-in default for that tunable. 10779 */ 10780static void 10781calculate_nqueues(int *t, int nc, const int c) 10782{ 10783 int nq; 10784 10785 if (*t > 0) 10786 return; 10787 nq = *t < 0 ? -*t : c; 10788 *t = min(nc, nq); 10789} 10790 10791/* 10792 * Come up with reasonable defaults for some of the tunables, provided they're 10793 * not set by the user (in which case we'll use the values as is).
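 * calculate_nqueues() above supplies the semantics: a tunable left at 0
 * becomes min(ncpus, compiled-in default), a negative value -n becomes
 * min(ncpus, n), and a positive value is kept as is.  For example
 * (illustrative), on an 8-CPU machine t4_ntxq = -4 ends up as 4 while
 * t4_ntxq = 16 is left at 16.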
10794 */ 10795static void 10796tweak_tunables(void) 10797{ 10798 int nc = mp_ncpus; /* our snapshot of the number of CPUs */ 10799 10800 if (t4_ntxq < 1) { 10801#ifdef RSS 10802 t4_ntxq = rss_getnumbuckets(); 10803#else 10804 calculate_nqueues(&t4_ntxq, nc, NTXQ); 10805#endif 10806 } 10807 10808 calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI); 10809 10810 if (t4_nrxq < 1) { 10811#ifdef RSS 10812 t4_nrxq = rss_getnumbuckets(); 10813#else 10814 calculate_nqueues(&t4_nrxq, nc, NRXQ); 10815#endif 10816 } 10817 10818 calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI); 10819 10820#if defined(TCP_OFFLOAD) || defined(RATELIMIT) 10821 calculate_nqueues(&t4_nofldtxq, nc, NOFLDTXQ); 10822 calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI); 10823#endif 10824#ifdef TCP_OFFLOAD 10825 calculate_nqueues(&t4_nofldrxq, nc, NOFLDRXQ); 10826 calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI); 10827 10828 if (t4_toecaps_allowed == -1) 10829 t4_toecaps_allowed = FW_CAPS_CONFIG_TOE; 10830 10831 if (t4_rdmacaps_allowed == -1) { 10832 t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP | 10833 FW_CAPS_CONFIG_RDMA_RDMAC; 10834 } 10835 10836 if (t4_iscsicaps_allowed == -1) { 10837 t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU | 10838 FW_CAPS_CONFIG_ISCSI_TARGET_PDU | 10839 FW_CAPS_CONFIG_ISCSI_T10DIF; 10840 } 10841 10842 if (t4_tmr_idx_ofld < 0 || t4_tmr_idx_ofld >= SGE_NTIMERS) 10843 t4_tmr_idx_ofld = TMR_IDX_OFLD; 10844 10845 if (t4_pktc_idx_ofld < -1 || t4_pktc_idx_ofld >= SGE_NCOUNTERS) 10846 t4_pktc_idx_ofld = PKTC_IDX_OFLD; 10847#else 10848 if (t4_toecaps_allowed == -1) 10849 t4_toecaps_allowed = 0; 10850 10851 if (t4_rdmacaps_allowed == -1) 10852 t4_rdmacaps_allowed = 0; 10853 10854 if (t4_iscsicaps_allowed == -1) 10855 t4_iscsicaps_allowed = 0; 10856#endif 10857 10858#ifdef DEV_NETMAP 10859 calculate_nqueues(&t4_nnmtxq, nc, NNMTXQ); 10860 calculate_nqueues(&t4_nnmrxq, nc, NNMRXQ); 10861 calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI); 10862 calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI); 10863#endif 10864 10865 if (t4_tmr_idx < 0 || t4_tmr_idx >= SGE_NTIMERS) 10866 t4_tmr_idx = TMR_IDX; 10867 10868 if (t4_pktc_idx < -1 || t4_pktc_idx >= SGE_NCOUNTERS) 10869 t4_pktc_idx = PKTC_IDX; 10870 10871 if (t4_qsize_txq < 128) 10872 t4_qsize_txq = 128; 10873 10874 if (t4_qsize_rxq < 128) 10875 t4_qsize_rxq = 128; 10876 while (t4_qsize_rxq & 7) 10877 t4_qsize_rxq++; 10878 10879 t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX; 10880 10881 /* 10882 * Number of VIs to create per-port. The first VI is the "main" regular 10883 * VI for the port. The rest are additional virtual interfaces on the 10884 * same physical port. Note that the main VI does not have native 10885 * netmap support but the extra VIs do. 10886 * 10887 * Limit the number of VIs per port to the number of available 10888 * MAC addresses per port. 
10889 */ 10890 if (t4_num_vis < 1) 10891 t4_num_vis = 1; 10892 if (t4_num_vis > nitems(vi_mac_funcs)) { 10893 t4_num_vis = nitems(vi_mac_funcs); 10894 printf("cxgbe: number of VIs limited to %d\n", t4_num_vis); 10895 } 10896 10897 if (pcie_relaxed_ordering < 0 || pcie_relaxed_ordering > 2) { 10898 pcie_relaxed_ordering = 1; 10899#if defined(__i386__) || defined(__amd64__) 10900 if (cpu_vendor_id == CPU_VENDOR_INTEL) 10901 pcie_relaxed_ordering = 0; 10902#endif 10903 } 10904} 10905 10906#ifdef DDB 10907static void 10908t4_dump_tcb(struct adapter *sc, int tid) 10909{ 10910 uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos; 10911 10912 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2); 10913 save = t4_read_reg(sc, reg); 10914 base = sc->memwin[2].mw_base; 10915 10916 /* Dump TCB for the tid */ 10917 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE); 10918 tcb_addr += tid * TCB_SIZE; 10919 10920 if (is_t4(sc)) { 10921 pf = 0; 10922 win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */ 10923 } else { 10924 pf = V_PFNUM(sc->pf); 10925 win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */ 10926 } 10927 t4_write_reg(sc, reg, win_pos | pf); 10928 t4_read_reg(sc, reg); 10929 10930 off = tcb_addr - win_pos; 10931 for (i = 0; i < 4; i++) { 10932 uint32_t buf[8]; 10933 for (j = 0; j < 8; j++, off += 4) 10934 buf[j] = htonl(t4_read_reg(sc, base + off)); 10935 10936 db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n", 10937 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 10938 buf[7]); 10939 } 10940 10941 t4_write_reg(sc, reg, save); 10942 t4_read_reg(sc, reg); 10943} 10944 10945static void 10946t4_dump_devlog(struct adapter *sc) 10947{ 10948 struct devlog_params *dparams = &sc->params.devlog; 10949 struct fw_devlog_e e; 10950 int i, first, j, m, nentries, rc; 10951 uint64_t ftstamp = UINT64_MAX; 10952 10953 if (dparams->start == 0) { 10954 db_printf("devlog params not valid\n"); 10955 return; 10956 } 10957 10958 nentries = dparams->size / sizeof(struct fw_devlog_e); 10959 m = fwmtype_to_hwmtype(dparams->memtype); 10960 10961 /* Find the first entry. */ 10962 first = -1; 10963 for (i = 0; i < nentries && !db_pager_quit; i++) { 10964 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 10965 sizeof(e), (void *)&e); 10966 if (rc != 0) 10967 break; 10968 10969 if (e.timestamp == 0) 10970 break; 10971 10972 e.timestamp = be64toh(e.timestamp); 10973 if (e.timestamp < ftstamp) { 10974 ftstamp = e.timestamp; 10975 first = i; 10976 } 10977 } 10978 10979 if (first == -1) 10980 return; 10981 10982 i = first; 10983 do { 10984 rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e), 10985 sizeof(e), (void *)&e); 10986 if (rc != 0) 10987 return; 10988 10989 if (e.timestamp == 0) 10990 return; 10991 10992 e.timestamp = be64toh(e.timestamp); 10993 e.seqno = be32toh(e.seqno); 10994 for (j = 0; j < 8; j++) 10995 e.params[j] = be32toh(e.params[j]); 10996 10997 db_printf("%10d %15ju %8s %8s ", 10998 e.seqno, e.timestamp, 10999 (e.level < nitems(devlog_level_strings) ? 11000 devlog_level_strings[e.level] : "UNKNOWN"), 11001 (e.facility < nitems(devlog_facility_strings) ? 
11002 devlog_facility_strings[e.facility] : "UNKNOWN")); 11003 db_printf(e.fmt, e.params[0], e.params[1], e.params[2], 11004 e.params[3], e.params[4], e.params[5], e.params[6], 11005 e.params[7]); 11006 11007 if (++i == nentries) 11008 i = 0; 11009 } while (i != first && !db_pager_quit); 11010} 11011 11012static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table); 11013_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table); 11014 11015DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL) 11016{ 11017 device_t dev; 11018 int t; 11019 bool valid; 11020 11021 valid = false; 11022 t = db_read_token(); 11023 if (t == tIDENT) { 11024 dev = device_lookup_by_name(db_tok_string); 11025 valid = true; 11026 } 11027 db_skip_to_eol(); 11028 if (!valid) { 11029 db_printf("usage: show t4 devlog <nexus>\n"); 11030 return; 11031 } 11032 11033 if (dev == NULL) { 11034 db_printf("device not found\n"); 11035 return; 11036 } 11037 11038 t4_dump_devlog(device_get_softc(dev)); 11039} 11040 11041DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL) 11042{ 11043 device_t dev; 11044 int radix, tid, t; 11045 bool valid; 11046 11047 valid = false; 11048 radix = db_radix; 11049 db_radix = 10; 11050 t = db_read_token(); 11051 if (t == tIDENT) { 11052 dev = device_lookup_by_name(db_tok_string); 11053 t = db_read_token(); 11054 if (t == tNUMBER) { 11055 tid = db_tok_number; 11056 valid = true; 11057 } 11058 } 11059 db_radix = radix; 11060 db_skip_to_eol(); 11061 if (!valid) { 11062 db_printf("usage: show t4 tcb <nexus> <tid>\n"); 11063 return; 11064 } 11065 11066 if (dev == NULL) { 11067 db_printf("device not found\n"); 11068 return; 11069 } 11070 if (tid < 0) { 11071 db_printf("invalid tid\n"); 11072 return; 11073 } 11074 11075 t4_dump_tcb(device_get_softc(dev), tid); 11076} 11077#endif 11078 11079static eventhandler_tag vxlan_start_evtag; 11080static eventhandler_tag vxlan_stop_evtag; 11081 11082struct vxlan_evargs { 11083 struct ifnet *ifp; 11084 uint16_t port; 11085}; 11086 11087static void 11088t4_vxlan_start(struct adapter *sc, void *arg) 11089{ 11090 struct vxlan_evargs *v = arg; 11091 struct port_info *pi; 11092 uint8_t match_all_mac[ETHER_ADDR_LEN] = {0}; 11093 int i, rc; 11094 11095 if (sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) 11096 return; 11097 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxst") != 0) 11098 return; 11099 11100 if (sc->vxlan_refcount == 0) { 11101 sc->vxlan_port = v->port; 11102 sc->vxlan_refcount = 1; 11103 t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE, 11104 V_VXLAN(v->port) | F_VXLAN_EN); 11105 for_each_port(sc, i) { 11106 pi = sc->port[i]; 11107 if (pi->vxlan_tcam_entry == true) 11108 continue; 11109 rc = t4_alloc_raw_mac_filt(sc, pi->vi[0].viid, 11110 match_all_mac, match_all_mac, 11111 sc->rawf_base + pi->port_id, 1, pi->port_id, true); 11112 if (rc < 0) { 11113 rc = -rc; 11114 log(LOG_ERR, 11115 "%s: failed to add VXLAN TCAM entry: %d.\n", 11116 device_get_name(pi->vi[0].dev), rc); 11117 } else { 11118 MPASS(rc == sc->rawf_base + pi->port_id); 11119 rc = 0; 11120 pi->vxlan_tcam_entry = true; 11121 } 11122 } 11123 } else if (sc->vxlan_port == v->port) { 11124 sc->vxlan_refcount++; 11125 } else { 11126 log(LOG_ERR, "%s: VXLAN already configured on port %d; " 11127 "ignoring attempt to configure it on port %d\n", 11128 device_get_nameunit(sc->dev), sc->vxlan_port, v->port); 11129 } 11130 end_synchronized_op(sc, 0); 11131} 11132 11133static void 11134t4_vxlan_stop(struct adapter *sc, void *arg) 11135{ 11136 struct vxlan_evargs *v = arg; 11137 11138 if 
(sc->nrawf == 0 || chip_id(sc) <= CHELSIO_T5) 11139 return; 11140 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4vxsp") != 0) 11141 return; 11142 11143 /* 11144 * VXLANs may have been configured before the driver was loaded so we 11145 * may see more stops than starts. This is not handled cleanly but at 11146 * least we keep the refcount sane. 11147 */ 11148 if (sc->vxlan_port != v->port) 11149 goto done; 11150 if (sc->vxlan_refcount == 0) { 11151 log(LOG_ERR, 11152 "%s: VXLAN operation on port %d was stopped earlier; " 11153 "ignoring attempt to stop it again.\n", 11154 device_get_nameunit(sc->dev), sc->vxlan_port); 11155 } else if (--sc->vxlan_refcount == 0) { 11156 t4_set_reg_field(sc, A_MPS_RX_VXLAN_TYPE, F_VXLAN_EN, 0); 11157 } 11158done: 11159 end_synchronized_op(sc, 0); 11160} 11161 11162static void 11163t4_vxlan_start_handler(void *arg __unused, struct ifnet *ifp, 11164 sa_family_t family, u_int port) 11165{ 11166 struct vxlan_evargs v; 11167 11168 MPASS(family == AF_INET || family == AF_INET6); 11169 v.ifp = ifp; 11170 v.port = port; 11171 11172 t4_iterate(t4_vxlan_start, &v); 11173} 11174 11175static void 11176t4_vxlan_stop_handler(void *arg __unused, struct ifnet *ifp, sa_family_t family, 11177 u_int port) 11178{ 11179 struct vxlan_evargs v; 11180 11181 MPASS(family == AF_INET || family == AF_INET6); 11182 v.ifp = ifp; 11183 v.port = port; 11184 11185 t4_iterate(t4_vxlan_stop, &v); 11186} 11187 11188 11189static struct sx mlu; /* mod load unload */ 11190SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload"); 11191 11192static int 11193mod_event(module_t mod, int cmd, void *arg) 11194{ 11195 int rc = 0; 11196 static int loaded = 0; 11197 11198 switch (cmd) { 11199 case MOD_LOAD: 11200 sx_xlock(&mlu); 11201 if (loaded++ == 0) { 11202 t4_sge_modload(); 11203 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, 11204 t4_filter_rpl, CPL_COOKIE_FILTER); 11205 t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, 11206 do_l2t_write_rpl, CPL_COOKIE_FILTER); 11207 t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, 11208 t4_hashfilter_ao_rpl, CPL_COOKIE_HASHFILTER); 11209 t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, 11210 t4_hashfilter_tcb_rpl, CPL_COOKIE_HASHFILTER); 11211 t4_register_shared_cpl_handler(CPL_ABORT_RPL_RSS, 11212 t4_del_hashfilter_rpl, CPL_COOKIE_HASHFILTER); 11213 t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt); 11214 t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt); 11215 t4_register_cpl_handler(CPL_SMT_WRITE_RPL, 11216 do_smt_write_rpl); 11217 sx_init(&t4_list_lock, "T4/T5 adapters"); 11218 SLIST_INIT(&t4_list); 11219 callout_init(&fatal_callout, 1); 11220#ifdef TCP_OFFLOAD 11221 sx_init(&t4_uld_list_lock, "T4/T5 ULDs"); 11222 SLIST_INIT(&t4_uld_list); 11223#endif 11224#ifdef INET6 11225 t4_clip_modload(); 11226#endif 11227 t4_tracer_modload(); 11228 tweak_tunables(); 11229 vxlan_start_evtag = 11230 EVENTHANDLER_REGISTER(vxlan_start, 11231 t4_vxlan_start_handler, NULL, 11232 EVENTHANDLER_PRI_ANY); 11233 vxlan_stop_evtag = 11234 EVENTHANDLER_REGISTER(vxlan_stop, 11235 t4_vxlan_stop_handler, NULL, 11236 EVENTHANDLER_PRI_ANY); 11237 } 11238 sx_xunlock(&mlu); 11239 break; 11240 11241 case MOD_UNLOAD: 11242 sx_xlock(&mlu); 11243 if (--loaded == 0) { 11244 int tries; 11245 11246 sx_slock(&t4_list_lock); 11247 if (!SLIST_EMPTY(&t4_list)) { 11248 rc = EBUSY; 11249 sx_sunlock(&t4_list_lock); 11250 goto done_unload; 11251 } 11252#ifdef TCP_OFFLOAD 11253 sx_slock(&t4_uld_list_lock); 11254 if (!SLIST_EMPTY(&t4_uld_list)) { 11255 rc = EBUSY; 11256 
sx_sunlock(&t4_uld_list_lock); 11257 sx_sunlock(&t4_list_lock); 11258 goto done_unload; 11259 } 11260#endif 11261 tries = 0; 11262 while (tries++ < 5 && t4_sge_extfree_refs() != 0) { 11263 uprintf("%ju clusters with custom free routine " 11264 "still in use.\n", t4_sge_extfree_refs()); 11265 pause("t4unload", 2 * hz); 11266 } 11267#ifdef TCP_OFFLOAD 11268 sx_sunlock(&t4_uld_list_lock); 11269#endif 11270 sx_sunlock(&t4_list_lock); 11271 11272 if (t4_sge_extfree_refs() == 0) { 11273 EVENTHANDLER_DEREGISTER(vxlan_start, 11274 vxlan_start_evtag); 11275 EVENTHANDLER_DEREGISTER(vxlan_stop, 11276 vxlan_stop_evtag); 11277 t4_tracer_modunload(); 11278#ifdef INET6 11279 t4_clip_modunload(); 11280#endif 11281#ifdef TCP_OFFLOAD 11282 sx_destroy(&t4_uld_list_lock); 11283#endif 11284 sx_destroy(&t4_list_lock); 11285 t4_sge_modunload(); 11286 loaded = 0; 11287 } else { 11288 rc = EBUSY; 11289 loaded++; /* undo earlier decrement */ 11290 } 11291 } 11292done_unload: 11293 sx_xunlock(&mlu); 11294 break; 11295 } 11296 11297 return (rc); 11298} 11299 11300static devclass_t t4_devclass, t5_devclass, t6_devclass; 11301static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass; 11302static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass; 11303 11304DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0); 11305MODULE_VERSION(t4nex, 1); 11306MODULE_DEPEND(t4nex, firmware, 1, 1, 1); 11307#ifdef DEV_NETMAP 11308MODULE_DEPEND(t4nex, netmap, 1, 1, 1); 11309#endif /* DEV_NETMAP */ 11310 11311DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0); 11312MODULE_VERSION(t5nex, 1); 11313MODULE_DEPEND(t5nex, firmware, 1, 1, 1); 11314#ifdef DEV_NETMAP 11315MODULE_DEPEND(t5nex, netmap, 1, 1, 1); 11316#endif /* DEV_NETMAP */ 11317 11318DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0); 11319MODULE_VERSION(t6nex, 1); 11320MODULE_DEPEND(t6nex, firmware, 1, 1, 1); 11321#ifdef DEV_NETMAP 11322MODULE_DEPEND(t6nex, netmap, 1, 1, 1); 11323#endif /* DEV_NETMAP */ 11324 11325DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0); 11326MODULE_VERSION(cxgbe, 1); 11327 11328DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0); 11329MODULE_VERSION(cxl, 1); 11330 11331DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0); 11332MODULE_VERSION(cc, 1); 11333 11334DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0); 11335MODULE_VERSION(vcxgbe, 1); 11336 11337DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0); 11338MODULE_VERSION(vcxl, 1); 11339 11340DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0); 11341MODULE_VERSION(vcc, 1); 11342
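/*
 * Resulting newbus hierarchy, summarizing the DRIVER_MODULE() lines above
 * (each driver attaches to the parent bus named second):
 *
 *	pci -> t4nex -> cxgbe -> vcxgbe		(T4)
 *	pci -> t5nex -> cxl   -> vcxl		(T5)
 *	pci -> t6nex -> cc    -> vcc		(T6)
 */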