cxgb_main.c revision 171471
1167514Skmacy/************************************************************************** 2167514Skmacy 3167514SkmacyCopyright (c) 2007, Chelsio Inc. 4167514SkmacyAll rights reserved. 5167514Skmacy 6167514SkmacyRedistribution and use in source and binary forms, with or without 7167514Skmacymodification, are permitted provided that the following conditions are met: 8167514Skmacy 9167514Skmacy 1. Redistributions of source code must retain the above copyright notice, 10167514Skmacy this list of conditions and the following disclaimer. 11167514Skmacy 12169978Skmacy2. Neither the name of the Chelsio Corporation nor the names of its 13167514Skmacy contributors may be used to endorse or promote products derived from 14167514Skmacy this software without specific prior written permission. 15167514Skmacy 16167514SkmacyTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17167514SkmacyAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18167514SkmacyIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19167514SkmacyARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 20167514SkmacyLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21167514SkmacyCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22167514SkmacySUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23167514SkmacyINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24167514SkmacyCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25167514SkmacyARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26167514SkmacyPOSSIBILITY OF SUCH DAMAGE. 
27167514Skmacy 28167514Skmacy***************************************************************************/ 29167514Skmacy 30167514Skmacy#include <sys/cdefs.h> 31167514Skmacy__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_main.c 171471 2007-07-17 06:50:35Z kmacy $"); 32167514Skmacy 33167514Skmacy#include <sys/param.h> 34167514Skmacy#include <sys/systm.h> 35167514Skmacy#include <sys/kernel.h> 36167514Skmacy#include <sys/bus.h> 37167514Skmacy#include <sys/module.h> 38167514Skmacy#include <sys/pciio.h> 39167514Skmacy#include <sys/conf.h> 40167514Skmacy#include <machine/bus.h> 41167514Skmacy#include <machine/resource.h> 42167514Skmacy#include <sys/bus_dma.h> 43167514Skmacy#include <sys/rman.h> 44167514Skmacy#include <sys/ioccom.h> 45167514Skmacy#include <sys/mbuf.h> 46167514Skmacy#include <sys/linker.h> 47167514Skmacy#include <sys/firmware.h> 48167514Skmacy#include <sys/socket.h> 49167514Skmacy#include <sys/sockio.h> 50167514Skmacy#include <sys/smp.h> 51167514Skmacy#include <sys/sysctl.h> 52167514Skmacy#include <sys/queue.h> 53167514Skmacy#include <sys/taskqueue.h> 54167514Skmacy 55167514Skmacy#include <net/bpf.h> 56167514Skmacy#include <net/ethernet.h> 57167514Skmacy#include <net/if.h> 58167514Skmacy#include <net/if_arp.h> 59167514Skmacy#include <net/if_dl.h> 60167514Skmacy#include <net/if_media.h> 61167514Skmacy#include <net/if_types.h> 62167514Skmacy 63167514Skmacy#include <netinet/in_systm.h> 64167514Skmacy#include <netinet/in.h> 65167514Skmacy#include <netinet/if_ether.h> 66167514Skmacy#include <netinet/ip.h> 67167514Skmacy#include <netinet/ip.h> 68167514Skmacy#include <netinet/tcp.h> 69167514Skmacy#include <netinet/udp.h> 70167514Skmacy 71167514Skmacy#include <dev/pci/pcireg.h> 72167514Skmacy#include <dev/pci/pcivar.h> 73167514Skmacy#include <dev/pci/pci_private.h> 74167514Skmacy 75170076Skmacy#ifdef CONFIG_DEFINED 76170076Skmacy#include <cxgb_include.h> 77170076Skmacy#else 78170076Skmacy#include <dev/cxgb/cxgb_include.h> 79170076Skmacy#endif 80167514Skmacy 
81167514Skmacy#ifdef PRIV_SUPPORTED 82167514Skmacy#include <sys/priv.h> 83167514Skmacy#endif 84167514Skmacy 85167514Skmacystatic int cxgb_setup_msix(adapter_t *, int); 86170654Skmacystatic void cxgb_teardown_msix(adapter_t *); 87167514Skmacystatic void cxgb_init(void *); 88167514Skmacystatic void cxgb_init_locked(struct port_info *); 89167734Skmacystatic void cxgb_stop_locked(struct port_info *); 90167514Skmacystatic void cxgb_set_rxmode(struct port_info *); 91167514Skmacystatic int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t); 92167514Skmacystatic void cxgb_start(struct ifnet *); 93167514Skmacystatic void cxgb_start_proc(void *, int ncount); 94167514Skmacystatic int cxgb_media_change(struct ifnet *); 95167514Skmacystatic void cxgb_media_status(struct ifnet *, struct ifmediareq *); 96167514Skmacystatic int setup_sge_qsets(adapter_t *); 97167514Skmacystatic void cxgb_async_intr(void *); 98167514Skmacystatic void cxgb_ext_intr_handler(void *, int); 99170869Skmacystatic void cxgb_tick_handler(void *, int); 100170869Skmacystatic void cxgb_down_locked(struct adapter *sc); 101167514Skmacystatic void cxgb_tick(void *); 102167514Skmacystatic void setup_rss(adapter_t *sc); 103167514Skmacy 104167514Skmacy/* Attachment glue for the PCI controller end of the device. Each port of 105167514Skmacy * the device is attached separately, as defined later. 
106167514Skmacy */ 107167514Skmacystatic int cxgb_controller_probe(device_t); 108167514Skmacystatic int cxgb_controller_attach(device_t); 109167514Skmacystatic int cxgb_controller_detach(device_t); 110167514Skmacystatic void cxgb_free(struct adapter *); 111167514Skmacystatic __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start, 112167514Skmacy unsigned int end); 113167514Skmacystatic void cxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf); 114167514Skmacystatic int cxgb_get_regs_len(void); 115169978Skmacystatic int offload_open(struct port_info *pi); 116170789Skmacy#ifdef notyet 117169978Skmacystatic int offload_close(struct toedev *tdev); 118170789Skmacy#endif 119167514Skmacy 120169978Skmacy 121167514Skmacystatic device_method_t cxgb_controller_methods[] = { 122167514Skmacy DEVMETHOD(device_probe, cxgb_controller_probe), 123167514Skmacy DEVMETHOD(device_attach, cxgb_controller_attach), 124167514Skmacy DEVMETHOD(device_detach, cxgb_controller_detach), 125167514Skmacy 126167514Skmacy /* bus interface */ 127167514Skmacy DEVMETHOD(bus_print_child, bus_generic_print_child), 128167514Skmacy DEVMETHOD(bus_driver_added, bus_generic_driver_added), 129167514Skmacy 130167514Skmacy { 0, 0 } 131167514Skmacy}; 132167514Skmacy 133167514Skmacystatic driver_t cxgb_controller_driver = { 134167514Skmacy "cxgbc", 135167514Skmacy cxgb_controller_methods, 136167514Skmacy sizeof(struct adapter) 137167514Skmacy}; 138167514Skmacy 139167514Skmacystatic devclass_t cxgb_controller_devclass; 140167514SkmacyDRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 0, 0); 141167514Skmacy 142167514Skmacy/* 143167514Skmacy * Attachment glue for the ports. Attachment is done directly to the 144167514Skmacy * controller device. 
145167514Skmacy */ 146167514Skmacystatic int cxgb_port_probe(device_t); 147167514Skmacystatic int cxgb_port_attach(device_t); 148167514Skmacystatic int cxgb_port_detach(device_t); 149167514Skmacy 150167514Skmacystatic device_method_t cxgb_port_methods[] = { 151167514Skmacy DEVMETHOD(device_probe, cxgb_port_probe), 152167514Skmacy DEVMETHOD(device_attach, cxgb_port_attach), 153167514Skmacy DEVMETHOD(device_detach, cxgb_port_detach), 154167514Skmacy { 0, 0 } 155167514Skmacy}; 156167514Skmacy 157167514Skmacystatic driver_t cxgb_port_driver = { 158167514Skmacy "cxgb", 159167514Skmacy cxgb_port_methods, 160167514Skmacy 0 161167514Skmacy}; 162167514Skmacy 163167514Skmacystatic d_ioctl_t cxgb_extension_ioctl; 164170654Skmacystatic d_open_t cxgb_extension_open; 165170654Skmacystatic d_close_t cxgb_extension_close; 166167514Skmacy 167170654Skmacystatic struct cdevsw cxgb_cdevsw = { 168170654Skmacy .d_version = D_VERSION, 169170654Skmacy .d_flags = 0, 170170654Skmacy .d_open = cxgb_extension_open, 171170654Skmacy .d_close = cxgb_extension_close, 172170654Skmacy .d_ioctl = cxgb_extension_ioctl, 173170654Skmacy .d_name = "cxgb", 174170654Skmacy}; 175170654Skmacy 176167514Skmacystatic devclass_t cxgb_port_devclass; 177167514SkmacyDRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0); 178167514Skmacy 179167514Skmacy#define SGE_MSIX_COUNT (SGE_QSETS + 1) 180167514Skmacy 181168749Skmacyextern int collapse_mbufs; 182167514Skmacy/* 183167514Skmacy * The driver uses the best interrupt scheme available on a platform in the 184167514Skmacy * order MSI-X, MSI, legacy pin interrupts. 
This parameter determines which 185167514Skmacy * of these schemes the driver may consider as follows: 186167514Skmacy * 187167514Skmacy * msi = 2: choose from among all three options 188167514Skmacy * msi = 1 : only consider MSI and pin interrupts 189167514Skmacy * msi = 0: force pin interrupts 190167514Skmacy */ 191167760Skmacystatic int msi_allowed = 2; 192170083Skmacy 193167514SkmacyTUNABLE_INT("hw.cxgb.msi_allowed", &msi_allowed); 194167514SkmacySYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters"); 195167514SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0, 196167514Skmacy "MSI-X, MSI, INTx selector"); 197169978Skmacy 198169053Skmacy/* 199169978Skmacy * The driver enables offload as a default. 200169978Skmacy * To disable it, use ofld_disable = 1. 201169053Skmacy */ 202169978Skmacystatic int ofld_disable = 0; 203169978SkmacyTUNABLE_INT("hw.cxgb.ofld_disable", &ofld_disable); 204169978SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, ofld_disable, CTLFLAG_RDTUN, &ofld_disable, 0, 205169978Skmacy "disable ULP offload"); 206169978Skmacy 207169978Skmacy/* 208169978Skmacy * The driver uses an auto-queue algorithm by default. 209169978Skmacy * To disable it and force a single queue-set per port, use singleq = 1. 
210169978Skmacy */ 211169053Skmacystatic int singleq = 1; 212169978SkmacyTUNABLE_INT("hw.cxgb.singleq", &singleq); 213169978SkmacySYSCTL_UINT(_hw_cxgb, OID_AUTO, singleq, CTLFLAG_RDTUN, &singleq, 0, 214169978Skmacy "use a single queue-set per port"); 215167514Skmacy 216167514Skmacyenum { 217167514Skmacy MAX_TXQ_ENTRIES = 16384, 218167514Skmacy MAX_CTRL_TXQ_ENTRIES = 1024, 219167514Skmacy MAX_RSPQ_ENTRIES = 16384, 220167514Skmacy MAX_RX_BUFFERS = 16384, 221167514Skmacy MAX_RX_JUMBO_BUFFERS = 16384, 222167514Skmacy MIN_TXQ_ENTRIES = 4, 223167514Skmacy MIN_CTRL_TXQ_ENTRIES = 4, 224167514Skmacy MIN_RSPQ_ENTRIES = 32, 225167514Skmacy MIN_FL_ENTRIES = 32 226167514Skmacy}; 227167514Skmacy 228171471Skmacystruct filter_info { 229171471Skmacy u32 sip; 230171471Skmacy u32 sip_mask; 231171471Skmacy u32 dip; 232171471Skmacy u16 sport; 233171471Skmacy u16 dport; 234171471Skmacy u32 vlan:12; 235171471Skmacy u32 vlan_prio:3; 236171471Skmacy u32 mac_hit:1; 237171471Skmacy u32 mac_idx:4; 238171471Skmacy u32 mac_vld:1; 239171471Skmacy u32 pkt_type:2; 240171471Skmacy u32 report_filter_id:1; 241171471Skmacy u32 pass:1; 242171471Skmacy u32 rss:1; 243171471Skmacy u32 qset:3; 244171471Skmacy u32 locked:1; 245171471Skmacy u32 valid:1; 246171471Skmacy}; 247171471Skmacy 248171471Skmacyenum { FILTER_NO_VLAN_PRI = 7 }; 249171471Skmacy 250167514Skmacy#define PORT_MASK ((1 << MAX_NPORTS) - 1) 251167514Skmacy 252167514Skmacy/* Table for probing the cards. 
The desc field isn't actually used */ 253167514Skmacystruct cxgb_ident { 254167514Skmacy uint16_t vendor; 255167514Skmacy uint16_t device; 256167514Skmacy int index; 257167514Skmacy char *desc; 258167514Skmacy} cxgb_identifiers[] = { 259167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"}, 260167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"}, 261167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"}, 262167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"}, 263167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"}, 264167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"}, 265167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"}, 266167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"}, 267167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"}, 268167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"}, 269170654Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"}, 270167514Skmacy {0, 0, 0, NULL} 271167514Skmacy}; 272167514Skmacy 273171471Skmacy 274171471Skmacystatic int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset); 275171471Skmacy 276171471Skmacystatic inline char 277171471Skmacyt3rev2char(struct adapter *adapter) 278171471Skmacy{ 279171471Skmacy char rev = 'z'; 280171471Skmacy 281171471Skmacy switch(adapter->params.rev) { 282171471Skmacy case T3_REV_A: 283171471Skmacy rev = 'a'; 284171471Skmacy break; 285171471Skmacy case T3_REV_B: 286171471Skmacy case T3_REV_B2: 287171471Skmacy rev = 'b'; 288171471Skmacy break; 289171471Skmacy case T3_REV_C: 290171471Skmacy rev = 'c'; 291171471Skmacy break; 292171471Skmacy } 293171471Skmacy return rev; 294171471Skmacy} 295171471Skmacy 296167514Skmacystatic struct cxgb_ident * 297167514Skmacycxgb_get_ident(device_t dev) 298167514Skmacy{ 299167514Skmacy struct cxgb_ident *id; 300167514Skmacy 301167514Skmacy for (id = cxgb_identifiers; id->desc != NULL; id++) { 302167514Skmacy if ((id->vendor == pci_get_vendor(dev)) && 303167514Skmacy (id->device == pci_get_device(dev))) { 
304167514Skmacy return (id); 305167514Skmacy } 306167514Skmacy } 307167514Skmacy return (NULL); 308167514Skmacy} 309167514Skmacy 310167514Skmacystatic const struct adapter_info * 311167514Skmacycxgb_get_adapter_info(device_t dev) 312167514Skmacy{ 313167514Skmacy struct cxgb_ident *id; 314167514Skmacy const struct adapter_info *ai; 315167514Skmacy 316167514Skmacy id = cxgb_get_ident(dev); 317167514Skmacy if (id == NULL) 318167514Skmacy return (NULL); 319167514Skmacy 320167514Skmacy ai = t3_get_adapter_info(id->index); 321167514Skmacy 322167514Skmacy return (ai); 323167514Skmacy} 324167514Skmacy 325167514Skmacystatic int 326167514Skmacycxgb_controller_probe(device_t dev) 327167514Skmacy{ 328167514Skmacy const struct adapter_info *ai; 329167514Skmacy char *ports, buf[80]; 330170654Skmacy int nports; 331170654Skmacy 332167514Skmacy ai = cxgb_get_adapter_info(dev); 333167514Skmacy if (ai == NULL) 334167514Skmacy return (ENXIO); 335167514Skmacy 336170654Skmacy nports = ai->nports0 + ai->nports1; 337170654Skmacy if (nports == 1) 338167514Skmacy ports = "port"; 339167514Skmacy else 340167514Skmacy ports = "ports"; 341167514Skmacy 342170654Skmacy snprintf(buf, sizeof(buf), "%s RNIC, %d %s", ai->desc, nports, ports); 343167514Skmacy device_set_desc_copy(dev, buf); 344167514Skmacy return (BUS_PROBE_DEFAULT); 345167514Skmacy} 346167514Skmacy 347171471Skmacy#define FW_FNAME "t3fw%d%d%d" 348171471Skmacy#define TPEEPROM_NAME "t3%ctpe%d%d%d" 349171471Skmacy#define TPSRAM_NAME "t3%cps%d%d%d" 350171471Skmacy 351167514Skmacystatic int 352169978Skmacyupgrade_fw(adapter_t *sc) 353167514Skmacy{ 354167514Skmacy char buf[32]; 355167514Skmacy#ifdef FIRMWARE_LATEST 356167514Skmacy const struct firmware *fw; 357167514Skmacy#else 358167514Skmacy struct firmware *fw; 359167514Skmacy#endif 360167514Skmacy int status; 361167514Skmacy 362171471Skmacy snprintf(&buf[0], sizeof(buf), FW_FNAME, FW_VERSION_MAJOR, 363169978Skmacy FW_VERSION_MINOR, FW_VERSION_MICRO); 364167514Skmacy 365167514Skmacy fw = 
firmware_get(buf); 366167514Skmacy 367167514Skmacy if (fw == NULL) { 368169978Skmacy device_printf(sc->dev, "Could not find firmware image %s\n", buf); 369169978Skmacy return (ENOENT); 370171471Skmacy } else 371171471Skmacy device_printf(sc->dev, "updating firmware on card with %s\n", buf); 372167514Skmacy status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize); 373167514Skmacy 374171471Skmacy device_printf(sc->dev, "firmware update returned %s %d\n", (status == 0) ? "success" : "fail", status); 375171471Skmacy 376167514Skmacy firmware_put(fw, FIRMWARE_UNLOAD); 377167514Skmacy 378167514Skmacy return (status); 379167514Skmacy} 380167514Skmacy 381167514Skmacystatic int 382167514Skmacycxgb_controller_attach(device_t dev) 383167514Skmacy{ 384167514Skmacy device_t child; 385167514Skmacy const struct adapter_info *ai; 386167514Skmacy struct adapter *sc; 387169978Skmacy int i, reg, msi_needed, error = 0; 388167514Skmacy uint32_t vers; 389167760Skmacy int port_qsets = 1; 390170869Skmacy 391167514Skmacy sc = device_get_softc(dev); 392167514Skmacy sc->dev = dev; 393169978Skmacy sc->msi_count = 0; 394169978Skmacy 395167840Skmacy /* find the PCIe link width and set max read request to 4KB*/ 396167840Skmacy if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 397167840Skmacy uint16_t lnk, pectl; 398167840Skmacy lnk = pci_read_config(dev, reg + 0x12, 2); 399167840Skmacy sc->link_width = (lnk >> 4) & 0x3f; 400167840Skmacy 401167840Skmacy pectl = pci_read_config(dev, reg + 0x8, 2); 402167840Skmacy pectl = (pectl & ~0x7000) | (5 << 12); 403167840Skmacy pci_write_config(dev, reg + 0x8, pectl, 2); 404167840Skmacy } 405171471Skmacy 406171471Skmacy ai = cxgb_get_adapter_info(dev); 407171471Skmacy if (sc->link_width != 0 && sc->link_width <= 4 && 408171471Skmacy (ai->nports0 + ai->nports1) <= 2) { 409167840Skmacy device_printf(sc->dev, 410167862Skmacy "PCIe x%d Link, expect reduced performance\n", 411167840Skmacy sc->link_width); 412167840Skmacy } 413167840Skmacy 414167514Skmacy 
pci_enable_busmaster(dev); 415167514Skmacy /* 416167514Skmacy * Allocate the registers and make them available to the driver. 417167514Skmacy * The registers that we care about for NIC mode are in BAR 0 418167514Skmacy */ 419167514Skmacy sc->regs_rid = PCIR_BAR(0); 420167514Skmacy if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 421167514Skmacy &sc->regs_rid, RF_ACTIVE)) == NULL) { 422167514Skmacy device_printf(dev, "Cannot allocate BAR\n"); 423167514Skmacy return (ENXIO); 424167514Skmacy } 425167514Skmacy 426170869Skmacy snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d", 427170869Skmacy device_get_unit(dev)); 428170869Skmacy ADAPTER_LOCK_INIT(sc, sc->lockbuf); 429170869Skmacy 430170869Skmacy snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d", 431170869Skmacy device_get_unit(dev)); 432170869Skmacy snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d", 433170869Skmacy device_get_unit(dev)); 434170869Skmacy snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d", 435170869Skmacy device_get_unit(dev)); 436167514Skmacy 437170869Skmacy MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_DEF); 438170869Skmacy MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF); 439170869Skmacy MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF); 440170869Skmacy 441167514Skmacy sc->bt = rman_get_bustag(sc->regs_res); 442167514Skmacy sc->bh = rman_get_bushandle(sc->regs_res); 443167514Skmacy sc->mmio_len = rman_get_size(sc->regs_res); 444167769Skmacy 445167769Skmacy if (t3_prep_adapter(sc, ai, 1) < 0) { 446170654Skmacy printf("prep adapter failed\n"); 447167769Skmacy error = ENODEV; 448167769Skmacy goto out; 449167769Skmacy } 450167514Skmacy /* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate 451167514Skmacy * enough messages for the queue sets. If that fails, try falling 452167514Skmacy * back to MSI. 
If that fails, then try falling back to the legacy 453167514Skmacy * interrupt pin model. 454167514Skmacy */ 455167514Skmacy#ifdef MSI_SUPPORTED 456167760Skmacy 457167514Skmacy sc->msix_regs_rid = 0x20; 458167514Skmacy if ((msi_allowed >= 2) && 459167514Skmacy (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 460167514Skmacy &sc->msix_regs_rid, RF_ACTIVE)) != NULL) { 461167514Skmacy 462169978Skmacy msi_needed = sc->msi_count = SGE_MSIX_COUNT; 463167760Skmacy 464169978Skmacy if (((error = pci_alloc_msix(dev, &sc->msi_count)) != 0) || 465169978Skmacy (sc->msi_count != msi_needed)) { 466169978Skmacy device_printf(dev, "msix allocation failed - msi_count = %d" 467169978Skmacy " msi_needed=%d will try msi err=%d\n", sc->msi_count, 468169978Skmacy msi_needed, error); 469169978Skmacy sc->msi_count = 0; 470167514Skmacy pci_release_msi(dev); 471167514Skmacy bus_release_resource(dev, SYS_RES_MEMORY, 472167514Skmacy sc->msix_regs_rid, sc->msix_regs_res); 473167514Skmacy sc->msix_regs_res = NULL; 474167514Skmacy } else { 475167514Skmacy sc->flags |= USING_MSIX; 476170081Skmacy sc->cxgb_intr = t3_intr_msix; 477167514Skmacy } 478167514Skmacy } 479167514Skmacy 480169978Skmacy if ((msi_allowed >= 1) && (sc->msi_count == 0)) { 481169978Skmacy sc->msi_count = 1; 482169978Skmacy if (pci_alloc_msi(dev, &sc->msi_count)) { 483167760Skmacy device_printf(dev, "alloc msi failed - will try INTx\n"); 484169978Skmacy sc->msi_count = 0; 485167514Skmacy pci_release_msi(dev); 486167514Skmacy } else { 487167514Skmacy sc->flags |= USING_MSI; 488167514Skmacy sc->irq_rid = 1; 489170081Skmacy sc->cxgb_intr = t3_intr_msi; 490167514Skmacy } 491167514Skmacy } 492167514Skmacy#endif 493169978Skmacy if (sc->msi_count == 0) { 494167760Skmacy device_printf(dev, "using line interrupts\n"); 495167514Skmacy sc->irq_rid = 0; 496170081Skmacy sc->cxgb_intr = t3b_intr; 497167514Skmacy } 498167514Skmacy 499167514Skmacy 500167514Skmacy /* Create a private taskqueue thread for handling driver events */ 
501167514Skmacy#ifdef TASKQUEUE_CURRENT 502167514Skmacy sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT, 503167514Skmacy taskqueue_thread_enqueue, &sc->tq); 504167514Skmacy#else 505167514Skmacy sc->tq = taskqueue_create_fast("cxgb_taskq", M_NOWAIT, 506167514Skmacy taskqueue_thread_enqueue, &sc->tq); 507167514Skmacy#endif 508167514Skmacy if (sc->tq == NULL) { 509167514Skmacy device_printf(dev, "failed to allocate controller task queue\n"); 510167514Skmacy goto out; 511167514Skmacy } 512167514Skmacy 513167514Skmacy taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq", 514167514Skmacy device_get_nameunit(dev)); 515167514Skmacy TASK_INIT(&sc->ext_intr_task, 0, cxgb_ext_intr_handler, sc); 516170869Skmacy TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc); 517167514Skmacy 518167514Skmacy 519167514Skmacy /* Create a periodic callout for checking adapter status */ 520170869Skmacy callout_init(&sc->cxgb_tick_ch, TRUE); 521167514Skmacy 522167514Skmacy if (t3_check_fw_version(sc) != 0) { 523167514Skmacy /* 524167514Skmacy * Warn user that a firmware update will be attempted in init. 525167514Skmacy */ 526169978Skmacy device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n", 527169978Skmacy FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); 528167514Skmacy sc->flags &= ~FW_UPTODATE; 529167514Skmacy } else { 530167514Skmacy sc->flags |= FW_UPTODATE; 531167514Skmacy } 532171471Skmacy 533171471Skmacy if (t3_check_tpsram_version(sc) != 0) { 534171471Skmacy /* 535171471Skmacy * Warn user that a firmware update will be attempted in init. 
536171471Skmacy */ 537171471Skmacy device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n", 538171471Skmacy t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); 539171471Skmacy sc->flags &= ~TPS_UPTODATE; 540171471Skmacy } else { 541171471Skmacy sc->flags |= TPS_UPTODATE; 542171471Skmacy } 543167514Skmacy 544169978Skmacy if ((sc->flags & USING_MSIX) && !singleq) 545167760Skmacy port_qsets = min((SGE_QSETS/(sc)->params.nports), mp_ncpus); 546167760Skmacy 547167514Skmacy /* 548167514Skmacy * Create a child device for each MAC. The ethernet attachment 549167514Skmacy * will be done in these children. 550167760Skmacy */ 551167760Skmacy for (i = 0; i < (sc)->params.nports; i++) { 552167514Skmacy if ((child = device_add_child(dev, "cxgb", -1)) == NULL) { 553167514Skmacy device_printf(dev, "failed to add child port\n"); 554167514Skmacy error = EINVAL; 555167514Skmacy goto out; 556167514Skmacy } 557167514Skmacy sc->port[i].adapter = sc; 558167760Skmacy sc->port[i].nqsets = port_qsets; 559167760Skmacy sc->port[i].first_qset = i*port_qsets; 560167514Skmacy sc->port[i].port = i; 561171471Skmacy sc->portdev[i] = child; 562167514Skmacy device_set_softc(child, &sc->port[i]); 563167514Skmacy } 564167514Skmacy if ((error = bus_generic_attach(dev)) != 0) 565167514Skmacy goto out; 566167514Skmacy 567169978Skmacy /* 568169978Skmacy * XXX need to poll for link status 569169978Skmacy */ 570167514Skmacy sc->params.stats_update_period = 1; 571167514Skmacy 572167514Skmacy /* initialize sge private state */ 573170654Skmacy t3_sge_init_adapter(sc); 574167514Skmacy 575167514Skmacy t3_led_ready(sc); 576169978Skmacy 577169978Skmacy cxgb_offload_init(); 578169978Skmacy if (is_offload(sc)) { 579169978Skmacy setbit(&sc->registered_device_map, OFFLOAD_DEVMAP_BIT); 580169978Skmacy cxgb_adapter_ofld(sc); 581169978Skmacy } 582167514Skmacy error = t3_get_fw_version(sc, &vers); 583167514Skmacy if (error) 584167514Skmacy goto out; 585167514Skmacy 586169978Skmacy 
snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", 587169978Skmacy G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), 588169978Skmacy G_FW_VERSION_MICRO(vers)); 589169978Skmacy 590167514Skmacy t3_add_sysctls(sc); 591167514Skmacyout: 592167514Skmacy if (error) 593167514Skmacy cxgb_free(sc); 594167514Skmacy 595167514Skmacy return (error); 596167514Skmacy} 597167514Skmacy 598167514Skmacystatic int 599167514Skmacycxgb_controller_detach(device_t dev) 600167514Skmacy{ 601167514Skmacy struct adapter *sc; 602167514Skmacy 603167514Skmacy sc = device_get_softc(dev); 604167514Skmacy 605167514Skmacy cxgb_free(sc); 606167514Skmacy 607167514Skmacy return (0); 608167514Skmacy} 609167514Skmacy 610167514Skmacystatic void 611167514Skmacycxgb_free(struct adapter *sc) 612167514Skmacy{ 613167514Skmacy int i; 614167514Skmacy 615170869Skmacy ADAPTER_LOCK(sc); 616170869Skmacy /* 617170869Skmacy * drops the lock 618170869Skmacy */ 619170869Skmacy cxgb_down_locked(sc); 620169978Skmacy 621169978Skmacy#ifdef MSI_SUPPORTED 622169978Skmacy if (sc->flags & (USING_MSI | USING_MSIX)) { 623169978Skmacy device_printf(sc->dev, "releasing msi message(s)\n"); 624169978Skmacy pci_release_msi(sc->dev); 625169978Skmacy } else { 626169978Skmacy device_printf(sc->dev, "no msi message to release\n"); 627169978Skmacy } 628169978Skmacy#endif 629169978Skmacy if (sc->msix_regs_res != NULL) { 630169978Skmacy bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid, 631169978Skmacy sc->msix_regs_res); 632169978Skmacy } 633169978Skmacy 634167514Skmacy t3_sge_deinit_sw(sc); 635167514Skmacy 636167514Skmacy if (sc->tq != NULL) { 637167514Skmacy taskqueue_drain(sc->tq, &sc->ext_intr_task); 638170869Skmacy taskqueue_drain(sc->tq, &sc->tick_task); 639167514Skmacy taskqueue_free(sc->tq); 640167514Skmacy } 641170869Skmacy 642170869Skmacy tsleep(&sc, 0, "cxgb unload", hz); 643167514Skmacy 644167760Skmacy for (i = 0; i < (sc)->params.nports; ++i) { 645167760Skmacy if (sc->portdev[i] != NULL) 
646167760Skmacy device_delete_child(sc->dev, sc->portdev[i]); 647167760Skmacy } 648167760Skmacy 649167514Skmacy bus_generic_detach(sc->dev); 650170654Skmacy#ifdef notyet 651169978Skmacy if (is_offload(sc)) { 652169978Skmacy cxgb_adapter_unofld(sc); 653169978Skmacy if (isset(&sc->open_device_map, OFFLOAD_DEVMAP_BIT)) 654169978Skmacy offload_close(&sc->tdev); 655169978Skmacy } 656170654Skmacy#endif 657167514Skmacy t3_free_sge_resources(sc); 658171471Skmacy free(sc->filters, M_DEVBUF); 659167514Skmacy t3_sge_free(sc); 660170869Skmacy 661170869Skmacy cxgb_offload_exit(); 662170869Skmacy 663167514Skmacy if (sc->regs_res != NULL) 664167514Skmacy bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid, 665167514Skmacy sc->regs_res); 666167514Skmacy 667170869Skmacy MTX_DESTROY(&sc->mdio_lock); 668170869Skmacy MTX_DESTROY(&sc->sge.reg_lock); 669170869Skmacy MTX_DESTROY(&sc->elmer_lock); 670170869Skmacy ADAPTER_LOCK_DEINIT(sc); 671167514Skmacy 672167514Skmacy return; 673167514Skmacy} 674167514Skmacy 675171471Skmacy 676171471Skmacy 677171471Skmacystatic int 678171471Skmacyalloc_filters(struct adapter *adap) 679171471Skmacy{ 680171471Skmacy struct filter_info *p; 681171471Skmacy int nfilters; 682171471Skmacy 683171471Skmacy if ((nfilters = adap->params.mc5.nfilters) == 0) 684171471Skmacy return (0); 685171471Skmacy 686171471Skmacy adap->filters = malloc(nfilters*sizeof(struct filter_info), 687171471Skmacy M_DEVBUF, M_ZERO|M_WAITOK); 688171471Skmacy 689171471Skmacy if (adap->filters == NULL) 690171471Skmacy return (ENOMEM); 691171471Skmacy 692171471Skmacy /* Set the default filters, only need to set non-0 fields here. 
*/ 693171471Skmacy p = &adap->filters[nfilters - 1]; 694171471Skmacy p->vlan = 0xfff; 695171471Skmacy p->vlan_prio = FILTER_NO_VLAN_PRI; 696171471Skmacy p->pass = p->rss = p->valid = p->locked = 1; 697171471Skmacy 698171471Skmacy return (0); 699171471Skmacy} 700171471Skmacy 701171471Skmacystatic inline void 702171471Skmacyset_tcb_field_ulp(struct cpl_set_tcb_field *req, 703171471Skmacy unsigned int tid, unsigned int word, 704171471Skmacy uint64_t mask, uint64_t val) 705171471Skmacy{ 706171471Skmacy struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req; 707171471Skmacy 708171471Skmacy txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); 709171471Skmacy txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8)); 710171471Skmacy OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 711171471Skmacy req->reply = V_NO_REPLY(1); 712171471Skmacy req->cpu_idx = 0; 713171471Skmacy req->word = htons(word); 714171471Skmacy req->mask = htobe64(mask); 715171471Skmacy req->val = htobe64(val); 716171471Skmacy} 717171471Skmacy 718171471Skmacystatic int 719171471Skmacyset_filter(struct adapter *adap, int id, const struct filter_info *f) 720171471Skmacy{ 721171471Skmacy int len; 722171471Skmacy struct mbuf *m; 723171471Skmacy struct ulp_txpkt *txpkt; 724171471Skmacy struct work_request_hdr *wr; 725171471Skmacy struct cpl_pass_open_req *oreq; 726171471Skmacy struct cpl_set_tcb_field *sreq; 727171471Skmacy 728171471Skmacy len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq); 729171471Skmacy id += t3_mc5_size(&adap->mc5) - adap->params.mc5.nroutes - 730171471Skmacy adap->params.mc5.nfilters; 731171471Skmacy 732171471Skmacy m = m_gethdr(M_TRYWAIT, MT_DATA); 733171471Skmacy wr = mtod(m, struct work_request_hdr *); 734171471Skmacy wr->wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC); 735171471Skmacy m->m_len = m->m_pkthdr.len = len; 736171471Skmacy 737171471Skmacy oreq = (struct cpl_pass_open_req *)(wr + 1); 738171471Skmacy txpkt = (struct ulp_txpkt *)oreq; 739171471Skmacy 
txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); 740171471Skmacy txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8)); 741171471Skmacy OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id)); 742171471Skmacy oreq->local_port = htons(f->dport); 743171471Skmacy oreq->peer_port = htons(f->sport); 744171471Skmacy oreq->local_ip = htonl(f->dip); 745171471Skmacy oreq->peer_ip = htonl(f->sip); 746171471Skmacy oreq->peer_netmask = htonl(f->sip_mask); 747171471Skmacy oreq->opt0h = 0; 748171471Skmacy oreq->opt0l = htonl(F_NO_OFFLOAD); 749171471Skmacy oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) | 750171471Skmacy V_CONN_POLICY(CPL_CONN_POLICY_FILTER) | 751171471Skmacy V_VLAN_PRI(f->vlan_prio >> 1) | 752171471Skmacy V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) | 753171471Skmacy V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) | 754171471Skmacy V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4))); 755171471Skmacy 756171471Skmacy sreq = (struct cpl_set_tcb_field *)(oreq + 1); 757171471Skmacy set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL, 758171471Skmacy (f->report_filter_id << 15) | (1 << 23) | 759171471Skmacy ((u64)f->pass << 35) | ((u64)!f->rss << 36)); 760171471Skmacy set_tcb_field_ulp(sreq + 1, id, 25, 0x3f80000, 761171471Skmacy (u64)adap->rrss_map[f->qset] << 19); 762171471Skmacy t3_mgmt_tx(adap, m); 763171471Skmacy return 0; 764171471Skmacy} 765171471Skmacy 766171471Skmacystatic int 767171471Skmacysetup_hw_filters(struct adapter *adap) 768171471Skmacy{ 769171471Skmacy int i, err; 770171471Skmacy 771171471Skmacy if (adap->filters == NULL) 772171471Skmacy return 0; 773171471Skmacy 774171471Skmacy t3_enable_filters(adap); 775171471Skmacy 776171471Skmacy for (i = err = 0; i < adap->params.mc5.nfilters && !err; i++) 777171471Skmacy if (adap->filters[i].locked) 778171471Skmacy err = set_filter(adap, i, &adap->filters[i]); 779171471Skmacy return err; 780171471Skmacy} 781171471Skmacy 782167514Skmacy/** 783167514Skmacy * setup_sge_qsets - configure SGE Tx/Rx/response 
queues
 *	@sc: the controller softc
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int
setup_sge_qsets(adapter_t *sc)
{
	int i, j, err, irq_idx, qset_idx;
	u_int ntxq = SGE_TXQ_PER_SET;

	if ((err = t3_sge_alloc(sc)) != 0) {
		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
		return (err);
	}

	/* irq_idx of -1 is used on rev > 0 parts when not in plain-MSI
	 * mode; 0 otherwise. */
	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
		irq_idx = -1;
	else
		irq_idx = 0;

	/* Queue sets are allocated as one contiguous run per port; with
	 * MSI-X each qset gets vector qset_idx + 1 (vector 0 is async). */
	for (qset_idx = 0, i = 0; i < (sc)->params.nports; ++i) {
		struct port_info *pi = &sc->port[i];

		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
			    (sc->flags & USING_MSIX) ?
 qset_idx + 1 : irq_idx,
			    &sc->params.sge.qset[qset_idx], ntxq, pi);
			if (err) {
				/* Roll back everything allocated so far. */
				t3_free_sge_resources(sc);
				device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n", err);
				return (err);
			}
		}
	}

	return (0);
}

/*
 * Tear down all MSI-X queue-set vectors: unhook each interrupt handler
 * and release its IRQ resource, NULLing the softc slots so a repeat
 * call is harmless.
 */
static void
cxgb_teardown_msix(adapter_t *sc)
{
	int i, nqsets;

	for (nqsets = i = 0; i < (sc)->params.nports; i++)
		nqsets += sc->port[i].nqsets;

	for (i = 0; i < nqsets; i++) {
		if (sc->msix_intr_tag[i] != NULL) {
			bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
			    sc->msix_intr_tag[i]);
			sc->msix_intr_tag[i] = NULL;
		}
		if (sc->msix_irq_res[i] != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    sc->msix_irq_rid[i], sc->msix_irq_res[i]);
			sc->msix_irq_res[i] = NULL;
		}
	}
}

/*
 * Allocate and hook up the MSI-X vectors: message 1 carries async events
 * (link changes, errors), messages 2..n serve the SGE queue sets.
 * NOTE(review): the error paths below return without releasing vectors
 * already allocated -- presumably the caller is expected to run
 * cxgb_teardown_msix() afterwards; confirm.
 */
static int
cxgb_setup_msix(adapter_t *sc, int msix_count)
{
	int i, j, k, nqsets, rid;

	/* The first message indicates link changes and error conditions */
	sc->irq_rid = 1;
	if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(sc->dev, "Cannot allocate msix interrupt\n");
		return (EINVAL);
	}

	if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
	    NULL,
#endif
	    cxgb_async_intr, sc, &sc->intr_tag))
 {
		device_printf(sc->dev, "Cannot set up interrupt\n");
		return (EINVAL);
	}
	/* One vector per queue set; resource id is qset index + 2 since
	 * rid 1 was taken by the async vector above. */
	for (i = k = 0; i < (sc)->params.nports; i++) {
		nqsets = sc->port[i].nqsets;
		for (j = 0; j < nqsets; j++, k++) {
			struct sge_qset *qs = &sc->sge.qs[k];

			rid = k + 2;
			if (cxgb_debug)
				printf("rid=%d ", rid);
			if ((sc->msix_irq_res[k] = bus_alloc_resource_any(
			    sc->dev, SYS_RES_IRQ, &rid,
			    RF_SHAREABLE | RF_ACTIVE)) == NULL) {
				device_printf(sc->dev, "Cannot allocate "
				    "interrupt for message %d\n", rid);
				return (EINVAL);
			}
			sc->msix_irq_rid[k] = rid;
			if (bus_setup_intr(sc->dev, sc->msix_irq_res[k],
			    INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
			    NULL,
#endif
			    t3_intr_msix, qs, &sc->msix_intr_tag[k])) {
				device_printf(sc->dev, "Cannot set up "
				    "interrupt for message %d\n", rid);
				return (EINVAL);
			}
		}
	}


	return (0);
}

/*
 * newbus probe for a port child device: publish a human-readable
 * description built from the port number and PHY description string.
 */
static int
cxgb_port_probe(device_t dev)
{
	struct port_info *p;
	char buf[80];

	p = device_get_softc(dev);

	snprintf(buf, sizeof(buf), "Port %d %s", p->port, p->port_type->desc);
	device_set_desc_copy(dev, buf);
	return (0);
}


/*
 * Create the per-port character device (named after the ifnet) that
 * backs the driver's ioctl interface.  Returns ENOMEM on failure.
 */
static int
cxgb_makedev(struct port_info *pi)
{

	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
	    UID_ROOT, GID_WHEEL, 0600, if_name(pi->ifp));

	if (pi->port_cdev == NULL)
		return (ENOMEM);

	/* Stash the port so the cdev ioctl handler can find it. */
	pi->port_cdev->si_drv1 = (void *)pi;

	return (0);
}


/* Interface capability sets; TSO-capable builds advertise TSO and enable
 * TSO4 only.  The no-TSO build defines the missing flags away to zero so
 * the attach code below compiles unchanged. */
#ifdef TSO_SUPPORTED
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO4 | IFCAP_JUMBO_MTU)
#else
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
/* Don't enable TSO6 yet */
#define CXGB_CAP_ENABLE (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_JUMBO_MTU)
#define IFCAP_TSO4 0x0
#define CSUM_TSO 0x0
#endif


/*
 * newbus attach for a port: initialize the port lock, set up the ifnet
 * and its capabilities, media list, per-port cdev and TX taskqueue.
 * NOTE(review): the failure paths below return without undoing earlier
 * steps (lock init, if_alloc, ether_ifattach) -- presumably detach
 * cleans up; confirm.
 */
static int
cxgb_port_attach(device_t dev)
{
	struct port_info *p;
	struct ifnet *ifp;
	int err, media_flags;

	p = device_get_softc(dev);

	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
	    device_get_unit(device_get_parent(dev)), p->port);
	PORT_LOCK_INIT(p, p->lockbuf);

	/* Allocate an ifnet object and set it up */
	ifp = p->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}

	/*
	 * Note that there is currently no watchdog timer.
	 */
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = cxgb_init;
	ifp->if_softc = p;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = cxgb_ioctl;
	ifp->if_start = cxgb_start;
	ifp->if_timer = 0;	/* Disable ifnet watchdog */
	ifp->if_watchdog = NULL;

	ifp->if_snd.ifq_drv_maxlen = TX_ETH_Q_SIZE;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/* Advertise the full capability set but enable only CXGB_CAP_ENABLE. */
	ifp->if_hwassist = ifp->if_capabilities = ifp->if_capenable = 0;
	ifp->if_capabilities |= CXGB_CAP;
	ifp->if_capenable |= CXGB_CAP_ENABLE;
	ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO);
	/*
	 * disable TSO on 4-port - it isn't supported by the firmware yet
	 */
	if (p->adapter->params.nports > 2) {
		ifp->if_capabilities &= ~(IFCAP_TSO4 | IFCAP_TSO6);
		ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_TSO6);
		ifp->if_hwassist &= ~CSUM_TSO;
	}

	ether_ifattach(ifp, p->hw_addr);
	/*
	 * Only default to jumbo frames on 10GigE
	 */
	if (p->adapter->params.nports <= 2)
		ifp->if_mtu = 9000;
	if ((err = cxgb_makedev(p)) != 0) {
		printf("makedev failed %d\n", err);
		return (err);
	}
	/* Build the ifmedia list from the PHY description string. */
	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
	    cxgb_media_status);

	if (!strcmp(p->port_type->desc, "10GBASE-CX4")) {
		media_flags = IFM_ETHER | IFM_10G_CX4 | IFM_FDX;
	} else if
(!strcmp(p->port_type->desc, "10GBASE-SR")) { 1008170654Skmacy media_flags = IFM_ETHER | IFM_10G_SR | IFM_FDX; 1009170654Skmacy } else if (!strcmp(p->port_type->desc, "10GBASE-XR")) { 1010170654Skmacy media_flags = IFM_ETHER | IFM_10G_LR | IFM_FDX; 1011170654Skmacy } else if (!strcmp(p->port_type->desc, "10/100/1000BASE-T")) { 1012170654Skmacy ifmedia_add(&p->media, IFM_ETHER | IFM_10_T, 0, NULL); 1013170654Skmacy ifmedia_add(&p->media, IFM_ETHER | IFM_10_T | IFM_FDX, 1014170654Skmacy 0, NULL); 1015170654Skmacy ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX, 1016170654Skmacy 0, NULL); 1017170654Skmacy ifmedia_add(&p->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 1018170654Skmacy 0, NULL); 1019170654Skmacy ifmedia_add(&p->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 1020170654Skmacy 0, NULL); 1021170654Skmacy media_flags = 0; 1022170654Skmacy } else { 1023167514Skmacy printf("unsupported media type %s\n", p->port_type->desc); 1024167514Skmacy return (ENXIO); 1025167514Skmacy } 1026170654Skmacy if (media_flags) { 1027170654Skmacy ifmedia_add(&p->media, media_flags, 0, NULL); 1028170654Skmacy ifmedia_set(&p->media, media_flags); 1029170654Skmacy } else { 1030170654Skmacy ifmedia_add(&p->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1031170654Skmacy ifmedia_set(&p->media, IFM_ETHER | IFM_AUTO); 1032170654Skmacy } 1033167514Skmacy 1034170654Skmacy 1035170869Skmacy snprintf(p->taskqbuf, TASKQ_NAME_LEN, "cxgb_port_taskq%d", p->port); 1036167514Skmacy#ifdef TASKQUEUE_CURRENT 1037167514Skmacy /* Create a port for handling TX without starvation */ 1038170869Skmacy p->tq = taskqueue_create(p->taskqbuf, M_NOWAIT, 1039167514Skmacy taskqueue_thread_enqueue, &p->tq); 1040167514Skmacy#else 1041167514Skmacy /* Create a port for handling TX without starvation */ 1042167514Skmacy p->tq = taskqueue_create_fast(buf, M_NOWAIT, 1043167514Skmacy taskqueue_thread_enqueue, &p->tq); 1044167514Skmacy#endif 1045170654Skmacy 1046167514Skmacy if (p->tq == NULL) { 1047167514Skmacy device_printf(dev, "failed to 
allocate port task queue\n"); 1048167514Skmacy return (ENOMEM); 1049167514Skmacy } 1050167514Skmacy taskqueue_start_threads(&p->tq, 1, PI_NET, "%s taskq", 1051167514Skmacy device_get_nameunit(dev)); 1052167514Skmacy TASK_INIT(&p->start_task, 0, cxgb_start_proc, ifp); 1053167514Skmacy 1054170654Skmacy t3_sge_init_port(p); 1055170654Skmacy 1056167514Skmacy return (0); 1057167514Skmacy} 1058167514Skmacy 1059167514Skmacystatic int 1060167514Skmacycxgb_port_detach(device_t dev) 1061167514Skmacy{ 1062167514Skmacy struct port_info *p; 1063167514Skmacy 1064167514Skmacy p = device_get_softc(dev); 1065169978Skmacy 1066169978Skmacy PORT_LOCK(p); 1067170654Skmacy if (p->ifp->if_drv_flags & IFF_DRV_RUNNING) 1068170654Skmacy cxgb_stop_locked(p); 1069169978Skmacy PORT_UNLOCK(p); 1070169978Skmacy 1071167514Skmacy if (p->tq != NULL) { 1072167514Skmacy taskqueue_drain(p->tq, &p->start_task); 1073167514Skmacy taskqueue_free(p->tq); 1074167514Skmacy p->tq = NULL; 1075167514Skmacy } 1076170869Skmacy 1077170869Skmacy PORT_LOCK_DEINIT(p); 1078167514Skmacy ether_ifdetach(p->ifp); 1079167514Skmacy if_free(p->ifp); 1080167514Skmacy 1081170654Skmacy if (p->port_cdev != NULL) 1082170654Skmacy destroy_dev(p->port_cdev); 1083170654Skmacy 1084167514Skmacy return (0); 1085167514Skmacy} 1086167514Skmacy 1087167514Skmacyvoid 1088167514Skmacyt3_fatal_err(struct adapter *sc) 1089167514Skmacy{ 1090167514Skmacy u_int fw_status[4]; 1091167514Skmacy 1092167514Skmacy device_printf(sc->dev,"encountered fatal error, operation suspended\n"); 1093167514Skmacy if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status)) 1094167514Skmacy device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n", 1095167514Skmacy fw_status[0], fw_status[1], fw_status[2], fw_status[3]); 1096167514Skmacy} 1097167514Skmacy 1098167514Skmacyint 1099167514Skmacyt3_os_find_pci_capability(adapter_t *sc, int cap) 1100167514Skmacy{ 1101167514Skmacy device_t dev; 1102167514Skmacy struct pci_devinfo *dinfo; 1103167514Skmacy pcicfgregs *cfg; 
	uint32_t status;
	uint8_t ptr;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);
	cfg = &dinfo->cfg;

	status = pci_read_config(dev, PCIR_STATUS, 2);
	if (!(status & PCIM_STATUS_CAPPRESENT))
		return (0);

	/* Location of the capability pointer depends on the header type. */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case 0:
	case 1:
		ptr = PCIR_CAP_PTR;
		break;
	case 2:
		ptr = PCIR_CAP_PTR_2;
		break;
	default:
		return (0);
		break;
	}
	ptr = pci_read_config(dev, ptr, 1);

	/* Follow the singly-linked capability chain until 'cap' is found. */
	while (ptr != 0) {
		if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
			return (ptr);
		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
	}

	return (0);
}

/*
 * OS-dependent hook: save the adapter's PCI config space through the
 * bus layer.  Always returns 0.
 */
int
t3_os_pci_save_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_save(dev, dinfo, 0);
	return (0);
}

/*
 * OS-dependent hook: restore the previously saved PCI config space.
 * Always returns 0.
 */
int
t3_os_pci_restore_state(struct adapter *sc)
{
	device_t dev;
	struct pci_devinfo *dinfo;

	dev = sc->dev;
	dinfo = device_get_ivars(dev);

	pci_cfg_restore(dev, dinfo);
	return (0);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@fc: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void
t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
    int duplex, int fc)
{
	struct port_info *pi = &adapter->port[port_id];
	struct cmac *mac = &adapter->port[port_id].mac;

	/* Nothing to report while the interface is administratively down. */
	if ((pi->ifp->if_flags & IFF_UP) == 0)
		return;

	if (link_status) {
		t3_mac_enable(mac, MAC_DIRECTION_RX);
		if_link_state_change(pi->ifp, LINK_STATE_UP);
	} else {
		/* Link lost: report it, power down the PHY, stop MAC RX,
		 * and re-run link bring-up for when the peer returns. */
		if_link_state_change(pi->ifp, LINK_STATE_DOWN);
		pi->phy.ops->power_down(&pi->phy, 1);
		t3_mac_disable(mac, MAC_DIRECTION_RX);
		t3_link_start(&pi->phy, mac, &pi->link_config);
	}
}


/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void
t3_os_ext_intr_handler(adapter_t *sc)
{
	if (cxgb_debug)
		printf("t3_os_ext_intr_handler\n");
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	ADAPTER_LOCK(sc);
	if (sc->slow_intr_mask) {
		sc->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
		taskqueue_enqueue(sc->tq, &sc->ext_intr_task);
	}
	ADAPTER_UNLOCK(sc);
}

/*
 * Record a port's MAC address in the softc.  May run before the ifnet
 * exists (called early in attach by t3_prep_adapter), hence the copy
 * into port_info rather than the ifnet.
 */
void
t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
{

	/*
	 * The ifnet might not be allocated before this gets called,
	 * as this is called early on in attach by t3_prep_adapter
	 * save the address off in the port structure
	 */
	if (cxgb_debug)
		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
}

/**
 *	link_start - enable a port
 *	@p: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void
cxgb_link_start(struct port_info *p)
{
	struct ifnet *ifp;
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;

	ifp = p->ifp;

	t3_init_rx_mode(&rm, p);
	t3_mac_reset(mac);
	/* Allow for the Ethernet header plus a VLAN tag on top of the MTU. */
	t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	t3_mac_set_address(mac, 0, p->hw_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&p->phy, mac, &p->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

/**
 *	setup_rss - configure Receive Side Steering (per-queue connection demux)
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void
setup_rss(adapter_t *adap)
{
	int i;
	u_int nq[2];
	uint8_t cpus[SGE_QSETS + 1];
	uint16_t rspq_map[RSS_TABLE_SIZE];

	nq[0] = adap->port[0].nqsets;
	/* Treat a missing second port as one qset to avoid a mod-by-zero. */
	nq[1] = max((u_int)adap->port[1].nqsets, 1U);

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;

	/* Lower half of the table steers to port 0's qsets, upper half to
	 * port 1's (offset by nq[0]). */
	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = nq[0] ? i % nq[0] : 0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
	}
	/* Calculate the reverse RSS map table */
	/* NOTE(review): assumes rrss_map[] was pre-initialized to 0xff
	 * elsewhere -- confirm against the allocation site. */
	for (i = 0; i < RSS_TABLE_SIZE; ++i)
		if (adap->rrss_map[rspq_map[i]] == 0xff)
			adap->rrss_map[rspq_map[i]] = i;

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
	    F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
	    F_RRCPLMAPEN | V_RRCPLCPUSIZE(6), cpus, rspq_map);

}

/*
 * Sends an mbuf to an offload queue driver
 * after dealing with any active network taps.
1304169978Skmacy */ 1305169978Skmacystatic inline int 1306169978Skmacyoffload_tx(struct toedev *tdev, struct mbuf *m) 1307169978Skmacy{ 1308169978Skmacy int ret; 1309169978Skmacy 1310169978Skmacy critical_enter(); 1311169978Skmacy ret = t3_offload_tx(tdev, m); 1312169978Skmacy critical_exit(); 1313170654Skmacy return (ret); 1314169978Skmacy} 1315169978Skmacy 1316169978Skmacystatic int 1317169978Skmacywrite_smt_entry(struct adapter *adapter, int idx) 1318169978Skmacy{ 1319169978Skmacy struct port_info *pi = &adapter->port[idx]; 1320169978Skmacy struct cpl_smt_write_req *req; 1321169978Skmacy struct mbuf *m; 1322169978Skmacy 1323169978Skmacy if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL) 1324169978Skmacy return (ENOMEM); 1325169978Skmacy 1326169978Skmacy req = mtod(m, struct cpl_smt_write_req *); 1327169978Skmacy req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1328169978Skmacy OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx)); 1329169978Skmacy req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */ 1330169978Skmacy req->iff = idx; 1331169978Skmacy memset(req->src_mac1, 0, sizeof(req->src_mac1)); 1332169978Skmacy memcpy(req->src_mac0, pi->hw_addr, ETHER_ADDR_LEN); 1333169978Skmacy 1334169978Skmacy m_set_priority(m, 1); 1335169978Skmacy 1336169978Skmacy offload_tx(&adapter->tdev, m); 1337169978Skmacy 1338169978Skmacy return (0); 1339169978Skmacy} 1340169978Skmacy 1341169978Skmacystatic int 1342169978Skmacyinit_smt(struct adapter *adapter) 1343169978Skmacy{ 1344169978Skmacy int i; 1345169978Skmacy 1346169978Skmacy for_each_port(adapter, i) 1347169978Skmacy write_smt_entry(adapter, i); 1348169978Skmacy return 0; 1349169978Skmacy} 1350169978Skmacy 1351167514Skmacystatic void 1352169978Skmacyinit_port_mtus(adapter_t *adapter) 1353169978Skmacy{ 1354169978Skmacy unsigned int mtus = adapter->port[0].ifp->if_mtu; 1355169978Skmacy 1356169978Skmacy if (adapter->port[1].ifp) 1357169978Skmacy mtus |= adapter->port[1].ifp->if_mtu << 16; 1358169978Skmacy 
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

/*
 * Build and send a FW_WROPCODE_MNGT packet-scheduler request.  Best
 * effort: silently skipped when no mbuf is available (M_DONTWAIT).
 */
static void
send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
    int hi, int port)
{
	struct mbuf *m;
	struct mngt_pktsched_wr *req;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m) {
		req = mtod(m, struct mngt_pktsched_wr *);
		req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
		req->sched = sched;
		req->idx = qidx;
		req->min = lo;
		req->max = hi;
		req->binding = port;
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		t3_mgmt_tx(adap, m);
	}
}

/*
 * Bind each port's queue sets to that port in the packet scheduler.
 */
static void
bind_qsets(adapter_t *sc)
{
	int i, j;

	for (i = 0; i < (sc)->params.nports; ++i) {
		const struct port_info *pi = adap2pinfo(sc, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
			    -1, i);
	}
}

/*
 * If the protocol-SRAM image stored in the EEPROM is older than the one
 * this driver ships with, rewrite the EEPROM copy from the firmware(9)
 * image.  Best effort: failures are logged and the function returns.
 */
static void
update_tpeeprom(struct adapter *adap)
{
	const struct firmware *tpeeprom;
	char buf[64];
	uint32_t version;
	unsigned int major, minor;
	int ret, len;
	char rev;

	t3_seeprom_read(adap, TP_SRAM_OFFSET, &version);

	major = G_TP_VERSION_MAJOR(version);
	minor = G_TP_VERSION_MINOR(version);
	/* EEPROM copy already matches the driver's version - nothing to do. */
	if (major == TP_VERSION_MAJOR && minor ==
TP_VERSION_MINOR) 1412171471Skmacy return; 1413171471Skmacy 1414171471Skmacy rev = t3rev2char(adap); 1415171471Skmacy 1416171471Skmacy snprintf(buf, sizeof(buf), TPEEPROM_NAME, rev, 1417171471Skmacy TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); 1418171471Skmacy 1419171471Skmacy tpeeprom = firmware_get(buf); 1420171471Skmacy if (tpeeprom == NULL) { 1421171471Skmacy device_printf(adap->dev, "could not load TP EEPROM: unable to load %s\n", 1422171471Skmacy buf); 1423171471Skmacy return; 1424171471Skmacy } 1425171471Skmacy 1426171471Skmacy len = tpeeprom->datasize - 4; 1427171471Skmacy 1428171471Skmacy ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize); 1429171471Skmacy if (ret) 1430171471Skmacy goto release_tpeeprom; 1431171471Skmacy 1432171471Skmacy if (len != TP_SRAM_LEN) { 1433171471Skmacy device_printf(adap->dev, "%s length is wrong len=%d expected=%d\n", buf, len, TP_SRAM_LEN); 1434171471Skmacy return; 1435171471Skmacy } 1436171471Skmacy 1437171471Skmacy ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize, 1438171471Skmacy TP_SRAM_OFFSET); 1439171471Skmacy 1440171471Skmacy if (!ret) { 1441171471Skmacy device_printf(adap->dev, 1442171471Skmacy "Protocol SRAM image updated in EEPROM to %d.%d.%d\n", 1443171471Skmacy TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); 1444171471Skmacy } else 1445171471Skmacy device_printf(adap->dev, "Protocol SRAM image update in EEPROM failed\n"); 1446171471Skmacy 1447171471Skmacyrelease_tpeeprom: 1448171471Skmacy firmware_put(tpeeprom, FIRMWARE_UNLOAD); 1449171471Skmacy 1450171471Skmacy return; 1451171471Skmacy} 1452171471Skmacy 1453171471Skmacystatic int 1454171471Skmacyupdate_tpsram(struct adapter *adap) 1455171471Skmacy{ 1456171471Skmacy const struct firmware *tpsram; 1457171471Skmacy char buf[64]; 1458171471Skmacy int ret; 1459171471Skmacy char rev; 1460171471Skmacy 1461171471Skmacy rev = t3rev2char(adap); 1462171471Skmacy if (!rev) 1463171471Skmacy return 0; 1464171471Skmacy 
	/* Keep the EEPROM copy current before loading the SRAM. */
	update_tpeeprom(adap);

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
	    TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	tpsram = firmware_get(buf);
	if (tpsram == NULL){
		device_printf(adap->dev, "could not load TP SRAM: unable to load %s\n",
		    buf);
		return (EINVAL);
	} else
		device_printf(adap->dev, "updating TP SRAM with %s\n", buf);

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret)
		device_printf(adap->dev, "loading protocol SRAM failed\n");

release_tpsram:
	firmware_put(tpsram, FIRMWARE_UNLOAD);

	return ret;
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 */
static int
cxgb_up(struct adapter *sc)
{
	int err = 0;

	/* One-time initialization on the first transition to 'up':
	 * firmware/TP SRAM refresh, HW init, queue sets, filters, RSS. */
	if ((sc->flags & FULL_INIT_DONE) == 0) {

		if ((sc->flags & FW_UPTODATE) == 0)
			if ((err = upgrade_fw(sc)))
				goto out;
		if ((sc->flags & TPS_UPTODATE) == 0)
			if ((err = update_tpsram(sc)))
				goto out;
		err = t3_init_hw(sc, 0);
		if (err)
			goto out;

		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(sc);
		if (err)
			goto out;

		alloc_filters(sc);
		setup_rss(sc);
		sc->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(sc);

	/* If it's MSI or INTx, allocate a single interrupt for everything */
	if ((sc->flags & USING_MSIX) == 0) {
		if ((sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		    &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
			device_printf(sc->dev, "Cannot allocate interrupt rid=%d\n", sc->irq_rid);
			err = EINVAL;
			goto out;
		}
		device_printf(sc->dev, "allocated irq_res=%p\n", sc->irq_res);

		if (bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE|INTR_TYPE_NET,
#ifdef INTR_FILTERS
		    NULL,
#endif
		    sc->cxgb_intr, sc, &sc->intr_tag)) {
			device_printf(sc->dev, "Cannot set up interrupt\n");
			err = EINVAL;
			goto irq_err;
		}
	} else {

		/* NOTE(review): cxgb_setup_msix()'s return value is
		 * discarded here -- a vector setup failure goes unnoticed;
		 * confirm intent. */
		cxgb_setup_msix(sc, sc->msi_count);
	}

	t3_sge_start(sc);
	t3_intr_enable(sc);

	/* Bind queue sets and program HW filters exactly once, after the
	 * MSI-X vectors are live. */
	if ((sc->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX) {
		bind_qsets(sc);
		setup_hw_filters(sc);
		sc->flags |= QUEUES_BOUND;
	}
out:
	return (err);
irq_err:
	CH_ERR(sc, "request_irq failed, err %d\n", err);
	goto out;
}


/*
 * Release resources when all the ports and offloading have been stopped.
 */
/* Entered with the adapter lock held; the lock is dropped before the
 * callouts and taskqueues are drained so they can complete. */
static void
cxgb_down_locked(struct adapter *sc)
{
	int i;

	t3_sge_stop(sc);
	t3_intr_disable(sc);

	if (sc->intr_tag != NULL) {
		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
		sc->intr_tag = NULL;
	}
	if (sc->irq_res != NULL) {
		device_printf(sc->dev, "de-allocating interrupt irq_rid=%d irq_res=%p\n",
		    sc->irq_rid, sc->irq_res);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq_res);
		sc->irq_res = NULL;
	}

	if (sc->flags & USING_MSIX)
		cxgb_teardown_msix(sc);
	ADAPTER_UNLOCK(sc);

	callout_drain(&sc->cxgb_tick_ch);
	callout_drain(&sc->sge_timer_ch);

	if (sc->tq != NULL)
		taskqueue_drain(sc->tq, &sc->slow_intr_task);
	for (i = 0; i < sc->params.nports; i++)
		if (sc->port[i].tq != NULL)
			taskqueue_drain(sc->port[i].tq,
&sc->port[i].timer_reclaim_task); 1605170654Skmacy 1606169978Skmacy} 1607169978Skmacy 1608169978Skmacystatic int 1609169978Skmacyoffload_open(struct port_info *pi) 1610169978Skmacy{ 1611169978Skmacy struct adapter *adapter = pi->adapter; 1612169978Skmacy struct toedev *tdev = TOEDEV(pi->ifp); 1613169978Skmacy int adap_up = adapter->open_device_map & PORT_MASK; 1614169978Skmacy int err = 0; 1615169978Skmacy 1616169978Skmacy if (atomic_cmpset_int(&adapter->open_device_map, 1617169978Skmacy (adapter->open_device_map & ~OFFLOAD_DEVMAP_BIT), 1618169978Skmacy (adapter->open_device_map | OFFLOAD_DEVMAP_BIT)) == 0) 1619169978Skmacy return (0); 1620169978Skmacy 1621169978Skmacy ADAPTER_LOCK(pi->adapter); 1622169978Skmacy if (!adap_up) 1623169978Skmacy err = cxgb_up(adapter); 1624169978Skmacy ADAPTER_UNLOCK(pi->adapter); 1625171471Skmacy if (err) 1626169978Skmacy return (err); 1627169978Skmacy 1628169978Skmacy t3_tp_set_offload_mode(adapter, 1); 1629169978Skmacy tdev->lldev = adapter->port[0].ifp; 1630169978Skmacy err = cxgb_offload_activate(adapter); 1631169978Skmacy if (err) 1632169978Skmacy goto out; 1633169978Skmacy 1634169978Skmacy init_port_mtus(adapter); 1635169978Skmacy t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd, 1636169978Skmacy adapter->params.b_wnd, 1637169978Skmacy adapter->params.rev == 0 ? 
1638169978Skmacy adapter->port[0].ifp->if_mtu : 0xffff); 1639169978Skmacy init_smt(adapter); 1640169978Skmacy 1641169978Skmacy /* Call back all registered clients */ 1642169978Skmacy cxgb_add_clients(tdev); 1643169978Skmacy 1644169978Skmacyout: 1645169978Skmacy /* restore them in case the offload module has changed them */ 1646169978Skmacy if (err) { 1647169978Skmacy t3_tp_set_offload_mode(adapter, 0); 1648169978Skmacy clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT); 1649169978Skmacy cxgb_set_dummy_ops(tdev); 1650169978Skmacy } 1651169978Skmacy return (err); 1652169978Skmacy} 1653170789Skmacy#ifdef notyet 1654169978Skmacystatic int 1655169978Skmacyoffload_close(struct toedev *tdev) 1656169978Skmacy{ 1657169978Skmacy struct adapter *adapter = tdev2adap(tdev); 1658169978Skmacy 1659169978Skmacy if (!isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT)) 1660170654Skmacy return (0); 1661169978Skmacy 1662169978Skmacy /* Call back all registered clients */ 1663169978Skmacy cxgb_remove_clients(tdev); 1664169978Skmacy tdev->lldev = NULL; 1665169978Skmacy cxgb_set_dummy_ops(tdev); 1666169978Skmacy t3_tp_set_offload_mode(adapter, 0); 1667169978Skmacy clrbit(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT); 1668169978Skmacy 1669169978Skmacy if (!adapter->open_device_map) 1670169978Skmacy cxgb_down(adapter); 1671169978Skmacy 1672169978Skmacy cxgb_offload_deactivate(adapter); 1673170654Skmacy return (0); 1674169978Skmacy} 1675170789Skmacy#endif 1676169978Skmacy 1677169978Skmacystatic void 1678167514Skmacycxgb_init(void *arg) 1679167514Skmacy{ 1680167514Skmacy struct port_info *p = arg; 1681167514Skmacy 1682167514Skmacy PORT_LOCK(p); 1683167514Skmacy cxgb_init_locked(p); 1684167514Skmacy PORT_UNLOCK(p); 1685167514Skmacy} 1686167514Skmacy 1687167514Skmacystatic void 1688167514Skmacycxgb_init_locked(struct port_info *p) 1689167514Skmacy{ 1690167514Skmacy struct ifnet *ifp; 1691167514Skmacy adapter_t *sc = p->adapter; 1692169978Skmacy int err; 1693167514Skmacy 1694170869Skmacy 
PORT_LOCK_ASSERT_OWNED(p); 1695167514Skmacy ifp = p->ifp; 1696167514Skmacy 1697167514Skmacy ADAPTER_LOCK(p->adapter); 1698171471Skmacy if ((sc->open_device_map == 0) && (err = cxgb_up(sc))) { 1699169978Skmacy ADAPTER_UNLOCK(p->adapter); 1700169978Skmacy cxgb_stop_locked(p); 1701169978Skmacy return; 1702169978Skmacy } 1703170869Skmacy if (p->adapter->open_device_map == 0) { 1704167514Skmacy t3_intr_clear(sc); 1705170869Skmacy t3_sge_init_adapter(sc); 1706170869Skmacy } 1707169978Skmacy setbit(&p->adapter->open_device_map, p->port); 1708170654Skmacy ADAPTER_UNLOCK(p->adapter); 1709169978Skmacy 1710169978Skmacy if (is_offload(sc) && !ofld_disable) { 1711169978Skmacy err = offload_open(p); 1712169978Skmacy if (err) 1713169978Skmacy log(LOG_WARNING, 1714169978Skmacy "Could not initialize offload capabilities\n"); 1715169978Skmacy } 1716169978Skmacy cxgb_link_start(p); 1717170654Skmacy t3_link_changed(sc, p->port); 1718170654Skmacy ifp->if_baudrate = p->link_config.speed * 1000000; 1719170654Skmacy 1720167514Skmacy t3_port_intr_enable(sc, p->port); 1721167760Skmacy 1722167514Skmacy callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz, 1723167514Skmacy cxgb_tick, sc); 1724170869Skmacy 1725167514Skmacy ifp->if_drv_flags |= IFF_DRV_RUNNING; 1726167514Skmacy ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1727167514Skmacy} 1728167514Skmacy 1729167514Skmacystatic void 1730167514Skmacycxgb_set_rxmode(struct port_info *p) 1731167514Skmacy{ 1732167514Skmacy struct t3_rx_mode rm; 1733167514Skmacy struct cmac *mac = &p->mac; 1734167760Skmacy 1735170869Skmacy PORT_LOCK_ASSERT_OWNED(p); 1736170654Skmacy 1737167514Skmacy t3_init_rx_mode(&rm, p); 1738167514Skmacy t3_mac_set_rx_mode(mac, &rm); 1739167514Skmacy} 1740167514Skmacy 1741167514Skmacystatic void 1742167734Skmacycxgb_stop_locked(struct port_info *p) 1743167514Skmacy{ 1744167514Skmacy struct ifnet *ifp; 1745167514Skmacy 1746170869Skmacy PORT_LOCK_ASSERT_OWNED(p); 1747170869Skmacy ADAPTER_LOCK_ASSERT_NOTOWNED(p->adapter); 
1748170654Skmacy 1749167514Skmacy ifp = p->ifp; 1750167514Skmacy 1751169978Skmacy t3_port_intr_disable(p->adapter, p->port); 1752169978Skmacy ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1753169978Skmacy p->phy.ops->power_down(&p->phy, 1); 1754169978Skmacy t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); 1755169978Skmacy 1756167514Skmacy ADAPTER_LOCK(p->adapter); 1757169978Skmacy clrbit(&p->adapter->open_device_map, p->port); 1758170869Skmacy 1759170869Skmacy 1760170869Skmacy if (p->adapter->open_device_map == 0) { 1761170869Skmacy cxgb_down_locked(p->adapter); 1762170869Skmacy } else 1763170869Skmacy ADAPTER_UNLOCK(p->adapter); 1764170869Skmacy 1765167514Skmacy} 1766167514Skmacy 1767167514Skmacystatic int 1768170654Skmacycxgb_set_mtu(struct port_info *p, int mtu) 1769170654Skmacy{ 1770170654Skmacy struct ifnet *ifp = p->ifp; 1771170654Skmacy int error = 0; 1772170654Skmacy 1773170654Skmacy if ((mtu < ETHERMIN) || (mtu > ETHER_MAX_LEN_JUMBO)) 1774170654Skmacy error = EINVAL; 1775170654Skmacy else if (ifp->if_mtu != mtu) { 1776170654Skmacy PORT_LOCK(p); 1777170654Skmacy ifp->if_mtu = mtu; 1778170654Skmacy if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1779170654Skmacy callout_stop(&p->adapter->cxgb_tick_ch); 1780170654Skmacy cxgb_stop_locked(p); 1781170654Skmacy cxgb_init_locked(p); 1782170654Skmacy } 1783170654Skmacy PORT_UNLOCK(p); 1784170654Skmacy } 1785170654Skmacy return (error); 1786170654Skmacy} 1787170654Skmacy 1788170654Skmacystatic int 1789167514Skmacycxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data) 1790167514Skmacy{ 1791167514Skmacy struct port_info *p = ifp->if_softc; 1792167514Skmacy struct ifaddr *ifa = (struct ifaddr *)data; 1793167514Skmacy struct ifreq *ifr = (struct ifreq *)data; 1794167514Skmacy int flags, error = 0; 1795167514Skmacy uint32_t mask; 1796167514Skmacy 1797168737Skmacy /* 1798168737Skmacy * XXX need to check that we aren't in the middle of an unload 1799168737Skmacy */ 1800167514Skmacy switch 
(command) { 1801167514Skmacy case SIOCSIFMTU: 1802170654Skmacy error = cxgb_set_mtu(p, ifr->ifr_mtu); 1803167514Skmacy break; 1804167514Skmacy case SIOCSIFADDR: 1805167514Skmacy case SIOCGIFADDR: 1806170654Skmacy PORT_LOCK(p); 1807167514Skmacy if (ifa->ifa_addr->sa_family == AF_INET) { 1808167514Skmacy ifp->if_flags |= IFF_UP; 1809170654Skmacy if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1810170654Skmacy cxgb_init_locked(p); 1811167514Skmacy arp_ifinit(ifp, ifa); 1812167514Skmacy } else 1813167514Skmacy error = ether_ioctl(ifp, command, data); 1814170654Skmacy PORT_UNLOCK(p); 1815167514Skmacy break; 1816167514Skmacy case SIOCSIFFLAGS: 1817170869Skmacy callout_drain(&p->adapter->cxgb_tick_ch); 1818170869Skmacy PORT_LOCK(p); 1819167514Skmacy if (ifp->if_flags & IFF_UP) { 1820167514Skmacy if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1821167514Skmacy flags = p->if_flags; 1822167514Skmacy if (((ifp->if_flags ^ flags) & IFF_PROMISC) || 1823167514Skmacy ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) 1824167514Skmacy cxgb_set_rxmode(p); 1825167514Skmacy } else 1826167514Skmacy cxgb_init_locked(p); 1827167760Skmacy p->if_flags = ifp->if_flags; 1828170869Skmacy } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1829170869Skmacy cxgb_stop_locked(p); 1830170869Skmacy 1831170869Skmacy if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1832170869Skmacy adapter_t *sc = p->adapter; 1833170869Skmacy callout_reset(&sc->cxgb_tick_ch, 1834170869Skmacy sc->params.stats_update_period * hz, 1835170869Skmacy cxgb_tick, sc); 1836167514Skmacy } 1837170654Skmacy PORT_UNLOCK(p); 1838167514Skmacy break; 1839167514Skmacy case SIOCSIFMEDIA: 1840167514Skmacy case SIOCGIFMEDIA: 1841167514Skmacy error = ifmedia_ioctl(ifp, ifr, &p->media, command); 1842167514Skmacy break; 1843167514Skmacy case SIOCSIFCAP: 1844167514Skmacy PORT_LOCK(p); 1845167514Skmacy mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1846167514Skmacy if (mask & IFCAP_TXCSUM) { 1847167514Skmacy if (IFCAP_TXCSUM & ifp->if_capenable) { 1848167514Skmacy 
ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4); 1849167514Skmacy ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP 1850167514Skmacy | CSUM_TSO); 1851167514Skmacy } else { 1852167514Skmacy ifp->if_capenable |= IFCAP_TXCSUM; 1853167514Skmacy ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); 1854167514Skmacy } 1855167514Skmacy } else if (mask & IFCAP_RXCSUM) { 1856167514Skmacy if (IFCAP_RXCSUM & ifp->if_capenable) { 1857167514Skmacy ifp->if_capenable &= ~IFCAP_RXCSUM; 1858167514Skmacy } else { 1859167514Skmacy ifp->if_capenable |= IFCAP_RXCSUM; 1860167514Skmacy } 1861167514Skmacy } 1862167514Skmacy if (mask & IFCAP_TSO4) { 1863167514Skmacy if (IFCAP_TSO4 & ifp->if_capenable) { 1864167514Skmacy ifp->if_capenable &= ~IFCAP_TSO4; 1865167514Skmacy ifp->if_hwassist &= ~CSUM_TSO; 1866167514Skmacy } else if (IFCAP_TXCSUM & ifp->if_capenable) { 1867167514Skmacy ifp->if_capenable |= IFCAP_TSO4; 1868167514Skmacy ifp->if_hwassist |= CSUM_TSO; 1869167514Skmacy } else { 1870167514Skmacy if (cxgb_debug) 1871167514Skmacy printf("cxgb requires tx checksum offload" 1872167514Skmacy " be enabled to use TSO\n"); 1873167514Skmacy error = EINVAL; 1874167514Skmacy } 1875167514Skmacy } 1876167514Skmacy PORT_UNLOCK(p); 1877167514Skmacy break; 1878167514Skmacy default: 1879167514Skmacy error = ether_ioctl(ifp, command, data); 1880167514Skmacy break; 1881167514Skmacy } 1882167514Skmacy return (error); 1883167514Skmacy} 1884167514Skmacy 1885167514Skmacystatic int 1886167514Skmacycxgb_start_tx(struct ifnet *ifp, uint32_t txmax) 1887167514Skmacy{ 1888167514Skmacy struct sge_qset *qs; 1889167514Skmacy struct sge_txq *txq; 1890167514Skmacy struct port_info *p = ifp->if_softc; 1891168737Skmacy struct mbuf *m0, *m = NULL; 1892167514Skmacy int err, in_use_init; 1893170654Skmacy 1894167514Skmacy if (!p->link_config.link_ok) 1895167514Skmacy return (ENXIO); 1896167514Skmacy 1897167514Skmacy if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1898167514Skmacy return (ENOBUFS); 1899167514Skmacy 1900167514Skmacy qs = 
&p->adapter->sge.qs[p->first_qset]; 1901167514Skmacy txq = &qs->txq[TXQ_ETH]; 1902167514Skmacy err = 0; 1903167514Skmacy 1904171335Skmacy if (txq->flags & TXQ_TRANSMITTING) 1905171335Skmacy return (EINPROGRESS); 1906171335Skmacy 1907167514Skmacy mtx_lock(&txq->lock); 1908171335Skmacy txq->flags |= TXQ_TRANSMITTING; 1909167514Skmacy in_use_init = txq->in_use; 1910167514Skmacy while ((txq->in_use - in_use_init < txmax) && 1911167514Skmacy (txq->size > txq->in_use + TX_MAX_DESC)) { 1912167514Skmacy IFQ_DRV_DEQUEUE(&ifp->if_snd, m); 1913167514Skmacy if (m == NULL) 1914167514Skmacy break; 1915168737Skmacy /* 1916168737Skmacy * Convert chain to M_IOVEC 1917168737Skmacy */ 1918168737Skmacy KASSERT((m->m_flags & M_IOVEC) == 0, ("IOVEC set too early")); 1919168737Skmacy m0 = m; 1920168737Skmacy#ifdef INVARIANTS 1921168737Skmacy /* 1922168737Skmacy * Clean up after net stack sloppiness 1923168737Skmacy * before calling m_sanity 1924168737Skmacy */ 1925168737Skmacy m0 = m->m_next; 1926168737Skmacy while (m0) { 1927168737Skmacy m0->m_flags &= ~M_PKTHDR; 1928168737Skmacy m0 = m0->m_next; 1929168737Skmacy } 1930168737Skmacy m_sanity(m0, 0); 1931168737Skmacy m0 = m; 1932168749Skmacy#endif 1933168749Skmacy if (collapse_mbufs && m->m_pkthdr.len > MCLBYTES && 1934168737Skmacy m_collapse(m, TX_MAX_SEGS, &m0) == EFBIG) { 1935168737Skmacy if ((m0 = m_defrag(m, M_NOWAIT)) != NULL) { 1936168737Skmacy m = m0; 1937168737Skmacy m_collapse(m, TX_MAX_SEGS, &m0); 1938168737Skmacy } else 1939168737Skmacy break; 1940168737Skmacy } 1941168737Skmacy m = m0; 1942167514Skmacy if ((err = t3_encap(p, &m)) != 0) 1943167514Skmacy break; 1944169978Skmacy BPF_MTAP(ifp, m); 1945167514Skmacy } 1946171335Skmacy txq->flags &= ~TXQ_TRANSMITTING; 1947167514Skmacy mtx_unlock(&txq->lock); 1948167514Skmacy 1949167514Skmacy if (__predict_false(err)) { 1950167514Skmacy if (err == ENOMEM) { 1951170083Skmacy ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1952167514Skmacy IFQ_LOCK(&ifp->if_snd); 1953167514Skmacy 
IFQ_DRV_PREPEND(&ifp->if_snd, m); 1954167514Skmacy IFQ_UNLOCK(&ifp->if_snd); 1955167514Skmacy } 1956167514Skmacy } 1957170654Skmacy if (err == 0 && m == NULL) 1958170654Skmacy err = ENOBUFS; 1959170654Skmacy else if ((err == 0) && (txq->size <= txq->in_use + TX_MAX_DESC) && 1960170007Skmacy (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { 1961170007Skmacy ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1962170654Skmacy err = ENOSPC; 1963170007Skmacy } 1964167514Skmacy return (err); 1965167514Skmacy} 1966167514Skmacy 1967167514Skmacystatic void 1968167514Skmacycxgb_start_proc(void *arg, int ncount) 1969167514Skmacy{ 1970167514Skmacy struct ifnet *ifp = arg; 1971167514Skmacy struct port_info *pi = ifp->if_softc; 1972167514Skmacy struct sge_qset *qs; 1973167514Skmacy struct sge_txq *txq; 1974170654Skmacy int error; 1975167514Skmacy 1976167514Skmacy qs = &pi->adapter->sge.qs[pi->first_qset]; 1977167514Skmacy txq = &qs->txq[TXQ_ETH]; 1978167514Skmacy 1979170654Skmacy do { 1980171469Skmacy if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2) 1981171469Skmacy taskqueue_enqueue(pi->tq, &txq->qreclaim_task); 1982167525Skmacy 1983167538Skmacy error = cxgb_start_tx(ifp, TX_START_MAX_DESC); 1984170654Skmacy } while (error == 0); 1985167514Skmacy} 1986167514Skmacy 1987167514Skmacystatic void 1988167514Skmacycxgb_start(struct ifnet *ifp) 1989167514Skmacy{ 1990167514Skmacy struct port_info *pi = ifp->if_softc; 1991167514Skmacy struct sge_qset *qs; 1992167514Skmacy struct sge_txq *txq; 1993167514Skmacy int err; 1994167514Skmacy 1995167514Skmacy qs = &pi->adapter->sge.qs[pi->first_qset]; 1996167514Skmacy txq = &qs->txq[TXQ_ETH]; 1997167514Skmacy 1998171469Skmacy if (desc_reclaimable(txq) > TX_CLEAN_MAX_DESC >> 2) 1999171469Skmacy taskqueue_enqueue(pi->tq, 2000171469Skmacy &txq->qreclaim_task); 2001167538Skmacy 2002167514Skmacy err = cxgb_start_tx(ifp, TX_START_MAX_DESC); 2003167514Skmacy 2004167514Skmacy if (err == 0) 2005167514Skmacy taskqueue_enqueue(pi->tq, &pi->start_task); 2006167514Skmacy} 
2007167514Skmacy 2008167514Skmacy 2009167514Skmacystatic int 2010167514Skmacycxgb_media_change(struct ifnet *ifp) 2011167514Skmacy{ 2012167514Skmacy if_printf(ifp, "media change not supported\n"); 2013167514Skmacy return (ENXIO); 2014167514Skmacy} 2015167514Skmacy 2016167514Skmacystatic void 2017167514Skmacycxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2018167514Skmacy{ 2019167514Skmacy struct port_info *p = ifp->if_softc; 2020167514Skmacy 2021167514Skmacy ifmr->ifm_status = IFM_AVALID; 2022167514Skmacy ifmr->ifm_active = IFM_ETHER; 2023167514Skmacy 2024167514Skmacy if (!p->link_config.link_ok) 2025167514Skmacy return; 2026167514Skmacy 2027167514Skmacy ifmr->ifm_status |= IFM_ACTIVE; 2028167514Skmacy 2029170654Skmacy switch (p->link_config.speed) { 2030170654Skmacy case 10: 2031170654Skmacy ifmr->ifm_active |= IFM_10_T; 2032170654Skmacy break; 2033170654Skmacy case 100: 2034170654Skmacy ifmr->ifm_active |= IFM_100_TX; 2035170654Skmacy break; 2036170654Skmacy case 1000: 2037170654Skmacy ifmr->ifm_active |= IFM_1000_T; 2038170654Skmacy break; 2039170654Skmacy } 2040170654Skmacy 2041167514Skmacy if (p->link_config.duplex) 2042167514Skmacy ifmr->ifm_active |= IFM_FDX; 2043167514Skmacy else 2044167514Skmacy ifmr->ifm_active |= IFM_HDX; 2045167514Skmacy} 2046167514Skmacy 2047167514Skmacystatic void 2048167514Skmacycxgb_async_intr(void *data) 2049167514Skmacy{ 2050167760Skmacy adapter_t *sc = data; 2051167760Skmacy 2052167514Skmacy if (cxgb_debug) 2053167760Skmacy device_printf(sc->dev, "cxgb_async_intr\n"); 2054170869Skmacy /* 2055170869Skmacy * May need to sleep - defer to taskqueue 2056170869Skmacy */ 2057170869Skmacy taskqueue_enqueue(sc->tq, &sc->slow_intr_task); 2058167514Skmacy} 2059167514Skmacy 2060167514Skmacystatic void 2061167514Skmacycxgb_ext_intr_handler(void *arg, int count) 2062167514Skmacy{ 2063167514Skmacy adapter_t *sc = (adapter_t *)arg; 2064167514Skmacy 2065167514Skmacy if (cxgb_debug) 2066167514Skmacy printf("cxgb_ext_intr_handler\n"); 
2067167514Skmacy 2068167514Skmacy t3_phy_intr_handler(sc); 2069167514Skmacy 2070167514Skmacy /* Now reenable external interrupts */ 2071169978Skmacy ADAPTER_LOCK(sc); 2072167514Skmacy if (sc->slow_intr_mask) { 2073167514Skmacy sc->slow_intr_mask |= F_T3DBG; 2074167514Skmacy t3_write_reg(sc, A_PL_INT_CAUSE0, F_T3DBG); 2075167514Skmacy t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask); 2076167514Skmacy } 2077169978Skmacy ADAPTER_UNLOCK(sc); 2078167514Skmacy} 2079167514Skmacy 2080167514Skmacystatic void 2081167746Skmacycheck_link_status(adapter_t *sc) 2082167514Skmacy{ 2083167746Skmacy int i; 2084167514Skmacy 2085167746Skmacy for (i = 0; i < (sc)->params.nports; ++i) { 2086167746Skmacy struct port_info *p = &sc->port[i]; 2087167514Skmacy 2088170654Skmacy if (!(p->port_type->caps & SUPPORTED_IRQ)) 2089167746Skmacy t3_link_changed(sc, i); 2090170654Skmacy p->ifp->if_baudrate = p->link_config.speed * 1000000; 2091167746Skmacy } 2092167514Skmacy} 2093167514Skmacy 2094167514Skmacystatic void 2095167746Skmacycheck_t3b2_mac(struct adapter *adapter) 2096167514Skmacy{ 2097167514Skmacy int i; 2098167514Skmacy 2099167746Skmacy for_each_port(adapter, i) { 2100167746Skmacy struct port_info *p = &adapter->port[i]; 2101167746Skmacy struct ifnet *ifp = p->ifp; 2102167746Skmacy int status; 2103167514Skmacy 2104167746Skmacy if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2105167746Skmacy continue; 2106167746Skmacy 2107167746Skmacy status = 0; 2108167746Skmacy PORT_LOCK(p); 2109167746Skmacy if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) 2110167746Skmacy status = t3b2_mac_watchdog_task(&p->mac); 2111167746Skmacy if (status == 1) 2112167746Skmacy p->mac.stats.num_toggled++; 2113167746Skmacy else if (status == 2) { 2114167746Skmacy struct cmac *mac = &p->mac; 2115167746Skmacy 2116170654Skmacy t3_mac_set_mtu(mac, ifp->if_mtu + ETHER_HDR_LEN 2117170654Skmacy + ETHER_VLAN_ENCAP_LEN); 2118167746Skmacy t3_mac_set_address(mac, 0, p->hw_addr); 2119167746Skmacy cxgb_set_rxmode(p); 
2120167746Skmacy t3_link_start(&p->phy, mac, &p->link_config); 2121167746Skmacy t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); 2122167746Skmacy t3_port_intr_enable(adapter, p->port); 2123167746Skmacy p->mac.stats.num_resets++; 2124167746Skmacy } 2125167746Skmacy PORT_UNLOCK(p); 2126167514Skmacy } 2127167514Skmacy} 2128167514Skmacy 2129167746Skmacystatic void 2130167746Skmacycxgb_tick(void *arg) 2131167746Skmacy{ 2132167746Skmacy adapter_t *sc = (adapter_t *)arg; 2133170869Skmacy 2134170869Skmacy taskqueue_enqueue(sc->tq, &sc->tick_task); 2135170869Skmacy 2136170869Skmacy if (sc->open_device_map != 0) 2137170869Skmacy callout_reset(&sc->cxgb_tick_ch, sc->params.stats_update_period * hz, 2138170869Skmacy cxgb_tick, sc); 2139170869Skmacy} 2140170869Skmacy 2141170869Skmacystatic void 2142170869Skmacycxgb_tick_handler(void *arg, int count) 2143170869Skmacy{ 2144170869Skmacy adapter_t *sc = (adapter_t *)arg; 2145167746Skmacy const struct adapter_params *p = &sc->params; 2146167746Skmacy 2147170869Skmacy ADAPTER_LOCK(sc); 2148167746Skmacy if (p->linkpoll_period) 2149167746Skmacy check_link_status(sc); 2150167746Skmacy 2151167746Skmacy /* 2152167746Skmacy * adapter lock can currently only be acquire after the 2153167746Skmacy * port lock 2154167746Skmacy */ 2155167746Skmacy ADAPTER_UNLOCK(sc); 2156170654Skmacy 2157171471Skmacy if (p->rev == T3_REV_B2 && p->nports < 4) 2158167746Skmacy check_t3b2_mac(sc); 2159167746Skmacy} 2160167746Skmacy 2161171471Skmacy#if 0 2162171471Skmacystatic void * 2163171471Skmacyfilter_get_idx(struct seq_file *seq, loff_t pos) 2164171471Skmacy{ 2165171471Skmacy int i; 2166171471Skmacy struct adapter *adap = seq->private; 2167171471Skmacy struct filter_info *p = adap->filters; 2168171471Skmacy 2169171471Skmacy if (!p) 2170171471Skmacy return NULL; 2171171471Skmacy 2172171471Skmacy for (i = 0; i < adap->params.mc5.nfilters; i++, p++) 2173171471Skmacy if (p->valid) { 2174171471Skmacy if (!pos) 2175171471Skmacy return p; 2176171471Skmacy 
pos--; 2177171471Skmacy } 2178171471Skmacy return NULL; 2179171471Skmacy} 2180171471Skmacy 2181171471Skmacystatic void *filter_get_nxt_idx(struct seq_file *seq, struct filter_info *p) 2182171471Skmacy{ 2183171471Skmacy struct adapter *adap = seq->private; 2184171471Skmacy struct filter_info *end = &adap->filters[adap->params.mc5.nfilters]; 2185171471Skmacy 2186171471Skmacy while (++p < end && !p->valid) 2187171471Skmacy ; 2188171471Skmacy return p < end ? p : NULL; 2189171471Skmacy} 2190171471Skmacy 2191171471Skmacystatic void *filter_seq_start(struct seq_file *seq, loff_t *pos) 2192171471Skmacy{ 2193171471Skmacy return *pos ? filter_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 2194171471Skmacy} 2195171471Skmacy 2196171471Skmacystatic void *filter_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2197171471Skmacy{ 2198171471Skmacy v = *pos ? filter_get_nxt_idx(seq, v) : filter_get_idx(seq, 0); 2199171471Skmacy if (v) 2200171471Skmacy ++*pos; 2201171471Skmacy return v; 2202171471Skmacy} 2203171471Skmacy 2204171471Skmacystatic void filter_seq_stop(struct seq_file *seq, void *v) 2205171471Skmacy{ 2206171471Skmacy} 2207171471Skmacy 2208171471Skmacystatic int filter_seq_show(struct seq_file *seq, void *v) 2209171471Skmacy{ 2210171471Skmacy static const char *pkt_type[] = { "any", "tcp", "udp", "frag" }; 2211171471Skmacy 2212171471Skmacy if (v == SEQ_START_TOKEN) 2213171471Skmacy seq_puts(seq, "index SIP DIP sport " 2214171471Skmacy "dport VLAN PRI MAC type Q\n"); 2215171471Skmacy else { 2216171471Skmacy char sip[20], dip[20]; 2217171471Skmacy struct filter_info *f = v; 2218171471Skmacy struct adapter *adap = seq->private; 2219171471Skmacy 2220171471Skmacy sprintf(sip, NIPQUAD_FMT "/%-2u", HIPQUAD(f->sip), 2221171471Skmacy f->sip_mask ? 33 - ffs(f->sip_mask) : 0); 2222171471Skmacy sprintf(dip, NIPQUAD_FMT, HIPQUAD(f->dip)); 2223171471Skmacy seq_printf(seq, "%5zu %18s %15s ", f - adap->filters, sip, dip); 2224171471Skmacy seq_printf(seq, f->sport ? 
"%5u " : " * ", f->sport); 2225171471Skmacy seq_printf(seq, f->dport ? "%5u " : " * ", f->dport); 2226171471Skmacy seq_printf(seq, f->vlan != 0xfff ? "%4u " : " * ", f->vlan); 2227171471Skmacy seq_printf(seq, f->vlan_prio == FILTER_NO_VLAN_PRI ? 2228171471Skmacy " * " : "%1u/%1u ", f->vlan_prio, f->vlan_prio | 1); 2229171471Skmacy if (!f->mac_vld) 2230171471Skmacy seq_printf(seq, " * "); 2231171471Skmacy else if (f->mac_hit) 2232171471Skmacy seq_printf(seq, "%3u ", f->mac_idx); 2233171471Skmacy else 2234171471Skmacy seq_printf(seq, " -1 "); 2235171471Skmacy seq_printf(seq, "%4s ", pkt_type[f->pkt_type]); 2236171471Skmacy if (!f->pass) 2237171471Skmacy seq_printf(seq, "-\n"); 2238171471Skmacy else if (f->rss) 2239171471Skmacy seq_printf(seq, "*\n"); 2240171471Skmacy else 2241171471Skmacy seq_printf(seq, "%1u\n", f->qset); 2242171471Skmacy } 2243171471Skmacy return 0; 2244171471Skmacy} 2245171471Skmacy 2246171471Skmacystatic struct seq_operations filter_seq_ops = { 2247171471Skmacy .start = filter_seq_start, 2248171471Skmacy .next = filter_seq_next, 2249171471Skmacy .stop = filter_seq_stop, 2250171471Skmacy .show = filter_seq_show 2251171471Skmacy}; 2252171471Skmacy 2253171471Skmacystatic int filter_seq_open(struct inode *inode, struct file *file) 2254171471Skmacy{ 2255171471Skmacy int rc = seq_open(file, &filter_seq_ops); 2256171471Skmacy 2257171471Skmacy if (!rc) { 2258171471Skmacy struct proc_dir_entry *dp = PDE(inode); 2259171471Skmacy struct seq_file *seq = file->private_data; 2260171471Skmacy 2261171471Skmacy seq->private = dp->data; 2262171471Skmacy } 2263171471Skmacy return rc; 2264171471Skmacy} 2265171471Skmacy 2266171471Skmacystatic struct file_operations filter_seq_fops = { 2267171471Skmacy .owner = THIS_MODULE, 2268171471Skmacy .open = filter_seq_open, 2269171471Skmacy .read = seq_read, 2270171471Skmacy .llseek = seq_lseek, 2271171471Skmacy .release = seq_release 2272171471Skmacy}; 2273171471Skmacy 2274171471Skmacy#endif 2275171471Skmacy 
2276167514Skmacystatic int 2277171471Skmacyset_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset) 2278171471Skmacy{ 2279171471Skmacy uint8_t *buf; 2280171471Skmacy int err = 0; 2281171471Skmacy u32 aligned_offset, aligned_len, *p; 2282171471Skmacy struct adapter *adapter = pi->adapter; 2283171471Skmacy 2284171471Skmacy 2285171471Skmacy aligned_offset = offset & ~3; 2286171471Skmacy aligned_len = (len + (offset & 3) + 3) & ~3; 2287171471Skmacy 2288171471Skmacy if (aligned_offset != offset || aligned_len != len) { 2289171471Skmacy buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO); 2290171471Skmacy if (!buf) 2291171471Skmacy return (ENOMEM); 2292171471Skmacy err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf); 2293171471Skmacy if (!err && aligned_len > 4) 2294171471Skmacy err = t3_seeprom_read(adapter, 2295171471Skmacy aligned_offset + aligned_len - 4, 2296171471Skmacy (u32 *)&buf[aligned_len - 4]); 2297171471Skmacy if (err) 2298171471Skmacy goto out; 2299171471Skmacy memcpy(buf + (offset & 3), data, len); 2300171471Skmacy } else 2301171471Skmacy buf = (uint8_t *)(uintptr_t)data; 2302171471Skmacy 2303171471Skmacy err = t3_seeprom_wp(adapter, 0); 2304171471Skmacy if (err) 2305171471Skmacy goto out; 2306171471Skmacy 2307171471Skmacy for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { 2308171471Skmacy err = t3_seeprom_write(adapter, aligned_offset, *p); 2309171471Skmacy aligned_offset += 4; 2310171471Skmacy } 2311171471Skmacy 2312171471Skmacy if (!err) 2313171471Skmacy err = t3_seeprom_wp(adapter, 1); 2314171471Skmacyout: 2315171471Skmacy if (buf != data) 2316171471Skmacy free(buf, M_DEVBUF); 2317171471Skmacy return err; 2318171471Skmacy} 2319171471Skmacy 2320171471Skmacy 2321171471Skmacystatic int 2322167514Skmacyin_range(int val, int lo, int hi) 2323167514Skmacy{ 2324167514Skmacy return val < 0 || (val <= hi && val >= lo); 2325167514Skmacy} 2326167514Skmacy 2327167514Skmacystatic int 
2328170654Skmacycxgb_extension_open(struct cdev *dev, int flags, int fmp, d_thread_t *td) 2329170654Skmacy{ 2330170654Skmacy return (0); 2331170654Skmacy} 2332170654Skmacy 2333170654Skmacystatic int 2334170654Skmacycxgb_extension_close(struct cdev *dev, int flags, int fmt, d_thread_t *td) 2335170654Skmacy{ 2336170654Skmacy return (0); 2337170654Skmacy} 2338170654Skmacy 2339170654Skmacystatic int 2340167514Skmacycxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, 2341167514Skmacy int fflag, struct thread *td) 2342167514Skmacy{ 2343167514Skmacy int mmd, error = 0; 2344167514Skmacy struct port_info *pi = dev->si_drv1; 2345167514Skmacy adapter_t *sc = pi->adapter; 2346167514Skmacy 2347167514Skmacy#ifdef PRIV_SUPPORTED 2348167514Skmacy if (priv_check(td, PRIV_DRIVER)) { 2349167514Skmacy if (cxgb_debug) 2350167514Skmacy printf("user does not have access to privileged ioctls\n"); 2351167514Skmacy return (EPERM); 2352167514Skmacy } 2353167514Skmacy#else 2354167514Skmacy if (suser(td)) { 2355167514Skmacy if (cxgb_debug) 2356167514Skmacy printf("user does not have access to privileged ioctls\n"); 2357167514Skmacy return (EPERM); 2358167514Skmacy } 2359167514Skmacy#endif 2360167514Skmacy 2361167514Skmacy switch (cmd) { 2362167514Skmacy case SIOCGMIIREG: { 2363167514Skmacy uint32_t val; 2364167514Skmacy struct cphy *phy = &pi->phy; 2365167514Skmacy struct mii_data *mid = (struct mii_data *)data; 2366167514Skmacy 2367167514Skmacy if (!phy->mdio_read) 2368167514Skmacy return (EOPNOTSUPP); 2369167514Skmacy if (is_10G(sc)) { 2370167514Skmacy mmd = mid->phy_id >> 8; 2371167514Skmacy if (!mmd) 2372167514Skmacy mmd = MDIO_DEV_PCS; 2373167514Skmacy else if (mmd > MDIO_DEV_XGXS) 2374171471Skmacy return (EINVAL); 2375167514Skmacy 2376167514Skmacy error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd, 2377167514Skmacy mid->reg_num, &val); 2378167514Skmacy } else 2379167514Skmacy error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0, 2380167514Skmacy mid->reg_num & 0x1f, 
&val); 2381167514Skmacy if (error == 0) 2382167514Skmacy mid->val_out = val; 2383167514Skmacy break; 2384167514Skmacy } 2385167514Skmacy case SIOCSMIIREG: { 2386167514Skmacy struct cphy *phy = &pi->phy; 2387167514Skmacy struct mii_data *mid = (struct mii_data *)data; 2388167514Skmacy 2389167514Skmacy if (!phy->mdio_write) 2390167514Skmacy return (EOPNOTSUPP); 2391167514Skmacy if (is_10G(sc)) { 2392167514Skmacy mmd = mid->phy_id >> 8; 2393167514Skmacy if (!mmd) 2394167514Skmacy mmd = MDIO_DEV_PCS; 2395167514Skmacy else if (mmd > MDIO_DEV_XGXS) 2396167514Skmacy return (EINVAL); 2397167514Skmacy 2398167514Skmacy error = phy->mdio_write(sc, mid->phy_id & 0x1f, 2399167514Skmacy mmd, mid->reg_num, mid->val_in); 2400167514Skmacy } else 2401167514Skmacy error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0, 2402167514Skmacy mid->reg_num & 0x1f, 2403167514Skmacy mid->val_in); 2404167514Skmacy break; 2405167514Skmacy } 2406167514Skmacy case CHELSIO_SETREG: { 2407167514Skmacy struct ch_reg *edata = (struct ch_reg *)data; 2408167514Skmacy if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 2409167514Skmacy return (EFAULT); 2410167514Skmacy t3_write_reg(sc, edata->addr, edata->val); 2411167514Skmacy break; 2412167514Skmacy } 2413167514Skmacy case CHELSIO_GETREG: { 2414167514Skmacy struct ch_reg *edata = (struct ch_reg *)data; 2415167514Skmacy if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 2416167514Skmacy return (EFAULT); 2417167514Skmacy edata->val = t3_read_reg(sc, edata->addr); 2418167514Skmacy break; 2419167514Skmacy } 2420167514Skmacy case CHELSIO_GET_SGE_CONTEXT: { 2421167514Skmacy struct ch_cntxt *ecntxt = (struct ch_cntxt *)data; 2422167514Skmacy mtx_lock(&sc->sge.reg_lock); 2423167514Skmacy switch (ecntxt->cntxt_type) { 2424167514Skmacy case CNTXT_TYPE_EGRESS: 2425167514Skmacy error = t3_sge_read_ecntxt(sc, ecntxt->cntxt_id, 2426167514Skmacy ecntxt->data); 2427167514Skmacy break; 2428167514Skmacy case CNTXT_TYPE_FL: 2429167514Skmacy error = 
t3_sge_read_fl(sc, ecntxt->cntxt_id, 2430167514Skmacy ecntxt->data); 2431167514Skmacy break; 2432167514Skmacy case CNTXT_TYPE_RSP: 2433167514Skmacy error = t3_sge_read_rspq(sc, ecntxt->cntxt_id, 2434167514Skmacy ecntxt->data); 2435167514Skmacy break; 2436167514Skmacy case CNTXT_TYPE_CQ: 2437167514Skmacy error = t3_sge_read_cq(sc, ecntxt->cntxt_id, 2438167514Skmacy ecntxt->data); 2439167514Skmacy break; 2440167514Skmacy default: 2441167514Skmacy error = EINVAL; 2442167514Skmacy break; 2443167514Skmacy } 2444167514Skmacy mtx_unlock(&sc->sge.reg_lock); 2445167514Skmacy break; 2446167514Skmacy } 2447167514Skmacy case CHELSIO_GET_SGE_DESC: { 2448167514Skmacy struct ch_desc *edesc = (struct ch_desc *)data; 2449167514Skmacy int ret; 2450167514Skmacy if (edesc->queue_num >= SGE_QSETS * 6) 2451167514Skmacy return (EINVAL); 2452167514Skmacy ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6], 2453167514Skmacy edesc->queue_num % 6, edesc->idx, edesc->data); 2454167514Skmacy if (ret < 0) 2455167514Skmacy return (EINVAL); 2456167514Skmacy edesc->size = ret; 2457167514Skmacy break; 2458167514Skmacy } 2459167514Skmacy case CHELSIO_SET_QSET_PARAMS: { 2460167514Skmacy struct qset_params *q; 2461167514Skmacy struct ch_qset_params *t = (struct ch_qset_params *)data; 2462167514Skmacy 2463167514Skmacy if (t->qset_idx >= SGE_QSETS) 2464171471Skmacy return (EINVAL); 2465167514Skmacy if (!in_range(t->intr_lat, 0, M_NEWTIMER) || 2466167514Skmacy !in_range(t->cong_thres, 0, 255) || 2467167514Skmacy !in_range(t->txq_size[0], MIN_TXQ_ENTRIES, 2468167514Skmacy MAX_TXQ_ENTRIES) || 2469167514Skmacy !in_range(t->txq_size[1], MIN_TXQ_ENTRIES, 2470167514Skmacy MAX_TXQ_ENTRIES) || 2471167514Skmacy !in_range(t->txq_size[2], MIN_CTRL_TXQ_ENTRIES, 2472167514Skmacy MAX_CTRL_TXQ_ENTRIES) || 2473167514Skmacy !in_range(t->fl_size[0], MIN_FL_ENTRIES, MAX_RX_BUFFERS) || 2474167514Skmacy !in_range(t->fl_size[1], MIN_FL_ENTRIES, 2475167514Skmacy MAX_RX_JUMBO_BUFFERS) || 2476167514Skmacy !in_range(t->rspq_size, 
MIN_RSPQ_ENTRIES, MAX_RSPQ_ENTRIES)) 2477171471Skmacy return (EINVAL); 2478167514Skmacy if ((sc->flags & FULL_INIT_DONE) && 2479167514Skmacy (t->rspq_size >= 0 || t->fl_size[0] >= 0 || 2480167514Skmacy t->fl_size[1] >= 0 || t->txq_size[0] >= 0 || 2481167514Skmacy t->txq_size[1] >= 0 || t->txq_size[2] >= 0 || 2482167514Skmacy t->polling >= 0 || t->cong_thres >= 0)) 2483171471Skmacy return (EBUSY); 2484167514Skmacy 2485167514Skmacy q = &sc->params.sge.qset[t->qset_idx]; 2486167514Skmacy 2487167514Skmacy if (t->rspq_size >= 0) 2488167514Skmacy q->rspq_size = t->rspq_size; 2489167514Skmacy if (t->fl_size[0] >= 0) 2490167514Skmacy q->fl_size = t->fl_size[0]; 2491167514Skmacy if (t->fl_size[1] >= 0) 2492167514Skmacy q->jumbo_size = t->fl_size[1]; 2493167514Skmacy if (t->txq_size[0] >= 0) 2494167514Skmacy q->txq_size[0] = t->txq_size[0]; 2495167514Skmacy if (t->txq_size[1] >= 0) 2496167514Skmacy q->txq_size[1] = t->txq_size[1]; 2497167514Skmacy if (t->txq_size[2] >= 0) 2498167514Skmacy q->txq_size[2] = t->txq_size[2]; 2499167514Skmacy if (t->cong_thres >= 0) 2500167514Skmacy q->cong_thres = t->cong_thres; 2501167514Skmacy if (t->intr_lat >= 0) { 2502167514Skmacy struct sge_qset *qs = &sc->sge.qs[t->qset_idx]; 2503167514Skmacy 2504167514Skmacy q->coalesce_nsecs = t->intr_lat*1000; 2505167514Skmacy t3_update_qset_coalesce(qs, q); 2506167514Skmacy } 2507167514Skmacy break; 2508167514Skmacy } 2509167514Skmacy case CHELSIO_GET_QSET_PARAMS: { 2510167514Skmacy struct qset_params *q; 2511167514Skmacy struct ch_qset_params *t = (struct ch_qset_params *)data; 2512167514Skmacy 2513167514Skmacy if (t->qset_idx >= SGE_QSETS) 2514167514Skmacy return (EINVAL); 2515167514Skmacy 2516167514Skmacy q = &(sc)->params.sge.qset[t->qset_idx]; 2517167514Skmacy t->rspq_size = q->rspq_size; 2518167514Skmacy t->txq_size[0] = q->txq_size[0]; 2519167514Skmacy t->txq_size[1] = q->txq_size[1]; 2520167514Skmacy t->txq_size[2] = q->txq_size[2]; 2521167514Skmacy t->fl_size[0] = q->fl_size; 2522167514Skmacy 
t->fl_size[1] = q->jumbo_size; 2523167514Skmacy t->polling = q->polling; 2524167514Skmacy t->intr_lat = q->coalesce_nsecs / 1000; 2525167514Skmacy t->cong_thres = q->cong_thres; 2526167514Skmacy break; 2527167514Skmacy } 2528167514Skmacy case CHELSIO_SET_QSET_NUM: { 2529167514Skmacy struct ch_reg *edata = (struct ch_reg *)data; 2530167514Skmacy unsigned int port_idx = pi->port; 2531167514Skmacy 2532167514Skmacy if (sc->flags & FULL_INIT_DONE) 2533167514Skmacy return (EBUSY); 2534167514Skmacy if (edata->val < 1 || 2535167514Skmacy (edata->val > 1 && !(sc->flags & USING_MSIX))) 2536167514Skmacy return (EINVAL); 2537167514Skmacy if (edata->val + sc->port[!port_idx].nqsets > SGE_QSETS) 2538167514Skmacy return (EINVAL); 2539167514Skmacy sc->port[port_idx].nqsets = edata->val; 2540169978Skmacy sc->port[0].first_qset = 0; 2541167514Skmacy /* 2542169978Skmacy * XXX hardcode ourselves to 2 ports just like LEEENUX 2543167514Skmacy */ 2544167514Skmacy sc->port[1].first_qset = sc->port[0].nqsets; 2545167514Skmacy break; 2546167514Skmacy } 2547167514Skmacy case CHELSIO_GET_QSET_NUM: { 2548167514Skmacy struct ch_reg *edata = (struct ch_reg *)data; 2549167514Skmacy edata->val = pi->nqsets; 2550167514Skmacy break; 2551167514Skmacy } 2552169978Skmacy#ifdef notyet 2553167514Skmacy case CHELSIO_LOAD_FW: 2554167514Skmacy case CHELSIO_GET_PM: 2555167514Skmacy case CHELSIO_SET_PM: 2556167514Skmacy return (EOPNOTSUPP); 2557167514Skmacy break; 2558167514Skmacy#endif 2559169978Skmacy case CHELSIO_SETMTUTAB: { 2560169978Skmacy struct ch_mtus *m = (struct ch_mtus *)data; 2561169978Skmacy int i; 2562169978Skmacy 2563169978Skmacy if (!is_offload(sc)) 2564169978Skmacy return (EOPNOTSUPP); 2565169978Skmacy if (offload_running(sc)) 2566169978Skmacy return (EBUSY); 2567169978Skmacy if (m->nmtus != NMTUS) 2568169978Skmacy return (EINVAL); 2569169978Skmacy if (m->mtus[0] < 81) /* accommodate SACK */ 2570169978Skmacy return (EINVAL); 2571169978Skmacy 2572169978Skmacy /* 2573169978Skmacy * MTUs must 
be in ascending order 2574169978Skmacy */ 2575169978Skmacy for (i = 1; i < NMTUS; ++i) 2576169978Skmacy if (m->mtus[i] < m->mtus[i - 1]) 2577169978Skmacy return (EINVAL); 2578169978Skmacy 2579169978Skmacy memcpy(sc->params.mtus, m->mtus, 2580169978Skmacy sizeof(sc->params.mtus)); 2581169978Skmacy break; 2582169978Skmacy } 2583169978Skmacy case CHELSIO_GETMTUTAB: { 2584169978Skmacy struct ch_mtus *m = (struct ch_mtus *)data; 2585169978Skmacy 2586169978Skmacy if (!is_offload(sc)) 2587169978Skmacy return (EOPNOTSUPP); 2588169978Skmacy 2589169978Skmacy memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus)); 2590169978Skmacy m->nmtus = NMTUS; 2591169978Skmacy break; 2592171471Skmacy } 2593171471Skmacy case CHELSIO_SET_FILTER: { 2594171471Skmacy struct ch_filter *f = (struct ch_filter *)data; 2595171471Skmacy struct filter_info *p; 2596171471Skmacy int ret; 2597171471Skmacy 2598171471Skmacy if (sc->params.mc5.nfilters == 0) 2599171471Skmacy return (EOPNOTSUPP); 2600171471Skmacy if (!(sc->flags & FULL_INIT_DONE)) 2601171471Skmacy return (EAGAIN); /* can still change nfilters */ 2602171471Skmacy if (sc->filters == NULL) 2603171471Skmacy return (ENOMEM); 2604171471Skmacy 2605171471Skmacy if (f->filter_id >= sc->params.mc5.nfilters || 2606171471Skmacy (f->val.dip && f->mask.dip != 0xffffffff) || 2607171471Skmacy (f->val.sport && f->mask.sport != 0xffff) || 2608171471Skmacy (f->val.dport && f->mask.dport != 0xffff) || 2609171471Skmacy (f->mask.vlan && f->mask.vlan != 0xfff) || 2610171471Skmacy (f->mask.vlan_prio && f->mask.vlan_prio != 7) || 2611171471Skmacy (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) || 2612171471Skmacy f->qset >= SGE_QSETS || 2613171471Skmacy sc->rrss_map[f->qset] >= RSS_TABLE_SIZE) 2614171471Skmacy return (EINVAL); 2615171471Skmacy 2616171471Skmacy p = &sc->filters[f->filter_id]; 2617171471Skmacy if (p->locked) 2618171471Skmacy return (EPERM); 2619171471Skmacy 2620171471Skmacy p->sip = f->val.sip; 2621171471Skmacy p->sip_mask = f->mask.sip; 
2622171471Skmacy p->dip = f->val.dip; 2623171471Skmacy p->sport = f->val.sport; 2624171471Skmacy p->dport = f->val.dport; 2625171471Skmacy p->vlan = f->mask.vlan ? f->val.vlan : 0xfff; 2626171471Skmacy p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) : 2627171471Skmacy FILTER_NO_VLAN_PRI; 2628171471Skmacy p->mac_hit = f->mac_hit; 2629171471Skmacy p->mac_vld = f->mac_addr_idx != 0xffff; 2630171471Skmacy p->mac_idx = f->mac_addr_idx; 2631171471Skmacy p->pkt_type = f->proto; 2632171471Skmacy p->report_filter_id = f->want_filter_id; 2633171471Skmacy p->pass = f->pass; 2634171471Skmacy p->rss = f->rss; 2635171471Skmacy p->qset = f->qset; 2636171471Skmacy 2637171471Skmacy ret = set_filter(sc, f->filter_id, p); 2638171471Skmacy if (ret) 2639171471Skmacy return ret; 2640171471Skmacy p->valid = 1; 2641171471Skmacy break; 2642171471Skmacy } 2643171471Skmacy case CHELSIO_DEL_FILTER: { 2644171471Skmacy struct ch_filter *f = (struct ch_filter *)data; 2645171471Skmacy struct filter_info *p; 2646171471Skmacy 2647171471Skmacy if (sc->params.mc5.nfilters == 0) 2648171471Skmacy return (EOPNOTSUPP); 2649171471Skmacy if (!(sc->flags & FULL_INIT_DONE)) 2650171471Skmacy return (EAGAIN); /* can still change nfilters */ 2651171471Skmacy if (sc->filters == NULL) 2652171471Skmacy return (ENOMEM); 2653171471Skmacy if (f->filter_id >= sc->params.mc5.nfilters) 2654171471Skmacy return (EINVAL); 2655171471Skmacy 2656171471Skmacy p = &sc->filters[f->filter_id]; 2657171471Skmacy if (p->locked) 2658171471Skmacy return (EPERM); 2659171471Skmacy memset(p, 0, sizeof(*p)); 2660171471Skmacy p->sip_mask = 0xffffffff; 2661171471Skmacy p->vlan = 0xfff; 2662171471Skmacy p->vlan_prio = FILTER_NO_VLAN_PRI; 2663171471Skmacy p->pkt_type = 1; 2664171471Skmacy return set_filter(sc, f->filter_id, p); 2665169978Skmacy } 2666169978Skmacy case CHELSIO_DEVUP: 2667169978Skmacy if (!is_offload(sc)) 2668169978Skmacy return (EOPNOTSUPP); 2669169978Skmacy return offload_open(pi); 2670169978Skmacy break; 
2671167514Skmacy case CHELSIO_GET_MEM: { 2672167514Skmacy struct ch_mem_range *t = (struct ch_mem_range *)data; 2673167514Skmacy struct mc7 *mem; 2674167514Skmacy uint8_t *useraddr; 2675167514Skmacy u64 buf[32]; 2676167514Skmacy 2677167514Skmacy if (!is_offload(sc)) 2678167514Skmacy return (EOPNOTSUPP); 2679167514Skmacy if (!(sc->flags & FULL_INIT_DONE)) 2680167514Skmacy return (EIO); /* need the memory controllers */ 2681167514Skmacy if ((t->addr & 0x7) || (t->len & 0x7)) 2682167514Skmacy return (EINVAL); 2683167514Skmacy if (t->mem_id == MEM_CM) 2684167514Skmacy mem = &sc->cm; 2685167514Skmacy else if (t->mem_id == MEM_PMRX) 2686167514Skmacy mem = &sc->pmrx; 2687167514Skmacy else if (t->mem_id == MEM_PMTX) 2688167514Skmacy mem = &sc->pmtx; 2689167514Skmacy else 2690167514Skmacy return (EINVAL); 2691167514Skmacy 2692167514Skmacy /* 2693167514Skmacy * Version scheme: 2694167514Skmacy * bits 0..9: chip version 2695167514Skmacy * bits 10..15: chip revision 2696167514Skmacy */ 2697167514Skmacy t->version = 3 | (sc->params.rev << 10); 2698167514Skmacy 2699167514Skmacy /* 2700167514Skmacy * Read 256 bytes at a time as len can be large and we don't 2701167514Skmacy * want to use huge intermediate buffers. 
2702167514Skmacy */ 2703167514Skmacy useraddr = (uint8_t *)(t + 1); /* advance to start of buffer */ 2704167514Skmacy while (t->len) { 2705167514Skmacy unsigned int chunk = min(t->len, sizeof(buf)); 2706167514Skmacy 2707167514Skmacy error = t3_mc7_bd_read(mem, t->addr / 8, chunk / 8, buf); 2708167514Skmacy if (error) 2709167514Skmacy return (-error); 2710167514Skmacy if (copyout(buf, useraddr, chunk)) 2711167514Skmacy return (EFAULT); 2712167514Skmacy useraddr += chunk; 2713167514Skmacy t->addr += chunk; 2714167514Skmacy t->len -= chunk; 2715167514Skmacy } 2716167514Skmacy break; 2717167514Skmacy } 2718169978Skmacy case CHELSIO_READ_TCAM_WORD: { 2719169978Skmacy struct ch_tcam_word *t = (struct ch_tcam_word *)data; 2720169978Skmacy 2721169978Skmacy if (!is_offload(sc)) 2722169978Skmacy return (EOPNOTSUPP); 2723171471Skmacy if (!(sc->flags & FULL_INIT_DONE)) 2724171471Skmacy return (EIO); /* need MC5 */ 2725169978Skmacy return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf); 2726169978Skmacy break; 2727169978Skmacy } 2728167514Skmacy case CHELSIO_SET_TRACE_FILTER: { 2729167514Skmacy struct ch_trace *t = (struct ch_trace *)data; 2730167514Skmacy const struct trace_params *tp; 2731167514Skmacy 2732167514Skmacy tp = (const struct trace_params *)&t->sip; 2733167514Skmacy if (t->config_tx) 2734167514Skmacy t3_config_trace_filter(sc, tp, 0, t->invert_match, 2735167514Skmacy t->trace_tx); 2736167514Skmacy if (t->config_rx) 2737167514Skmacy t3_config_trace_filter(sc, tp, 1, t->invert_match, 2738167514Skmacy t->trace_rx); 2739167514Skmacy break; 2740167514Skmacy } 2741167514Skmacy case CHELSIO_SET_PKTSCHED: { 2742167514Skmacy struct ch_pktsched_params *p = (struct ch_pktsched_params *)data; 2743167514Skmacy if (sc->open_device_map == 0) 2744167514Skmacy return (EAGAIN); 2745167514Skmacy send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max, 2746167514Skmacy p->binding); 2747167514Skmacy break; 2748167514Skmacy } 2749167514Skmacy case CHELSIO_IFCONF_GETREGS: { 
2750167514Skmacy struct ifconf_regs *regs = (struct ifconf_regs *)data; 2751167514Skmacy int reglen = cxgb_get_regs_len(); 2752167514Skmacy uint8_t *buf = malloc(REGDUMP_SIZE, M_DEVBUF, M_NOWAIT); 2753167514Skmacy if (buf == NULL) { 2754167514Skmacy return (ENOMEM); 2755167514Skmacy } if (regs->len > reglen) 2756167514Skmacy regs->len = reglen; 2757167514Skmacy else if (regs->len < reglen) { 2758167514Skmacy error = E2BIG; 2759167514Skmacy goto done; 2760167514Skmacy } 2761167514Skmacy cxgb_get_regs(sc, regs, buf); 2762167514Skmacy error = copyout(buf, regs->data, reglen); 2763167514Skmacy 2764167514Skmacy done: 2765167514Skmacy free(buf, M_DEVBUF); 2766167514Skmacy 2767167514Skmacy break; 2768167514Skmacy } 2769169978Skmacy case CHELSIO_SET_HW_SCHED: { 2770169978Skmacy struct ch_hw_sched *t = (struct ch_hw_sched *)data; 2771169978Skmacy unsigned int ticks_per_usec = core_ticks_per_usec(sc); 2772169978Skmacy 2773169978Skmacy if ((sc->flags & FULL_INIT_DONE) == 0) 2774169978Skmacy return (EAGAIN); /* need TP to be initialized */ 2775169978Skmacy if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) || 2776169978Skmacy !in_range(t->channel, 0, 1) || 2777169978Skmacy !in_range(t->kbps, 0, 10000000) || 2778169978Skmacy !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) || 2779169978Skmacy !in_range(t->flow_ipg, 0, 2780169978Skmacy dack_ticks_to_usec(sc, 0x7ff))) 2781169978Skmacy return (EINVAL); 2782169978Skmacy 2783169978Skmacy if (t->kbps >= 0) { 2784169978Skmacy error = t3_config_sched(sc, t->kbps, t->sched); 2785169978Skmacy if (error < 0) 2786169978Skmacy return (-error); 2787169978Skmacy } 2788169978Skmacy if (t->class_ipg >= 0) 2789169978Skmacy t3_set_sched_ipg(sc, t->sched, t->class_ipg); 2790169978Skmacy if (t->flow_ipg >= 0) { 2791169978Skmacy t->flow_ipg *= 1000; /* us -> ns */ 2792169978Skmacy t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1); 2793169978Skmacy } 2794169978Skmacy if (t->mode >= 0) { 2795169978Skmacy int bit = 1 << (S_TX_MOD_TIMER_MODE + 
t->sched); 2796169978Skmacy 2797169978Skmacy t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP, 2798169978Skmacy bit, t->mode ? bit : 0); 2799169978Skmacy } 2800169978Skmacy if (t->channel >= 0) 2801169978Skmacy t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP, 2802169978Skmacy 1 << t->sched, t->channel << t->sched); 2803169978Skmacy break; 2804169978Skmacy } 2805167514Skmacy default: 2806167514Skmacy return (EOPNOTSUPP); 2807167514Skmacy break; 2808167514Skmacy } 2809167514Skmacy 2810167514Skmacy return (error); 2811167514Skmacy} 2812167514Skmacy 2813167514Skmacystatic __inline void 2814167514Skmacyreg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start, 2815167514Skmacy unsigned int end) 2816167514Skmacy{ 2817167514Skmacy uint32_t *p = (uint32_t *)buf + start; 2818167514Skmacy 2819167514Skmacy for ( ; start <= end; start += sizeof(uint32_t)) 2820167514Skmacy *p++ = t3_read_reg(ap, start); 2821167514Skmacy} 2822167514Skmacy 2823167514Skmacy#define T3_REGMAP_SIZE (3 * 1024) 2824167514Skmacystatic int 2825167514Skmacycxgb_get_regs_len(void) 2826167514Skmacy{ 2827167514Skmacy return T3_REGMAP_SIZE; 2828167514Skmacy} 2829167514Skmacy#undef T3_REGMAP_SIZE 2830167514Skmacy 2831167514Skmacystatic void 2832167514Skmacycxgb_get_regs(adapter_t *sc, struct ifconf_regs *regs, uint8_t *buf) 2833167514Skmacy{ 2834167514Skmacy 2835167514Skmacy /* 2836167514Skmacy * Version scheme: 2837167514Skmacy * bits 0..9: chip version 2838167514Skmacy * bits 10..15: chip revision 2839167514Skmacy * bit 31: set for PCIe cards 2840167514Skmacy */ 2841167514Skmacy regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31); 2842167514Skmacy 2843167514Skmacy /* 2844167514Skmacy * We skip the MAC statistics registers because they are clear-on-read. 2845167514Skmacy * Also reading multi-register stats would need to synchronize with the 2846167514Skmacy * periodic mac stats accumulation. Hard to justify the complexity. 
2847167514Skmacy */ 2848167514Skmacy memset(buf, 0, REGDUMP_SIZE); 2849167514Skmacy reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN); 2850167514Skmacy reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT); 2851167514Skmacy reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE); 2852167514Skmacy reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA); 2853167514Skmacy reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3); 2854167514Skmacy reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0, 2855167514Skmacy XGM_REG(A_XGM_SERDES_STAT3, 1)); 2856167514Skmacy reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1), 2857167514Skmacy XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1)); 2858167514Skmacy} 2859