/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
27167514Skmacy 28167514Skmacy***************************************************************************/ 29167514Skmacy 30167514Skmacy#include <sys/cdefs.h> 31167514Skmacy__FBSDID("$FreeBSD: releng/11.0/sys/dev/cxgb/cxgb_main.c 283291 2015-05-22 17:05:21Z jkim $"); 32167514Skmacy 33237263Snp#include "opt_inet.h" 34237263Snp 35167514Skmacy#include <sys/param.h> 36167514Skmacy#include <sys/systm.h> 37167514Skmacy#include <sys/kernel.h> 38167514Skmacy#include <sys/bus.h> 39167514Skmacy#include <sys/module.h> 40167514Skmacy#include <sys/pciio.h> 41167514Skmacy#include <sys/conf.h> 42167514Skmacy#include <machine/bus.h> 43167514Skmacy#include <machine/resource.h> 44167514Skmacy#include <sys/bus_dma.h> 45176472Skmacy#include <sys/ktr.h> 46167514Skmacy#include <sys/rman.h> 47167514Skmacy#include <sys/ioccom.h> 48167514Skmacy#include <sys/mbuf.h> 49167514Skmacy#include <sys/linker.h> 50167514Skmacy#include <sys/firmware.h> 51167514Skmacy#include <sys/socket.h> 52167514Skmacy#include <sys/sockio.h> 53167514Skmacy#include <sys/smp.h> 54167514Skmacy#include <sys/sysctl.h> 55174708Skmacy#include <sys/syslog.h> 56167514Skmacy#include <sys/queue.h> 57167514Skmacy#include <sys/taskqueue.h> 58174708Skmacy#include <sys/proc.h> 59167514Skmacy 60167514Skmacy#include <net/bpf.h> 61167514Skmacy#include <net/ethernet.h> 62167514Skmacy#include <net/if.h> 63257176Sglebius#include <net/if_var.h> 64167514Skmacy#include <net/if_arp.h> 65167514Skmacy#include <net/if_dl.h> 66167514Skmacy#include <net/if_media.h> 67167514Skmacy#include <net/if_types.h> 68180583Skmacy#include <net/if_vlan_var.h> 69167514Skmacy 70167514Skmacy#include <netinet/in_systm.h> 71167514Skmacy#include <netinet/in.h> 72167514Skmacy#include <netinet/if_ether.h> 73167514Skmacy#include <netinet/ip.h> 74167514Skmacy#include <netinet/ip.h> 75167514Skmacy#include <netinet/tcp.h> 76167514Skmacy#include <netinet/udp.h> 77167514Skmacy 78167514Skmacy#include <dev/pci/pcireg.h> 79167514Skmacy#include <dev/pci/pcivar.h> 
80167514Skmacy#include <dev/pci/pci_private.h> 81167514Skmacy 82170076Skmacy#include <cxgb_include.h> 83167514Skmacy 84167514Skmacy#ifdef PRIV_SUPPORTED 85167514Skmacy#include <sys/priv.h> 86167514Skmacy#endif 87167514Skmacy 88192933Sgnnstatic int cxgb_setup_interrupts(adapter_t *); 89192933Sgnnstatic void cxgb_teardown_interrupts(adapter_t *); 90167514Skmacystatic void cxgb_init(void *); 91202671Snpstatic int cxgb_init_locked(struct port_info *); 92202671Snpstatic int cxgb_uninit_locked(struct port_info *); 93194521Skmacystatic int cxgb_uninit_synchronized(struct port_info *); 94167514Skmacystatic int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t); 95167514Skmacystatic int cxgb_media_change(struct ifnet *); 96186282Sgnnstatic int cxgb_ifm_type(int); 97194921Snpstatic void cxgb_build_medialist(struct port_info *); 98167514Skmacystatic void cxgb_media_status(struct ifnet *, struct ifmediareq *); 99272222Snpstatic uint64_t cxgb_get_counter(struct ifnet *, ift_counter); 100167514Skmacystatic int setup_sge_qsets(adapter_t *); 101167514Skmacystatic void cxgb_async_intr(void *); 102170869Skmacystatic void cxgb_tick_handler(void *, int); 103167514Skmacystatic void cxgb_tick(void *); 104209841Snpstatic void link_check_callout(void *); 105209841Snpstatic void check_link_status(void *, int); 106167514Skmacystatic void setup_rss(adapter_t *sc); 107207643Snpstatic int alloc_filters(struct adapter *); 108207643Snpstatic int setup_hw_filters(struct adapter *); 109207643Snpstatic int set_filter(struct adapter *, int, const struct filter_info *); 110207643Snpstatic inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int, 111207643Snp unsigned int, u64, u64); 112207643Snpstatic inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int, 113207643Snp unsigned int, u64, u64); 114237263Snp#ifdef TCP_OFFLOAD 115237263Snpstatic int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *); 116237263Snp#endif 117167514Skmacy 118167514Skmacy/* 
Attachment glue for the PCI controller end of the device. Each port of 119167514Skmacy * the device is attached separately, as defined later. 120167514Skmacy */ 121167514Skmacystatic int cxgb_controller_probe(device_t); 122167514Skmacystatic int cxgb_controller_attach(device_t); 123167514Skmacystatic int cxgb_controller_detach(device_t); 124167514Skmacystatic void cxgb_free(struct adapter *); 125167514Skmacystatic __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start, 126167514Skmacy unsigned int end); 127182679Skmacystatic void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf); 128167514Skmacystatic int cxgb_get_regs_len(void); 129171978Skmacystatic void touch_bars(device_t dev); 130197791Snpstatic void cxgb_update_mac_settings(struct port_info *p); 131237263Snp#ifdef TCP_OFFLOAD 132237263Snpstatic int toe_capability(struct port_info *, int); 133237263Snp#endif 134167514Skmacy 135167514Skmacystatic device_method_t cxgb_controller_methods[] = { 136167514Skmacy DEVMETHOD(device_probe, cxgb_controller_probe), 137167514Skmacy DEVMETHOD(device_attach, cxgb_controller_attach), 138167514Skmacy DEVMETHOD(device_detach, cxgb_controller_detach), 139167514Skmacy 140227843Smarius DEVMETHOD_END 141167514Skmacy}; 142167514Skmacy 143167514Skmacystatic driver_t cxgb_controller_driver = { 144167514Skmacy "cxgbc", 145167514Skmacy cxgb_controller_methods, 146167514Skmacy sizeof(struct adapter) 147167514Skmacy}; 148167514Skmacy 149237263Snpstatic int cxgbc_mod_event(module_t, int, void *); 150167514Skmacystatic devclass_t cxgb_controller_devclass; 151237263SnpDRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass, 152237263Snp cxgbc_mod_event, 0); 153237263SnpMODULE_VERSION(cxgbc, 1); 154250697SkibMODULE_DEPEND(cxgbc, firmware, 1, 1, 1); 155167514Skmacy 156167514Skmacy/* 157167514Skmacy * Attachment glue for the ports. Attachment is done directly to the 158167514Skmacy * controller device. 
159167514Skmacy */ 160167514Skmacystatic int cxgb_port_probe(device_t); 161167514Skmacystatic int cxgb_port_attach(device_t); 162167514Skmacystatic int cxgb_port_detach(device_t); 163167514Skmacy 164167514Skmacystatic device_method_t cxgb_port_methods[] = { 165167514Skmacy DEVMETHOD(device_probe, cxgb_port_probe), 166167514Skmacy DEVMETHOD(device_attach, cxgb_port_attach), 167167514Skmacy DEVMETHOD(device_detach, cxgb_port_detach), 168167514Skmacy { 0, 0 } 169167514Skmacy}; 170167514Skmacy 171167514Skmacystatic driver_t cxgb_port_driver = { 172167514Skmacy "cxgb", 173167514Skmacy cxgb_port_methods, 174167514Skmacy 0 175167514Skmacy}; 176167514Skmacy 177167514Skmacystatic d_ioctl_t cxgb_extension_ioctl; 178170654Skmacystatic d_open_t cxgb_extension_open; 179170654Skmacystatic d_close_t cxgb_extension_close; 180167514Skmacy 181170654Skmacystatic struct cdevsw cxgb_cdevsw = { 182170654Skmacy .d_version = D_VERSION, 183170654Skmacy .d_flags = 0, 184170654Skmacy .d_open = cxgb_extension_open, 185170654Skmacy .d_close = cxgb_extension_close, 186170654Skmacy .d_ioctl = cxgb_extension_ioctl, 187170654Skmacy .d_name = "cxgb", 188170654Skmacy}; 189170654Skmacy 190167514Skmacystatic devclass_t cxgb_port_devclass; 191167514SkmacyDRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0); 192237263SnpMODULE_VERSION(cxgb, 1); 193167514Skmacy 194237263Snpstatic struct mtx t3_list_lock; 195237263Snpstatic SLIST_HEAD(, adapter) t3_list; 196237263Snp#ifdef TCP_OFFLOAD 197237263Snpstatic struct mtx t3_uld_list_lock; 198237263Snpstatic SLIST_HEAD(, uld_info) t3_uld_list; 199237263Snp#endif 200237263Snp 201167514Skmacy/* 202167514Skmacy * The driver uses the best interrupt scheme available on a platform in the 203167514Skmacy * order MSI-X, MSI, legacy pin interrupts. 
This parameter determines which 204167514Skmacy * of these schemes the driver may consider as follows: 205167514Skmacy * 206167514Skmacy * msi = 2: choose from among all three options 207167514Skmacy * msi = 1 : only consider MSI and pin interrupts 208167514Skmacy * msi = 0: force pin interrupts 209167514Skmacy */ 210167760Skmacystatic int msi_allowed = 2; 211170083Skmacy 212167514SkmacySYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters"); 213217321SmdfSYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0, 214167514Skmacy "MSI-X, MSI, INTx selector"); 215169978Skmacy 216169053Skmacy/* 217169978Skmacy * The driver uses an auto-queue algorithm by default. 218185165Skmacy * To disable it and force a single queue-set per port, use multiq = 0 219169978Skmacy */ 220185165Skmacystatic int multiq = 1; 221217321SmdfSYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0, 222185165Skmacy "use min(ncpus/ports, 8) queue-sets per port"); 223167514Skmacy 224176572Skmacy/* 225185165Skmacy * By default the driver will not update the firmware unless 226185165Skmacy * it was compiled against a newer version 227185165Skmacy * 228176572Skmacy */ 229176572Skmacystatic int force_fw_update = 0; 230217321SmdfSYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN, &force_fw_update, 0, 231176572Skmacy "update firmware even if up to date"); 232175200Skmacy 233205950Snpint cxgb_use_16k_clusters = -1; 234205950SnpSYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN, 235175200Skmacy &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue "); 236175200Skmacy 237208887Snpstatic int nfilters = -1; 238208887SnpSYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN, 239208887Snp &nfilters, 0, "max number of entries in the filter table"); 240194039Sgnn 241167514Skmacyenum { 242167514Skmacy MAX_TXQ_ENTRIES = 16384, 243167514Skmacy MAX_CTRL_TXQ_ENTRIES = 1024, 244167514Skmacy MAX_RSPQ_ENTRIES = 16384, 245167514Skmacy 
MAX_RX_BUFFERS = 16384, 246167514Skmacy MAX_RX_JUMBO_BUFFERS = 16384, 247167514Skmacy MIN_TXQ_ENTRIES = 4, 248167514Skmacy MIN_CTRL_TXQ_ENTRIES = 4, 249167514Skmacy MIN_RSPQ_ENTRIES = 32, 250172096Skmacy MIN_FL_ENTRIES = 32, 251172096Skmacy MIN_FL_JUMBO_ENTRIES = 32 252167514Skmacy}; 253167514Skmacy 254171471Skmacystruct filter_info { 255171471Skmacy u32 sip; 256171471Skmacy u32 sip_mask; 257171471Skmacy u32 dip; 258171471Skmacy u16 sport; 259171471Skmacy u16 dport; 260171471Skmacy u32 vlan:12; 261171471Skmacy u32 vlan_prio:3; 262171471Skmacy u32 mac_hit:1; 263171471Skmacy u32 mac_idx:4; 264171471Skmacy u32 mac_vld:1; 265171471Skmacy u32 pkt_type:2; 266171471Skmacy u32 report_filter_id:1; 267171471Skmacy u32 pass:1; 268171471Skmacy u32 rss:1; 269171471Skmacy u32 qset:3; 270171471Skmacy u32 locked:1; 271171471Skmacy u32 valid:1; 272171471Skmacy}; 273171471Skmacy 274171471Skmacyenum { FILTER_NO_VLAN_PRI = 7 }; 275171471Skmacy 276182679Skmacy#define EEPROM_MAGIC 0x38E2F10C 277182679Skmacy 278167514Skmacy#define PORT_MASK ((1 << MAX_NPORTS) - 1) 279167514Skmacy 280167514Skmacy/* Table for probing the cards. 
The desc field isn't actually used */ 281167514Skmacystruct cxgb_ident { 282167514Skmacy uint16_t vendor; 283167514Skmacy uint16_t device; 284167514Skmacy int index; 285167514Skmacy char *desc; 286167514Skmacy} cxgb_identifiers[] = { 287167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"}, 288167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"}, 289167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"}, 290167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"}, 291167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"}, 292167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"}, 293167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"}, 294167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"}, 295167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"}, 296167514Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"}, 297170654Skmacy {PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"}, 298197791Snp {PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"}, 299197791Snp {PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"}, 300197791Snp {PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"}, 301167514Skmacy {0, 0, 0, NULL} 302167514Skmacy}; 303167514Skmacy 304171471Skmacystatic int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset); 305171471Skmacy 306176472Skmacy 307174708Skmacystatic __inline char 308171471Skmacyt3rev2char(struct adapter *adapter) 309171471Skmacy{ 310171471Skmacy char rev = 'z'; 311171471Skmacy 312171471Skmacy switch(adapter->params.rev) { 313171471Skmacy case T3_REV_A: 314171471Skmacy rev = 'a'; 315171471Skmacy break; 316171471Skmacy case T3_REV_B: 317171471Skmacy case T3_REV_B2: 318171471Skmacy rev = 'b'; 319171471Skmacy break; 320171471Skmacy case T3_REV_C: 321171471Skmacy rev = 'c'; 322171471Skmacy break; 323171471Skmacy } 324171471Skmacy return rev; 325171471Skmacy} 326171471Skmacy 327167514Skmacystatic struct cxgb_ident * 328167514Skmacycxgb_get_ident(device_t dev) 329167514Skmacy{ 330167514Skmacy struct cxgb_ident *id; 331167514Skmacy 
332167514Skmacy for (id = cxgb_identifiers; id->desc != NULL; id++) { 333167514Skmacy if ((id->vendor == pci_get_vendor(dev)) && 334167514Skmacy (id->device == pci_get_device(dev))) { 335167514Skmacy return (id); 336167514Skmacy } 337167514Skmacy } 338167514Skmacy return (NULL); 339167514Skmacy} 340167514Skmacy 341167514Skmacystatic const struct adapter_info * 342167514Skmacycxgb_get_adapter_info(device_t dev) 343167514Skmacy{ 344167514Skmacy struct cxgb_ident *id; 345167514Skmacy const struct adapter_info *ai; 346183063Skmacy 347167514Skmacy id = cxgb_get_ident(dev); 348167514Skmacy if (id == NULL) 349167514Skmacy return (NULL); 350167514Skmacy 351167514Skmacy ai = t3_get_adapter_info(id->index); 352167514Skmacy 353167514Skmacy return (ai); 354167514Skmacy} 355167514Skmacy 356167514Skmacystatic int 357167514Skmacycxgb_controller_probe(device_t dev) 358167514Skmacy{ 359167514Skmacy const struct adapter_info *ai; 360167514Skmacy char *ports, buf[80]; 361170654Skmacy int nports; 362183063Skmacy 363167514Skmacy ai = cxgb_get_adapter_info(dev); 364167514Skmacy if (ai == NULL) 365167514Skmacy return (ENXIO); 366167514Skmacy 367170654Skmacy nports = ai->nports0 + ai->nports1; 368170654Skmacy if (nports == 1) 369167514Skmacy ports = "port"; 370167514Skmacy else 371167514Skmacy ports = "ports"; 372167514Skmacy 373199237Snp snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports); 374167514Skmacy device_set_desc_copy(dev, buf); 375167514Skmacy return (BUS_PROBE_DEFAULT); 376167514Skmacy} 377167514Skmacy 378176572Skmacy#define FW_FNAME "cxgb_t3fw" 379190330Sgnn#define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom" 380190330Sgnn#define TPSRAM_NAME "cxgb_t3%c_protocol_sram" 381171471Skmacy 382167514Skmacystatic int 383169978Skmacyupgrade_fw(adapter_t *sc) 384167514Skmacy{ 385167514Skmacy const struct firmware *fw; 386167514Skmacy int status; 387205944Snp u32 vers; 388167514Skmacy 389176572Skmacy if ((fw = firmware_get(FW_FNAME)) == NULL) { 390176572Skmacy device_printf(sc->dev, 
"Could not find firmware image %s\n", FW_FNAME); 391169978Skmacy return (ENOENT); 392171471Skmacy } else 393205944Snp device_printf(sc->dev, "installing firmware on card\n"); 394167514Skmacy status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize); 395167514Skmacy 396205944Snp if (status != 0) { 397205944Snp device_printf(sc->dev, "failed to install firmware: %d\n", 398205944Snp status); 399205944Snp } else { 400205944Snp t3_get_fw_version(sc, &vers); 401205944Snp snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", 402205944Snp G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), 403205944Snp G_FW_VERSION_MICRO(vers)); 404205944Snp } 405205944Snp 406167514Skmacy firmware_put(fw, FIRMWARE_UNLOAD); 407167514Skmacy 408167514Skmacy return (status); 409167514Skmacy} 410167514Skmacy 411192537Sgnn/* 412192537Sgnn * The cxgb_controller_attach function is responsible for the initial 413192537Sgnn * bringup of the device. Its responsibilities include: 414192537Sgnn * 415192537Sgnn * 1. Determine if the device supports MSI or MSI-X. 416192537Sgnn * 2. Allocate bus resources so that we can access the Base Address Register 417192537Sgnn * 3. Create and initialize mutexes for the controller and its control 418192537Sgnn * logic such as SGE and MDIO. 419192537Sgnn * 4. Call hardware specific setup routine for the adapter as a whole. 420192537Sgnn * 5. Allocate the BAR for doing MSI-X. 421192537Sgnn * 6. Setup the line interrupt iff MSI-X is not supported. 422192537Sgnn * 7. Create the driver's taskq. 423192584Sgnn * 8. Start one task queue service thread. 424192584Sgnn * 9. Check if the firmware and SRAM are up-to-date. They will be 425192584Sgnn * auto-updated later (before FULL_INIT_DONE), if required. 426192537Sgnn * 10. Create a child device for each MAC (port) 427192537Sgnn * 11. Initialize T3 private state. 428192537Sgnn * 12. Trigger the LED 429192537Sgnn * 13. Setup offload iff supported. 430192537Sgnn * 14. Reset/restart the tick callout. 
431192537Sgnn * 15. Attach sysctls 432192537Sgnn * 433192537Sgnn * NOTE: Any modification or deviation from this list MUST be reflected in 434192537Sgnn * the above comment. Failure to do so will result in problems on various 435192537Sgnn * error conditions including link flapping. 436192537Sgnn */ 437167514Skmacystatic int 438167514Skmacycxgb_controller_attach(device_t dev) 439167514Skmacy{ 440167514Skmacy device_t child; 441167514Skmacy const struct adapter_info *ai; 442167514Skmacy struct adapter *sc; 443172109Skmacy int i, error = 0; 444167514Skmacy uint32_t vers; 445167760Skmacy int port_qsets = 1; 446172109Skmacy int msi_needed, reg; 447185655Sgnn char buf[80]; 448185655Sgnn 449167514Skmacy sc = device_get_softc(dev); 450167514Skmacy sc->dev = dev; 451169978Skmacy sc->msi_count = 0; 452172109Skmacy ai = cxgb_get_adapter_info(dev); 453172109Skmacy 454237263Snp snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d", 455237263Snp device_get_unit(dev)); 456237263Snp ADAPTER_LOCK_INIT(sc, sc->lockbuf); 457237263Snp 458237263Snp snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d", 459237263Snp device_get_unit(dev)); 460237263Snp snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d", 461237263Snp device_get_unit(dev)); 462237263Snp snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d", 463237263Snp device_get_unit(dev)); 464237263Snp 465237263Snp MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN); 466237263Snp MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF); 467237263Snp MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF); 468237263Snp 469237263Snp mtx_lock(&t3_list_lock); 470237263Snp SLIST_INSERT_HEAD(&t3_list, sc, link); 471237263Snp mtx_unlock(&t3_list_lock); 472237263Snp 473167840Skmacy /* find the PCIe link width and set max read request to 4KB*/ 474219902Sjhb if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { 475210505Sjhb uint16_t lnk; 476171471Skmacy 477240680Sgavin lnk = 
pci_read_config(dev, reg + PCIER_LINK_STA, 2); 478240680Sgavin sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4; 479210505Sjhb if (sc->link_width < 8 && 480210505Sjhb (ai->caps & SUPPORTED_10000baseT_Full)) { 481210505Sjhb device_printf(sc->dev, 482210505Sjhb "PCIe x%d Link, expect reduced performance\n", 483210505Sjhb sc->link_width); 484210505Sjhb } 485210505Sjhb 486210505Sjhb pci_set_max_read_req(dev, 4096); 487167840Skmacy } 488204274Snp 489171978Skmacy touch_bars(dev); 490167514Skmacy pci_enable_busmaster(dev); 491167514Skmacy /* 492167514Skmacy * Allocate the registers and make them available to the driver. 493167514Skmacy * The registers that we care about for NIC mode are in BAR 0 494167514Skmacy */ 495167514Skmacy sc->regs_rid = PCIR_BAR(0); 496167514Skmacy if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 497167514Skmacy &sc->regs_rid, RF_ACTIVE)) == NULL) { 498176472Skmacy device_printf(dev, "Cannot allocate BAR region 0\n"); 499237263Snp error = ENXIO; 500237263Snp goto out; 501167514Skmacy } 502167514Skmacy 503167514Skmacy sc->bt = rman_get_bustag(sc->regs_res); 504167514Skmacy sc->bh = rman_get_bushandle(sc->regs_res); 505167514Skmacy sc->mmio_len = rman_get_size(sc->regs_res); 506167769Skmacy 507197791Snp for (i = 0; i < MAX_NPORTS; i++) 508197791Snp sc->port[i].adapter = sc; 509197791Snp 510167769Skmacy if (t3_prep_adapter(sc, ai, 1) < 0) { 511170654Skmacy printf("prep adapter failed\n"); 512167769Skmacy error = ENODEV; 513167769Skmacy goto out; 514167769Skmacy } 515231175Snp 516231175Snp sc->udbs_rid = PCIR_BAR(2); 517231175Snp sc->udbs_res = NULL; 518231175Snp if (is_offload(sc) && 519231175Snp ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 520231175Snp &sc->udbs_rid, RF_ACTIVE)) == NULL)) { 521231175Snp device_printf(dev, "Cannot allocate BAR region 1\n"); 522231175Snp error = ENXIO; 523231175Snp goto out; 524231175Snp } 525231175Snp 526177464Skmacy /* Allocate the BAR for doing MSI-X. 
If it succeeds, try to allocate 527167514Skmacy * enough messages for the queue sets. If that fails, try falling 528167514Skmacy * back to MSI. If that fails, then try falling back to the legacy 529167514Skmacy * interrupt pin model. 530167514Skmacy */ 531167514Skmacy sc->msix_regs_rid = 0x20; 532167514Skmacy if ((msi_allowed >= 2) && 533167514Skmacy (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 534167514Skmacy &sc->msix_regs_rid, RF_ACTIVE)) != NULL) { 535167514Skmacy 536192933Sgnn if (multiq) 537192933Sgnn port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus); 538192933Sgnn msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1; 539167760Skmacy 540192933Sgnn if (pci_msix_count(dev) == 0 || 541192933Sgnn (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 || 542192933Sgnn sc->msi_count != msi_needed) { 543192933Sgnn device_printf(dev, "alloc msix failed - " 544192933Sgnn "msi_count=%d, msi_needed=%d, err=%d; " 545192933Sgnn "will try MSI\n", sc->msi_count, 546192933Sgnn msi_needed, error); 547169978Skmacy sc->msi_count = 0; 548192933Sgnn port_qsets = 1; 549167514Skmacy pci_release_msi(dev); 550167514Skmacy bus_release_resource(dev, SYS_RES_MEMORY, 551167514Skmacy sc->msix_regs_rid, sc->msix_regs_res); 552167514Skmacy sc->msix_regs_res = NULL; 553167514Skmacy } else { 554167514Skmacy sc->flags |= USING_MSIX; 555192933Sgnn sc->cxgb_intr = cxgb_async_intr; 556192933Sgnn device_printf(dev, 557192933Sgnn "using MSI-X interrupts (%u vectors)\n", 558192933Sgnn sc->msi_count); 559167514Skmacy } 560167514Skmacy } 561167514Skmacy 562169978Skmacy if ((msi_allowed >= 1) && (sc->msi_count == 0)) { 563169978Skmacy sc->msi_count = 1; 564192933Sgnn if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) { 565192933Sgnn device_printf(dev, "alloc msi failed - " 566192933Sgnn "err=%d; will try INTx\n", error); 567169978Skmacy sc->msi_count = 0; 568192933Sgnn port_qsets = 1; 569167514Skmacy pci_release_msi(dev); 570167514Skmacy } else { 571167514Skmacy 
sc->flags |= USING_MSI; 572170081Skmacy sc->cxgb_intr = t3_intr_msi; 573192933Sgnn device_printf(dev, "using MSI interrupts\n"); 574167514Skmacy } 575167514Skmacy } 576169978Skmacy if (sc->msi_count == 0) { 577167760Skmacy device_printf(dev, "using line interrupts\n"); 578170081Skmacy sc->cxgb_intr = t3b_intr; 579167514Skmacy } 580167514Skmacy 581167514Skmacy /* Create a private taskqueue thread for handling driver events */ 582167514Skmacy sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT, 583167514Skmacy taskqueue_thread_enqueue, &sc->tq); 584167514Skmacy if (sc->tq == NULL) { 585167514Skmacy device_printf(dev, "failed to allocate controller task queue\n"); 586167514Skmacy goto out; 587167514Skmacy } 588171804Skmacy 589167514Skmacy taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq", 590167514Skmacy device_get_nameunit(dev)); 591170869Skmacy TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc); 592167514Skmacy 593167514Skmacy 594167514Skmacy /* Create a periodic callout for checking adapter status */ 595283291Sjkim callout_init(&sc->cxgb_tick_ch, 1); 596167514Skmacy 597189643Sgnn if (t3_check_fw_version(sc) < 0 || force_fw_update) { 598167514Skmacy /* 599167514Skmacy * Warn user that a firmware update will be attempted in init. 600167514Skmacy */ 601169978Skmacy device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n", 602169978Skmacy FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); 603167514Skmacy sc->flags &= ~FW_UPTODATE; 604167514Skmacy } else { 605167514Skmacy sc->flags |= FW_UPTODATE; 606167514Skmacy } 607171471Skmacy 608189643Sgnn if (t3_check_tpsram_version(sc) < 0) { 609171471Skmacy /* 610171471Skmacy * Warn user that a firmware update will be attempted in init. 
611171471Skmacy */ 612171471Skmacy device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n", 613171471Skmacy t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); 614171471Skmacy sc->flags &= ~TPS_UPTODATE; 615171471Skmacy } else { 616171471Skmacy sc->flags |= TPS_UPTODATE; 617171471Skmacy } 618237263Snp 619167514Skmacy /* 620167514Skmacy * Create a child device for each MAC. The ethernet attachment 621167514Skmacy * will be done in these children. 622167760Skmacy */ 623167760Skmacy for (i = 0; i < (sc)->params.nports; i++) { 624171978Skmacy struct port_info *pi; 625171978Skmacy 626167514Skmacy if ((child = device_add_child(dev, "cxgb", -1)) == NULL) { 627167514Skmacy device_printf(dev, "failed to add child port\n"); 628167514Skmacy error = EINVAL; 629167514Skmacy goto out; 630167514Skmacy } 631171978Skmacy pi = &sc->port[i]; 632171978Skmacy pi->adapter = sc; 633171978Skmacy pi->nqsets = port_qsets; 634171978Skmacy pi->first_qset = i*port_qsets; 635171978Skmacy pi->port_id = i; 636171978Skmacy pi->tx_chan = i >= ai->nports0; 637171978Skmacy pi->txpkt_intf = pi->tx_chan ? 
2 * (i - ai->nports0) + 1 : 2 * i; 638171978Skmacy sc->rxpkt_map[pi->txpkt_intf] = i; 639174708Skmacy sc->port[i].tx_chan = i >= ai->nports0; 640171471Skmacy sc->portdev[i] = child; 641171978Skmacy device_set_softc(child, pi); 642167514Skmacy } 643167514Skmacy if ((error = bus_generic_attach(dev)) != 0) 644167514Skmacy goto out; 645167514Skmacy 646167514Skmacy /* initialize sge private state */ 647170654Skmacy t3_sge_init_adapter(sc); 648167514Skmacy 649167514Skmacy t3_led_ready(sc); 650237263Snp 651167514Skmacy error = t3_get_fw_version(sc, &vers); 652167514Skmacy if (error) 653167514Skmacy goto out; 654167514Skmacy 655169978Skmacy snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", 656169978Skmacy G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), 657169978Skmacy G_FW_VERSION_MICRO(vers)); 658169978Skmacy 659199237Snp snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s", 660199237Snp ai->desc, is_offload(sc) ? "R" : "", 661185655Sgnn sc->params.vpd.ec, sc->params.vpd.sn); 662185655Sgnn device_set_desc_copy(dev, buf); 663185655Sgnn 664192540Sgnn snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x", 665192540Sgnn sc->params.vpd.port_type[0], sc->params.vpd.port_type[1], 666192540Sgnn sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]); 667192540Sgnn 668176472Skmacy device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]); 669209841Snp callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); 670174708Skmacy t3_add_attach_sysctls(sc); 671237263Snp 672237263Snp#ifdef TCP_OFFLOAD 673237263Snp for (i = 0; i < NUM_CPL_HANDLERS; i++) 674237263Snp sc->cpl_handler[i] = cpl_not_handled; 675237263Snp#endif 676239913Sjhb 677239913Sjhb t3_intr_clear(sc); 678239913Sjhb error = cxgb_setup_interrupts(sc); 679167514Skmacyout: 680167514Skmacy if (error) 681167514Skmacy cxgb_free(sc); 682167514Skmacy 683167514Skmacy return (error); 684167514Skmacy} 685167514Skmacy 686192537Sgnn/* 687192584Sgnn * The cxgb_controller_detach routine is called 
with the device is
 * unloaded from the system.
 */
static int
cxgb_controller_detach(device_t dev)
{
	struct adapter *sc;

	sc = device_get_softc(dev);

	cxgb_free(sc);

	return (0);
}

/*
 * The cxgb_free() is called by the cxgb_controller_detach() routine
 * to tear down the structures that were built up in
 * cxgb_controller_attach(), and should be the final piece of work
 * done when fully unloading the driver.
 *
 *
 *  1. Shutting down the threads started by the cxgb_controller_attach()
 *     routine.
 *  2. Stopping the lower level device and all callouts (cxgb_down_locked()).
 *  3. Detaching all of the port devices created during the
 *     cxgb_controller_attach() routine.
 *  4. Removing the device children created via cxgb_controller_attach().
 *  5. Releasing PCI resources associated with the device.
 *  6. Turning off the offload support, iff it was turned on.
 *  7. Destroying the mutexes created in cxgb_controller_attach().
 *
 */
static void
cxgb_free(struct adapter *sc)
{
	int i, nqsets = 0;

	/* Flag the adapter as going away so no new work is started. */
	ADAPTER_LOCK(sc);
	sc->flags |= CXGB_SHUTDOWN;
	ADAPTER_UNLOCK(sc);

	/*
	 * Make sure all child devices are gone.
	 */
	bus_generic_detach(sc->dev);
	for (i = 0; i < (sc)->params.nports; i++) {
		if (sc->portdev[i] &&
		    device_delete_child(sc->dev, sc->portdev[i]) != 0)
			device_printf(sc->dev, "failed to delete child port\n");
		/* Tally qsets across ports for t3_free_sge_resources below. */
		nqsets += sc->port[i].nqsets;
	}

	/*
	 * At this point, it is as if cxgb_port_detach has run on all ports, and
	 * cxgb_down has run on the adapter.  All interrupts have been silenced,
	 * all open devices have been closed.
	 */
	KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)",
	    __func__, sc->open_device_map));
	for (i = 0; i < sc->params.nports; i++) {
		KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!",
		    __func__, i));
	}

	/*
	 * Finish off the adapter's callouts.
	 */
	callout_drain(&sc->cxgb_tick_ch);
	callout_drain(&sc->sge_timer_ch);

	/*
	 * Release resources grabbed under FULL_INIT_DONE by cxgb_up.  The
	 * sysctls are cleaned up by the kernel linker.
	 */
	if (sc->flags & FULL_INIT_DONE) {
		t3_free_sge_resources(sc, nqsets);
		sc->flags &= ~FULL_INIT_DONE;
	}

	/*
	 * Release all interrupt resources.
	 */
	cxgb_teardown_interrupts(sc);
	if (sc->flags & (USING_MSI | USING_MSIX)) {
		device_printf(sc->dev, "releasing msi message(s)\n");
		pci_release_msi(sc->dev);
	} else {
		device_printf(sc->dev, "no msi message to release\n");
	}

	if (sc->msix_regs_res != NULL) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid,
		    sc->msix_regs_res);
	}

	/*
	 * Free the adapter's taskqueue.
	 */
	if (sc->tq != NULL) {
		taskqueue_free(sc->tq);
		sc->tq = NULL;
	}

	free(sc->filters, M_DEVBUF);
	t3_sge_free(sc);

	if (sc->udbs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid,
		    sc->udbs_res);

	if (sc->regs_res != NULL)
		bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid,
		    sc->regs_res);

	MTX_DESTROY(&sc->mdio_lock);
	MTX_DESTROY(&sc->sge.reg_lock);
	MTX_DESTROY(&sc->elmer_lock);
	/* Unlink this adapter from the driver's global list. */
	mtx_lock(&t3_list_lock);
	SLIST_REMOVE(&t3_list, sc, adapter, link);
	mtx_unlock(&t3_list_lock);
	ADAPTER_LOCK_DEINIT(sc);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@sc: the controller softc
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int
setup_sge_qsets(adapter_t *sc)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	u_int ntxq = SGE_TXQ_PER_SET;

	if ((err = t3_sge_alloc(sc)) != 0) {
		device_printf(sc->dev, "t3_sge_alloc returned %d\n", err);
		return (err);
	}

	/* irq_idx -1 selects polling-style hookup on rev > 0 w/o MSI. */
	if (sc->params.rev > 0 && !(sc->flags & USING_MSI))
		irq_idx = -1;

	for (i = 0; i < (sc)->params.nports; i++) {
		struct port_info *pi = &sc->port[i];

		for (j = 0; j < pi->nqsets; j++, qset_idx++) {
			/* With MSI-X each qset gets its own vector (rid
			 * qset_idx + 1); otherwise all share irq_idx. */
			err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports,
			    (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx,
			    &sc->params.sge.qset[qset_idx], ntxq, pi);
			if (err) {
				/* Free only the qsets allocated so far. */
				t3_free_sge_resources(sc, qset_idx);
				device_printf(sc->dev,
				    "t3_sge_alloc_qset failed with %d\n", err);
				return (err);
			}
		}
	}

	return (0);
}

/*
 * Undo cxgb_setup_interrupts(): tear down and release every MSI-X vector
 * that was fully hooked up, then the INTx/MSI interrupt, and reset the
 * bookkeeping in the softc so the routine is idempotent.
 */
static void
cxgb_teardown_interrupts(adapter_t *sc)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		if (sc->msix_intr_tag[i] == NULL) {

			/* Should have been setup fully or not at all */
			KASSERT(sc->msix_irq_res[i] == NULL &&
			    sc->msix_irq_rid[i] == 0,
			    ("%s: half-done interrupt (%d).", __func__, i));

			continue;
		}

		bus_teardown_intr(sc->dev, sc->msix_irq_res[i],
		    sc->msix_intr_tag[i]);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i],
		    sc->msix_irq_res[i]);

		sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL;
		sc->msix_irq_rid[i] = 0;
	}

	if (sc->intr_tag) {
		KASSERT(sc->irq_res != NULL,
		    ("%s: half-done interrupt.", __func__));

		bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag);
		bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
		    sc->irq_res);

		sc->irq_res = sc->intr_tag = NULL;
		sc->irq_rid = 0;
	}
}

/*
 * Allocate and hook up the adapter's interrupts: one INTx/MSI/first-MSI-X
 * vector for the main handler (sc->cxgb_intr), plus one MSI-X vector per
 * queue set (t3_intr_msix) when MSI-X is in use.  On any failure every
 * partially set up interrupt is torn down before returning the error.
 */
static int
cxgb_setup_interrupts(adapter_t *sc)
{
	struct resource *res;
	void *tag;
	int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX);

	/* rid 0 is legacy INTx; MSI/MSI-X message rids start at 1. */
	sc->irq_rid = intr_flag ? 1 : 0;
	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n",
		    intr_flag, sc->irq_rid);
		err = EINVAL;
		sc->irq_rid = 0;
	} else {
		err = bus_setup_intr(sc->dev, sc->irq_res,
		    INTR_MPSAFE | INTR_TYPE_NET, NULL,
		    sc->cxgb_intr, sc, &sc->intr_tag);

		if (err) {
			device_printf(sc->dev,
			    "Cannot set up interrupt (%x, %u, %d)\n",
			    intr_flag, sc->irq_rid, err);
			bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid,
			    sc->irq_res);
			sc->irq_res = sc->intr_tag = NULL;
			sc->irq_rid = 0;
		}
	}

	/* That's all for INTx or MSI */
	if (!(intr_flag & USING_MSIX) || err)
		return (err);

	/* First MSI-X vector carries error/async events. */
	bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err");
	for (i = 0; i < sc->msi_count - 1; i++) {
		rid = i + 2;
		res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (res == NULL) {
			device_printf(sc->dev, "Cannot allocate interrupt "
			    "for message %d\n", rid);
			err = EINVAL;
			break;
		}

		err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET,
		    NULL, t3_intr_msix, &sc->sge.qs[i], &tag);
		if (err) {
			device_printf(sc->dev, "Cannot set up interrupt "
			    "for message %d (%d)\n", rid, err);
			bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res);
			break;
		}

		sc->msix_irq_rid[i] = rid;
		sc->msix_irq_res[i] = res;
		sc->msix_intr_tag[i] = tag;
		bus_describe_intr(sc->dev, res, tag, "qs%d", i);
	}

	if (err)
		cxgb_teardown_interrupts(sc);

	return (err);
}

/*
 * Set the device description shown by the bus for a port child device.
 */
static int
cxgb_port_probe(device_t dev)
{
	struct port_info *p;
	char buf[80];
	const char *desc;

	p = device_get_softc(dev);
	desc = p->phy.desc;
	snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
	device_set_desc_copy(dev, buf);
	return (0);
}

/*
 * Create the per-port character device (/dev/<ifname>) used for ioctls.
 */
static int
cxgb_makedev(struct port_info *pi)
{

	pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit,
	    UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp));

	if (pi->port_cdev == NULL)
		return (ENOMEM);

	pi->port_cdev->si_drv1 = (void *)pi;

	return (0);
}

/* Interface capabilities advertised (and enabled by default) per port. */
#define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
    IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
#define CXGB_CAP_ENABLE CXGB_CAP

/*
 * Attach one port child device: initialize the port lock and link-check
 * callout/task, allocate and configure its ifnet, attach it to the network
 * stack, create the port's cdev, and build its media list.
 */
static int
cxgb_port_attach(device_t dev)
{
	struct port_info *p;
	struct ifnet *ifp;
	int err;
	struct adapter *sc;

	p = device_get_softc(dev);
	sc = p->adapter;
	snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d",
	    device_get_unit(device_get_parent(dev)), p->port_id);
	PORT_LOCK_INIT(p, p->lockbuf);

	callout_init(&p->link_check_ch, 1);
	TASK_INIT(&p->link_check_task, 0, check_link_status, p);

	/* Allocate an ifnet object and set it up */
	ifp = p->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "Cannot allocate ifnet\n");
		return (ENOMEM);
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = cxgb_init;
	ifp->if_softc = p;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = cxgb_ioctl;
	ifp->if_transmit = cxgb_transmit;
	ifp->if_qflush = cxgb_qflush;
	ifp->if_get_counter = cxgb_get_counter;

	ifp->if_capabilities = CXGB_CAP;
#ifdef TCP_OFFLOAD
	if (is_offload(sc))
		ifp->if_capabilities |= IFCAP_TOE4;
#endif
	ifp->if_capenable = CXGB_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;

	/*
	 * Disable TSO on 4-port - it isn't supported by the firmware.
	 */
	if (sc->params.nports > 2) {
		ifp->if_capabilities &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
		ifp->if_capenable &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
		ifp->if_hwassist &= ~CSUM_TSO;
	}

	ether_ifattach(ifp, p->hw_addr);

#ifdef DEFAULT_JUMBO
	if (sc->params.nports <= 2)
		ifp->if_mtu = ETHERMTU_JUMBO;
#endif
	if ((err = cxgb_makedev(p)) != 0) {
		/*
		 * NOTE(review): this error return does not undo
		 * ether_ifattach()/if_alloc()/PORT_LOCK_INIT() above, and
		 * device_detach is not invoked for a failed attach — looks
		 * like a leak on this path; TODO confirm against newalbus
		 * attach-failure semantics before changing.
		 */
		printf("makedev failed %d\n", err);
		return (err);
	}

	/* Create a list of media supported by this port */
	ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
	    cxgb_media_status);
	cxgb_build_medialist(p);

	t3_sge_init_port(p);

	return (err);
}

/*
 * cxgb_port_detach() is called via the device_detach methods when
 * cxgb_free() calls the bus_generic_detach.  It is responsible for
 * removing the device from the view of the kernel, i.e. from all
 * interfaces lists etc.  This routine is only called when the driver is
 * being unloaded, not when the link goes down.
1073192537Sgnn */ 1074167514Skmacystatic int 1075167514Skmacycxgb_port_detach(device_t dev) 1076167514Skmacy{ 1077167514Skmacy struct port_info *p; 1078192537Sgnn struct adapter *sc; 1079194521Skmacy int i; 1080167514Skmacy 1081167514Skmacy p = device_get_softc(dev); 1082192537Sgnn sc = p->adapter; 1083169978Skmacy 1084202671Snp /* Tell cxgb_ioctl and if_init that the port is going away */ 1085202671Snp ADAPTER_LOCK(sc); 1086202671Snp SET_DOOMED(p); 1087202671Snp wakeup(&sc->flags); 1088202671Snp while (IS_BUSY(sc)) 1089202671Snp mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0); 1090202671Snp SET_BUSY(sc); 1091202671Snp ADAPTER_UNLOCK(sc); 1092194521Skmacy 1093192537Sgnn if (p->port_cdev != NULL) 1094192537Sgnn destroy_dev(p->port_cdev); 1095194521Skmacy 1096194521Skmacy cxgb_uninit_synchronized(p); 1097192537Sgnn ether_ifdetach(p->ifp); 1098192537Sgnn 1099194521Skmacy for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { 1100194521Skmacy struct sge_qset *qs = &sc->sge.qs[i]; 1101194521Skmacy struct sge_txq *txq = &qs->txq[TXQ_ETH]; 1102194521Skmacy 1103194521Skmacy callout_drain(&txq->txq_watchdog); 1104194521Skmacy callout_drain(&txq->txq_timer); 1105192537Sgnn } 1106192537Sgnn 1107170869Skmacy PORT_LOCK_DEINIT(p); 1108167514Skmacy if_free(p->ifp); 1109194521Skmacy p->ifp = NULL; 1110194521Skmacy 1111202671Snp ADAPTER_LOCK(sc); 1112202671Snp CLR_BUSY(sc); 1113202671Snp wakeup_one(&sc->flags); 1114202671Snp ADAPTER_UNLOCK(sc); 1115167514Skmacy return (0); 1116167514Skmacy} 1117167514Skmacy 1118167514Skmacyvoid 1119167514Skmacyt3_fatal_err(struct adapter *sc) 1120167514Skmacy{ 1121167514Skmacy u_int fw_status[4]; 1122183062Skmacy 1123172096Skmacy if (sc->flags & FULL_INIT_DONE) { 1124172096Skmacy t3_sge_stop(sc); 1125172096Skmacy t3_write_reg(sc, A_XGM_TX_CTRL, 0); 1126172096Skmacy t3_write_reg(sc, A_XGM_RX_CTRL, 0); 1127172096Skmacy t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0); 1128172096Skmacy t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0); 
1129172096Skmacy t3_intr_disable(sc); 1130172096Skmacy } 1131167514Skmacy device_printf(sc->dev,"encountered fatal error, operation suspended\n"); 1132167514Skmacy if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status)) 1133167514Skmacy device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n", 1134167514Skmacy fw_status[0], fw_status[1], fw_status[2], fw_status[3]); 1135167514Skmacy} 1136167514Skmacy 1137167514Skmacyint 1138167514Skmacyt3_os_find_pci_capability(adapter_t *sc, int cap) 1139167514Skmacy{ 1140167514Skmacy device_t dev; 1141167514Skmacy struct pci_devinfo *dinfo; 1142167514Skmacy pcicfgregs *cfg; 1143167514Skmacy uint32_t status; 1144167514Skmacy uint8_t ptr; 1145167514Skmacy 1146167514Skmacy dev = sc->dev; 1147167514Skmacy dinfo = device_get_ivars(dev); 1148167514Skmacy cfg = &dinfo->cfg; 1149167514Skmacy 1150167514Skmacy status = pci_read_config(dev, PCIR_STATUS, 2); 1151167514Skmacy if (!(status & PCIM_STATUS_CAPPRESENT)) 1152167514Skmacy return (0); 1153167514Skmacy 1154167514Skmacy switch (cfg->hdrtype & PCIM_HDRTYPE) { 1155167514Skmacy case 0: 1156167514Skmacy case 1: 1157167514Skmacy ptr = PCIR_CAP_PTR; 1158167514Skmacy break; 1159167514Skmacy case 2: 1160167514Skmacy ptr = PCIR_CAP_PTR_2; 1161167514Skmacy break; 1162167514Skmacy default: 1163167514Skmacy return (0); 1164167514Skmacy break; 1165167514Skmacy } 1166167514Skmacy ptr = pci_read_config(dev, ptr, 1); 1167167514Skmacy 1168167514Skmacy while (ptr != 0) { 1169167514Skmacy if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap) 1170167514Skmacy return (ptr); 1171167514Skmacy ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1); 1172167514Skmacy } 1173167514Skmacy 1174167514Skmacy return (0); 1175167514Skmacy} 1176167514Skmacy 1177167514Skmacyint 1178167514Skmacyt3_os_pci_save_state(struct adapter *sc) 1179167514Skmacy{ 1180167514Skmacy device_t dev; 1181167514Skmacy struct pci_devinfo *dinfo; 1182167514Skmacy 1183167514Skmacy dev = sc->dev; 1184167514Skmacy dinfo = device_get_ivars(dev); 
1185167514Skmacy 1186167514Skmacy pci_cfg_save(dev, dinfo, 0); 1187167514Skmacy return (0); 1188167514Skmacy} 1189167514Skmacy 1190167514Skmacyint 1191167514Skmacyt3_os_pci_restore_state(struct adapter *sc) 1192167514Skmacy{ 1193167514Skmacy device_t dev; 1194167514Skmacy struct pci_devinfo *dinfo; 1195167514Skmacy 1196167514Skmacy dev = sc->dev; 1197167514Skmacy dinfo = device_get_ivars(dev); 1198167514Skmacy 1199167514Skmacy pci_cfg_restore(dev, dinfo); 1200167514Skmacy return (0); 1201167514Skmacy} 1202167514Skmacy 1203167514Skmacy/** 1204167514Skmacy * t3_os_link_changed - handle link status changes 1205197791Snp * @sc: the adapter associated with the link change 1206197791Snp * @port_id: the port index whose link status has changed 1207177340Skmacy * @link_status: the new status of the link 1208167514Skmacy * @speed: the new speed setting 1209167514Skmacy * @duplex: the new duplex setting 1210167514Skmacy * @fc: the new flow-control setting 1211167514Skmacy * 1212167514Skmacy * This is the OS-dependent handler for link status changes. The OS 1213167514Skmacy * neutral handler takes care of most of the processing for these events, 1214167514Skmacy * then calls this handler for any OS-specific processing. 
 */
void
t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed,
    int duplex, int fc, int mac_was_reset)
{
	struct port_info *pi = &adapter->port[port_id];
	struct ifnet *ifp = pi->ifp;

	/* no race with detach, so ifp should always be good */
	KASSERT(ifp, ("%s: if detached.", __func__));

	/* Reapply mac settings if they were lost due to a reset */
	if (mac_was_reset) {
		PORT_LOCK(pi);
		cxgb_update_mac_settings(pi);
		PORT_UNLOCK(pi);
	}

	if (link_status) {
		ifp->if_baudrate = IF_Mbps(speed);
		if_link_state_change(ifp, LINK_STATE_UP);
	} else
		if_link_state_change(ifp, LINK_STATE_DOWN);
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@phy: the PHY reporting the module change
 *	@mod_type: new module type
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	/* Indexed by phy.modtype; NULL slot corresponds to "none". */
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown"
	};
	struct port_info *pi = &adap->port[port_id];
	int mod = pi->phy.modtype;

	/* Rebuild the media list if the module type actually changed. */
	if (mod != pi->media.ifm_cur->ifm_data)
		cxgb_build_medialist(pi);

	if (mod == phy_modtype_none)
		if_printf(pi->ifp, "PHY module unplugged\n");
	else {
		KASSERT(mod < ARRAY_SIZE(mod_str),
		    ("invalid PHY module type %d", mod));
		if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]);
	}
}

/*
 * Record a port's Ethernet address in the port structure.  Called early
 * in attach (before the ifnet exists), so the address is only stashed here
 * and programmed into the hardware later.
 */
void
t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[])
{

	/*
	 * The ifnet might not be allocated before this gets called,
	 * as this is called early on in attach by t3_prep_adapter
	 * save the address off in the port structure
	 */
	if (cxgb_debug)
		printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":");
	bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN);
}

/*
 * Programs the XGMAC based on the settings in the ifnet.  These settings
 * include MTU, MAC address, mcast addresses, etc.
 */
static void
cxgb_update_mac_settings(struct port_info *p)
{
	struct ifnet *ifp = p->ifp;
	struct t3_rx_mode rm;
	struct cmac *mac = &p->mac;
	int mtu, hwtagging;

	PORT_LOCK_ASSERT_OWNED(p);

	bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN);

	/* Leave room for a VLAN tag when the stack expects one. */
	mtu = ifp->if_mtu;
	if (ifp->if_capenable & IFCAP_VLAN_MTU)
		mtu += ETHER_VLAN_ENCAP_LEN;

	hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0;

	t3_mac_set_mtu(mac, mtu);
	t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging);
	t3_mac_set_address(mac, 0, p->hw_addr);
	t3_init_rx_mode(&rm, p);
	t3_mac_set_rx_mode(mac, &rm);
}

/*
 * Poll (up to 5 * 10ms) until queue set 0 has seen 'n' more offload
 * packets than 'init_cnt', i.e. until replies to 'n' management requests
 * have arrived.  Returns 0 on success or ETIMEDOUT.
 */
static int
await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
    unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return (ETIMEDOUT);
		t3_os_sleep(10);
	}
	return 0;
}

/*
 * Initialize TP on-chip memory parity by writing every SMT, L2T and
 * routing-table entry via management CPLs, then waiting for all replies.
 * Offload mode is enabled for the duration and restored afterwards.
 */
static int
init_tp_parity(struct adapter *adap)
{
	int i;
	struct mbuf *m;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	/* Touch all 16 SMT entries. */
	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_smt_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, m);
	}

	/* Touch all 2048 L2T entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_l2t_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, m);
	}

	/* Touch all 2048 routing-table entries. */
	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		m = m_gethdr(M_WAITOK, MT_DATA);
		req = mtod(m, struct cpl_rte_write_req *);
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		memset(req, 0, sizeof(*req));
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, m);
	}

	/* Final marker request so we know all of the above completed. */
	m = m_gethdr(M_WAITOK, MT_DATA);
	greq = mtod(m, struct cpl_set_tcb_field *);
	m->m_len = m->m_pkthdr.len = sizeof(*greq);
	memset(greq, 0, sizeof(*greq));
	greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) =
	    htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = htobe64(1);
	t3_mgmt_tx(adap, m);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return (i);
}

/**
 *	setup_rss - configure Receive Side Steering (per-queue connection demux)
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void
setup_rss(adapter_t *adap)
{
	int i;
	u_int nq[2];
	uint8_t cpus[SGE_QSETS + 1];
	uint16_t rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	/* Count the qsets per Tx channel. */
	nq[0] = nq[1] = 0;
	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		nq[pi->tx_chan] += pi->nqsets;
	}
	/* First half of the table feeds channel 0, second half channel 1. */
	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = nq[0] ? i % nq[0] : 0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? i % nq[1] + nq[0] : 0;
	}

	/* Calculate the reverse RSS map table */
	for (i = 0; i < SGE_QSETS; ++i)
		adap->rrss_map[i] = 0xff;
	for (i = 0; i < RSS_TABLE_SIZE; ++i)
		if (adap->rrss_map[rspq_map[i]] == 0xff)
			adap->rrss_map[rspq_map[i]] = i;

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
	    F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN |
	    F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ,
	    cpus, rspq_map);

}

/*
 * Send a firmware management work request that binds packet-scheduler
 * entry 'qidx' (with bounds lo/hi) to 'port'.  Best effort: silently
 * skipped if no mbuf is available.
 */
static void
send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
    int hi, int port)
{
	struct mbuf *m;
	struct mngt_pktsched_wr *req;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m) {
		req = mtod(m, struct mngt_pktsched_wr *);
		req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
		req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
		req->sched = sched;
		req->idx = qidx;
		req->min = lo;
		req->max = hi;
		req->binding = port;
		m->m_len = m->m_pkthdr.len = sizeof(*req);
		t3_mgmt_tx(adap, m);
	}
}

/*
 * Bind every port's queue sets to that port's Tx channel in the firmware
 * packet scheduler.
 */
static void
bind_qsets(adapter_t *sc)
{
	int i, j;

	for (i = 0; i < (sc)->params.nports; ++i) {
		const struct port_info *pi = adap2pinfo(sc, i);

		for (j = 0; j < pi->nqsets; ++j) {
			send_pktsched_cmd(sc, 1, pi->first_qset + j, -1,
			    -1, pi->tx_chan);

		}
1472167514Skmacy } 1473167514Skmacy} 1474167514Skmacy 1475171471Skmacystatic void 1476171471Skmacyupdate_tpeeprom(struct adapter *adap) 1477171471Skmacy{ 1478171471Skmacy const struct firmware *tpeeprom; 1479172109Skmacy 1480171471Skmacy uint32_t version; 1481171471Skmacy unsigned int major, minor; 1482171471Skmacy int ret, len; 1483189643Sgnn char rev, name[32]; 1484171471Skmacy 1485171471Skmacy t3_seeprom_read(adap, TP_SRAM_OFFSET, &version); 1486171471Skmacy 1487171471Skmacy major = G_TP_VERSION_MAJOR(version); 1488171471Skmacy minor = G_TP_VERSION_MINOR(version); 1489171471Skmacy if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) 1490171471Skmacy return; 1491171471Skmacy 1492171471Skmacy rev = t3rev2char(adap); 1493189643Sgnn snprintf(name, sizeof(name), TPEEPROM_NAME, rev); 1494171471Skmacy 1495189643Sgnn tpeeprom = firmware_get(name); 1496171471Skmacy if (tpeeprom == NULL) { 1497190330Sgnn device_printf(adap->dev, 1498190330Sgnn "could not load TP EEPROM: unable to load %s\n", 1499190330Sgnn name); 1500171471Skmacy return; 1501171471Skmacy } 1502171471Skmacy 1503171471Skmacy len = tpeeprom->datasize - 4; 1504171471Skmacy 1505171471Skmacy ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize); 1506171471Skmacy if (ret) 1507171471Skmacy goto release_tpeeprom; 1508171471Skmacy 1509171471Skmacy if (len != TP_SRAM_LEN) { 1510190330Sgnn device_printf(adap->dev, 1511190330Sgnn "%s length is wrong len=%d expected=%d\n", name, 1512190330Sgnn len, TP_SRAM_LEN); 1513171471Skmacy return; 1514171471Skmacy } 1515171471Skmacy 1516171471Skmacy ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize, 1517171471Skmacy TP_SRAM_OFFSET); 1518171471Skmacy 1519171471Skmacy if (!ret) { 1520171471Skmacy device_printf(adap->dev, 1521171471Skmacy "Protocol SRAM image updated in EEPROM to %d.%d.%d\n", 1522171471Skmacy TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); 1523171471Skmacy } else 1524190330Sgnn device_printf(adap->dev, 1525190330Sgnn 
"Protocol SRAM image update in EEPROM failed\n"); 1526171471Skmacy 1527171471Skmacyrelease_tpeeprom: 1528171471Skmacy firmware_put(tpeeprom, FIRMWARE_UNLOAD); 1529171471Skmacy 1530171471Skmacy return; 1531171471Skmacy} 1532171471Skmacy 1533171471Skmacystatic int 1534171471Skmacyupdate_tpsram(struct adapter *adap) 1535171471Skmacy{ 1536171471Skmacy const struct firmware *tpsram; 1537171471Skmacy int ret; 1538189643Sgnn char rev, name[32]; 1539171471Skmacy 1540171471Skmacy rev = t3rev2char(adap); 1541189643Sgnn snprintf(name, sizeof(name), TPSRAM_NAME, rev); 1542171471Skmacy 1543171471Skmacy update_tpeeprom(adap); 1544171471Skmacy 1545189643Sgnn tpsram = firmware_get(name); 1546171471Skmacy if (tpsram == NULL){ 1547176613Skmacy device_printf(adap->dev, "could not load TP SRAM\n"); 1548171471Skmacy return (EINVAL); 1549171471Skmacy } else 1550176613Skmacy device_printf(adap->dev, "updating TP SRAM\n"); 1551171471Skmacy 1552171471Skmacy ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize); 1553171471Skmacy if (ret) 1554171471Skmacy goto release_tpsram; 1555171471Skmacy 1556171471Skmacy ret = t3_set_proto_sram(adap, tpsram->data); 1557171471Skmacy if (ret) 1558171471Skmacy device_printf(adap->dev, "loading protocol SRAM failed\n"); 1559171471Skmacy 1560171471Skmacyrelease_tpsram: 1561171471Skmacy firmware_put(tpsram, FIRMWARE_UNLOAD); 1562171471Skmacy 1563171471Skmacy return ret; 1564171471Skmacy} 1565171471Skmacy 1566169978Skmacy/** 1567169978Skmacy * cxgb_up - enable the adapter 1568169978Skmacy * @adap: adapter being enabled 1569169978Skmacy * 1570169978Skmacy * Called when the first port is enabled, this function performs the 1571169978Skmacy * actions necessary to make an adapter operational, such as completing 1572169978Skmacy * the initialization of HW modules, and enabling interrupts. 
 */
static int
cxgb_up(struct adapter *sc)
{
	int err = 0;
	/* Max filters = MC5 size minus the TIDs reserved for offload. */
	unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS;

	KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)",
	    __func__, sc->open_device_map));

	/* One-time (per load) full initialization. */
	if ((sc->flags & FULL_INIT_DONE) == 0) {

		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

		if ((sc->flags & FW_UPTODATE) == 0)
			if ((err = upgrade_fw(sc)))
				goto out;

		if ((sc->flags & TPS_UPTODATE) == 0)
			if ((err = update_tpsram(sc)))
				goto out;

		/*
		 * Carve MC5 space for filters; nfilters < 0 means "as many
		 * as fit", otherwise cap the requested count at mxf.
		 */
		if (is_offload(sc) && nfilters != 0) {
			sc->params.mc5.nservers = 0;

			if (nfilters < 0)
				sc->params.mc5.nfilters = mxf;
			else
				sc->params.mc5.nfilters = min(nfilters, mxf);
		}

		err = t3_init_hw(sc, 0);
		if (err)
			goto out;

		t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(sc);
		if (err)
			goto out;

		alloc_filters(sc);
		setup_rss(sc);

		t3_add_configured_sysctls(sc);
		sc->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(sc);
	t3_sge_start(sc);
	t3_intr_enable(sc);

	/* T3C needs TP parity initialized once per load (offload only). */
	if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) &&
	    is_offload(sc) && init_tp_parity(sc) == 0)
		sc->flags |= TP_PARITY_INIT;

	if (sc->flags & TP_PARITY_INIT) {
		t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(sc->flags & QUEUES_BOUND)) {
		bind_qsets(sc);
		setup_hw_filters(sc);
		sc->flags |= QUEUES_BOUND;
	}

	t3_sge_reset_adapter(sc);
out:
	return (err);
}

/*
 * Called when the last open device is closed.  Does NOT undo all of cxgb_up's
 * work.  Specifically, the resources grabbed under FULL_INIT_DONE are released
 * during controller_detach, not here.
 */
static void
cxgb_down(struct adapter *sc)
{
	t3_sge_stop(sc);
	t3_intr_disable(sc);
}

/*
 * if_init for cxgb ports.
 */
static void
cxgb_init(void *arg)
{
	struct port_info *p = arg;
	struct adapter *sc = p->adapter;

	ADAPTER_LOCK(sc);
	cxgb_init_locked(p); /* releases adapter lock */
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
}

/*
 * Bring one port up.  Entered with the adapter lock held; the lock is
 * always released by the time this returns (see the `done' path).
 * Returns 0 or an errno (EINTR if interrupted while waiting, ENXIO if
 * the port is being detached).
 */
static int
cxgb_init_locked(struct port_info *p)
{
	struct adapter *sc = p->adapter;
	struct ifnet *ifp = p->ifp;
	struct cmac *mac = &p->mac;
	int i, rc = 0, may_sleep = 0, gave_up_lock = 0;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	/* Wait out any other thread that has marked the adapter busy. */
	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
		gave_up_lock = 1;
		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) {
			rc = EINTR;
			goto done;
		}
	}
	if (IS_DOOMED(p)) {
		rc = ENXIO;
		goto done;
	}
	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));

	/*
	 * The code that runs during one-time adapter initialization can sleep
	 * so it's important not to hold any locks across it.
	 */
	may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1;

	if (may_sleep) {
		/* Mark busy and drop the lock while cxgb_up() sleeps. */
		SET_BUSY(sc);
		gave_up_lock = 1;
		ADAPTER_UNLOCK(sc);
	}

	/* First port to open brings the whole adapter up. */
	if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0))
		goto done;

	PORT_LOCK(p);
	/* Already open and running: nothing to do. */
	if (isset(&sc->open_device_map, p->port_id) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		PORT_UNLOCK(p);
		goto done;
	}
	t3_port_intr_enable(sc, p->port_id);
	if (!mac->multiport)
		t3_mac_init(mac);
	cxgb_update_mac_settings(p);
	t3_link_start(&p->phy, mac, &p->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	PORT_UNLOCK(p);

	/* Arm the per-txq watchdog on this port's queue sets. */
	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_qset *qs = &sc->sge.qs[i];
		struct sge_txq *txq = &qs->txq[TXQ_ETH];

		callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs,
		    txq->txq_watchdog.c_cpu);
	}

	/* all ok */
	setbit(&sc->open_device_map, p->port_id);
	callout_reset(&p->link_check_ch,
	    p->phy.caps & SUPPORTED_LINK_IRQ ?
	    hz * 3 : hz / 4,
	    link_check_callout, p);

done:
	if (may_sleep) {
		ADAPTER_LOCK(sc);
		KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
		CLR_BUSY(sc);
	}
	/* Wake any thread that slept on sc->flags while we held BUSY. */
	if (gave_up_lock)
		wakeup_one(&sc->flags);
	ADAPTER_UNLOCK(sc);
	return (rc);
}

/*
 * Bring one port down.  Entered with the adapter lock held; marks the
 * adapter busy, drops the lock around the sleepable teardown, then
 * reacquires it.  The lock is released before returning.
 */
static int
cxgb_uninit_locked(struct port_info *p)
{
	struct adapter *sc = p->adapter;
	int rc;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	while (!IS_DOOMED(p) && IS_BUSY(sc)) {
		if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) {
			rc = EINTR;
			goto done;
		}
	}
	if (IS_DOOMED(p)) {
		rc = ENXIO;
		goto done;
	}
	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
	SET_BUSY(sc);
	ADAPTER_UNLOCK(sc);

	rc = cxgb_uninit_synchronized(p);

	ADAPTER_LOCK(sc);
	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
	CLR_BUSY(sc);
	wakeup_one(&sc->flags);
done:
	ADAPTER_UNLOCK(sc);
	return (rc);
}

/*
 * Called on "ifconfig down", and from port_detach
 */
static int
cxgb_uninit_synchronized(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = pi->ifp;

	/*
	 * taskqueue_drain may cause a deadlock if the adapter lock is held.
	 */
	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/*
	 * Clear this port's bit from the open device map, and then drain all
	 * the tasks that can access/manipulate this port's port_info or ifp.
	 * We disable this port's interrupts here and so the slow/ext
	 * interrupt tasks won't be enqueued.  The tick task will continue to
	 * be enqueued every second but the runs after this drain will not see
	 * this port in the open device map.
	 *
	 * A well behaved task must take open_device_map into account and ignore
	 * ports that are not open.
	 */
	clrbit(&sc->open_device_map, pi->port_id);
	t3_port_intr_disable(sc, pi->port_id);
	taskqueue_drain(sc->tq, &sc->slow_intr_task);
	taskqueue_drain(sc->tq, &sc->tick_task);

	callout_drain(&pi->link_check_ch);
	taskqueue_drain(sc->tq, &pi->link_check_task);

	PORT_LOCK(pi);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* disable pause frames */
	t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0);

	/* Reset RX FIFO HWM */
	t3_set_reg_field(sc, A_XGM_RXFIFO_CFG + pi->mac.offset,
	    V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0);

	DELAY(100 * 1000);

	/* Wait for TXFIFO empty */
	t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset,
	    F_TXFIFO_EMPTY, 1, 20, 5);

	DELAY(100 * 1000);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_RX);

	pi->phy.ops->power_down(&pi->phy, 1);
	PORT_UNLOCK(pi);

	pi->link_config.link_ok = 0;
	t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0);

	/* Last open port closed: quiesce the whole adapter. */
	if (sc->open_device_map == 0)
		cxgb_down(pi->adapter);

	return (0);
}

/*
 * Mark lro enabled or disabled in all qsets for this port
 */
static int
cxgb_set_lro(struct port_info *p, int enabled)
{
	int i;
	struct adapter *adp = p->adapter;
	struct sge_qset *q;

	for (i = 0; i < p->nqsets; i++) {
		q = &adp->sge.qs[p->first_qset + i];
		q->lro.enabled = (enabled != 0);
	}
	return (0);
}

/*
 * if_ioctl for cxgb ports.  Most cases take the adapter lock, bail out
 * with ENXIO/EBUSY if the port is doomed or the controller is busy, and
 * unlock before returning.  The `fail:' label (inside the first case but
 * reachable from later cases via goto) unlocks and returns `error'.
 */
static int
cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
	struct port_info *p = ifp->if_softc;
	struct adapter *sc = p->adapter;
	struct ifreq *ifr = (struct ifreq *)data;
	int flags, error = 0, mtu;
	uint32_t mask;

	switch (command) {
	case SIOCSIFMTU:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error) {
fail:
			ADAPTER_UNLOCK(sc);
			return (error);
		}

		mtu = ifr->ifr_mtu;
		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
			error = EINVAL;
		} else {
			ifp->if_mtu = mtu;
			PORT_LOCK(p);
			cxgb_update_mac_settings(p);
			PORT_UNLOCK(p);
		}
		ADAPTER_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		ADAPTER_LOCK(sc);
		if (IS_DOOMED(p)) {
			error = ENXIO;
			goto fail;
		}
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Re-program MAC only on PROMISC/ALLMULTI
				 * transitions; p->if_flags holds the last
				 * flags we acted on. */
				flags = p->if_flags;
				if (((ifp->if_flags ^ flags) & IFF_PROMISC) ||
				    ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) {
					if (IS_BUSY(sc)) {
						error = EBUSY;
						goto fail;
					}
					PORT_LOCK(p);
					cxgb_update_mac_settings(p);
					PORT_UNLOCK(p);
				}
				ADAPTER_UNLOCK(sc);
			} else
				error = cxgb_init_locked(p);
			p->if_flags = ifp->if_flags;
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			error = cxgb_uninit_locked(p);
		else
			ADAPTER_UNLOCK(sc);

		/* init/uninit_locked drop the lock themselves. */
		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error)
			goto fail;

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			PORT_LOCK(p);
			cxgb_update_mac_settings(p);
			PORT_UNLOCK(p);
		}
		ADAPTER_UNLOCK(sc);

		break;
	case SIOCSIFCAP:
		ADAPTER_LOCK(sc);
		error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
		if (error)
			goto fail;

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			/* TSO4 requires tx checksumming; drop it if txcsum
			 * was just turned off. */
			if (IFCAP_TSO4 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				if_printf(ifp,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
			ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & ifp->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				ifp->if_capenable &= ~IFCAP_TSO6;
				if_printf(ifp,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;

		/*
		 * Note that we leave CSUM_TSO alone (it is always set).  The
		 * kernel takes both IFCAP_TSOx and CSUM_TSO into account before
		 * sending a TSO request our way, so it's sufficient to toggle
		 * IFCAP_TSOx only.
		 */
		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum first.\n");
				error = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO4;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & ifp->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
				if_printf(ifp, "enable txcsum6 first.\n");
				error = EAGAIN;
				goto fail;
			}
			ifp->if_capenable ^= IFCAP_TSO6;
		}
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;

			/* Safe to do this even if cxgb_up not called yet */
			cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO);
		}
#ifdef TCP_OFFLOAD
		if (mask & IFCAP_TOE4) {
			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE4;

			error = toe_capability(p, enable);
			if (error == 0)
				ifp->if_capenable ^= mask;
		}
#endif
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(p);
				cxgb_update_mac_settings(p);
				PORT_UNLOCK(p);
			}
		}
		if (mask & IFCAP_VLAN_MTU) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				PORT_LOCK(p);
				cxgb_update_mac_settings(p);
				PORT_UNLOCK(p);
			}
		}
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_VLAN_HWCSUM)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;

#ifdef VLAN_CAPABILITIES
		VLAN_CAPABILITIES(ifp);
#endif
		ADAPTER_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* ifmedia handles its own locking. */
		error = ifmedia_ioctl(ifp, ifr, &p->media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	return (error);
}

/* Media cannot be changed by the user; selection is fixed by the PHY. */
static int
cxgb_media_change(struct ifnet *ifp)
{
	return (EOPNOTSUPP);
}

/*
 * Translates phy->modtype to the correct Ethernet media subtype.
 */
static int
cxgb_ifm_type(int mod)
{
	switch (mod) {
	case phy_modtype_sr:
		return (IFM_10G_SR);
	case phy_modtype_lr:
		return (IFM_10G_LR);
	case phy_modtype_lrm:
		return (IFM_10G_LRM);
	case phy_modtype_twinax:
		return (IFM_10G_TWINAX);
	case phy_modtype_twinax_long:
		return (IFM_10G_TWINAX_LONG);
	case phy_modtype_none:
		return (IFM_NONE);
	case phy_modtype_unknown:
		return (IFM_UNKNOWN);
	}

	KASSERT(0, ("%s: modtype %d unknown", __func__, mod));
	return (IFM_UNKNOWN);
}

/*
 * Rebuilds the ifmedia list for this port, and sets the current media.
 */
static void
cxgb_build_medialist(struct port_info *p)
{
	struct cphy *phy = &p->phy;
	struct ifmedia *media = &p->media;
	int mod = phy->modtype;
	int m = IFM_ETHER | IFM_FDX;

	PORT_LOCK(p);

	ifmedia_removeall(media);
	if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) {
		/* Copper (RJ45) */

		if (phy->caps & SUPPORTED_10000baseT_Full)
			ifmedia_add(media, m | IFM_10G_T, mod, NULL);

		if (phy->caps & SUPPORTED_1000baseT_Full)
			ifmedia_add(media, m | IFM_1000_T, mod, NULL);

		if (phy->caps & SUPPORTED_100baseT_Full)
			ifmedia_add(media, m | IFM_100_TX, mod, NULL);

		if (phy->caps & SUPPORTED_10baseT_Full)
			ifmedia_add(media, m | IFM_10_T, mod, NULL);

		ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL);
		ifmedia_set(media, IFM_ETHER | IFM_AUTO);

	} else if (phy->caps & SUPPORTED_TP) {
		/* Copper (CX4) */

		KASSERT(phy->caps & SUPPORTED_10000baseT_Full,
		    ("%s: unexpected cap 0x%x", __func__, phy->caps));

		ifmedia_add(media, m | IFM_10G_CX4, mod, NULL);
		ifmedia_set(media, m | IFM_10G_CX4);

	} else if (phy->caps & SUPPORTED_FIBRE &&
	    phy->caps & SUPPORTED_10000baseT_Full) {
		/* 10G optical (but includes SFP+ twinax) */

		m |= cxgb_ifm_type(mod);
		if (IFM_SUBTYPE(m) == IFM_NONE)
			m &= ~IFM_FDX;

		ifmedia_add(media, m, mod, NULL);
		ifmedia_set(media, m);

	} else if (phy->caps & SUPPORTED_FIBRE &&
	    phy->caps & SUPPORTED_1000baseT_Full) {
		/* 1G optical */

		/* XXX: Lie and claim to be SX, could actually be any 1G-X */
		ifmedia_add(media, m | IFM_1000_SX, mod, NULL);
		ifmedia_set(media, m | IFM_1000_SX);

	} else {
		KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__,
		    phy->caps));
	}

	PORT_UNLOCK(p);
}

/*
 * ifmedia status callback.  Rebuilds the media list if the detected
 * module type changed (ifm_data caches the modtype used for the list).
 */
static void
cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct port_info *p = ifp->if_softc;
	struct ifmedia_entry *cur = p->media.ifm_cur;
	int speed = p->link_config.speed;

	if (cur->ifm_data != p->phy.modtype) {
		cxgb_build_medialist(p);
		cur = p->media.ifm_cur;
	}

	ifmr->ifm_status = IFM_AVALID;
	if (!p->link_config.link_ok)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	/*
	 * active and current will differ iff current media is autoselect.  That
	 * can happen only for copper RJ45.
	 */
	if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO)
		return;
	KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg,
	    ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps));

	/* Report the autonegotiated speed as the active media. */
	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
	if (speed == SPEED_10000)
		ifmr->ifm_active |= IFM_10G_T;
	else if (speed == SPEED_1000)
		ifmr->ifm_active |= IFM_1000_T;
	else if (speed == SPEED_100)
		ifmr->ifm_active |= IFM_100_TX;
	else if (speed == SPEED_10)
		ifmr->ifm_active |= IFM_10_T;
	else
		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
		    speed));
}

/*
 * if_get_counter callback: serve interface counters from the MAC stats,
 * refreshed (rate-limited) via cxgb_refresh_stats().
 */
static uint64_t
cxgb_get_counter(struct ifnet *ifp, ift_counter c)
{
	struct port_info *pi = ifp->if_softc;
	struct adapter *sc = pi->adapter;
	struct cmac *mac = &pi->mac;
	struct mac_stats *mstats = &mac->stats;

	cxgb_refresh_stats(pi);

	switch (c) {
	case IFCOUNTER_IPACKETS:
		return (mstats->rx_frames);

	case IFCOUNTER_IERRORS:
		return (mstats->rx_jabber + mstats->rx_data_errs +
		    mstats->rx_sequence_errs + mstats->rx_runt +
		    mstats->rx_too_long + mstats->rx_mac_internal_errs +
		    mstats->rx_short + mstats->rx_fcs_errs);

	case IFCOUNTER_OPACKETS:
		return (mstats->tx_frames);

	case IFCOUNTER_OERRORS:
		return (mstats->tx_excess_collisions + mstats->tx_underrun +
		    mstats->tx_len_errs + mstats->tx_mac_internal_errs +
		    mstats->tx_excess_deferral + mstats->tx_fcs_errs);

	case IFCOUNTER_COLLISIONS:
		return (mstats->tx_total_collisions);

	case IFCOUNTER_IBYTES:
		return (mstats->rx_octets);

	case IFCOUNTER_OBYTES:
		return (mstats->tx_octets);

	case IFCOUNTER_IMCASTS:
		return (mstats->rx_mcast_frames);

	case IFCOUNTER_OMCASTS:
		return (mstats->tx_mcast_frames);

	case IFCOUNTER_IQDROPS:
		return (mstats->rx_cong_drops);

	case IFCOUNTER_OQDROPS: {
		int i;
		uint64_t drops;

		/* Sum software drops across this port's tx buf_rings. */
		drops = 0;
		if (sc->flags & FULL_INIT_DONE) {
			for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
				drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops;
		}

		return (drops);

	}

	default:
		return (if_get_counter_default(ifp, c));
	}
}

/*
 * Interrupt handler for slow (async) events: mask PL interrupts and
 * defer the real work to the slow_intr_task on the taskqueue.
 */
static void
cxgb_async_intr(void *data)
{
	adapter_t *sc = data;

	t3_write_reg(sc, A_PL_INT_ENABLE0, 0);
	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
	taskqueue_enqueue(sc->tq, &sc->slow_intr_task);
}

/* Callout handler: kick off a link check task for an open port. */
static void
link_check_callout(void *arg)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (!isset(&sc->open_device_map, pi->port_id))
		return;

	taskqueue_enqueue(sc->tq, &pi->link_check_task);
}

/* Task handler: query the hardware for link state changes. */
static void
check_link_status(void *arg, int pending)
{
	struct port_info *pi = arg;
	struct adapter *sc = pi->adapter;

	if (!isset(&sc->open_device_map, pi->port_id))
		return;

	t3_link_changed(sc, pi->port_id);

	/*
	 * Keep polling (1s period) while the link is faulted, down, or the
	 * PHY cannot raise a link interrupt at all.
	 */
	if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) ||
	    pi->link_config.link_ok == 0)
		callout_reset(&pi->link_check_ch, hz, link_check_callout, pi);
}

void
t3_os_link_intr(struct port_info *pi)
{
	/*
	 * Schedule a link check in the near future.  If the link is flapping
	 * rapidly we'll keep resetting the callout and delaying the check until
	 * things stabilize a bit.
	 */
	callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi);
}

/*
 * T3B2 MAC watchdog: run t3b2_mac_watchdog_task() on every open port
 * with a healthy link and recover the MAC (toggle or full reset) as it
 * directs.  Called from the tick handler on affected hardware only.
 */
static void
check_t3b2_mac(struct adapter *sc)
{
	int i;

	if (sc->flags & CXGB_SHUTDOWN)
		return;

	for_each_port(sc, i) {
		struct port_info *p = &sc->port[i];
		int status;
#ifdef INVARIANTS
		struct ifnet *ifp = p->ifp;
#endif

		if (!isset(&sc->open_device_map, p->port_id) || p->link_fault ||
		    !p->link_config.link_ok)
			continue;

		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
		    ("%s: state mismatch (drv_flags %x, device_map %x)",
		    __func__, ifp->if_drv_flags, sc->open_device_map));

		PORT_LOCK(p);
		status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			/* watchdog asked for a full MAC reinit */
			struct cmac *mac = &p->mac;

			cxgb_update_mac_settings(p);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(sc, p->port_id);
			p->mac.stats.num_resets++;
		}
		PORT_UNLOCK(p);
	}
}

/* Periodic (1s) callout: defer the actual work to the tick task. */
static void
cxgb_tick(void *arg)
{
	adapter_t *sc = (adapter_t *)arg;

	if (sc->flags & CXGB_SHUTDOWN)
		return;

	taskqueue_enqueue(sc->tq, &sc->tick_task);
	callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc);
}

/*
 * Pull fresh MAC statistics from the hardware, rate-limited to once
 * every 250ms per port.
 */
void
cxgb_refresh_stats(struct port_info *pi)
{
	struct timeval tv;
	const struct timeval interval = {0, 250000};	/* 250ms */

	getmicrotime(&tv);
	timevalsub(&tv, &interval);
	if (timevalcmp(&tv, &pi->last_refreshed, <))
		return;

	PORT_LOCK(pi);
	t3_mac_update_stats(&pi->mac);
	PORT_UNLOCK(pi);
	getmicrotime(&pi->last_refreshed);
}

/*
 * Tick task: per-second housekeeping — T3B2 MAC watchdog, SGE
 * starvation/empty-freelist accounting, stats refresh, and rx fifo
 * overflow counting.
 */
static void
cxgb_tick_handler(void *arg, int count)
{
	adapter_t *sc = (adapter_t *)arg;
	const struct adapter_params *p = &sc->params;
	int i;
	uint32_t cause, reset;

	if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE))
		return;

	if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map)
		check_t3b2_mac(sc);

	cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE |
	    F_FLEMPTY);
	if (cause) {
		struct sge_qset *qs = &sc->sge.qs[0];
		uint32_t mask, v;

		v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00;

		/* One status bit per qset: response queue starvation. */
		mask = 1;
		for (i = 0; i < SGE_QSETS; i++) {
			if (v & mask)
				qs[i].rspq.starved++;
			mask <<= 1;
		}

		mask <<= SGE_QSETS; /* skip RSPQXDISABLED */

		/* Two bits per qset: one per free list. */
		for (i = 0; i < SGE_QSETS * 2; i++) {
			if (v & mask) {
				qs[i / 2].fl[i % 2].empty++;
			}
			mask <<= 1;
		}

		/* clear */
		t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v);
		t3_write_reg(sc, A_SG_INT_CAUSE, cause);
	}

	for (i = 0; i < sc->params.nports; i++) {
		struct port_info *pi = &sc->port[i];
		struct cmac *mac = &pi->mac;

		if (!isset(&sc->open_device_map, pi->port_id))
			continue;

		cxgb_refresh_stats(pi);

		if (mac->multiport)
			continue;

		/* Count rx fifo overflows, once per second */
		cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}
		t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset);
	}
}

/*
 * Read-back/write-back of the 64-bit BAR registers; the body is
 * deliberately compiled out (see the #if) and kept for reference.
 */
static void
touch_bars(device_t dev)
{
	/*
	 * Don't enable yet
	 */
#if !defined(__LP64__) && 0
	u32 v;

	pci_read_config_dword(pdev,
	    PCI_BASE_ADDRESS_1, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v);
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v);
#endif
}

/*
 * Write `len' bytes at `offset' into the serial EEPROM.  The EEPROM is
 * accessed in 4-byte words, so an unaligned request is widened: the
 * bordering words are read first, the new data is merged into a bounce
 * buffer, and the whole aligned span is written back.  Write protection
 * is dropped for the duration and re-enabled afterwards.
 *
 * Returns 0 on success or an errno from the seeprom primitives.
 */
static int
set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
{
	uint8_t *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = pi->adapter;


	aligned_offset = offset & ~3;
	aligned_len = (len + (offset & 3) + 3) & ~3;

	if (aligned_offset != offset || aligned_len != len) {
		/* Unaligned: read-modify-write through a bounce buffer. */
		buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
		if (!buf)
			return (ENOMEM);
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
			    aligned_offset + aligned_len - 4,
			    (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (offset & 3), data, len);
	} else
		buf = (uint8_t *)(uintptr_t)data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	/* Re-arm write protection. */
	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	/* Free the bounce buffer only if one was allocated. */
	if (buf != data)
		free(buf, M_DEVBUF);
	return err;
}


/*
 * Range check used by the ioctl paths: a negative value means
 * "unspecified" and is accepted; otherwise val must be in [lo, hi].
 */
static int
in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}

static int
cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td)
{
	return (0);
}

static int
cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

/*
 * Privileged ioctl interface on the adapter's control cdev.  All
 * commands require PRIV_DRIVER (or suser on older kernels).
 */
static int
cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	int mmd, error = 0;
	struct port_info *pi = dev->si_drv1;
	adapter_t *sc = pi->adapter;

#ifdef PRIV_SUPPORTED
	if (priv_check(td, PRIV_DRIVER)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#else
	if (suser(td)) {
		if (cxgb_debug)
			printf("user does not have access to privileged ioctls\n");
		return (EPERM);
	}
#endif

	switch (cmd) {
	case CHELSIO_GET_MIIREG: {
		uint32_t val;
		struct cphy *phy = &pi->phy;
		struct ch_mii_data *mid = (struct ch_mii_data *)data;

		if (!phy->mdio_read)
			return (EOPNOTSUPP);
		if (is_10G(sc)) {
			mmd = mid->phy_id >>
8; 2555167514Skmacy if (!mmd) 2556167514Skmacy mmd = MDIO_DEV_PCS; 2557190330Sgnn else if (mmd > MDIO_DEV_VEND2) 2558171471Skmacy return (EINVAL); 2559167514Skmacy 2560167514Skmacy error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd, 2561167514Skmacy mid->reg_num, &val); 2562167514Skmacy } else 2563167514Skmacy error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0, 2564167514Skmacy mid->reg_num & 0x1f, &val); 2565167514Skmacy if (error == 0) 2566167514Skmacy mid->val_out = val; 2567167514Skmacy break; 2568167514Skmacy } 2569182679Skmacy case CHELSIO_SET_MIIREG: { 2570167514Skmacy struct cphy *phy = &pi->phy; 2571182679Skmacy struct ch_mii_data *mid = (struct ch_mii_data *)data; 2572167514Skmacy 2573167514Skmacy if (!phy->mdio_write) 2574167514Skmacy return (EOPNOTSUPP); 2575167514Skmacy if (is_10G(sc)) { 2576167514Skmacy mmd = mid->phy_id >> 8; 2577167514Skmacy if (!mmd) 2578167514Skmacy mmd = MDIO_DEV_PCS; 2579190330Sgnn else if (mmd > MDIO_DEV_VEND2) 2580167514Skmacy return (EINVAL); 2581167514Skmacy 2582167514Skmacy error = phy->mdio_write(sc, mid->phy_id & 0x1f, 2583167514Skmacy mmd, mid->reg_num, mid->val_in); 2584167514Skmacy } else 2585167514Skmacy error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0, 2586167514Skmacy mid->reg_num & 0x1f, 2587167514Skmacy mid->val_in); 2588167514Skmacy break; 2589167514Skmacy } 2590167514Skmacy case CHELSIO_SETREG: { 2591167514Skmacy struct ch_reg *edata = (struct ch_reg *)data; 2592167514Skmacy if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 2593167514Skmacy return (EFAULT); 2594167514Skmacy t3_write_reg(sc, edata->addr, edata->val); 2595167514Skmacy break; 2596167514Skmacy } 2597167514Skmacy case CHELSIO_GETREG: { 2598167514Skmacy struct ch_reg *edata = (struct ch_reg *)data; 2599167514Skmacy if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) 2600167514Skmacy return (EFAULT); 2601167514Skmacy edata->val = t3_read_reg(sc, edata->addr); 2602167514Skmacy break; 2603167514Skmacy } 2604167514Skmacy case 
CHELSIO_GET_SGE_CONTEXT: { 2605167514Skmacy struct ch_cntxt *ecntxt = (struct ch_cntxt *)data; 2606176472Skmacy mtx_lock_spin(&sc->sge.reg_lock); 2607167514Skmacy switch (ecntxt->cntxt_type) { 2608167514Skmacy case CNTXT_TYPE_EGRESS: 2609182679Skmacy error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id, 2610167514Skmacy ecntxt->data); 2611167514Skmacy break; 2612167514Skmacy case CNTXT_TYPE_FL: 2613182679Skmacy error = -t3_sge_read_fl(sc, ecntxt->cntxt_id, 2614167514Skmacy ecntxt->data); 2615167514Skmacy break; 2616167514Skmacy case CNTXT_TYPE_RSP: 2617182679Skmacy error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id, 2618167514Skmacy ecntxt->data); 2619167514Skmacy break; 2620167514Skmacy case CNTXT_TYPE_CQ: 2621182679Skmacy error = -t3_sge_read_cq(sc, ecntxt->cntxt_id, 2622167514Skmacy ecntxt->data); 2623167514Skmacy break; 2624167514Skmacy default: 2625167514Skmacy error = EINVAL; 2626167514Skmacy break; 2627167514Skmacy } 2628176472Skmacy mtx_unlock_spin(&sc->sge.reg_lock); 2629167514Skmacy break; 2630167514Skmacy } 2631167514Skmacy case CHELSIO_GET_SGE_DESC: { 2632167514Skmacy struct ch_desc *edesc = (struct ch_desc *)data; 2633167514Skmacy int ret; 2634167514Skmacy if (edesc->queue_num >= SGE_QSETS * 6) 2635167514Skmacy return (EINVAL); 2636167514Skmacy ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6], 2637167514Skmacy edesc->queue_num % 6, edesc->idx, edesc->data); 2638167514Skmacy if (ret < 0) 2639167514Skmacy return (EINVAL); 2640167514Skmacy edesc->size = ret; 2641167514Skmacy break; 2642167514Skmacy } 2643182679Skmacy case CHELSIO_GET_QSET_PARAMS: { 2644167514Skmacy struct qset_params *q; 2645167514Skmacy struct ch_qset_params *t = (struct ch_qset_params *)data; 2646182679Skmacy int q1 = pi->first_qset; 2647182679Skmacy int nqsets = pi->nqsets; 2648176472Skmacy int i; 2649176472Skmacy 2650182679Skmacy if (t->qset_idx >= nqsets) 2651182679Skmacy return EINVAL; 2652167514Skmacy 2653182679Skmacy i = q1 + t->qset_idx; 2654182679Skmacy q = &sc->params.sge.qset[i]; 
2655167514Skmacy t->rspq_size = q->rspq_size; 2656167514Skmacy t->txq_size[0] = q->txq_size[0]; 2657167514Skmacy t->txq_size[1] = q->txq_size[1]; 2658167514Skmacy t->txq_size[2] = q->txq_size[2]; 2659167514Skmacy t->fl_size[0] = q->fl_size; 2660167514Skmacy t->fl_size[1] = q->jumbo_size; 2661167514Skmacy t->polling = q->polling; 2662182679Skmacy t->lro = q->lro; 2663180583Skmacy t->intr_lat = q->coalesce_usecs; 2664167514Skmacy t->cong_thres = q->cong_thres; 2665182679Skmacy t->qnum = i; 2666182679Skmacy 2667205946Snp if ((sc->flags & FULL_INIT_DONE) == 0) 2668205946Snp t->vector = 0; 2669205946Snp else if (sc->flags & USING_MSIX) 2670182679Skmacy t->vector = rman_get_start(sc->msix_irq_res[i]); 2671182679Skmacy else 2672182679Skmacy t->vector = rman_get_start(sc->irq_res); 2673182679Skmacy 2674167514Skmacy break; 2675167514Skmacy } 2676182679Skmacy case CHELSIO_GET_QSET_NUM: { 2677167514Skmacy struct ch_reg *edata = (struct ch_reg *)data; 2678182679Skmacy edata->val = pi->nqsets; 2679182679Skmacy break; 2680182679Skmacy } 2681182679Skmacy case CHELSIO_LOAD_FW: { 2682182679Skmacy uint8_t *fw_data; 2683182679Skmacy uint32_t vers; 2684182679Skmacy struct ch_mem_range *t = (struct ch_mem_range *)data; 2685182679Skmacy 2686167514Skmacy /* 2687182679Skmacy * You're allowed to load a firmware only before FULL_INIT_DONE 2688182679Skmacy * 2689182679Skmacy * FW_UPTODATE is also set so the rest of the initialization 2690182679Skmacy * will not overwrite what was loaded here. This gives you the 2691182679Skmacy * flexibility to load any firmware (and maybe shoot yourself in 2692182679Skmacy * the foot). 
2693167514Skmacy */ 2694182679Skmacy 2695182679Skmacy ADAPTER_LOCK(sc); 2696182679Skmacy if (sc->open_device_map || sc->flags & FULL_INIT_DONE) { 2697182679Skmacy ADAPTER_UNLOCK(sc); 2698182679Skmacy return (EBUSY); 2699182679Skmacy } 2700182679Skmacy 2701182679Skmacy fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT); 2702182679Skmacy if (!fw_data) 2703182679Skmacy error = ENOMEM; 2704182679Skmacy else 2705182679Skmacy error = copyin(t->buf, fw_data, t->len); 2706182679Skmacy 2707182679Skmacy if (!error) 2708182679Skmacy error = -t3_load_fw(sc, fw_data, t->len); 2709182679Skmacy 2710182679Skmacy if (t3_get_fw_version(sc, &vers) == 0) { 2711182679Skmacy snprintf(&sc->fw_version[0], sizeof(sc->fw_version), 2712182679Skmacy "%d.%d.%d", G_FW_VERSION_MAJOR(vers), 2713182679Skmacy G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers)); 2714182679Skmacy } 2715182679Skmacy 2716182679Skmacy if (!error) 2717182679Skmacy sc->flags |= FW_UPTODATE; 2718182679Skmacy 2719182679Skmacy free(fw_data, M_DEVBUF); 2720182679Skmacy ADAPTER_UNLOCK(sc); 2721167514Skmacy break; 2722167514Skmacy } 2723182679Skmacy case CHELSIO_LOAD_BOOT: { 2724182679Skmacy uint8_t *boot_data; 2725182679Skmacy struct ch_mem_range *t = (struct ch_mem_range *)data; 2726182679Skmacy 2727182679Skmacy boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT); 2728182679Skmacy if (!boot_data) 2729182679Skmacy return ENOMEM; 2730182679Skmacy 2731182679Skmacy error = copyin(t->buf, boot_data, t->len); 2732182679Skmacy if (!error) 2733182679Skmacy error = -t3_load_boot(sc, boot_data, t->len); 2734182679Skmacy 2735182679Skmacy free(boot_data, M_DEVBUF); 2736167514Skmacy break; 2737167514Skmacy } 2738182679Skmacy case CHELSIO_GET_PM: { 2739182679Skmacy struct ch_pm *m = (struct ch_pm *)data; 2740182679Skmacy struct tp_params *p = &sc->params.tp; 2741182679Skmacy 2742182679Skmacy if (!is_offload(sc)) 2743182679Skmacy return (EOPNOTSUPP); 2744182679Skmacy 2745182679Skmacy m->tx_pg_sz = p->tx_pg_size; 2746182679Skmacy m->tx_num_pg = 
p->tx_num_pgs; 2747182679Skmacy m->rx_pg_sz = p->rx_pg_size; 2748182679Skmacy m->rx_num_pg = p->rx_num_pgs; 2749182679Skmacy m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan; 2750182679Skmacy 2751167514Skmacy break; 2752182679Skmacy } 2753182679Skmacy case CHELSIO_SET_PM: { 2754182679Skmacy struct ch_pm *m = (struct ch_pm *)data; 2755182679Skmacy struct tp_params *p = &sc->params.tp; 2756182679Skmacy 2757182679Skmacy if (!is_offload(sc)) 2758182679Skmacy return (EOPNOTSUPP); 2759182679Skmacy if (sc->flags & FULL_INIT_DONE) 2760182679Skmacy return (EBUSY); 2761182679Skmacy 2762182679Skmacy if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) || 2763182679Skmacy !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1))) 2764182679Skmacy return (EINVAL); /* not power of 2 */ 2765182679Skmacy if (!(m->rx_pg_sz & 0x14000)) 2766182679Skmacy return (EINVAL); /* not 16KB or 64KB */ 2767182679Skmacy if (!(m->tx_pg_sz & 0x1554000)) 2768182679Skmacy return (EINVAL); 2769182679Skmacy if (m->tx_num_pg == -1) 2770182679Skmacy m->tx_num_pg = p->tx_num_pgs; 2771182679Skmacy if (m->rx_num_pg == -1) 2772182679Skmacy m->rx_num_pg = p->rx_num_pgs; 2773182679Skmacy if (m->tx_num_pg % 24 || m->rx_num_pg % 24) 2774182679Skmacy return (EINVAL); 2775182679Skmacy if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size || 2776182679Skmacy m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size) 2777182679Skmacy return (EINVAL); 2778182679Skmacy 2779182679Skmacy p->rx_pg_size = m->rx_pg_sz; 2780182679Skmacy p->tx_pg_size = m->tx_pg_sz; 2781182679Skmacy p->rx_num_pgs = m->rx_num_pg; 2782182679Skmacy p->tx_num_pgs = m->tx_num_pg; 2783182679Skmacy break; 2784182679Skmacy } 2785169978Skmacy case CHELSIO_SETMTUTAB: { 2786169978Skmacy struct ch_mtus *m = (struct ch_mtus *)data; 2787169978Skmacy int i; 2788169978Skmacy 2789169978Skmacy if (!is_offload(sc)) 2790169978Skmacy return (EOPNOTSUPP); 2791169978Skmacy if (offload_running(sc)) 2792169978Skmacy return (EBUSY); 2793169978Skmacy if (m->nmtus != NMTUS) 
2794169978Skmacy return (EINVAL); 2795169978Skmacy if (m->mtus[0] < 81) /* accommodate SACK */ 2796169978Skmacy return (EINVAL); 2797169978Skmacy 2798169978Skmacy /* 2799169978Skmacy * MTUs must be in ascending order 2800169978Skmacy */ 2801169978Skmacy for (i = 1; i < NMTUS; ++i) 2802169978Skmacy if (m->mtus[i] < m->mtus[i - 1]) 2803169978Skmacy return (EINVAL); 2804169978Skmacy 2805182679Skmacy memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus)); 2806169978Skmacy break; 2807169978Skmacy } 2808169978Skmacy case CHELSIO_GETMTUTAB: { 2809169978Skmacy struct ch_mtus *m = (struct ch_mtus *)data; 2810169978Skmacy 2811169978Skmacy if (!is_offload(sc)) 2812169978Skmacy return (EOPNOTSUPP); 2813169978Skmacy 2814169978Skmacy memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus)); 2815169978Skmacy m->nmtus = NMTUS; 2816169978Skmacy break; 2817171471Skmacy } 2818167514Skmacy case CHELSIO_GET_MEM: { 2819167514Skmacy struct ch_mem_range *t = (struct ch_mem_range *)data; 2820167514Skmacy struct mc7 *mem; 2821167514Skmacy uint8_t *useraddr; 2822167514Skmacy u64 buf[32]; 2823182679Skmacy 2824182679Skmacy /* 2825218909Sbrucec * Use these to avoid modifying len/addr in the return 2826182679Skmacy * struct 2827182679Skmacy */ 2828182679Skmacy uint32_t len = t->len, addr = t->addr; 2829182679Skmacy 2830167514Skmacy if (!is_offload(sc)) 2831167514Skmacy return (EOPNOTSUPP); 2832167514Skmacy if (!(sc->flags & FULL_INIT_DONE)) 2833167514Skmacy return (EIO); /* need the memory controllers */ 2834182679Skmacy if ((addr & 0x7) || (len & 0x7)) 2835167514Skmacy return (EINVAL); 2836167514Skmacy if (t->mem_id == MEM_CM) 2837167514Skmacy mem = &sc->cm; 2838167514Skmacy else if (t->mem_id == MEM_PMRX) 2839167514Skmacy mem = &sc->pmrx; 2840167514Skmacy else if (t->mem_id == MEM_PMTX) 2841167514Skmacy mem = &sc->pmtx; 2842167514Skmacy else 2843167514Skmacy return (EINVAL); 2844167514Skmacy 2845167514Skmacy /* 2846167514Skmacy * Version scheme: 2847167514Skmacy * bits 0..9: chip version 
2848167514Skmacy * bits 10..15: chip revision 2849167514Skmacy */ 2850167514Skmacy t->version = 3 | (sc->params.rev << 10); 2851167514Skmacy 2852167514Skmacy /* 2853167514Skmacy * Read 256 bytes at a time as len can be large and we don't 2854167514Skmacy * want to use huge intermediate buffers. 2855167514Skmacy */ 2856174708Skmacy useraddr = (uint8_t *)t->buf; 2857182679Skmacy while (len) { 2858182679Skmacy unsigned int chunk = min(len, sizeof(buf)); 2859167514Skmacy 2860182679Skmacy error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf); 2861167514Skmacy if (error) 2862167514Skmacy return (-error); 2863167514Skmacy if (copyout(buf, useraddr, chunk)) 2864167514Skmacy return (EFAULT); 2865167514Skmacy useraddr += chunk; 2866182679Skmacy addr += chunk; 2867182679Skmacy len -= chunk; 2868167514Skmacy } 2869167514Skmacy break; 2870167514Skmacy } 2871169978Skmacy case CHELSIO_READ_TCAM_WORD: { 2872169978Skmacy struct ch_tcam_word *t = (struct ch_tcam_word *)data; 2873169978Skmacy 2874169978Skmacy if (!is_offload(sc)) 2875169978Skmacy return (EOPNOTSUPP); 2876171471Skmacy if (!(sc->flags & FULL_INIT_DONE)) 2877171471Skmacy return (EIO); /* need MC5 */ 2878169978Skmacy return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf); 2879169978Skmacy break; 2880169978Skmacy } 2881167514Skmacy case CHELSIO_SET_TRACE_FILTER: { 2882167514Skmacy struct ch_trace *t = (struct ch_trace *)data; 2883167514Skmacy const struct trace_params *tp; 2884167514Skmacy 2885167514Skmacy tp = (const struct trace_params *)&t->sip; 2886167514Skmacy if (t->config_tx) 2887167514Skmacy t3_config_trace_filter(sc, tp, 0, t->invert_match, 2888167514Skmacy t->trace_tx); 2889167514Skmacy if (t->config_rx) 2890167514Skmacy t3_config_trace_filter(sc, tp, 1, t->invert_match, 2891167514Skmacy t->trace_rx); 2892167514Skmacy break; 2893167514Skmacy } 2894167514Skmacy case CHELSIO_SET_PKTSCHED: { 2895167514Skmacy struct ch_pktsched_params *p = (struct ch_pktsched_params *)data; 2896167514Skmacy if (sc->open_device_map 
== 0) 2897167514Skmacy return (EAGAIN); 2898167514Skmacy send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max, 2899167514Skmacy p->binding); 2900167514Skmacy break; 2901167514Skmacy } 2902167514Skmacy case CHELSIO_IFCONF_GETREGS: { 2903182679Skmacy struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data; 2904167514Skmacy int reglen = cxgb_get_regs_len(); 2905182679Skmacy uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT); 2906167514Skmacy if (buf == NULL) { 2907167514Skmacy return (ENOMEM); 2908182679Skmacy } 2909182679Skmacy if (regs->len > reglen) 2910167514Skmacy regs->len = reglen; 2911182679Skmacy else if (regs->len < reglen) 2912189643Sgnn error = ENOBUFS; 2913182679Skmacy 2914182679Skmacy if (!error) { 2915182679Skmacy cxgb_get_regs(sc, regs, buf); 2916182679Skmacy error = copyout(buf, regs->data, reglen); 2917167514Skmacy } 2918167514Skmacy free(buf, M_DEVBUF); 2919167514Skmacy 2920167514Skmacy break; 2921167514Skmacy } 2922169978Skmacy case CHELSIO_SET_HW_SCHED: { 2923169978Skmacy struct ch_hw_sched *t = (struct ch_hw_sched *)data; 2924169978Skmacy unsigned int ticks_per_usec = core_ticks_per_usec(sc); 2925169978Skmacy 2926169978Skmacy if ((sc->flags & FULL_INIT_DONE) == 0) 2927169978Skmacy return (EAGAIN); /* need TP to be initialized */ 2928169978Skmacy if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) || 2929169978Skmacy !in_range(t->channel, 0, 1) || 2930169978Skmacy !in_range(t->kbps, 0, 10000000) || 2931169978Skmacy !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) || 2932169978Skmacy !in_range(t->flow_ipg, 0, 2933169978Skmacy dack_ticks_to_usec(sc, 0x7ff))) 2934169978Skmacy return (EINVAL); 2935169978Skmacy 2936169978Skmacy if (t->kbps >= 0) { 2937169978Skmacy error = t3_config_sched(sc, t->kbps, t->sched); 2938169978Skmacy if (error < 0) 2939169978Skmacy return (-error); 2940169978Skmacy } 2941169978Skmacy if (t->class_ipg >= 0) 2942169978Skmacy t3_set_sched_ipg(sc, t->sched, t->class_ipg); 2943169978Skmacy if (t->flow_ipg >= 0) { 
2944169978Skmacy t->flow_ipg *= 1000; /* us -> ns */ 2945169978Skmacy t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1); 2946169978Skmacy } 2947169978Skmacy if (t->mode >= 0) { 2948169978Skmacy int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched); 2949169978Skmacy 2950169978Skmacy t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP, 2951169978Skmacy bit, t->mode ? bit : 0); 2952169978Skmacy } 2953169978Skmacy if (t->channel >= 0) 2954169978Skmacy t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP, 2955169978Skmacy 1 << t->sched, t->channel << t->sched); 2956169978Skmacy break; 2957182679Skmacy } 2958182679Skmacy case CHELSIO_GET_EEPROM: { 2959182679Skmacy int i; 2960182679Skmacy struct ch_eeprom *e = (struct ch_eeprom *)data; 2961182679Skmacy uint8_t *buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT); 2962182679Skmacy 2963182679Skmacy if (buf == NULL) { 2964182679Skmacy return (ENOMEM); 2965182679Skmacy } 2966182679Skmacy e->magic = EEPROM_MAGIC; 2967182679Skmacy for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4) 2968182679Skmacy error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]); 2969182679Skmacy 2970182679Skmacy if (!error) 2971182679Skmacy error = copyout(buf + e->offset, e->data, e->len); 2972182679Skmacy 2973182679Skmacy free(buf, M_DEVBUF); 2974182679Skmacy break; 2975182679Skmacy } 2976182679Skmacy case CHELSIO_CLEAR_STATS: { 2977182679Skmacy if (!(sc->flags & FULL_INIT_DONE)) 2978182679Skmacy return EAGAIN; 2979182679Skmacy 2980182679Skmacy PORT_LOCK(pi); 2981182679Skmacy t3_mac_update_stats(&pi->mac); 2982182679Skmacy memset(&pi->mac.stats, 0, sizeof(pi->mac.stats)); 2983182679Skmacy PORT_UNLOCK(pi); 2984182679Skmacy break; 2985182679Skmacy } 2986189643Sgnn case CHELSIO_GET_UP_LA: { 2987189643Sgnn struct ch_up_la *la = (struct ch_up_la *)data; 2988189643Sgnn uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT); 2989189643Sgnn if (buf == NULL) { 2990189643Sgnn return (ENOMEM); 2991189643Sgnn } 2992189643Sgnn if (la->bufsize < LA_BUFSIZE) 2993189643Sgnn error = 
ENOBUFS; 2994189643Sgnn 2995189643Sgnn if (!error) 2996189643Sgnn error = -t3_get_up_la(sc, &la->stopped, &la->idx, 2997189643Sgnn &la->bufsize, buf); 2998189643Sgnn if (!error) 2999189643Sgnn error = copyout(buf, la->data, la->bufsize); 3000189643Sgnn 3001189643Sgnn free(buf, M_DEVBUF); 3002189643Sgnn break; 3003189643Sgnn } 3004189643Sgnn case CHELSIO_GET_UP_IOQS: { 3005189643Sgnn struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data; 3006189643Sgnn uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT); 3007189643Sgnn uint32_t *v; 3008189643Sgnn 3009189643Sgnn if (buf == NULL) { 3010189643Sgnn return (ENOMEM); 3011189643Sgnn } 3012189643Sgnn if (ioqs->bufsize < IOQS_BUFSIZE) 3013189643Sgnn error = ENOBUFS; 3014189643Sgnn 3015189643Sgnn if (!error) 3016189643Sgnn error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf); 3017189643Sgnn 3018189643Sgnn if (!error) { 3019189643Sgnn v = (uint32_t *)buf; 3020189643Sgnn 3021189643Sgnn ioqs->ioq_rx_enable = *v++; 3022189643Sgnn ioqs->ioq_tx_enable = *v++; 3023189643Sgnn ioqs->ioq_rx_status = *v++; 3024189643Sgnn ioqs->ioq_tx_status = *v++; 3025189643Sgnn 3026189643Sgnn error = copyout(v, ioqs->data, ioqs->bufsize); 3027189643Sgnn } 3028189643Sgnn 3029189643Sgnn free(buf, M_DEVBUF); 3030189643Sgnn break; 3031189643Sgnn } 3032207643Snp case CHELSIO_SET_FILTER: { 3033241844Seadler struct ch_filter *f = (struct ch_filter *)data; 3034207643Snp struct filter_info *p; 3035207643Snp unsigned int nfilters = sc->params.mc5.nfilters; 3036207643Snp 3037207643Snp if (!is_offload(sc)) 3038207643Snp return (EOPNOTSUPP); /* No TCAM */ 3039207643Snp if (!(sc->flags & FULL_INIT_DONE)) 3040207643Snp return (EAGAIN); /* mc5 not setup yet */ 3041207643Snp if (nfilters == 0) 3042207643Snp return (EBUSY); /* TOE will use TCAM */ 3043207643Snp 3044207643Snp /* sanity checks */ 3045207643Snp if (f->filter_id >= nfilters || 3046207643Snp (f->val.dip && f->mask.dip != 0xffffffff) || 3047207643Snp (f->val.sport && f->mask.sport != 0xffff) || 3048207643Snp 
(f->val.dport && f->mask.dport != 0xffff) || 3049207643Snp (f->val.vlan && f->mask.vlan != 0xfff) || 3050207643Snp (f->val.vlan_prio && 3051207643Snp f->mask.vlan_prio != FILTER_NO_VLAN_PRI) || 3052207643Snp (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) || 3053207643Snp f->qset >= SGE_QSETS || 3054207643Snp sc->rrss_map[f->qset] >= RSS_TABLE_SIZE) 3055207643Snp return (EINVAL); 3056207643Snp 3057207643Snp /* Was allocated with M_WAITOK */ 3058207643Snp KASSERT(sc->filters, ("filter table NULL\n")); 3059207643Snp 3060207643Snp p = &sc->filters[f->filter_id]; 3061207643Snp if (p->locked) 3062207643Snp return (EPERM); 3063207643Snp 3064207643Snp bzero(p, sizeof(*p)); 3065207643Snp p->sip = f->val.sip; 3066207643Snp p->sip_mask = f->mask.sip; 3067207643Snp p->dip = f->val.dip; 3068207643Snp p->sport = f->val.sport; 3069207643Snp p->dport = f->val.dport; 3070207643Snp p->vlan = f->mask.vlan ? f->val.vlan : 0xfff; 3071207643Snp p->vlan_prio = f->mask.vlan_prio ? (f->val.vlan_prio & 6) : 3072207643Snp FILTER_NO_VLAN_PRI; 3073207643Snp p->mac_hit = f->mac_hit; 3074207643Snp p->mac_vld = f->mac_addr_idx != 0xffff; 3075207643Snp p->mac_idx = f->mac_addr_idx; 3076207643Snp p->pkt_type = f->proto; 3077207643Snp p->report_filter_id = f->want_filter_id; 3078207643Snp p->pass = f->pass; 3079207643Snp p->rss = f->rss; 3080207643Snp p->qset = f->qset; 3081207643Snp 3082207643Snp error = set_filter(sc, f->filter_id, p); 3083207643Snp if (error == 0) 3084207643Snp p->valid = 1; 3085207643Snp break; 3086207643Snp } 3087207643Snp case CHELSIO_DEL_FILTER: { 3088207643Snp struct ch_filter *f = (struct ch_filter *)data; 3089207643Snp struct filter_info *p; 3090207643Snp unsigned int nfilters = sc->params.mc5.nfilters; 3091207643Snp 3092207643Snp if (!is_offload(sc)) 3093207643Snp return (EOPNOTSUPP); 3094207643Snp if (!(sc->flags & FULL_INIT_DONE)) 3095207643Snp return (EAGAIN); 3096207643Snp if (nfilters == 0 || sc->filters == NULL) 3097207643Snp return (EINVAL); 3098207643Snp if 
(f->filter_id >= nfilters) 3099207643Snp return (EINVAL); 3100207643Snp 3101207643Snp p = &sc->filters[f->filter_id]; 3102207643Snp if (p->locked) 3103207643Snp return (EPERM); 3104207643Snp if (!p->valid) 3105207643Snp return (EFAULT); /* Read "Bad address" as "Bad index" */ 3106207643Snp 3107207643Snp bzero(p, sizeof(*p)); 3108207643Snp p->sip = p->sip_mask = 0xffffffff; 3109207643Snp p->vlan = 0xfff; 3110207643Snp p->vlan_prio = FILTER_NO_VLAN_PRI; 3111207643Snp p->pkt_type = 1; 3112207643Snp error = set_filter(sc, f->filter_id, p); 3113207643Snp break; 3114207643Snp } 3115207643Snp case CHELSIO_GET_FILTER: { 3116207643Snp struct ch_filter *f = (struct ch_filter *)data; 3117207643Snp struct filter_info *p; 3118207643Snp unsigned int i, nfilters = sc->params.mc5.nfilters; 3119207643Snp 3120207643Snp if (!is_offload(sc)) 3121207643Snp return (EOPNOTSUPP); 3122207643Snp if (!(sc->flags & FULL_INIT_DONE)) 3123207643Snp return (EAGAIN); 3124207643Snp if (nfilters == 0 || sc->filters == NULL) 3125207643Snp return (EINVAL); 3126207643Snp 3127207643Snp i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1; 3128207643Snp for (; i < nfilters; i++) { 3129207643Snp p = &sc->filters[i]; 3130207643Snp if (!p->valid) 3131207643Snp continue; 3132207643Snp 3133207643Snp bzero(f, sizeof(*f)); 3134207643Snp 3135207643Snp f->filter_id = i; 3136207643Snp f->val.sip = p->sip; 3137207643Snp f->mask.sip = p->sip_mask; 3138207643Snp f->val.dip = p->dip; 3139207643Snp f->mask.dip = p->dip ? 0xffffffff : 0; 3140207643Snp f->val.sport = p->sport; 3141207643Snp f->mask.sport = p->sport ? 0xffff : 0; 3142207643Snp f->val.dport = p->dport; 3143207643Snp f->mask.dport = p->dport ? 0xffff : 0; 3144207643Snp f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan; 3145207643Snp f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff; 3146207643Snp f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ? 3147207643Snp 0 : p->vlan_prio; 3148207643Snp f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ? 
3149207643Snp 0 : FILTER_NO_VLAN_PRI; 3150207643Snp f->mac_hit = p->mac_hit; 3151207643Snp f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff; 3152207643Snp f->proto = p->pkt_type; 3153207643Snp f->want_filter_id = p->report_filter_id; 3154207643Snp f->pass = p->pass; 3155207643Snp f->rss = p->rss; 3156207643Snp f->qset = p->qset; 3157207643Snp 3158207643Snp break; 3159207643Snp } 3160207643Snp 3161207643Snp if (i == nfilters) 3162207643Snp f->filter_id = 0xffffffff; 3163207643Snp break; 3164207643Snp } 3165167514Skmacy default: 3166167514Skmacy return (EOPNOTSUPP); 3167167514Skmacy break; 3168167514Skmacy } 3169167514Skmacy 3170167514Skmacy return (error); 3171167514Skmacy} 3172167514Skmacy 3173167514Skmacystatic __inline void 3174167514Skmacyreg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start, 3175167514Skmacy unsigned int end) 3176167514Skmacy{ 3177182679Skmacy uint32_t *p = (uint32_t *)(buf + start); 3178167514Skmacy 3179167514Skmacy for ( ; start <= end; start += sizeof(uint32_t)) 3180167514Skmacy *p++ = t3_read_reg(ap, start); 3181167514Skmacy} 3182167514Skmacy 3183167514Skmacy#define T3_REGMAP_SIZE (3 * 1024) 3184167514Skmacystatic int 3185167514Skmacycxgb_get_regs_len(void) 3186167514Skmacy{ 3187167514Skmacy return T3_REGMAP_SIZE; 3188167514Skmacy} 3189167514Skmacy 3190167514Skmacystatic void 3191182679Skmacycxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf) 3192167514Skmacy{ 3193167514Skmacy 3194167514Skmacy /* 3195167514Skmacy * Version scheme: 3196167514Skmacy * bits 0..9: chip version 3197167514Skmacy * bits 10..15: chip revision 3198167514Skmacy * bit 31: set for PCIe cards 3199167514Skmacy */ 3200167514Skmacy regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31); 3201167514Skmacy 3202167514Skmacy /* 3203167514Skmacy * We skip the MAC statistics registers because they are clear-on-read. 
3204167514Skmacy * Also reading multi-register stats would need to synchronize with the 3205167514Skmacy * periodic mac stats accumulation. Hard to justify the complexity. 3206167514Skmacy */ 3207182679Skmacy memset(buf, 0, cxgb_get_regs_len()); 3208167514Skmacy reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN); 3209167514Skmacy reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT); 3210167514Skmacy reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE); 3211167514Skmacy reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA); 3212167514Skmacy reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3); 3213167514Skmacy reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0, 3214167514Skmacy XGM_REG(A_XGM_SERDES_STAT3, 1)); 3215167514Skmacy reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1), 3216167514Skmacy XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1)); 3217167514Skmacy} 3218176572Skmacy 3219207643Snpstatic int 3220207643Snpalloc_filters(struct adapter *sc) 3221207643Snp{ 3222207643Snp struct filter_info *p; 3223207643Snp unsigned int nfilters = sc->params.mc5.nfilters; 3224176572Skmacy 3225207643Snp if (nfilters == 0) 3226207643Snp return (0); 3227207643Snp 3228207643Snp p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO); 3229207643Snp sc->filters = p; 3230207643Snp 3231207643Snp p = &sc->filters[nfilters - 1]; 3232207643Snp p->vlan = 0xfff; 3233207643Snp p->vlan_prio = FILTER_NO_VLAN_PRI; 3234207643Snp p->pass = p->rss = p->valid = p->locked = 1; 3235207643Snp 3236207643Snp return (0); 3237207643Snp} 3238207643Snp 3239207643Snpstatic int 3240207643Snpsetup_hw_filters(struct adapter *sc) 3241207643Snp{ 3242207643Snp int i, rc; 3243207643Snp unsigned int nfilters = sc->params.mc5.nfilters; 3244207643Snp 3245207643Snp if (!sc->filters) 3246207643Snp return (0); 3247207643Snp 3248207643Snp t3_enable_filters(sc); 3249207643Snp 3250207643Snp for (i = rc = 0; i < nfilters && !rc; i++) { 3251207643Snp if (sc->filters[i].locked) 
3252207643Snp rc = set_filter(sc, i, &sc->filters[i]); 3253207643Snp } 3254207643Snp 3255207643Snp return (rc); 3256207643Snp} 3257207643Snp 3258207643Snpstatic int 3259207643Snpset_filter(struct adapter *sc, int id, const struct filter_info *f) 3260207643Snp{ 3261207643Snp int len; 3262207643Snp struct mbuf *m; 3263207643Snp struct ulp_txpkt *txpkt; 3264207643Snp struct work_request_hdr *wr; 3265207643Snp struct cpl_pass_open_req *oreq; 3266207643Snp struct cpl_set_tcb_field *sreq; 3267207643Snp 3268207643Snp len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq); 3269207643Snp KASSERT(len <= MHLEN, ("filter request too big for an mbuf")); 3270207643Snp 3271207643Snp id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes - 3272207643Snp sc->params.mc5.nfilters; 3273207643Snp 3274207643Snp m = m_gethdr(M_WAITOK, MT_DATA); 3275207643Snp m->m_len = m->m_pkthdr.len = len; 3276207643Snp bzero(mtod(m, char *), len); 3277207643Snp 3278207643Snp wr = mtod(m, struct work_request_hdr *); 3279207643Snp wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC); 3280207643Snp 3281207643Snp oreq = (struct cpl_pass_open_req *)(wr + 1); 3282207643Snp txpkt = (struct ulp_txpkt *)oreq; 3283207643Snp txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); 3284207643Snp txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8)); 3285207643Snp OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id)); 3286207643Snp oreq->local_port = htons(f->dport); 3287207643Snp oreq->peer_port = htons(f->sport); 3288207643Snp oreq->local_ip = htonl(f->dip); 3289207643Snp oreq->peer_ip = htonl(f->sip); 3290207643Snp oreq->peer_netmask = htonl(f->sip_mask); 3291207643Snp oreq->opt0h = 0; 3292207643Snp oreq->opt0l = htonl(F_NO_OFFLOAD); 3293207643Snp oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) | 3294207643Snp V_CONN_POLICY(CPL_CONN_POLICY_FILTER) | 3295207643Snp V_VLAN_PRI(f->vlan_prio >> 1) | 3296207643Snp V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) | 3297207643Snp V_PKT_TYPE(f->pkt_type) | 
V_OPT1_VLAN(f->vlan) | 3298207643Snp V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4))); 3299207643Snp 3300207643Snp sreq = (struct cpl_set_tcb_field *)(oreq + 1); 3301207643Snp set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL, 3302207643Snp (f->report_filter_id << 15) | (1 << 23) | 3303207643Snp ((u64)f->pass << 35) | ((u64)!f->rss << 36)); 3304207643Snp set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1); 3305207643Snp t3_mgmt_tx(sc, m); 3306207643Snp 3307207643Snp if (f->pass && !f->rss) { 3308207643Snp len = sizeof(*sreq); 3309207643Snp m = m_gethdr(M_WAITOK, MT_DATA); 3310207643Snp m->m_len = m->m_pkthdr.len = len; 3311207643Snp bzero(mtod(m, char *), len); 3312207643Snp sreq = mtod(m, struct cpl_set_tcb_field *); 3313207643Snp sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 3314207643Snp mk_set_tcb_field(sreq, id, 25, 0x3f80000, 3315207643Snp (u64)sc->rrss_map[f->qset] << 19); 3316207643Snp t3_mgmt_tx(sc, m); 3317207643Snp } 3318207643Snp return 0; 3319207643Snp} 3320207643Snp 3321207643Snpstatic inline void 3322207643Snpmk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid, 3323207643Snp unsigned int word, u64 mask, u64 val) 3324207643Snp{ 3325207643Snp OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 3326207643Snp req->reply = V_NO_REPLY(1); 3327207643Snp req->cpu_idx = 0; 3328207643Snp req->word = htons(word); 3329207643Snp req->mask = htobe64(mask); 3330207643Snp req->val = htobe64(val); 3331207643Snp} 3332207643Snp 3333207643Snpstatic inline void 3334207643Snpset_tcb_field_ulp(struct cpl_set_tcb_field *req, unsigned int tid, 3335207643Snp unsigned int word, u64 mask, u64 val) 3336207643Snp{ 3337207643Snp struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req; 3338207643Snp 3339207643Snp txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); 3340207643Snp txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8)); 3341207643Snp mk_set_tcb_field(req, tid, word, mask, val); 3342207643Snp} 3343237263Snp 3344237263Snpvoid 
/*
 * Call func(sc, arg) for every adapter on the global t3_list, with the
 * list lock held across the whole walk.
 */
t3_iterate(void (*func)(struct adapter *, void *), void *arg)
{
	struct adapter *sc;

	mtx_lock(&t3_list_lock);
	SLIST_FOREACH(sc, &t3_list, link) {
		/*
		 * func should not make any assumptions about what state sc is
		 * in - the only guarantee is that sc->sc_lock is a valid lock.
		 */
		func(sc, arg);
	}
	mtx_unlock(&t3_list_lock);
}

#ifdef TCP_OFFLOAD
/*
 * Enable or disable TOE on one port.  Caller must hold the adapter lock.
 * On first enable the TOM upper-layer driver is activated (which requires
 * t3_tom.ko to be loaded); per-port enablement is tracked as a bit in
 * sc->offload_map.  Returns 0 on success or an errno value.
 */
static int
toe_capability(struct port_info *pi, int enable)
{
	int rc;
	struct adapter *sc = pi->adapter;

	ADAPTER_LOCK_ASSERT_OWNED(sc);

	if (!is_offload(sc))
		return (ENODEV);

	if (enable) {
		/* The adapter must be fully initialized before TOE can run. */
		if (!(sc->flags & FULL_INIT_DONE)) {
			log(LOG_WARNING,
			    "You must enable a cxgb interface first\n");
			return (EAGAIN);
		}

		/* Already enabled on this port: nothing to do. */
		if (isset(&sc->offload_map, pi->port_id))
			return (0);

		if (!(sc->flags & TOM_INIT_DONE)) {
			rc = t3_activate_uld(sc, ULD_TOM);
			if (rc == EAGAIN) {
				/* EAGAIN here means no TOM ULD is registered. */
				log(LOG_WARNING,
				    "You must kldload t3_tom.ko before trying "
				    "to enable TOE on a cxgb interface.\n");
			}
			if (rc != 0)
				return (rc);
			KASSERT(sc->tom_softc != NULL,
			    ("%s: TOM activated but softc NULL", __func__));
			KASSERT(sc->flags & TOM_INIT_DONE,
			    ("%s: TOM activated but flag not set", __func__));
		}

		setbit(&sc->offload_map, pi->port_id);

		/*
		 * XXX: Temporary code to allow iWARP to be enabled when TOE is
		 * enabled on any port.  Need to figure out how to enable,
		 * disable, load, and unload iWARP cleanly.
		 */
		if (!isset(&sc->offload_map, MAX_NPORTS) &&
		    t3_activate_uld(sc, ULD_IWARP) == 0)
			setbit(&sc->offload_map, MAX_NPORTS);
	} else {
		if (!isset(&sc->offload_map, pi->port_id))
			return (0);

		KASSERT(sc->flags & TOM_INIT_DONE,
		    ("%s: TOM never initialized?", __func__));
		clrbit(&sc->offload_map, pi->port_id);
	}

	return (0);
}

/*
 * Add an upper layer driver to the global list.
 * Fails with EEXIST if a ULD with the same id is already registered.
 */
int
t3_register_uld(struct uld_info *ui)
{
	int rc = 0;
	struct uld_info *u;

	mtx_lock(&t3_uld_list_lock);
	SLIST_FOREACH(u, &t3_uld_list, link) {
		if (u->uld_id == ui->uld_id) {
			rc = EEXIST;
			goto done;
		}
	}

	SLIST_INSERT_HEAD(&t3_uld_list, ui, link);
	ui->refcount = 0;
done:
	mtx_unlock(&t3_uld_list_lock);
	return (rc);
}

/*
 * Remove an upper layer driver from the global list.  Returns EBUSY if the
 * ULD is still in use (refcount > 0) and EINVAL if it was never registered.
 */
int
t3_unregister_uld(struct uld_info *ui)
{
	int rc = EINVAL;
	struct uld_info *u;

	mtx_lock(&t3_uld_list_lock);

	SLIST_FOREACH(u, &t3_uld_list, link) {
		if (u == ui) {
			if (ui->refcount > 0) {
				rc = EBUSY;
				goto done;
			}

			SLIST_REMOVE(&t3_uld_list, ui, uld_info, link);
			rc = 0;
			goto done;
		}
	}
done:
	mtx_unlock(&t3_uld_list_lock);
	return (rc);
}

/*
 * Activate the registered ULD with the given id on this adapter; bumps the
 * ULD's refcount on success.  Returns EAGAIN when no such ULD is registered
 * (e.g. its module is not loaded), otherwise the activate hook's status.
 */
int
t3_activate_uld(struct adapter *sc, int id)
{
	int rc = EAGAIN;
	struct uld_info *ui;

	mtx_lock(&t3_uld_list_lock);

	SLIST_FOREACH(ui, &t3_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->activate(sc);
			if (rc == 0)
				ui->refcount++;
			goto done;
		}
	}
done:
	mtx_unlock(&t3_uld_list_lock);

	return (rc);
}

/*
 * Deactivate the ULD with the given id on this adapter; drops its refcount
 * on success.  Returns EINVAL when the ULD is not registered.
 */
int
t3_deactivate_uld(struct adapter *sc, int id)
{
	int rc = EINVAL;
	struct uld_info *ui;

	mtx_lock(&t3_uld_list_lock);

	SLIST_FOREACH(ui, &t3_uld_list, link) {
		if (ui->uld_id == id) {
			rc = ui->deactivate(sc);
			if (rc == 0)
				ui->refcount--;
			goto done;
		}
	}
done:
	mtx_unlock(&t3_uld_list_lock);

	return (rc);
}

/*
 * Default CPL handler: drop the message.  EDOOFUS flags an opcode that was
 * never claimed by t3_register_cpl_handler().
 */
static int
cpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused,
    struct mbuf *m)
{
	m_freem(m);
	return (EDOOFUS);
}

/*
 * Install (or, with h == NULL, reset to the default) the handler for a CPL
 * opcode.  The pointer is published with a release store so readers never
 * see a torn update.
 */
int
t3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
{
	uintptr_t *loc, new;

	if (opcode >= NUM_CPL_HANDLERS)
		return (EINVAL);

	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
	loc = (uintptr_t *) &sc->cpl_handler[opcode];
	atomic_store_rel_ptr(loc, new);

	return (0);
}
#endif

/*
 * Module event handler for the cxgbc driver.  Sets up the global adapter
 * (and, with TCP_OFFLOAD, ULD) lists on load, and refuses to unload
 * (EBUSY) while either list is non-empty.  Unhandled events return 0.
 */
static int
cxgbc_mod_event(module_t mod, int cmd, void *arg)
{
	int rc = 0;

	switch (cmd) {
	case MOD_LOAD:
		mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF);
		SLIST_INIT(&t3_list);
#ifdef TCP_OFFLOAD
		mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF);
		SLIST_INIT(&t3_uld_list);
#endif
		break;

	case MOD_UNLOAD:
#ifdef TCP_OFFLOAD
		/* Any registered ULD means a dependent module is loaded. */
		mtx_lock(&t3_uld_list_lock);
		if (!SLIST_EMPTY(&t3_uld_list)) {
			rc = EBUSY;
			mtx_unlock(&t3_uld_list_lock);
			break;
		}
		mtx_unlock(&t3_uld_list_lock);
		mtx_destroy(&t3_uld_list_lock);
#endif
		mtx_lock(&t3_list_lock);
		if (!SLIST_EMPTY(&t3_list)) {
			rc = EBUSY;
			mtx_unlock(&t3_list_lock);
			break;
		}
		mtx_unlock(&t3_list_lock);
		mtx_destroy(&t3_list_lock);
		break;
	}

	return (rc);
}