ql_os.c revision 263102
1250661Sdavidcs/* 2250661Sdavidcs * Copyright (c) 2013-2014 Qlogic Corporation 3250661Sdavidcs * All rights reserved. 4250661Sdavidcs * 5250661Sdavidcs * Redistribution and use in source and binary forms, with or without 6250661Sdavidcs * modification, are permitted provided that the following conditions 7250661Sdavidcs * are met: 8250661Sdavidcs * 9250661Sdavidcs * 1. Redistributions of source code must retain the above copyright 10250661Sdavidcs * notice, this list of conditions and the following disclaimer. 11250661Sdavidcs * 2. Redistributions in binary form must reproduce the above copyright 12250661Sdavidcs * notice, this list of conditions and the following disclaimer in the 13250661Sdavidcs * documentation and/or other materials provided with the distribution. 14250661Sdavidcs * 15250661Sdavidcs * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16250661Sdavidcs * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17250661Sdavidcs * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18250661Sdavidcs * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19250661Sdavidcs * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20250661Sdavidcs * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21250661Sdavidcs * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22250661Sdavidcs * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23250661Sdavidcs * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24250661Sdavidcs * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25250661Sdavidcs * POSSIBILITY OF SUCH DAMAGE. 26250661Sdavidcs */ 27250661Sdavidcs 28250661Sdavidcs/* 29250661Sdavidcs * File: ql_os.c 30250661Sdavidcs * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 31250661Sdavidcs */ 32250661Sdavidcs 33250661Sdavidcs#include <sys/cdefs.h> 34250661Sdavidcs__FBSDID("$FreeBSD: head/sys/dev/qlxgbe/ql_os.c 263102 2014-03-13 03:42:24Z glebius $"); 35250661Sdavidcs 36250661Sdavidcs 37250661Sdavidcs#include "ql_os.h" 38250661Sdavidcs#include "ql_hw.h" 39250661Sdavidcs#include "ql_def.h" 40250661Sdavidcs#include "ql_inline.h" 41250661Sdavidcs#include "ql_ver.h" 42250661Sdavidcs#include "ql_glbl.h" 43250661Sdavidcs#include "ql_dbg.h" 44250661Sdavidcs#include <sys/smp.h> 45250661Sdavidcs 46250661Sdavidcs/* 47250661Sdavidcs * Some PCI Configuration Space Related Defines 48250661Sdavidcs */ 49250661Sdavidcs 50250661Sdavidcs#ifndef PCI_VENDOR_QLOGIC 51250661Sdavidcs#define PCI_VENDOR_QLOGIC 0x1077 52250661Sdavidcs#endif 53250661Sdavidcs 54250661Sdavidcs#ifndef PCI_PRODUCT_QLOGIC_ISP8030 55250661Sdavidcs#define PCI_PRODUCT_QLOGIC_ISP8030 0x8030 56250661Sdavidcs#endif 57250661Sdavidcs 58250661Sdavidcs#define PCI_QLOGIC_ISP8030 \ 59250661Sdavidcs ((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC) 60250661Sdavidcs 61250661Sdavidcs/* 62250661Sdavidcs * static functions 63250661Sdavidcs */ 64250661Sdavidcsstatic int qla_alloc_parent_dma_tag(qla_host_t *ha); 65250661Sdavidcsstatic void qla_free_parent_dma_tag(qla_host_t *ha); 66250661Sdavidcsstatic int qla_alloc_xmt_bufs(qla_host_t *ha); 67250661Sdavidcsstatic void qla_free_xmt_bufs(qla_host_t *ha); 68250661Sdavidcsstatic int qla_alloc_rcv_bufs(qla_host_t *ha); 69250661Sdavidcsstatic void qla_free_rcv_bufs(qla_host_t *ha); 70250661Sdavidcsstatic void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb); 71250661Sdavidcs 72250661Sdavidcsstatic void qla_init_ifnet(device_t dev, qla_host_t *ha); 73250661Sdavidcsstatic int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS); 74250661Sdavidcsstatic int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS); 75250661Sdavidcsstatic void qla_release(qla_host_t *ha); 76250661Sdavidcsstatic void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, 77250661Sdavidcs int error); 78250661Sdavidcsstatic void qla_stop(qla_host_t *ha); 79250661Sdavidcsstatic int qla_send(qla_host_t *ha, struct mbuf **m_headp); 80250661Sdavidcsstatic void qla_tx_done(void *context, int pending); 81250661Sdavidcsstatic void qla_get_peer(qla_host_t *ha); 82250661Sdavidcsstatic void qla_error_recovery(void *context, int pending); 83250661Sdavidcs 84250661Sdavidcs/* 85250661Sdavidcs * Hooks to the Operating Systems 86250661Sdavidcs */ 87250661Sdavidcsstatic int qla_pci_probe (device_t); 88250661Sdavidcsstatic int qla_pci_attach (device_t); 89250661Sdavidcsstatic int qla_pci_detach (device_t); 90250661Sdavidcs 91250661Sdavidcsstatic void qla_init(void *arg); 92250661Sdavidcsstatic int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 93250661Sdavidcsstatic int qla_media_change(struct ifnet *ifp); 94250661Sdavidcsstatic void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); 95250661Sdavidcsstatic void qla_start(struct ifnet *ifp); 96250661Sdavidcs 97250661Sdavidcsstatic device_method_t qla_pci_methods[] = { 98250661Sdavidcs /* Device interface */ 99250661Sdavidcs DEVMETHOD(device_probe, qla_pci_probe), 100250661Sdavidcs DEVMETHOD(device_attach, qla_pci_attach), 101250661Sdavidcs DEVMETHOD(device_detach, qla_pci_detach), 102250661Sdavidcs { 0, 0 } 103250661Sdavidcs}; 104250661Sdavidcs 105250661Sdavidcsstatic driver_t qla_pci_driver = { 106250661Sdavidcs "ql", qla_pci_methods, sizeof (qla_host_t), 107250661Sdavidcs}; 108250661Sdavidcs 109250661Sdavidcsstatic devclass_t qla83xx_devclass; 110250661Sdavidcs 111250661SdavidcsDRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0); 112250661Sdavidcs 113250661SdavidcsMODULE_DEPEND(qla83xx, pci, 1, 1, 1); 114250661SdavidcsMODULE_DEPEND(qla83xx, ether, 1, 1, 1); 115250661Sdavidcs 116250661SdavidcsMALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver"); 117250661Sdavidcs 118250661Sdavidcs#define QL_STD_REPLENISH_THRES 0 119250661Sdavidcs#define QL_JUMBO_REPLENISH_THRES 32 120250661Sdavidcs 121250661Sdavidcs 122250661Sdavidcsstatic char dev_str[64]; 123250661Sdavidcs 124250661Sdavidcs/* 125250661Sdavidcs * Name: qla_pci_probe 126250661Sdavidcs * Function: Validate the PCI device to be a QLA80XX device 127250661Sdavidcs */ 128250661Sdavidcsstatic int 129250661Sdavidcsqla_pci_probe(device_t dev) 130250661Sdavidcs{ 131250661Sdavidcs switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 132250661Sdavidcs case PCI_QLOGIC_ISP8030: 133250661Sdavidcs snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d", 134250661Sdavidcs "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function", 135250661Sdavidcs QLA_VERSION_MAJOR, QLA_VERSION_MINOR, 136250661Sdavidcs QLA_VERSION_BUILD); 137250661Sdavidcs device_set_desc(dev, dev_str); 138250661Sdavidcs break; 139250661Sdavidcs default: 140250661Sdavidcs return (ENXIO); 141250661Sdavidcs } 142250661Sdavidcs 143250661Sdavidcs if (bootverbose) 144250661Sdavidcs printf("%s: %s\n ", __func__, dev_str); 145250661Sdavidcs 146250661Sdavidcs return (BUS_PROBE_DEFAULT); 147250661Sdavidcs} 148250661Sdavidcs 149250661Sdavidcsstatic void 150250661Sdavidcsqla_add_sysctls(qla_host_t *ha) 151250661Sdavidcs{ 152250661Sdavidcs device_t dev = ha->pci_dev; 153250661Sdavidcs 154250661Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 155250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 156250661Sdavidcs OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, 157250661Sdavidcs (void *)ha, 0, 158250661Sdavidcs qla_sysctl_get_stats, "I", "Statistics"); 159250661Sdavidcs 160250661Sdavidcs SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), 161250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 162250661Sdavidcs OID_AUTO, "fw_version", CTLFLAG_RD, 163250661Sdavidcs &ha->fw_ver_str, 0, "firmware version"); 164250661Sdavidcs 165250661Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 166250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 167250661Sdavidcs OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW, 168250661Sdavidcs (void *)ha, 0, 169250661Sdavidcs qla_sysctl_get_link_status, "I", "Link Status"); 170250661Sdavidcs 171250661Sdavidcs ha->dbg_level = 0; 172250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 173250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 174250661Sdavidcs OID_AUTO, "debug", CTLFLAG_RW, 175250661Sdavidcs &ha->dbg_level, ha->dbg_level, "Debug Level"); 176250661Sdavidcs 177250661Sdavidcs ha->std_replenish = QL_STD_REPLENISH_THRES; 178250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 179250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 180250661Sdavidcs OID_AUTO, "std_replenish", CTLFLAG_RW, 181250661Sdavidcs &ha->std_replenish, ha->std_replenish, 182250661Sdavidcs "Threshold for Replenishing Standard Frames"); 183250661Sdavidcs 184250661Sdavidcs SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), 185250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 186250661Sdavidcs OID_AUTO, "ipv4_lro", 187250661Sdavidcs CTLFLAG_RD, &ha->ipv4_lro, 188250661Sdavidcs "number of ipv4 lro completions"); 189250661Sdavidcs 190250661Sdavidcs SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), 191250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 192250661Sdavidcs OID_AUTO, "ipv6_lro", 193250661Sdavidcs CTLFLAG_RD, &ha->ipv6_lro, 194250661Sdavidcs "number of ipv6 lro completions"); 195250661Sdavidcs 196250661Sdavidcs SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), 197250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 198250661Sdavidcs OID_AUTO, "tx_tso_frames", 199250661Sdavidcs CTLFLAG_RD, &ha->tx_tso_frames, 200250661Sdavidcs "number of Tx TSO Frames"); 201250661Sdavidcs 202250661Sdavidcs SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), 203250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 204250661Sdavidcs OID_AUTO, "hw_vlan_tx_frames", 205250661Sdavidcs CTLFLAG_RD, &ha->hw_vlan_tx_frames, 206250661Sdavidcs "number of Tx VLAN Frames"); 207250661Sdavidcs 208250661Sdavidcs return; 209250661Sdavidcs} 210250661Sdavidcs 211250661Sdavidcsstatic void 212250661Sdavidcsqla_watchdog(void *arg) 213250661Sdavidcs{ 214250661Sdavidcs qla_host_t *ha = arg; 215250661Sdavidcs qla_hw_t *hw; 216250661Sdavidcs struct ifnet *ifp; 217250661Sdavidcs uint32_t i; 218250661Sdavidcs qla_hw_tx_cntxt_t *hw_tx_cntxt; 219250661Sdavidcs 220250661Sdavidcs hw = &ha->hw; 221250661Sdavidcs ifp = ha->ifp; 222250661Sdavidcs 223250661Sdavidcs if (ha->flags.qla_watchdog_exit) { 224250661Sdavidcs ha->qla_watchdog_exited = 1; 225250661Sdavidcs return; 226250661Sdavidcs } 227250661Sdavidcs ha->qla_watchdog_exited = 0; 228250661Sdavidcs 229250661Sdavidcs if (!ha->flags.qla_watchdog_pause) { 230250661Sdavidcs if (ql_hw_check_health(ha) || ha->qla_initiate_recovery || 231250661Sdavidcs (ha->msg_from_peer == QL_PEER_MSG_RESET)) { 232250661Sdavidcs ha->qla_watchdog_paused = 1; 233250661Sdavidcs ha->flags.qla_watchdog_pause = 1; 234250661Sdavidcs ha->qla_initiate_recovery = 0; 235250661Sdavidcs ha->err_inject = 0; 236250661Sdavidcs taskqueue_enqueue(ha->err_tq, &ha->err_task); 237250661Sdavidcs } else { 238250661Sdavidcs for (i = 0; i < ha->hw.num_tx_rings; i++) { 239250661Sdavidcs hw_tx_cntxt = &hw->tx_cntxt[i]; 240250661Sdavidcs if (qla_le32_to_host(*(hw_tx_cntxt->tx_cons)) != 241250661Sdavidcs hw_tx_cntxt->txr_comp) { 242250661Sdavidcs taskqueue_enqueue(ha->tx_tq, 243250661Sdavidcs &ha->tx_task); 244250661Sdavidcs break; 245250661Sdavidcs } 246250661Sdavidcs } 247250661Sdavidcs 248250661Sdavidcs if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) { 249250661Sdavidcs taskqueue_enqueue(ha->tx_tq, &ha->tx_task); 250250661Sdavidcs } 251250661Sdavidcs ha->qla_watchdog_paused = 0; 252250661Sdavidcs } 253250661Sdavidcs 254250661Sdavidcs } else { 255250661Sdavidcs ha->qla_watchdog_paused = 1; 256250661Sdavidcs } 257250661Sdavidcs 258250661Sdavidcs ha->watchdog_ticks = ha->watchdog_ticks++ % 1000; 259250661Sdavidcs callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 260250661Sdavidcs qla_watchdog, ha); 261250661Sdavidcs} 262250661Sdavidcs 263250661Sdavidcs/* 264250661Sdavidcs * Name: qla_pci_attach 265250661Sdavidcs * Function: attaches the device to the operating system 266250661Sdavidcs */ 267250661Sdavidcsstatic int 268250661Sdavidcsqla_pci_attach(device_t dev) 269250661Sdavidcs{ 270250661Sdavidcs qla_host_t *ha = NULL; 271250661Sdavidcs uint32_t rsrc_len; 272250661Sdavidcs int i; 273250661Sdavidcs 274250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 275250661Sdavidcs 276250661Sdavidcs if ((ha = device_get_softc(dev)) == NULL) { 277250661Sdavidcs device_printf(dev, "cannot get softc\n"); 278250661Sdavidcs return (ENOMEM); 279250661Sdavidcs } 280250661Sdavidcs 281250661Sdavidcs memset(ha, 0, sizeof (qla_host_t)); 282250661Sdavidcs 283250661Sdavidcs if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) { 284250661Sdavidcs device_printf(dev, "device is not ISP8030\n"); 285250661Sdavidcs return (ENXIO); 286250661Sdavidcs } 287250661Sdavidcs 288250661Sdavidcs ha->pci_func = pci_get_function(dev); 289250661Sdavidcs 290250661Sdavidcs ha->pci_dev = dev; 291250661Sdavidcs 292250661Sdavidcs pci_enable_busmaster(dev); 293250661Sdavidcs 294250661Sdavidcs ha->reg_rid = PCIR_BAR(0); 295250661Sdavidcs ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 296250661Sdavidcs RF_ACTIVE); 297250661Sdavidcs 298250661Sdavidcs if (ha->pci_reg == NULL) { 299250661Sdavidcs device_printf(dev, "unable to map any ports\n"); 300250661Sdavidcs goto qla_pci_attach_err; 301250661Sdavidcs } 302250661Sdavidcs 303250661Sdavidcs rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 304250661Sdavidcs ha->reg_rid); 305250661Sdavidcs 306250661Sdavidcs mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 307250661Sdavidcs 308250661Sdavidcs mtx_init(&ha->tx_lock, "qla83xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF); 309250661Sdavidcs 310250661Sdavidcs qla_add_sysctls(ha); 311250661Sdavidcs ql_hw_add_sysctls(ha); 312250661Sdavidcs 313250661Sdavidcs ha->flags.lock_init = 1; 314250661Sdavidcs 315250661Sdavidcs ha->reg_rid1 = PCIR_BAR(2); 316250661Sdavidcs ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 317250661Sdavidcs &ha->reg_rid1, RF_ACTIVE); 318250661Sdavidcs 319250661Sdavidcs ha->msix_count = pci_msix_count(dev); 320250661Sdavidcs 321250661Sdavidcs if (ha->msix_count < (ha->hw.num_sds_rings + 1)) { 322250661Sdavidcs device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 323250661Sdavidcs ha->msix_count); 324250661Sdavidcs goto qla_pci_attach_err; 325250661Sdavidcs } 326250661Sdavidcs 327250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" 328250661Sdavidcs " msix_count 0x%x pci_reg %p\n", __func__, ha, 329250661Sdavidcs ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg)); 330250661Sdavidcs 331250661Sdavidcs ha->msix_count = ha->hw.num_sds_rings + 1; 332250661Sdavidcs 333250661Sdavidcs if (pci_alloc_msix(dev, &ha->msix_count)) { 334250661Sdavidcs device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__, 335250661Sdavidcs ha->msix_count); 336250661Sdavidcs ha->msix_count = 0; 337250661Sdavidcs goto qla_pci_attach_err; 338250661Sdavidcs } 339250661Sdavidcs 340250661Sdavidcs ha->mbx_irq_rid = 1; 341250661Sdavidcs ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 342250661Sdavidcs &ha->mbx_irq_rid, 343250661Sdavidcs (RF_ACTIVE | RF_SHAREABLE)); 344250661Sdavidcs if (ha->mbx_irq == NULL) { 345250661Sdavidcs device_printf(dev, "could not allocate mbx interrupt\n"); 346250661Sdavidcs goto qla_pci_attach_err; 347250661Sdavidcs } 348250661Sdavidcs if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE), 349250661Sdavidcs NULL, ql_mbx_isr, ha, &ha->mbx_handle)) { 350250661Sdavidcs device_printf(dev, "could not setup mbx interrupt\n"); 351250661Sdavidcs goto qla_pci_attach_err; 352250661Sdavidcs } 353250661Sdavidcs 354250661Sdavidcs 355250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) { 356250661Sdavidcs ha->irq_vec[i].sds_idx = i; 357250661Sdavidcs ha->irq_vec[i].ha = ha; 358250661Sdavidcs ha->irq_vec[i].irq_rid = 2 + i; 359250661Sdavidcs 360250661Sdavidcs ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 361250661Sdavidcs &ha->irq_vec[i].irq_rid, 362250661Sdavidcs (RF_ACTIVE | RF_SHAREABLE)); 363250661Sdavidcs 364250661Sdavidcs if (ha->irq_vec[i].irq == NULL) { 365250661Sdavidcs device_printf(dev, "could not allocate interrupt\n"); 366250661Sdavidcs goto qla_pci_attach_err; 367250661Sdavidcs } 368250661Sdavidcs if (bus_setup_intr(dev, ha->irq_vec[i].irq, 369250661Sdavidcs (INTR_TYPE_NET | INTR_MPSAFE), 370250661Sdavidcs NULL, ql_isr, &ha->irq_vec[i], 371250661Sdavidcs &ha->irq_vec[i].handle)) { 372250661Sdavidcs device_printf(dev, "could not setup interrupt\n"); 373250661Sdavidcs goto qla_pci_attach_err; 374250661Sdavidcs } 375250661Sdavidcs } 376250661Sdavidcs 377250661Sdavidcs printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus, 378250661Sdavidcs ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count); 379250661Sdavidcs 380250661Sdavidcs /* initialize hardware */ 381250661Sdavidcs if (ql_init_hw(ha)) { 382250661Sdavidcs device_printf(dev, "%s: ql_init_hw failed\n", __func__); 383250661Sdavidcs goto qla_pci_attach_err; 384250661Sdavidcs } 385250661Sdavidcs 386250661Sdavidcs device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__, 387250661Sdavidcs ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, 388250661Sdavidcs ha->fw_ver_build); 389250661Sdavidcs snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d", 390250661Sdavidcs ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, 391250661Sdavidcs ha->fw_ver_build); 392250661Sdavidcs 393250661Sdavidcs ql_read_mac_addr(ha); 394250661Sdavidcs 395250661Sdavidcs /* allocate parent dma tag */ 396250661Sdavidcs if (qla_alloc_parent_dma_tag(ha)) { 397250661Sdavidcs device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n", 398250661Sdavidcs __func__); 399250661Sdavidcs goto qla_pci_attach_err; 400250661Sdavidcs } 401250661Sdavidcs 402250661Sdavidcs /* alloc all dma buffers */ 403250661Sdavidcs if (ql_alloc_dma(ha)) { 404250661Sdavidcs device_printf(dev, "%s: ql_alloc_dma failed\n", __func__); 405250661Sdavidcs goto qla_pci_attach_err; 406250661Sdavidcs } 407250661Sdavidcs qla_get_peer(ha); 408250661Sdavidcs 409250661Sdavidcs /* create the o.s ethernet interface */ 410250661Sdavidcs qla_init_ifnet(dev, ha); 411250661Sdavidcs 412250661Sdavidcs ha->flags.qla_watchdog_active = 1; 413250661Sdavidcs ha->flags.qla_watchdog_pause = 1; 414250661Sdavidcs 415250661Sdavidcs 416250661Sdavidcs TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha); 417250661Sdavidcs ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT, 418250661Sdavidcs taskqueue_thread_enqueue, &ha->tx_tq); 419250661Sdavidcs taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq", 420250661Sdavidcs device_get_nameunit(ha->pci_dev)); 421250661Sdavidcs 422250661Sdavidcs callout_init(&ha->tx_callout, TRUE); 423250661Sdavidcs ha->flags.qla_callout_init = 1; 424250661Sdavidcs 425250661Sdavidcs /* create ioctl device interface */ 426250661Sdavidcs if (ql_make_cdev(ha)) { 427250661Sdavidcs device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 428250661Sdavidcs goto qla_pci_attach_err; 429250661Sdavidcs } 430250661Sdavidcs 431250661Sdavidcs callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 432250661Sdavidcs qla_watchdog, ha); 433250661Sdavidcs 434250661Sdavidcs TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha); 435250661Sdavidcs ha->err_tq = taskqueue_create_fast("qla_errq", M_NOWAIT, 436250661Sdavidcs taskqueue_thread_enqueue, &ha->err_tq); 437250661Sdavidcs taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq", 438250661Sdavidcs device_get_nameunit(ha->pci_dev)); 439250661Sdavidcs 440250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__)); 441250661Sdavidcs return (0); 442250661Sdavidcs 443250661Sdavidcsqla_pci_attach_err: 444250661Sdavidcs 445250661Sdavidcs qla_release(ha); 446250661Sdavidcs 447250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__)); 448250661Sdavidcs return (ENXIO); 449250661Sdavidcs} 450250661Sdavidcs 451250661Sdavidcs/* 452250661Sdavidcs * Name: qla_pci_detach 453250661Sdavidcs * Function: Unhooks the device from the operating system 454250661Sdavidcs */ 455250661Sdavidcsstatic int 456250661Sdavidcsqla_pci_detach(device_t dev) 457250661Sdavidcs{ 458250661Sdavidcs qla_host_t *ha = NULL; 459250661Sdavidcs struct ifnet *ifp; 460250661Sdavidcs 461250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 462250661Sdavidcs 463250661Sdavidcs if ((ha = device_get_softc(dev)) == NULL) { 464250661Sdavidcs device_printf(dev, "cannot get softc\n"); 465250661Sdavidcs return (ENOMEM); 466250661Sdavidcs } 467250661Sdavidcs 468250661Sdavidcs ifp = ha->ifp; 469250661Sdavidcs 470250661Sdavidcs (void)QLA_LOCK(ha, __func__, 0); 471250661Sdavidcs qla_stop(ha); 472250661Sdavidcs QLA_UNLOCK(ha, __func__); 473250661Sdavidcs 474250661Sdavidcs qla_release(ha); 475250661Sdavidcs 476250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 477250661Sdavidcs 478250661Sdavidcs return (0); 479250661Sdavidcs} 480250661Sdavidcs 481250661Sdavidcs/* 482250661Sdavidcs * SYSCTL Related Callbacks 483250661Sdavidcs */ 484250661Sdavidcsstatic int 485250661Sdavidcsqla_sysctl_get_stats(SYSCTL_HANDLER_ARGS) 486250661Sdavidcs{ 487250661Sdavidcs int err, ret = 0; 488250661Sdavidcs qla_host_t *ha; 489250661Sdavidcs 490250661Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 491250661Sdavidcs 492250661Sdavidcs if (err || !req->newptr) 493250661Sdavidcs return (err); 494250661Sdavidcs 495250661Sdavidcs if (ret == 1) { 496250661Sdavidcs ha = (qla_host_t *)arg1; 497250661Sdavidcs ql_get_stats(ha); 498250661Sdavidcs } 499250661Sdavidcs return (err); 500250661Sdavidcs} 501250661Sdavidcsstatic int 502250661Sdavidcsqla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS) 503250661Sdavidcs{ 504250661Sdavidcs int err, ret = 0; 505250661Sdavidcs qla_host_t *ha; 506250661Sdavidcs 507250661Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 508250661Sdavidcs 509250661Sdavidcs if (err || !req->newptr) 510250661Sdavidcs return (err); 511250661Sdavidcs 512250661Sdavidcs if (ret == 1) { 513250661Sdavidcs ha = (qla_host_t *)arg1; 514250661Sdavidcs ql_hw_link_status(ha); 515250661Sdavidcs } 516250661Sdavidcs return (err); 517250661Sdavidcs} 518250661Sdavidcs 519250661Sdavidcs/* 520250661Sdavidcs * Name: qla_release 521250661Sdavidcs * Function: Releases the resources allocated for the device 522250661Sdavidcs */ 523250661Sdavidcsstatic void 524250661Sdavidcsqla_release(qla_host_t *ha) 525250661Sdavidcs{ 526250661Sdavidcs device_t dev; 527250661Sdavidcs int i; 528250661Sdavidcs 529250661Sdavidcs dev = ha->pci_dev; 530250661Sdavidcs 531250661Sdavidcs if (ha->err_tq) { 532250661Sdavidcs taskqueue_drain(ha->err_tq, &ha->err_task); 533250661Sdavidcs taskqueue_free(ha->err_tq); 534250661Sdavidcs } 535250661Sdavidcs 536250661Sdavidcs if (ha->tx_tq) { 537250661Sdavidcs taskqueue_drain(ha->tx_tq, &ha->tx_task); 538250661Sdavidcs taskqueue_free(ha->tx_tq); 539250661Sdavidcs } 540250661Sdavidcs 541250661Sdavidcs ql_del_cdev(ha); 542250661Sdavidcs 543250661Sdavidcs if (ha->flags.qla_watchdog_active) { 544250661Sdavidcs ha->flags.qla_watchdog_exit = 1; 545250661Sdavidcs 546250661Sdavidcs while (ha->qla_watchdog_exited == 0) 547250661Sdavidcs qla_mdelay(__func__, 1); 548250661Sdavidcs } 549250661Sdavidcs 550250661Sdavidcs if (ha->flags.qla_callout_init) 551250661Sdavidcs callout_stop(&ha->tx_callout); 552250661Sdavidcs 553250661Sdavidcs if (ha->ifp != NULL) 554250661Sdavidcs ether_ifdetach(ha->ifp); 555250661Sdavidcs 556250661Sdavidcs ql_free_dma(ha); 557250661Sdavidcs qla_free_parent_dma_tag(ha); 558250661Sdavidcs 559250661Sdavidcs if (ha->mbx_handle) 560250661Sdavidcs (void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle); 561250661Sdavidcs 562250661Sdavidcs if (ha->mbx_irq) 563250661Sdavidcs (void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid, 564250661Sdavidcs ha->mbx_irq); 565250661Sdavidcs 566250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) { 567250661Sdavidcs 568250661Sdavidcs if (ha->irq_vec[i].handle) { 569250661Sdavidcs (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 570250661Sdavidcs ha->irq_vec[i].handle); 571250661Sdavidcs } 572250661Sdavidcs 573250661Sdavidcs if (ha->irq_vec[i].irq) { 574250661Sdavidcs (void)bus_release_resource(dev, SYS_RES_IRQ, 575250661Sdavidcs ha->irq_vec[i].irq_rid, 576250661Sdavidcs ha->irq_vec[i].irq); 577250661Sdavidcs } 578250661Sdavidcs } 579250661Sdavidcs 580250661Sdavidcs if (ha->msix_count) 581250661Sdavidcs pci_release_msi(dev); 582250661Sdavidcs 583250661Sdavidcs if (ha->flags.lock_init) { 584250661Sdavidcs mtx_destroy(&ha->tx_lock); 585250661Sdavidcs mtx_destroy(&ha->hw_lock); 586250661Sdavidcs } 587250661Sdavidcs 588250661Sdavidcs if (ha->pci_reg) 589250661Sdavidcs (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 590250661Sdavidcs ha->pci_reg); 591250661Sdavidcs 592250661Sdavidcs if (ha->pci_reg1) 593250661Sdavidcs (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1, 594250661Sdavidcs ha->pci_reg1); 595250661Sdavidcs} 596250661Sdavidcs 597250661Sdavidcs/* 598250661Sdavidcs * DMA Related Functions 599250661Sdavidcs */ 600250661Sdavidcs 601250661Sdavidcsstatic void 602250661Sdavidcsqla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 603250661Sdavidcs{ 604250661Sdavidcs *((bus_addr_t *)arg) = 0; 605250661Sdavidcs 606250661Sdavidcs if (error) { 607250661Sdavidcs printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 608250661Sdavidcs return; 609250661Sdavidcs } 610250661Sdavidcs 611250661Sdavidcs *((bus_addr_t *)arg) = segs[0].ds_addr; 612250661Sdavidcs 613250661Sdavidcs return; 614250661Sdavidcs} 615250661Sdavidcs 616250661Sdavidcsint 617250661Sdavidcsql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 618250661Sdavidcs{ 619250661Sdavidcs int ret = 0; 620250661Sdavidcs device_t dev; 621250661Sdavidcs bus_addr_t b_addr; 622250661Sdavidcs 623250661Sdavidcs dev = ha->pci_dev; 624250661Sdavidcs 625250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 626250661Sdavidcs 627250661Sdavidcs ret = bus_dma_tag_create( 628250661Sdavidcs ha->parent_tag,/* parent */ 629250661Sdavidcs dma_buf->alignment, 630250661Sdavidcs ((bus_size_t)(1ULL << 32)),/* boundary */ 631250661Sdavidcs BUS_SPACE_MAXADDR, /* lowaddr */ 632250661Sdavidcs BUS_SPACE_MAXADDR, /* highaddr */ 633250661Sdavidcs NULL, NULL, /* filter, filterarg */ 634250661Sdavidcs dma_buf->size, /* maxsize */ 635250661Sdavidcs 1, /* nsegments */ 636250661Sdavidcs dma_buf->size, /* maxsegsize */ 637250661Sdavidcs 0, /* flags */ 638250661Sdavidcs NULL, NULL, /* lockfunc, lockarg */ 639250661Sdavidcs &dma_buf->dma_tag); 640250661Sdavidcs 641250661Sdavidcs if (ret) { 642250661Sdavidcs device_printf(dev, "%s: could not create dma tag\n", __func__); 643250661Sdavidcs goto ql_alloc_dmabuf_exit; 644250661Sdavidcs } 645250661Sdavidcs ret = bus_dmamem_alloc(dma_buf->dma_tag, 646250661Sdavidcs (void **)&dma_buf->dma_b, 647250661Sdavidcs (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 648250661Sdavidcs &dma_buf->dma_map); 649250661Sdavidcs if (ret) { 650250661Sdavidcs bus_dma_tag_destroy(dma_buf->dma_tag); 651250661Sdavidcs device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__); 652250661Sdavidcs goto ql_alloc_dmabuf_exit; 653250661Sdavidcs } 654250661Sdavidcs 655250661Sdavidcs ret = bus_dmamap_load(dma_buf->dma_tag, 656250661Sdavidcs dma_buf->dma_map, 657250661Sdavidcs dma_buf->dma_b, 658250661Sdavidcs dma_buf->size, 659250661Sdavidcs qla_dmamap_callback, 660250661Sdavidcs &b_addr, BUS_DMA_NOWAIT); 661250661Sdavidcs 662250661Sdavidcs if (ret || !b_addr) { 663250661Sdavidcs bus_dma_tag_destroy(dma_buf->dma_tag); 664250661Sdavidcs bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 665250661Sdavidcs dma_buf->dma_map); 666250661Sdavidcs ret = -1; 667250661Sdavidcs goto ql_alloc_dmabuf_exit; 668250661Sdavidcs } 669250661Sdavidcs 670250661Sdavidcs dma_buf->dma_addr = b_addr; 671250661Sdavidcs 672250661Sdavidcsql_alloc_dmabuf_exit: 673250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n", 674250661Sdavidcs __func__, ret, (void *)dma_buf->dma_tag, 675250661Sdavidcs (void *)dma_buf->dma_map, (void *)dma_buf->dma_b, 676250661Sdavidcs dma_buf->size)); 677250661Sdavidcs 678250661Sdavidcs return ret; 679250661Sdavidcs} 680250661Sdavidcs 681250661Sdavidcsvoid 682250661Sdavidcsql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 683250661Sdavidcs{ 684250661Sdavidcs bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 685250661Sdavidcs bus_dma_tag_destroy(dma_buf->dma_tag); 686250661Sdavidcs} 687250661Sdavidcs 688250661Sdavidcsstatic int 689250661Sdavidcsqla_alloc_parent_dma_tag(qla_host_t *ha) 690250661Sdavidcs{ 691250661Sdavidcs int ret; 692250661Sdavidcs device_t dev; 693250661Sdavidcs 694250661Sdavidcs dev = ha->pci_dev; 695250661Sdavidcs 696250661Sdavidcs /* 697250661Sdavidcs * Allocate parent DMA Tag 698250661Sdavidcs */ 699250661Sdavidcs ret = bus_dma_tag_create( 700250661Sdavidcs bus_get_dma_tag(dev), /* parent */ 701250661Sdavidcs 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 702250661Sdavidcs BUS_SPACE_MAXADDR, /* lowaddr */ 703250661Sdavidcs BUS_SPACE_MAXADDR, /* highaddr */ 704250661Sdavidcs NULL, NULL, /* filter, filterarg */ 705250661Sdavidcs BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 706250661Sdavidcs 0, /* nsegments */ 707250661Sdavidcs BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 708250661Sdavidcs 0, /* flags */ 709250661Sdavidcs NULL, NULL, /* lockfunc, lockarg */ 710250661Sdavidcs &ha->parent_tag); 711250661Sdavidcs 712250661Sdavidcs if (ret) { 713250661Sdavidcs device_printf(dev, "%s: could not create parent dma tag\n", 714250661Sdavidcs __func__); 715250661Sdavidcs return (-1); 716250661Sdavidcs } 717250661Sdavidcs 718250661Sdavidcs ha->flags.parent_tag = 1; 719250661Sdavidcs 720250661Sdavidcs return (0); 721250661Sdavidcs} 722250661Sdavidcs 723250661Sdavidcsstatic void 724250661Sdavidcsqla_free_parent_dma_tag(qla_host_t *ha) 725250661Sdavidcs{ 726250661Sdavidcs if (ha->flags.parent_tag) { 727250661Sdavidcs bus_dma_tag_destroy(ha->parent_tag); 728250661Sdavidcs ha->flags.parent_tag = 0; 729250661Sdavidcs } 730250661Sdavidcs} 731250661Sdavidcs 732250661Sdavidcs/* 733250661Sdavidcs * Name: qla_init_ifnet 734250661Sdavidcs * Function: Creates the Network Device Interface and Registers it with the O.S 735250661Sdavidcs */ 736250661Sdavidcs 737250661Sdavidcsstatic void 738250661Sdavidcsqla_init_ifnet(device_t dev, qla_host_t *ha) 739250661Sdavidcs{ 740250661Sdavidcs struct ifnet *ifp; 741250661Sdavidcs 742250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 743250661Sdavidcs 744250661Sdavidcs ifp = ha->ifp = if_alloc(IFT_ETHER); 745250661Sdavidcs 746250661Sdavidcs if (ifp == NULL) 747250661Sdavidcs panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 748250661Sdavidcs 749250661Sdavidcs if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 750250661Sdavidcs 751263102Sglebius ifp->if_baudrate = IF_Gbps(10); 752250661Sdavidcs ifp->if_capabilities = IFCAP_LINKSTATE; 753250661Sdavidcs 754250661Sdavidcs ifp->if_init = qla_init; 755250661Sdavidcs ifp->if_softc = ha; 756250661Sdavidcs ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 757250661Sdavidcs ifp->if_ioctl = qla_ioctl; 758250661Sdavidcs ifp->if_start = qla_start; 759250661Sdavidcs 760250661Sdavidcs IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha)); 761250661Sdavidcs ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha); 762250661Sdavidcs IFQ_SET_READY(&ifp->if_snd); 763250661Sdavidcs 764250661Sdavidcs ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 765250661Sdavidcs 766250661Sdavidcs ether_ifattach(ifp, qla_get_mac_addr(ha)); 767250661Sdavidcs 768250661Sdavidcs ifp->if_capabilities = IFCAP_HWCSUM | 769250661Sdavidcs IFCAP_TSO4 | 770250661Sdavidcs IFCAP_JUMBO_MTU; 771250661Sdavidcs 772250661Sdavidcs ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 773250661Sdavidcs ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 774250661Sdavidcs 775250661Sdavidcs ifp->if_capenable = ifp->if_capabilities; 776250661Sdavidcs 777250661Sdavidcs ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 778250661Sdavidcs 779250661Sdavidcs ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status); 780250661Sdavidcs 781250661Sdavidcs ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0, 782250661Sdavidcs NULL); 783250661Sdavidcs ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 784250661Sdavidcs 785250661Sdavidcs ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 786250661Sdavidcs 787250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 788250661Sdavidcs 789250661Sdavidcs return; 790250661Sdavidcs} 791250661Sdavidcs 792250661Sdavidcsstatic void 793250661Sdavidcsqla_init_locked(qla_host_t *ha) 794250661Sdavidcs{ 795250661Sdavidcs struct ifnet *ifp = ha->ifp; 796250661Sdavidcs 797250661Sdavidcs qla_stop(ha); 798250661Sdavidcs 799250661Sdavidcs if (qla_alloc_xmt_bufs(ha) != 0) 800250661Sdavidcs return; 801250661Sdavidcs 802250661Sdavidcs if (qla_alloc_rcv_bufs(ha) != 0) 803250661Sdavidcs return; 804250661Sdavidcs 805250661Sdavidcs bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN); 806250661Sdavidcs 807250661Sdavidcs ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO; 808250661Sdavidcs 809250661Sdavidcs ha->flags.stop_rcv = 0; 810250661Sdavidcs if (ql_init_hw_if(ha) == 0) { 811250661Sdavidcs ifp = ha->ifp; 812250661Sdavidcs ifp->if_drv_flags |= IFF_DRV_RUNNING; 813250661Sdavidcs ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 814250661Sdavidcs ha->flags.qla_watchdog_pause = 0; 815250661Sdavidcs ha->hw_vlan_tx_frames = 0; 816250661Sdavidcs ha->tx_tso_frames = 0; 817250661Sdavidcs } 818250661Sdavidcs 819250661Sdavidcs return; 820250661Sdavidcs} 821250661Sdavidcs 822250661Sdavidcsstatic void 823250661Sdavidcsqla_init(void *arg) 824250661Sdavidcs{ 825250661Sdavidcs qla_host_t *ha; 826250661Sdavidcs 827250661Sdavidcs ha = (qla_host_t *)arg; 828250661Sdavidcs 829250661Sdavidcs QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 830250661Sdavidcs 831250661Sdavidcs (void)QLA_LOCK(ha, __func__, 0); 832250661Sdavidcs qla_init_locked(ha); 833250661Sdavidcs QLA_UNLOCK(ha, __func__); 834250661Sdavidcs 835250661Sdavidcs QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 836250661Sdavidcs} 837250661Sdavidcs 838250661Sdavidcsstatic int 839250661Sdavidcsqla_set_multi(qla_host_t *ha, uint32_t add_multi) 840250661Sdavidcs{ 841250661Sdavidcs uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN]; 842250661Sdavidcs struct ifmultiaddr *ifma; 843250661Sdavidcs int mcnt = 0; 844250661Sdavidcs struct ifnet *ifp = ha->ifp; 845250661Sdavidcs int ret = 0; 846250661Sdavidcs 847250661Sdavidcs if_maddr_rlock(ifp); 848250661Sdavidcs 849250661Sdavidcs TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 850250661Sdavidcs 851250661Sdavidcs if (ifma->ifma_addr->sa_family != AF_LINK) 852250661Sdavidcs continue; 853250661Sdavidcs 854250661Sdavidcs if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS) 855250661Sdavidcs break; 856250661Sdavidcs 857250661Sdavidcs bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 858250661Sdavidcs &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN); 859250661Sdavidcs 860250661Sdavidcs mcnt++; 861250661Sdavidcs } 862250661Sdavidcs 863250661Sdavidcs if_maddr_runlock(ifp); 864250661Sdavidcs 865250661Sdavidcs if (QLA_LOCK(ha, __func__, 1) == 0) { 866250661Sdavidcs ret = ql_hw_set_multi(ha, mta, mcnt, add_multi); 867250661Sdavidcs QLA_UNLOCK(ha, __func__); 868250661Sdavidcs } 869250661Sdavidcs 870250661Sdavidcs return (ret); 871250661Sdavidcs} 872250661Sdavidcs 873250661Sdavidcsstatic int 874250661Sdavidcsqla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 875250661Sdavidcs{ 876250661Sdavidcs int ret = 0; 877250661Sdavidcs struct ifreq *ifr = (struct ifreq *)data; 878250661Sdavidcs struct ifaddr *ifa = (struct ifaddr *)data; 879250661Sdavidcs qla_host_t *ha; 880250661Sdavidcs 881250661Sdavidcs ha = (qla_host_t *)ifp->if_softc; 882250661Sdavidcs 883250661Sdavidcs switch (cmd) { 884250661Sdavidcs case SIOCSIFADDR: 885250661Sdavidcs QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", 886250661Sdavidcs __func__, cmd)); 887250661Sdavidcs 888250661Sdavidcs if (ifa->ifa_addr->sa_family == AF_INET) { 889250661Sdavidcs ifp->if_flags |= IFF_UP; 890250661Sdavidcs if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 891250661Sdavidcs (void)QLA_LOCK(ha, __func__, 0); 892250661Sdavidcs qla_init_locked(ha); 893250661Sdavidcs QLA_UNLOCK(ha, __func__); 894250661Sdavidcs } 895250661Sdavidcs QL_DPRINT4(ha, (ha->pci_dev, 896250661Sdavidcs "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 897250661Sdavidcs __func__, cmd, 898250661Sdavidcs ntohl(IA_SIN(ifa)->sin_addr.s_addr))); 899250661Sdavidcs 900250661Sdavidcs arp_ifinit(ifp, ifa); 901250661Sdavidcs } else { 902250661Sdavidcs ether_ioctl(ifp, cmd, data); 903250661Sdavidcs } 904250661Sdavidcs break; 905250661Sdavidcs 906250661Sdavidcs case SIOCSIFMTU: 907250661Sdavidcs QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", 908250661Sdavidcs __func__, cmd)); 909250661Sdavidcs 910250661Sdavidcs if (ifr->ifr_mtu > QLA_MAX_MTU) { 911250661Sdavidcs ret = EINVAL; 912250661Sdavidcs } else { 913250661Sdavidcs (void) QLA_LOCK(ha, __func__, 0); 914250661Sdavidcs ifp->if_mtu = ifr->ifr_mtu; 915250661Sdavidcs ha->max_frame_size = 916250661Sdavidcs ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 917250661Sdavidcs if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 918250661Sdavidcs ret = ql_set_max_mtu(ha, ha->max_frame_size, 919250661Sdavidcs ha->hw.rcv_cntxt_id); 920250661Sdavidcs } 921250661Sdavidcs 922250661Sdavidcs if (ifp->if_mtu > ETHERMTU) 923250661Sdavidcs ha->std_replenish = QL_JUMBO_REPLENISH_THRES; 924250661Sdavidcs else 925250661Sdavidcs ha->std_replenish = QL_STD_REPLENISH_THRES; 926250661Sdavidcs 927250661Sdavidcs 928250661Sdavidcs QLA_UNLOCK(ha, __func__); 929250661Sdavidcs 930250661Sdavidcs if (ret) 931250661Sdavidcs ret = EINVAL; 932250661Sdavidcs } 933250661Sdavidcs 934250661Sdavidcs break; 935250661Sdavidcs 936250661Sdavidcs case SIOCSIFFLAGS: 937250661Sdavidcs QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", 938250661Sdavidcs __func__, cmd)); 939250661Sdavidcs 940250661Sdavidcs (void)QLA_LOCK(ha, __func__, 0); 941250661Sdavidcs 942250661Sdavidcs if (ifp->if_flags & IFF_UP) { 943250661Sdavidcs if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 944250661Sdavidcs if ((ifp->if_flags ^ ha->if_flags) & 945250661Sdavidcs IFF_PROMISC) { 946250661Sdavidcs ret = ql_set_promisc(ha); 947250661Sdavidcs } else if ((ifp->if_flags ^ ha->if_flags) & 948250661Sdavidcs IFF_ALLMULTI) { 949250661Sdavidcs ret = ql_set_allmulti(ha); 950250661Sdavidcs } 951250661Sdavidcs } else { 952250661Sdavidcs qla_init_locked(ha); 953250661Sdavidcs ha->max_frame_size = ifp->if_mtu + 954250661Sdavidcs ETHER_HDR_LEN + ETHER_CRC_LEN; 955250661Sdavidcs ret = ql_set_max_mtu(ha, ha->max_frame_size, 956250661Sdavidcs ha->hw.rcv_cntxt_id); 957250661Sdavidcs } 958250661Sdavidcs } else { 959250661Sdavidcs if (ifp->if_drv_flags & IFF_DRV_RUNNING) 960250661Sdavidcs qla_stop(ha); 961250661Sdavidcs ha->if_flags = ifp->if_flags; 962250661Sdavidcs } 963250661Sdavidcs 964250661Sdavidcs QLA_UNLOCK(ha, __func__); 965250661Sdavidcs break; 966250661Sdavidcs 967250661Sdavidcs case SIOCADDMULTI: 968250661Sdavidcs QL_DPRINT4(ha, (ha->pci_dev, 969250661Sdavidcs "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); 970250661Sdavidcs 971250661Sdavidcs if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 972250661Sdavidcs if (qla_set_multi(ha, 1)) 973250661Sdavidcs ret = EINVAL; 974250661Sdavidcs } 975250661Sdavidcs break; 976250661Sdavidcs 977250661Sdavidcs case SIOCDELMULTI: 978250661Sdavidcs QL_DPRINT4(ha, (ha->pci_dev, 979250661Sdavidcs "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); 980250661Sdavidcs 981250661Sdavidcs if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 982250661Sdavidcs if (qla_set_multi(ha, 0)) 983250661Sdavidcs ret = EINVAL; 984250661Sdavidcs } 985250661Sdavidcs break; 986250661Sdavidcs 987250661Sdavidcs case SIOCSIFMEDIA: 988250661Sdavidcs case SIOCGIFMEDIA: 989250661Sdavidcs QL_DPRINT4(ha, (ha->pci_dev, 990250661Sdavidcs "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", 991250661Sdavidcs __func__, cmd)); 992250661Sdavidcs ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 993250661Sdavidcs break; 994250661Sdavidcs 995250661Sdavidcs case SIOCSIFCAP: 996250661Sdavidcs { 997250661Sdavidcs int mask = ifr->ifr_reqcap ^ ifp->if_capenable; 998250661Sdavidcs 999250661Sdavidcs QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", 1000250661Sdavidcs __func__, cmd)); 1001250661Sdavidcs 1002250661Sdavidcs if (mask & IFCAP_HWCSUM) 1003250661Sdavidcs ifp->if_capenable ^= IFCAP_HWCSUM; 1004250661Sdavidcs if (mask & IFCAP_TSO4) 1005250661Sdavidcs ifp->if_capenable ^= IFCAP_TSO4; 1006250661Sdavidcs if (mask & IFCAP_VLAN_HWTAGGING) 1007250661Sdavidcs ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1008250661Sdavidcs if (mask & IFCAP_VLAN_HWTSO) 1009250661Sdavidcs ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1010250661Sdavidcs 1011250661Sdavidcs if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 1012250661Sdavidcs qla_init(ha); 1013250661Sdavidcs 1014250661Sdavidcs VLAN_CAPABILITIES(ifp); 1015250661Sdavidcs break; 1016250661Sdavidcs } 1017250661Sdavidcs 1018250661Sdavidcs default: 1019250661Sdavidcs QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n", 1020250661Sdavidcs __func__, cmd)); 1021250661Sdavidcs ret = ether_ioctl(ifp, cmd, data); 1022250661Sdavidcs break; 1023250661Sdavidcs } 1024250661Sdavidcs 1025250661Sdavidcs return (ret); 1026250661Sdavidcs} 1027250661Sdavidcs 1028250661Sdavidcsstatic int 1029250661Sdavidcsqla_media_change(struct ifnet *ifp) 1030250661Sdavidcs{ 1031250661Sdavidcs qla_host_t *ha; 1032250661Sdavidcs struct ifmedia *ifm; 1033250661Sdavidcs int ret = 0; 1034250661Sdavidcs 1035250661Sdavidcs ha = (qla_host_t *)ifp->if_softc; 1036250661Sdavidcs 1037250661Sdavidcs QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1038250661Sdavidcs 1039250661Sdavidcs ifm = &ha->media; 1040250661Sdavidcs 1041250661Sdavidcs if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1042250661Sdavidcs ret = EINVAL; 1043250661Sdavidcs 1044250661Sdavidcs QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1045250661Sdavidcs 1046250661Sdavidcs return (ret); 1047250661Sdavidcs} 1048250661Sdavidcs 1049250661Sdavidcsstatic void 1050250661Sdavidcsqla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1051250661Sdavidcs{ 1052250661Sdavidcs qla_host_t *ha; 1053250661Sdavidcs 1054250661Sdavidcs ha = (qla_host_t *)ifp->if_softc; 1055250661Sdavidcs 1056250661Sdavidcs QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1057250661Sdavidcs 1058250661Sdavidcs ifmr->ifm_status = IFM_AVALID; 1059250661Sdavidcs ifmr->ifm_active = IFM_ETHER; 1060250661Sdavidcs 1061250661Sdavidcs ql_update_link_state(ha); 1062250661Sdavidcs if (ha->hw.link_up) { 1063250661Sdavidcs ifmr->ifm_status |= IFM_ACTIVE; 1064250661Sdavidcs ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha)); 1065250661Sdavidcs } 1066250661Sdavidcs 1067250661Sdavidcs QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\ 1068250661Sdavidcs (ha->hw.link_up ? "link_up" : "link_down"))); 1069250661Sdavidcs 1070250661Sdavidcs return; 1071250661Sdavidcs} 1072250661Sdavidcs 1073250661Sdavidcsstatic void 1074250661Sdavidcsqla_start(struct ifnet *ifp) 1075250661Sdavidcs{ 1076250661Sdavidcs struct mbuf *m_head; 1077250661Sdavidcs qla_host_t *ha = (qla_host_t *)ifp->if_softc; 1078250661Sdavidcs 1079250661Sdavidcs QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1080250661Sdavidcs 1081250661Sdavidcs if (!mtx_trylock(&ha->tx_lock)) { 1082250661Sdavidcs QL_DPRINT8(ha, (ha->pci_dev, 1083250661Sdavidcs "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__)); 1084250661Sdavidcs return; 1085250661Sdavidcs } 1086250661Sdavidcs 1087250661Sdavidcs if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1088250661Sdavidcs IFF_DRV_RUNNING) { 1089250661Sdavidcs QL_DPRINT8(ha, 1090250661Sdavidcs (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); 1091250661Sdavidcs QLA_TX_UNLOCK(ha); 1092250661Sdavidcs return; 1093250661Sdavidcs } 1094250661Sdavidcs 1095250661Sdavidcs if (!ha->watchdog_ticks) 1096250661Sdavidcs ql_update_link_state(ha); 1097250661Sdavidcs 1098250661Sdavidcs if (!ha->hw.link_up) { 1099250661Sdavidcs QL_DPRINT8(ha, (ha->pci_dev, "%s: link down\n", __func__)); 1100250661Sdavidcs QLA_TX_UNLOCK(ha); 1101250661Sdavidcs return; 1102250661Sdavidcs } 1103250661Sdavidcs 1104250661Sdavidcs while (ifp->if_snd.ifq_head != NULL) { 1105250661Sdavidcs IF_DEQUEUE(&ifp->if_snd, m_head); 1106250661Sdavidcs 1107250661Sdavidcs if (m_head == NULL) { 1108250661Sdavidcs QL_DPRINT8(ha, (ha->pci_dev, "%s: m_head == NULL\n", 1109250661Sdavidcs __func__)); 1110250661Sdavidcs break; 1111250661Sdavidcs } 1112250661Sdavidcs 1113250661Sdavidcs if (qla_send(ha, &m_head)) { 1114250661Sdavidcs if (m_head == NULL) 1115250661Sdavidcs break; 1116250661Sdavidcs QL_DPRINT8(ha, (ha->pci_dev, "%s: PREPEND\n", __func__)); 1117250661Sdavidcs ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1118250661Sdavidcs IF_PREPEND(&ifp->if_snd, m_head); 1119250661Sdavidcs break; 1120250661Sdavidcs } 1121250661Sdavidcs /* Send a copy of the frame to the BPF listener */ 1122250661Sdavidcs ETHER_BPF_MTAP(ifp, m_head); 1123250661Sdavidcs } 1124250661Sdavidcs QLA_TX_UNLOCK(ha); 1125250661Sdavidcs QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1126250661Sdavidcs return; 1127250661Sdavidcs} 1128250661Sdavidcs 1129250661Sdavidcsstatic int 1130250661Sdavidcsqla_send(qla_host_t *ha, struct mbuf **m_headp) 1131250661Sdavidcs{ 1132250661Sdavidcs bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; 1133250661Sdavidcs bus_dmamap_t map; 1134250661Sdavidcs int nsegs; 1135250661Sdavidcs int ret = -1; 1136250661Sdavidcs uint32_t tx_idx; 1137250661Sdavidcs struct mbuf *m_head = *m_headp; 1138250661Sdavidcs uint32_t txr_idx = ha->txr_idx; 1139250661Sdavidcs 1140250661Sdavidcs QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1141250661Sdavidcs 1142250661Sdavidcs if (m_head->m_flags & M_FLOWID) 1143250661Sdavidcs txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1); 1144250661Sdavidcs 1145250661Sdavidcs tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next; 1146250661Sdavidcs map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map; 1147250661Sdavidcs 1148250661Sdavidcs ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 1149250661Sdavidcs BUS_DMA_NOWAIT); 1150250661Sdavidcs 1151250661Sdavidcs if (ret == EFBIG) { 1152250661Sdavidcs 1153250661Sdavidcs struct mbuf *m; 1154250661Sdavidcs 1155250661Sdavidcs QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__, 1156250661Sdavidcs m_head->m_pkthdr.len)); 1157250661Sdavidcs 1158250661Sdavidcs m = m_defrag(m_head, M_NOWAIT); 1159250661Sdavidcs if (m == NULL) { 1160250661Sdavidcs ha->err_tx_defrag++; 1161250661Sdavidcs m_freem(m_head); 1162250661Sdavidcs *m_headp = NULL; 1163250661Sdavidcs device_printf(ha->pci_dev, 1164250661Sdavidcs "%s: m_defrag() = NULL [%d]\n", 1165250661Sdavidcs __func__, ret); 1166250661Sdavidcs return (ENOBUFS); 1167250661Sdavidcs } 1168250661Sdavidcs m_head = m; 1169250661Sdavidcs *m_headp = m_head; 1170250661Sdavidcs 1171250661Sdavidcs if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 1172250661Sdavidcs segs, &nsegs, BUS_DMA_NOWAIT))) { 1173250661Sdavidcs 1174250661Sdavidcs ha->err_tx_dmamap_load++; 1175250661Sdavidcs 1176250661Sdavidcs device_printf(ha->pci_dev, 1177250661Sdavidcs "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", 1178250661Sdavidcs __func__, ret, m_head->m_pkthdr.len); 1179250661Sdavidcs 1180250661Sdavidcs if (ret != ENOMEM) { 1181250661Sdavidcs m_freem(m_head); 1182250661Sdavidcs *m_headp = NULL; 1183250661Sdavidcs } 1184250661Sdavidcs return (ret); 1185250661Sdavidcs } 1186250661Sdavidcs 1187250661Sdavidcs } else if (ret) { 1188250661Sdavidcs 1189250661Sdavidcs ha->err_tx_dmamap_load++; 1190250661Sdavidcs 1191250661Sdavidcs device_printf(ha->pci_dev, 1192250661Sdavidcs "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", 1193250661Sdavidcs __func__, ret, m_head->m_pkthdr.len); 1194250661Sdavidcs 1195250661Sdavidcs if (ret != ENOMEM) { 1196250661Sdavidcs m_freem(m_head); 1197250661Sdavidcs *m_headp = NULL; 1198250661Sdavidcs } 1199250661Sdavidcs return (ret); 1200250661Sdavidcs } 1201250661Sdavidcs 1202250661Sdavidcs QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet")); 1203250661Sdavidcs 1204250661Sdavidcs bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 1205250661Sdavidcs 1206250661Sdavidcs if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx))) { 1207250661Sdavidcs 1208250661Sdavidcs ha->tx_ring[txr_idx].count++; 1209250661Sdavidcs ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head; 1210250661Sdavidcs } else { 1211250661Sdavidcs if (ret == EINVAL) { 1212250661Sdavidcs if (m_head) 1213250661Sdavidcs m_freem(m_head); 1214250661Sdavidcs *m_headp = NULL; 1215250661Sdavidcs } 1216250661Sdavidcs } 1217250661Sdavidcs 1218250661Sdavidcs QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1219250661Sdavidcs return (ret); 1220250661Sdavidcs} 1221250661Sdavidcs 1222250661Sdavidcsstatic void 1223250661Sdavidcsqla_stop(qla_host_t *ha) 1224250661Sdavidcs{ 1225250661Sdavidcs struct ifnet *ifp = ha->ifp; 1226250661Sdavidcs device_t dev; 1227250661Sdavidcs 1228250661Sdavidcs dev = ha->pci_dev; 1229250661Sdavidcs 1230250661Sdavidcs ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 1231250661Sdavidcs 1232250661Sdavidcs ha->flags.qla_watchdog_pause = 1; 1233250661Sdavidcs 1234250661Sdavidcs while (!ha->qla_watchdog_paused) 1235250661Sdavidcs qla_mdelay(__func__, 1); 1236250661Sdavidcs 1237250661Sdavidcs ha->flags.stop_rcv = 1; 1238250661Sdavidcs ql_hw_stop_rcv(ha); 1239250661Sdavidcs 1240250661Sdavidcs ql_del_hw_if(ha); 1241250661Sdavidcs 1242250661Sdavidcs qla_free_xmt_bufs(ha); 1243250661Sdavidcs qla_free_rcv_bufs(ha); 1244250661Sdavidcs 1245250661Sdavidcs return; 1246250661Sdavidcs} 1247250661Sdavidcs 1248250661Sdavidcs/* 1249250661Sdavidcs * Buffer Management Functions for Transmit and Receive Rings 1250250661Sdavidcs */ 1251250661Sdavidcsstatic int 1252250661Sdavidcsqla_alloc_xmt_bufs(qla_host_t *ha) 1253250661Sdavidcs{ 1254250661Sdavidcs int ret = 0; 1255250661Sdavidcs uint32_t i, j; 1256250661Sdavidcs qla_tx_buf_t *txb; 1257250661Sdavidcs 1258250661Sdavidcs if (bus_dma_tag_create(NULL, /* parent */ 1259250661Sdavidcs 1, 0, /* alignment, bounds */ 1260250661Sdavidcs BUS_SPACE_MAXADDR, /* lowaddr */ 1261250661Sdavidcs BUS_SPACE_MAXADDR, /* highaddr */ 1262250661Sdavidcs NULL, NULL, /* filter, filterarg */ 1263250661Sdavidcs QLA_MAX_TSO_FRAME_SIZE, /* maxsize */ 1264250661Sdavidcs QLA_MAX_SEGMENTS, /* nsegments */ 1265250661Sdavidcs PAGE_SIZE, /* maxsegsize */ 1266250661Sdavidcs BUS_DMA_ALLOCNOW, /* flags */ 1267250661Sdavidcs NULL, /* lockfunc */ 1268250661Sdavidcs NULL, /* lockfuncarg */ 1269250661Sdavidcs &ha->tx_tag)) { 1270250661Sdavidcs device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n", 1271250661Sdavidcs __func__); 1272250661Sdavidcs return (ENOMEM); 1273250661Sdavidcs } 1274250661Sdavidcs 1275250661Sdavidcs for (i = 0; i < ha->hw.num_tx_rings; i++) { 1276250661Sdavidcs bzero((void *)ha->tx_ring[i].tx_buf, 1277250661Sdavidcs (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); 1278250661Sdavidcs } 1279250661Sdavidcs 1280250661Sdavidcs for (j = 0; j < ha->hw.num_tx_rings; j++) { 1281250661Sdavidcs for (i = 0; i < NUM_TX_DESCRIPTORS; i++) { 1282250661Sdavidcs 1283250661Sdavidcs txb = &ha->tx_ring[j].tx_buf[i]; 1284250661Sdavidcs 1285250661Sdavidcs if ((ret = bus_dmamap_create(ha->tx_tag, 1286250661Sdavidcs BUS_DMA_NOWAIT, &txb->map))) { 1287250661Sdavidcs 1288250661Sdavidcs ha->err_tx_dmamap_create++; 1289250661Sdavidcs device_printf(ha->pci_dev, 1290250661Sdavidcs "%s: bus_dmamap_create failed[%d]\n", 1291250661Sdavidcs __func__, ret); 1292250661Sdavidcs 1293250661Sdavidcs qla_free_xmt_bufs(ha); 1294250661Sdavidcs 1295250661Sdavidcs return (ret); 1296250661Sdavidcs } 1297250661Sdavidcs } 1298250661Sdavidcs } 1299250661Sdavidcs 1300250661Sdavidcs return 0; 1301250661Sdavidcs} 1302250661Sdavidcs 1303250661Sdavidcs/* 1304250661Sdavidcs * Release mbuf after it sent on the wire 1305250661Sdavidcs */ 1306250661Sdavidcsstatic void 1307250661Sdavidcsqla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) 1308250661Sdavidcs{ 1309250661Sdavidcs QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1310250661Sdavidcs 1311250661Sdavidcs if (txb->m_head && txb->map) { 1312250661Sdavidcs 1313250661Sdavidcs bus_dmamap_unload(ha->tx_tag, txb->map); 1314250661Sdavidcs 1315250661Sdavidcs m_freem(txb->m_head); 1316250661Sdavidcs txb->m_head = NULL; 1317250661Sdavidcs } 1318250661Sdavidcs 1319250661Sdavidcs if (txb->map) 1320250661Sdavidcs bus_dmamap_destroy(ha->tx_tag, txb->map); 1321250661Sdavidcs 1322250661Sdavidcs QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1323250661Sdavidcs} 1324250661Sdavidcs 1325250661Sdavidcsstatic void 1326250661Sdavidcsqla_free_xmt_bufs(qla_host_t *ha) 1327250661Sdavidcs{ 1328250661Sdavidcs int i, j; 1329250661Sdavidcs 1330250661Sdavidcs for (j = 0; j < ha->hw.num_tx_rings; j++) { 1331250661Sdavidcs for (i = 0; i < NUM_TX_DESCRIPTORS; i++) 1332250661Sdavidcs qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]); 1333250661Sdavidcs } 1334250661Sdavidcs 1335250661Sdavidcs if (ha->tx_tag != NULL) { 1336250661Sdavidcs bus_dma_tag_destroy(ha->tx_tag); 1337250661Sdavidcs ha->tx_tag = NULL; 1338250661Sdavidcs } 1339250661Sdavidcs 1340250661Sdavidcs for (i = 0; i < ha->hw.num_tx_rings; i++) { 1341250661Sdavidcs bzero((void *)ha->tx_ring[i].tx_buf, 1342250661Sdavidcs (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); 1343250661Sdavidcs } 1344250661Sdavidcs return; 1345250661Sdavidcs} 1346250661Sdavidcs 1347250661Sdavidcs 1348250661Sdavidcsstatic int 1349250661Sdavidcsqla_alloc_rcv_std(qla_host_t *ha) 1350250661Sdavidcs{ 1351250661Sdavidcs int i, j, k, r, ret = 0; 1352250661Sdavidcs qla_rx_buf_t *rxb; 1353250661Sdavidcs qla_rx_ring_t *rx_ring; 1354250661Sdavidcs 1355250661Sdavidcs for (r = 0; r < ha->hw.num_rds_rings; r++) { 1356250661Sdavidcs 1357250661Sdavidcs rx_ring = &ha->rx_ring[r]; 1358250661Sdavidcs 1359250661Sdavidcs for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1360250661Sdavidcs 1361250661Sdavidcs rxb = &rx_ring->rx_buf[i]; 1362250661Sdavidcs 1363250661Sdavidcs ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, 1364250661Sdavidcs &rxb->map); 1365250661Sdavidcs 1366250661Sdavidcs if (ret) { 1367250661Sdavidcs device_printf(ha->pci_dev, 1368250661Sdavidcs "%s: dmamap[%d, %d] failed\n", 1369250661Sdavidcs __func__, r, i); 1370250661Sdavidcs 1371250661Sdavidcs for (k = 0; k < r; k++) { 1372250661Sdavidcs for (j = 0; j < NUM_RX_DESCRIPTORS; 1373250661Sdavidcs j++) { 1374250661Sdavidcs rxb = &ha->rx_ring[k].rx_buf[j]; 1375250661Sdavidcs bus_dmamap_destroy(ha->rx_tag, 1376250661Sdavidcs rxb->map); 1377250661Sdavidcs } 1378250661Sdavidcs } 1379250661Sdavidcs 1380250661Sdavidcs for (j = 0; j < i; j++) { 1381250661Sdavidcs bus_dmamap_destroy(ha->rx_tag, 1382250661Sdavidcs rx_ring->rx_buf[j].map); 1383250661Sdavidcs } 1384250661Sdavidcs goto qla_alloc_rcv_std_err; 1385250661Sdavidcs } 1386250661Sdavidcs } 1387250661Sdavidcs } 1388250661Sdavidcs 1389250661Sdavidcs qla_init_hw_rcv_descriptors(ha); 1390250661Sdavidcs 1391250661Sdavidcs 1392250661Sdavidcs for (r = 0; r < ha->hw.num_rds_rings; r++) { 1393250661Sdavidcs 1394250661Sdavidcs rx_ring = &ha->rx_ring[r]; 1395250661Sdavidcs 1396250661Sdavidcs for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1397250661Sdavidcs rxb = &rx_ring->rx_buf[i]; 1398250661Sdavidcs rxb->handle = i; 1399250661Sdavidcs if (!(ret = ql_get_mbuf(ha, rxb, NULL))) { 1400250661Sdavidcs /* 1401250661Sdavidcs * set the physical address in the 1402250661Sdavidcs * corresponding descriptor entry in the 1403250661Sdavidcs * receive ring/queue for the hba 1404250661Sdavidcs */ 1405250661Sdavidcs qla_set_hw_rcv_desc(ha, r, i, rxb->handle, 1406250661Sdavidcs rxb->paddr, 1407250661Sdavidcs (rxb->m_head)->m_pkthdr.len); 1408250661Sdavidcs } else { 1409250661Sdavidcs device_printf(ha->pci_dev, 1410250661Sdavidcs "%s: ql_get_mbuf [%d, %d] failed\n", 1411250661Sdavidcs __func__, r, i); 1412250661Sdavidcs bus_dmamap_destroy(ha->rx_tag, rxb->map); 1413250661Sdavidcs goto qla_alloc_rcv_std_err; 1414250661Sdavidcs } 1415250661Sdavidcs } 1416250661Sdavidcs } 1417250661Sdavidcs return 0; 1418250661Sdavidcs 1419250661Sdavidcsqla_alloc_rcv_std_err: 1420250661Sdavidcs return (-1); 1421250661Sdavidcs} 1422250661Sdavidcs 1423250661Sdavidcsstatic void 1424250661Sdavidcsqla_free_rcv_std(qla_host_t *ha) 1425250661Sdavidcs{ 1426250661Sdavidcs int i, r; 1427250661Sdavidcs qla_rx_buf_t *rxb; 1428250661Sdavidcs 1429250661Sdavidcs for (r = 0; r < ha->hw.num_rds_rings; r++) { 1430250661Sdavidcs for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1431250661Sdavidcs rxb = &ha->rx_ring[r].rx_buf[i]; 1432250661Sdavidcs if (rxb->m_head != NULL) { 1433250661Sdavidcs bus_dmamap_unload(ha->rx_tag, rxb->map); 1434250661Sdavidcs bus_dmamap_destroy(ha->rx_tag, rxb->map); 1435250661Sdavidcs m_freem(rxb->m_head); 1436250661Sdavidcs rxb->m_head = NULL; 1437250661Sdavidcs } 1438250661Sdavidcs } 1439250661Sdavidcs } 1440250661Sdavidcs return; 1441250661Sdavidcs} 1442250661Sdavidcs 1443250661Sdavidcsstatic int 1444250661Sdavidcsqla_alloc_rcv_bufs(qla_host_t *ha) 1445250661Sdavidcs{ 1446250661Sdavidcs int i, ret = 0; 1447250661Sdavidcs 1448250661Sdavidcs if (bus_dma_tag_create(NULL, /* parent */ 1449250661Sdavidcs 1, 0, /* alignment, bounds */ 1450250661Sdavidcs BUS_SPACE_MAXADDR, /* lowaddr */ 1451250661Sdavidcs BUS_SPACE_MAXADDR, /* highaddr */ 1452250661Sdavidcs NULL, NULL, /* filter, filterarg */ 1453250661Sdavidcs MJUM9BYTES, /* maxsize */ 1454250661Sdavidcs 1, /* nsegments */ 1455250661Sdavidcs MJUM9BYTES, /* maxsegsize */ 1456250661Sdavidcs BUS_DMA_ALLOCNOW, /* flags */ 1457250661Sdavidcs NULL, /* lockfunc */ 1458250661Sdavidcs NULL, /* lockfuncarg */ 1459250661Sdavidcs &ha->rx_tag)) { 1460250661Sdavidcs 1461250661Sdavidcs device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n", 1462250661Sdavidcs __func__); 1463250661Sdavidcs 1464250661Sdavidcs return (ENOMEM); 1465250661Sdavidcs } 1466250661Sdavidcs 1467250661Sdavidcs bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); 1468250661Sdavidcs 1469250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) { 1470250661Sdavidcs ha->hw.sds[i].sdsr_next = 0; 1471250661Sdavidcs ha->hw.sds[i].rxb_free = NULL; 1472250661Sdavidcs ha->hw.sds[i].rx_free = 0; 1473250661Sdavidcs } 1474250661Sdavidcs 1475250661Sdavidcs ret = qla_alloc_rcv_std(ha); 1476250661Sdavidcs 1477250661Sdavidcs return (ret); 1478250661Sdavidcs} 1479250661Sdavidcs 1480250661Sdavidcsstatic void 1481250661Sdavidcsqla_free_rcv_bufs(qla_host_t *ha) 1482250661Sdavidcs{ 1483250661Sdavidcs int i; 1484250661Sdavidcs 1485250661Sdavidcs qla_free_rcv_std(ha); 1486250661Sdavidcs 1487250661Sdavidcs if (ha->rx_tag != NULL) { 1488250661Sdavidcs bus_dma_tag_destroy(ha->rx_tag); 1489250661Sdavidcs ha->rx_tag = NULL; 1490250661Sdavidcs } 1491250661Sdavidcs 1492250661Sdavidcs bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); 1493250661Sdavidcs 1494250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) { 1495250661Sdavidcs ha->hw.sds[i].sdsr_next = 0; 1496250661Sdavidcs ha->hw.sds[i].rxb_free = NULL; 1497250661Sdavidcs ha->hw.sds[i].rx_free = 0; 1498250661Sdavidcs } 1499250661Sdavidcs 1500250661Sdavidcs return; 1501250661Sdavidcs} 1502250661Sdavidcs 1503250661Sdavidcsint 1504250661Sdavidcsql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp) 1505250661Sdavidcs{ 1506250661Sdavidcs register struct mbuf *mp = nmp; 1507250661Sdavidcs struct ifnet *ifp; 1508250661Sdavidcs int ret = 0; 1509250661Sdavidcs uint32_t offset; 1510250661Sdavidcs bus_dma_segment_t segs[1]; 1511250661Sdavidcs int nsegs; 1512250661Sdavidcs 1513250661Sdavidcs QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1514250661Sdavidcs 1515250661Sdavidcs ifp = ha->ifp; 1516250661Sdavidcs 1517250661Sdavidcs if (mp == NULL) { 1518250661Sdavidcs 1519250661Sdavidcs mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1520250661Sdavidcs 1521250661Sdavidcs if (mp == NULL) { 1522250661Sdavidcs ha->err_m_getcl++; 1523250661Sdavidcs ret = ENOBUFS; 1524250661Sdavidcs device_printf(ha->pci_dev, 1525250661Sdavidcs "%s: m_getcl failed\n", __func__); 1526250661Sdavidcs goto exit_ql_get_mbuf; 1527250661Sdavidcs } 1528250661Sdavidcs mp->m_len = mp->m_pkthdr.len = MCLBYTES; 1529250661Sdavidcs } else { 1530250661Sdavidcs mp->m_len = mp->m_pkthdr.len = MCLBYTES; 1531250661Sdavidcs mp->m_data = mp->m_ext.ext_buf; 1532250661Sdavidcs mp->m_next = NULL; 1533250661Sdavidcs } 1534250661Sdavidcs 1535250661Sdavidcs offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); 1536250661Sdavidcs if (offset) { 1537250661Sdavidcs offset = 8 - offset; 1538250661Sdavidcs m_adj(mp, offset); 1539250661Sdavidcs } 1540250661Sdavidcs 1541250661Sdavidcs /* 1542250661Sdavidcs * Using memory from the mbuf cluster pool, invoke the bus_dma 1543250661Sdavidcs * machinery to arrange the memory mapping. 1544250661Sdavidcs */ 1545250661Sdavidcs ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map, 1546250661Sdavidcs mp, segs, &nsegs, BUS_DMA_NOWAIT); 1547250661Sdavidcs rxb->paddr = segs[0].ds_addr; 1548250661Sdavidcs 1549250661Sdavidcs if (ret || !rxb->paddr || (nsegs != 1)) { 1550250661Sdavidcs m_free(mp); 1551250661Sdavidcs rxb->m_head = NULL; 1552250661Sdavidcs device_printf(ha->pci_dev, 1553250661Sdavidcs "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 1554250661Sdavidcs __func__, ret, (long long unsigned int)rxb->paddr, 1555250661Sdavidcs nsegs); 1556250661Sdavidcs ret = -1; 1557250661Sdavidcs goto exit_ql_get_mbuf; 1558250661Sdavidcs } 1559250661Sdavidcs rxb->m_head = mp; 1560250661Sdavidcs bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); 1561250661Sdavidcs 1562250661Sdavidcsexit_ql_get_mbuf: 1563250661Sdavidcs QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); 1564250661Sdavidcs return (ret); 1565250661Sdavidcs} 1566250661Sdavidcs 1567250661Sdavidcsstatic void 1568250661Sdavidcsqla_tx_done(void *context, int pending) 1569250661Sdavidcs{ 1570250661Sdavidcs qla_host_t *ha = context; 1571250661Sdavidcs struct ifnet *ifp; 1572250661Sdavidcs 1573250661Sdavidcs ifp = ha->ifp; 1574250661Sdavidcs 1575250661Sdavidcs if (!ifp) 1576250661Sdavidcs return; 1577250661Sdavidcs 1578250661Sdavidcs if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1579250661Sdavidcs QL_DPRINT8(ha, (ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); 1580250661Sdavidcs return; 1581250661Sdavidcs } 1582250661Sdavidcs ql_hw_tx_done(ha); 1583250661Sdavidcs 1584250661Sdavidcs qla_start(ha->ifp); 1585250661Sdavidcs} 1586250661Sdavidcs 1587250661Sdavidcsstatic void 1588250661Sdavidcsqla_get_peer(qla_host_t *ha) 1589250661Sdavidcs{ 1590250661Sdavidcs device_t *peers; 1591250661Sdavidcs int count, i, slot; 1592250661Sdavidcs int my_slot = pci_get_slot(ha->pci_dev); 1593250661Sdavidcs 1594250661Sdavidcs if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count)) 1595250661Sdavidcs return; 1596250661Sdavidcs 1597250661Sdavidcs for (i = 0; i < count; i++) { 1598250661Sdavidcs slot = pci_get_slot(peers[i]); 1599250661Sdavidcs 1600250661Sdavidcs if ((slot >= 0) && (slot == my_slot) && 1601250661Sdavidcs (pci_get_device(peers[i]) == 1602250661Sdavidcs pci_get_device(ha->pci_dev))) { 1603250661Sdavidcs if (ha->pci_dev != peers[i]) 1604250661Sdavidcs ha->peer_dev = peers[i]; 1605250661Sdavidcs } 1606250661Sdavidcs } 1607250661Sdavidcs} 1608250661Sdavidcs 1609250661Sdavidcsstatic void 1610250661Sdavidcsqla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer) 1611250661Sdavidcs{ 1612250661Sdavidcs qla_host_t *ha_peer; 1613250661Sdavidcs 1614250661Sdavidcs if (ha->peer_dev) { 1615250661Sdavidcs if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) { 1616250661Sdavidcs 1617250661Sdavidcs ha_peer->msg_from_peer = msg_to_peer; 1618250661Sdavidcs } 1619250661Sdavidcs } 1620250661Sdavidcs} 1621250661Sdavidcs 1622250661Sdavidcsstatic void 1623250661Sdavidcsqla_error_recovery(void *context, int pending) 1624250661Sdavidcs{ 1625250661Sdavidcs qla_host_t *ha = context; 1626250661Sdavidcs uint32_t msecs_100 = 100; 1627250661Sdavidcs struct ifnet *ifp = ha->ifp; 1628250661Sdavidcs 1629250661Sdavidcs (void)QLA_LOCK(ha, __func__, 0); 1630250661Sdavidcs 1631250661Sdavidcs ha->flags.stop_rcv = 1; 1632250661Sdavidcs 1633250661Sdavidcs ql_hw_stop_rcv(ha); 1634250661Sdavidcs 1635250661Sdavidcs ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 1636250661Sdavidcs 1637250661Sdavidcs QLA_UNLOCK(ha, __func__); 1638250661Sdavidcs 1639250661Sdavidcs if ((ha->pci_func & 0x1) == 0) { 1640250661Sdavidcs 1641252580Sdavidcs if (!ha->msg_from_peer) { 1642250661Sdavidcs qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); 1643250661Sdavidcs 1644252580Sdavidcs while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && 1645252580Sdavidcs msecs_100--) 1646252580Sdavidcs qla_mdelay(__func__, 100); 1647252580Sdavidcs } 1648250661Sdavidcs 1649250661Sdavidcs ha->msg_from_peer = 0; 1650250661Sdavidcs 1651255003Sdavidcs ql_minidump(ha); 1652255003Sdavidcs 1653250661Sdavidcs (void) ql_init_hw(ha); 1654250661Sdavidcs qla_free_xmt_bufs(ha); 1655250661Sdavidcs qla_free_rcv_bufs(ha); 1656250661Sdavidcs 1657250661Sdavidcs qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); 1658250661Sdavidcs 1659250661Sdavidcs } else { 1660250661Sdavidcs if (ha->msg_from_peer == QL_PEER_MSG_RESET) { 1661250661Sdavidcs 1662250661Sdavidcs ha->msg_from_peer = 0; 1663250661Sdavidcs 1664250661Sdavidcs qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); 1665250661Sdavidcs } else { 1666250661Sdavidcs qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); 1667250661Sdavidcs } 1668250661Sdavidcs 1669250661Sdavidcs while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--) 1670250661Sdavidcs qla_mdelay(__func__, 100); 1671250661Sdavidcs ha->msg_from_peer = 0; 1672250661Sdavidcs 1673250661Sdavidcs (void) ql_init_hw(ha); 1674250661Sdavidcs qla_free_xmt_bufs(ha); 1675250661Sdavidcs qla_free_rcv_bufs(ha); 1676250661Sdavidcs } 1677250661Sdavidcs (void)QLA_LOCK(ha, __func__, 0); 1678250661Sdavidcs 1679250661Sdavidcs if (qla_alloc_xmt_bufs(ha) != 0) { 1680250661Sdavidcs QLA_UNLOCK(ha, __func__); 1681250661Sdavidcs return; 1682250661Sdavidcs } 1683250661Sdavidcs 1684250661Sdavidcs if (qla_alloc_rcv_bufs(ha) != 0) { 1685250661Sdavidcs QLA_UNLOCK(ha, __func__); 1686250661Sdavidcs return; 1687250661Sdavidcs } 1688250661Sdavidcs 1689250661Sdavidcs ha->flags.stop_rcv = 0; 1690250661Sdavidcs if (ql_init_hw_if(ha) == 0) { 1691250661Sdavidcs ifp = ha->ifp; 1692250661Sdavidcs ifp->if_drv_flags |= IFF_DRV_RUNNING; 1693250661Sdavidcs ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1694250661Sdavidcs ha->flags.qla_watchdog_pause = 0; 1695250661Sdavidcs } 1696250661Sdavidcs 1697250661Sdavidcs QLA_UNLOCK(ha, __func__); 1698250661Sdavidcs} 1699250661Sdavidcs 1700