1250661Sdavidcs/* 2284982Sdavidcs * Copyright (c) 2013-2016 Qlogic Corporation 3250661Sdavidcs * All rights reserved. 4250661Sdavidcs * 5250661Sdavidcs * Redistribution and use in source and binary forms, with or without 6250661Sdavidcs * modification, are permitted provided that the following conditions 7250661Sdavidcs * are met: 8250661Sdavidcs * 9250661Sdavidcs * 1. Redistributions of source code must retain the above copyright 10250661Sdavidcs * notice, this list of conditions and the following disclaimer. 11250661Sdavidcs * 2. Redistributions in binary form must reproduce the above copyright 12250661Sdavidcs * notice, this list of conditions and the following disclaimer in the 13250661Sdavidcs * documentation and/or other materials provided with the distribution. 14250661Sdavidcs * 15250661Sdavidcs * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16250661Sdavidcs * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17250661Sdavidcs * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18250661Sdavidcs * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19250661Sdavidcs * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20250661Sdavidcs * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21250661Sdavidcs * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22250661Sdavidcs * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23250661Sdavidcs * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24250661Sdavidcs * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25250661Sdavidcs * POSSIBILITY OF SUCH DAMAGE. 26250661Sdavidcs */ 27250661Sdavidcs 28250661Sdavidcs/* 29250661Sdavidcs * File: ql_hw.c 30250661Sdavidcs * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
31250661Sdavidcs * Content: Contains Hardware dependant functions 32250661Sdavidcs */ 33250661Sdavidcs 34250661Sdavidcs#include <sys/cdefs.h> 35250661Sdavidcs__FBSDID("$FreeBSD: releng/10.2/sys/dev/qlxgbe/ql_hw.c 284982 2015-06-30 20:59:07Z davidcs $"); 36250661Sdavidcs 37250661Sdavidcs#include "ql_os.h" 38250661Sdavidcs#include "ql_hw.h" 39250661Sdavidcs#include "ql_def.h" 40250661Sdavidcs#include "ql_inline.h" 41250661Sdavidcs#include "ql_ver.h" 42250661Sdavidcs#include "ql_glbl.h" 43250661Sdavidcs#include "ql_dbg.h" 44250661Sdavidcs 45250661Sdavidcs/* 46250661Sdavidcs * Static Functions 47250661Sdavidcs */ 48250661Sdavidcs 49250661Sdavidcsstatic void qla_del_rcv_cntxt(qla_host_t *ha); 50250661Sdavidcsstatic int qla_init_rcv_cntxt(qla_host_t *ha); 51250661Sdavidcsstatic void qla_del_xmt_cntxt(qla_host_t *ha); 52250661Sdavidcsstatic int qla_init_xmt_cntxt(qla_host_t *ha); 53250661Sdavidcsstatic void qla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx); 54250661Sdavidcsstatic int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, 55250661Sdavidcs uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause); 56284982Sdavidcsstatic int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, 57284982Sdavidcs uint32_t num_intrs, uint32_t create); 58250661Sdavidcsstatic int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id); 59250661Sdavidcsstatic int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, 60284982Sdavidcs int tenable, int rcv); 61250661Sdavidcsstatic int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode); 62250661Sdavidcsstatic int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id); 63250661Sdavidcs 64250661Sdavidcsstatic int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, 65250661Sdavidcs uint8_t *hdr); 66250661Sdavidcsstatic int qla_hw_add_all_mcast(qla_host_t *ha); 67250661Sdavidcsstatic int qla_hw_del_all_mcast(qla_host_t *ha); 68284982Sdavidcsstatic int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, 
uint32_t nsds); 69250661Sdavidcs 70284982Sdavidcsstatic int qla_init_nic_func(qla_host_t *ha); 71284982Sdavidcsstatic int qla_stop_nic_func(qla_host_t *ha); 72284982Sdavidcsstatic int qla_query_fw_dcbx_caps(qla_host_t *ha); 73284982Sdavidcsstatic int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits); 74284982Sdavidcsstatic int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits); 75284982Sdavidcsstatic void qla_get_quick_stats(qla_host_t *ha); 76284982Sdavidcs 77250661Sdavidcsstatic int qla_minidump_init(qla_host_t *ha); 78250661Sdavidcsstatic void qla_minidump_free(qla_host_t *ha); 79250661Sdavidcs 80250661Sdavidcs 81250661Sdavidcsstatic int 82250661Sdavidcsqla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS) 83250661Sdavidcs{ 84250661Sdavidcs int err = 0, ret; 85250661Sdavidcs qla_host_t *ha; 86250661Sdavidcs uint32_t i; 87250661Sdavidcs 88250661Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 89250661Sdavidcs 90250661Sdavidcs if (err || !req->newptr) 91250661Sdavidcs return (err); 92250661Sdavidcs 93250661Sdavidcs if (ret == 1) { 94250661Sdavidcs 95250661Sdavidcs ha = (qla_host_t *)arg1; 96250661Sdavidcs 97250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) 98250661Sdavidcs device_printf(ha->pci_dev, 99250661Sdavidcs "%s: sds_ring[%d] = %p\n", __func__,i, 100250661Sdavidcs (void *)ha->hw.sds[i].intr_count); 101250661Sdavidcs 102250661Sdavidcs for (i = 0; i < ha->hw.num_tx_rings; i++) 103250661Sdavidcs device_printf(ha->pci_dev, 104250661Sdavidcs "%s: tx[%d] = %p\n", __func__,i, 105250661Sdavidcs (void *)ha->tx_ring[i].count); 106250661Sdavidcs 107250661Sdavidcs for (i = 0; i < ha->hw.num_rds_rings; i++) 108250661Sdavidcs device_printf(ha->pci_dev, 109250661Sdavidcs "%s: rds_ring[%d] = %p\n", __func__,i, 110250661Sdavidcs (void *)ha->hw.rds[i].count); 111250661Sdavidcs 112250661Sdavidcs device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__, 113250661Sdavidcs (void *)ha->lro_pkt_count); 114250661Sdavidcs 115250661Sdavidcs 
device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__, 116250661Sdavidcs (void *)ha->lro_bytes); 117284982Sdavidcs 118284982Sdavidcs#ifdef QL_ENABLE_ISCSI_TLV 119284982Sdavidcs device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__, 120284982Sdavidcs (void *)ha->hw.iscsi_pkt_count); 121284982Sdavidcs#endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 122284982Sdavidcs 123250661Sdavidcs } 124250661Sdavidcs return (err); 125250661Sdavidcs} 126250661Sdavidcs 127284982Sdavidcsstatic int 128284982Sdavidcsqla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS) 129284982Sdavidcs{ 130284982Sdavidcs int err, ret = 0; 131284982Sdavidcs qla_host_t *ha; 132284982Sdavidcs 133284982Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 134284982Sdavidcs 135284982Sdavidcs if (err || !req->newptr) 136284982Sdavidcs return (err); 137284982Sdavidcs 138284982Sdavidcs if (ret == 1) { 139284982Sdavidcs ha = (qla_host_t *)arg1; 140284982Sdavidcs qla_get_quick_stats(ha); 141284982Sdavidcs } 142284982Sdavidcs return (err); 143284982Sdavidcs} 144284982Sdavidcs 145250661Sdavidcs#ifdef QL_DBG 146250661Sdavidcs 147250661Sdavidcsstatic void 148250661Sdavidcsqla_stop_pegs(qla_host_t *ha) 149250661Sdavidcs{ 150250661Sdavidcs uint32_t val = 1; 151250661Sdavidcs 152250661Sdavidcs ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0); 153250661Sdavidcs ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0); 154250661Sdavidcs ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0); 155250661Sdavidcs ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0); 156250661Sdavidcs ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0); 157250661Sdavidcs device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__); 158250661Sdavidcs} 159250661Sdavidcs 160250661Sdavidcsstatic int 161250661Sdavidcsqla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS) 162250661Sdavidcs{ 163250661Sdavidcs int err, ret = 0; 164250661Sdavidcs qla_host_t *ha; 165250661Sdavidcs 166250661Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 167250661Sdavidcs 168250661Sdavidcs 169250661Sdavidcs if (err || 
!req->newptr) 170250661Sdavidcs return (err); 171250661Sdavidcs 172250661Sdavidcs if (ret == 1) { 173250661Sdavidcs ha = (qla_host_t *)arg1; 174250661Sdavidcs (void)QLA_LOCK(ha, __func__, 0); 175250661Sdavidcs qla_stop_pegs(ha); 176250661Sdavidcs QLA_UNLOCK(ha, __func__); 177250661Sdavidcs } 178250661Sdavidcs 179250661Sdavidcs return err; 180250661Sdavidcs} 181250661Sdavidcs#endif /* #ifdef QL_DBG */ 182250661Sdavidcs 183284982Sdavidcsstatic int 184284982Sdavidcsqla_validate_set_port_cfg_bit(uint32_t bits) 185284982Sdavidcs{ 186284982Sdavidcs if ((bits & 0xF) > 1) 187284982Sdavidcs return (-1); 188284982Sdavidcs 189284982Sdavidcs if (((bits >> 4) & 0xF) > 2) 190284982Sdavidcs return (-1); 191284982Sdavidcs 192284982Sdavidcs if (((bits >> 8) & 0xF) > 2) 193284982Sdavidcs return (-1); 194284982Sdavidcs 195284982Sdavidcs return (0); 196284982Sdavidcs} 197284982Sdavidcs 198284982Sdavidcsstatic int 199284982Sdavidcsqla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS) 200284982Sdavidcs{ 201284982Sdavidcs int err, ret = 0; 202284982Sdavidcs qla_host_t *ha; 203284982Sdavidcs uint32_t cfg_bits; 204284982Sdavidcs 205284982Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 206284982Sdavidcs 207284982Sdavidcs if (err || !req->newptr) 208284982Sdavidcs return (err); 209284982Sdavidcs 210284982Sdavidcs if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) { 211284982Sdavidcs 212284982Sdavidcs ha = (qla_host_t *)arg1; 213284982Sdavidcs 214284982Sdavidcs err = qla_get_port_config(ha, &cfg_bits); 215284982Sdavidcs 216284982Sdavidcs if (err) 217284982Sdavidcs goto qla_sysctl_set_port_cfg_exit; 218284982Sdavidcs 219284982Sdavidcs if (ret & 0x1) { 220284982Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE; 221284982Sdavidcs } else { 222284982Sdavidcs cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE; 223284982Sdavidcs } 224284982Sdavidcs 225284982Sdavidcs ret = ret >> 4; 226284982Sdavidcs cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK; 227284982Sdavidcs 228284982Sdavidcs if ((ret & 0xF) == 0) { 
229284982Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED; 230284982Sdavidcs } else if ((ret & 0xF) == 1){ 231284982Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD; 232284982Sdavidcs } else { 233284982Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM; 234284982Sdavidcs } 235284982Sdavidcs 236284982Sdavidcs ret = ret >> 4; 237284982Sdavidcs cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK; 238284982Sdavidcs 239284982Sdavidcs if (ret == 0) { 240284982Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV; 241284982Sdavidcs } else if (ret == 1){ 242284982Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT; 243284982Sdavidcs } else { 244284982Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV; 245284982Sdavidcs } 246284982Sdavidcs 247284982Sdavidcs err = qla_set_port_config(ha, cfg_bits); 248284982Sdavidcs } else { 249284982Sdavidcs ha = (qla_host_t *)arg1; 250284982Sdavidcs 251284982Sdavidcs err = qla_get_port_config(ha, &cfg_bits); 252284982Sdavidcs } 253284982Sdavidcs 254284982Sdavidcsqla_sysctl_set_port_cfg_exit: 255284982Sdavidcs return err; 256284982Sdavidcs} 257284982Sdavidcs 258250661Sdavidcs/* 259250661Sdavidcs * Name: ql_hw_add_sysctls 260250661Sdavidcs * Function: Add P3Plus specific sysctls 261250661Sdavidcs */ 262250661Sdavidcsvoid 263250661Sdavidcsql_hw_add_sysctls(qla_host_t *ha) 264250661Sdavidcs{ 265250661Sdavidcs device_t dev; 266250661Sdavidcs 267250661Sdavidcs dev = ha->pci_dev; 268250661Sdavidcs 269250661Sdavidcs ha->hw.num_sds_rings = MAX_SDS_RINGS; 270250661Sdavidcs ha->hw.num_rds_rings = MAX_RDS_RINGS; 271250661Sdavidcs ha->hw.num_tx_rings = NUM_TX_RINGS; 272250661Sdavidcs 273250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 274250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 275250661Sdavidcs OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings, 276250661Sdavidcs ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings"); 277250661Sdavidcs 278250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 279250661Sdavidcs 
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 280250661Sdavidcs OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings, 281250661Sdavidcs ha->hw.num_sds_rings, "Number of Status Descriptor Rings"); 282250661Sdavidcs 283250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 284250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 285250661Sdavidcs OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings, 286250661Sdavidcs ha->hw.num_tx_rings, "Number of Transmit Rings"); 287250661Sdavidcs 288250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 289250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 290250661Sdavidcs OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx, 291250661Sdavidcs ha->txr_idx, "Tx Ring Used"); 292250661Sdavidcs 293250661Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 294250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 295250661Sdavidcs OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW, 296250661Sdavidcs (void *)ha, 0, 297250661Sdavidcs qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics"); 298250661Sdavidcs 299284982Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 300284982Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 301284982Sdavidcs OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW, 302284982Sdavidcs (void *)ha, 0, 303284982Sdavidcs qla_sysctl_get_quick_stats, "I", "Quick Statistics"); 304284982Sdavidcs 305250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 306250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 307250661Sdavidcs OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs, 308250661Sdavidcs ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt"); 309250661Sdavidcs 310250661Sdavidcs ha->hw.sds_cidx_thres = 32; 311250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 312250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 313250661Sdavidcs OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres, 314250661Sdavidcs 
ha->hw.sds_cidx_thres, 315250661Sdavidcs "Number of SDS entries to process before updating" 316250661Sdavidcs " SDS Ring Consumer Index"); 317250661Sdavidcs 318250661Sdavidcs ha->hw.rds_pidx_thres = 32; 319250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 320250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 321250661Sdavidcs OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres, 322250661Sdavidcs ha->hw.rds_pidx_thres, 323250661Sdavidcs "Number of Rcv Rings Entries to post before updating" 324250661Sdavidcs " RDS Ring Producer Index"); 325250661Sdavidcs 326284982Sdavidcs ha->hw.rcv_intr_coalesce = (3 << 16) | 256; 327284982Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 328284982Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 329284982Sdavidcs OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW, 330284982Sdavidcs &ha->hw.rcv_intr_coalesce, 331284982Sdavidcs ha->hw.rcv_intr_coalesce, 332284982Sdavidcs "Rcv Intr Coalescing Parameters\n" 333284982Sdavidcs "\tbits 15:0 max packets\n" 334284982Sdavidcs "\tbits 31:16 max micro-seconds to wait\n" 335284982Sdavidcs "\tplease run\n" 336284982Sdavidcs "\tifconfig <if> down && ifconfig <if> up\n" 337284982Sdavidcs "\tto take effect \n"); 338258457Sdavidcs 339284982Sdavidcs ha->hw.xmt_intr_coalesce = (64 << 16) | 64; 340284982Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 341284982Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 342284982Sdavidcs OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW, 343284982Sdavidcs &ha->hw.xmt_intr_coalesce, 344284982Sdavidcs ha->hw.xmt_intr_coalesce, 345284982Sdavidcs "Xmt Intr Coalescing Parameters\n" 346284982Sdavidcs "\tbits 15:0 max packets\n" 347284982Sdavidcs "\tbits 31:16 max micro-seconds to wait\n" 348284982Sdavidcs "\tplease run\n" 349284982Sdavidcs "\tifconfig <if> down && ifconfig <if> up\n" 350284982Sdavidcs "\tto take effect \n"); 351284982Sdavidcs 352284982Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 353284982Sdavidcs 
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 354284982Sdavidcs OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW, 355284982Sdavidcs (void *)ha, 0, 356284982Sdavidcs qla_sysctl_port_cfg, "I", 357284982Sdavidcs "Set Port Configuration if values below " 358284982Sdavidcs "otherwise Get Port Configuration\n" 359284982Sdavidcs "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n" 360284982Sdavidcs "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n" 361284982Sdavidcs "\tBits 8-11: std pause cfg; 0 = xmt and rcv;" 362284982Sdavidcs " 1 = xmt only; 2 = rcv only;\n" 363284982Sdavidcs ); 364284982Sdavidcs 365284982Sdavidcs ha->hw.enable_9kb = 1; 366284982Sdavidcs 367284982Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 368284982Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 369284982Sdavidcs OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb, 370284982Sdavidcs ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000"); 371284982Sdavidcs 372250661Sdavidcs ha->hw.mdump_active = 0; 373250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 374250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 375250661Sdavidcs OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active, 376250661Sdavidcs ha->hw.mdump_active, 377250661Sdavidcs "Minidump Utility is Active \n" 378250661Sdavidcs "\t 0 = Minidump Utility is not active\n" 379250661Sdavidcs "\t 1 = Minidump Utility is retrieved on this port\n" 380250661Sdavidcs "\t 2 = Minidump Utility is retrieved on the other port\n"); 381250661Sdavidcs 382250661Sdavidcs ha->hw.mdump_start = 0; 383250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 384250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 385250661Sdavidcs OID_AUTO, "minidump_start", CTLFLAG_RW, 386250661Sdavidcs &ha->hw.mdump_start, ha->hw.mdump_start, 387250661Sdavidcs "Minidump Utility can start minidump process"); 388250661Sdavidcs#ifdef QL_DBG 389250661Sdavidcs 390250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 391250661Sdavidcs 
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 392250661Sdavidcs OID_AUTO, "err_inject", 393250661Sdavidcs CTLFLAG_RW, &ha->err_inject, ha->err_inject, 394250661Sdavidcs "Error to be injected\n" 395250661Sdavidcs "\t\t\t 0: No Errors\n" 396250661Sdavidcs "\t\t\t 1: rcv: rxb struct invalid\n" 397250661Sdavidcs "\t\t\t 2: rcv: mp == NULL\n" 398250661Sdavidcs "\t\t\t 3: lro: rxb struct invalid\n" 399250661Sdavidcs "\t\t\t 4: lro: mp == NULL\n" 400250661Sdavidcs "\t\t\t 5: rcv: num handles invalid\n" 401250661Sdavidcs "\t\t\t 6: reg: indirect reg rd_wr failure\n" 402250661Sdavidcs "\t\t\t 7: ocm: offchip memory rd_wr failure\n" 403250661Sdavidcs "\t\t\t 8: mbx: mailbox command failure\n" 404250661Sdavidcs "\t\t\t 9: heartbeat failure\n" 405250661Sdavidcs "\t\t\t A: temperature failure\n" ); 406250661Sdavidcs 407250661Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 408250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 409250661Sdavidcs OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW, 410250661Sdavidcs (void *)ha, 0, 411250661Sdavidcs qla_sysctl_stop_pegs, "I", "Peg Stop"); 412250661Sdavidcs 413250661Sdavidcs#endif /* #ifdef QL_DBG */ 414250661Sdavidcs 415284982Sdavidcs ha->hw.user_pri_nic = 0; 416284982Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 417284982Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 418284982Sdavidcs OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic, 419284982Sdavidcs ha->hw.user_pri_nic, 420284982Sdavidcs "VLAN Tag User Priority for Normal Ethernet Packets"); 421284982Sdavidcs 422284982Sdavidcs ha->hw.user_pri_iscsi = 4; 423284982Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 424284982Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 425284982Sdavidcs OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi, 426284982Sdavidcs ha->hw.user_pri_iscsi, 427284982Sdavidcs "VLAN Tag User Priority for iSCSI Packets"); 428284982Sdavidcs 429250661Sdavidcs} 430250661Sdavidcs 431250661Sdavidcsvoid 
432250661Sdavidcsql_hw_link_status(qla_host_t *ha) 433250661Sdavidcs{ 434250661Sdavidcs device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui); 435250661Sdavidcs 436250661Sdavidcs if (ha->hw.link_up) { 437250661Sdavidcs device_printf(ha->pci_dev, "link Up\n"); 438250661Sdavidcs } else { 439250661Sdavidcs device_printf(ha->pci_dev, "link Down\n"); 440250661Sdavidcs } 441250661Sdavidcs 442250661Sdavidcs if (ha->hw.flags.fduplex) { 443250661Sdavidcs device_printf(ha->pci_dev, "Full Duplex\n"); 444250661Sdavidcs } else { 445250661Sdavidcs device_printf(ha->pci_dev, "Half Duplex\n"); 446250661Sdavidcs } 447250661Sdavidcs 448250661Sdavidcs if (ha->hw.flags.autoneg) { 449250661Sdavidcs device_printf(ha->pci_dev, "Auto Negotiation Enabled\n"); 450250661Sdavidcs } else { 451250661Sdavidcs device_printf(ha->pci_dev, "Auto Negotiation Disabled\n"); 452250661Sdavidcs } 453250661Sdavidcs 454250661Sdavidcs switch (ha->hw.link_speed) { 455250661Sdavidcs case 0x710: 456250661Sdavidcs device_printf(ha->pci_dev, "link speed\t\t 10Gps\n"); 457250661Sdavidcs break; 458250661Sdavidcs 459250661Sdavidcs case 0x3E8: 460250661Sdavidcs device_printf(ha->pci_dev, "link speed\t\t 1Gps\n"); 461250661Sdavidcs break; 462250661Sdavidcs 463250661Sdavidcs case 0x64: 464250661Sdavidcs device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n"); 465250661Sdavidcs break; 466250661Sdavidcs 467250661Sdavidcs default: 468250661Sdavidcs device_printf(ha->pci_dev, "link speed\t\t Unknown\n"); 469250661Sdavidcs break; 470250661Sdavidcs } 471250661Sdavidcs 472250661Sdavidcs switch (ha->hw.module_type) { 473250661Sdavidcs 474250661Sdavidcs case 0x01: 475250661Sdavidcs device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n"); 476250661Sdavidcs break; 477250661Sdavidcs 478250661Sdavidcs case 0x02: 479250661Sdavidcs device_printf(ha->pci_dev, "Module Type 10GBase-LR\n"); 480250661Sdavidcs break; 481250661Sdavidcs 482250661Sdavidcs case 0x03: 483250661Sdavidcs device_printf(ha->pci_dev, "Module Type 
10GBase-SR\n"); 484250661Sdavidcs break; 485250661Sdavidcs 486250661Sdavidcs case 0x04: 487250661Sdavidcs device_printf(ha->pci_dev, 488250661Sdavidcs "Module Type 10GE Passive Copper(Compliant)[%d m]\n", 489250661Sdavidcs ha->hw.cable_length); 490250661Sdavidcs break; 491250661Sdavidcs 492250661Sdavidcs case 0x05: 493250661Sdavidcs device_printf(ha->pci_dev, "Module Type 10GE Active" 494250661Sdavidcs " Limiting Copper(Compliant)[%d m]\n", 495250661Sdavidcs ha->hw.cable_length); 496250661Sdavidcs break; 497250661Sdavidcs 498250661Sdavidcs case 0x06: 499250661Sdavidcs device_printf(ha->pci_dev, 500250661Sdavidcs "Module Type 10GE Passive Copper" 501250661Sdavidcs " (Legacy, Best Effort)[%d m]\n", 502250661Sdavidcs ha->hw.cable_length); 503250661Sdavidcs break; 504250661Sdavidcs 505250661Sdavidcs case 0x07: 506250661Sdavidcs device_printf(ha->pci_dev, "Module Type 1000Base-SX\n"); 507250661Sdavidcs break; 508250661Sdavidcs 509250661Sdavidcs case 0x08: 510250661Sdavidcs device_printf(ha->pci_dev, "Module Type 1000Base-LX\n"); 511250661Sdavidcs break; 512250661Sdavidcs 513250661Sdavidcs case 0x09: 514250661Sdavidcs device_printf(ha->pci_dev, "Module Type 1000Base-CX\n"); 515250661Sdavidcs break; 516250661Sdavidcs 517250661Sdavidcs case 0x0A: 518250661Sdavidcs device_printf(ha->pci_dev, "Module Type 1000Base-T\n"); 519250661Sdavidcs break; 520250661Sdavidcs 521250661Sdavidcs case 0x0B: 522250661Sdavidcs device_printf(ha->pci_dev, "Module Type 1GE Passive Copper" 523250661Sdavidcs "(Legacy, Best Effort)\n"); 524250661Sdavidcs break; 525250661Sdavidcs 526250661Sdavidcs default: 527250661Sdavidcs device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n", 528250661Sdavidcs ha->hw.module_type); 529250661Sdavidcs break; 530250661Sdavidcs } 531250661Sdavidcs 532250661Sdavidcs if (ha->hw.link_faults == 1) 533250661Sdavidcs device_printf(ha->pci_dev, "SFP Power Fault\n"); 534250661Sdavidcs} 535250661Sdavidcs 536250661Sdavidcs/* 537250661Sdavidcs * Name: ql_free_dma 
538250661Sdavidcs * Function: Frees the DMA'able memory allocated in ql_alloc_dma() 539250661Sdavidcs */ 540250661Sdavidcsvoid 541250661Sdavidcsql_free_dma(qla_host_t *ha) 542250661Sdavidcs{ 543250661Sdavidcs uint32_t i; 544250661Sdavidcs 545250661Sdavidcs if (ha->hw.dma_buf.flags.sds_ring) { 546250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) { 547250661Sdavidcs ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]); 548250661Sdavidcs } 549250661Sdavidcs ha->hw.dma_buf.flags.sds_ring = 0; 550250661Sdavidcs } 551250661Sdavidcs 552250661Sdavidcs if (ha->hw.dma_buf.flags.rds_ring) { 553250661Sdavidcs for (i = 0; i < ha->hw.num_rds_rings; i++) { 554250661Sdavidcs ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]); 555250661Sdavidcs } 556250661Sdavidcs ha->hw.dma_buf.flags.rds_ring = 0; 557250661Sdavidcs } 558250661Sdavidcs 559250661Sdavidcs if (ha->hw.dma_buf.flags.tx_ring) { 560250661Sdavidcs ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring); 561250661Sdavidcs ha->hw.dma_buf.flags.tx_ring = 0; 562250661Sdavidcs } 563250661Sdavidcs qla_minidump_free(ha); 564250661Sdavidcs} 565250661Sdavidcs 566250661Sdavidcs/* 567250661Sdavidcs * Name: ql_alloc_dma 568250661Sdavidcs * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts. 
569250661Sdavidcs */ 570250661Sdavidcsint 571250661Sdavidcsql_alloc_dma(qla_host_t *ha) 572250661Sdavidcs{ 573250661Sdavidcs device_t dev; 574250661Sdavidcs uint32_t i, j, size, tx_ring_size; 575250661Sdavidcs qla_hw_t *hw; 576250661Sdavidcs qla_hw_tx_cntxt_t *tx_cntxt; 577250661Sdavidcs uint8_t *vaddr; 578250661Sdavidcs bus_addr_t paddr; 579250661Sdavidcs 580250661Sdavidcs dev = ha->pci_dev; 581250661Sdavidcs 582250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 583250661Sdavidcs 584250661Sdavidcs hw = &ha->hw; 585250661Sdavidcs /* 586250661Sdavidcs * Allocate Transmit Ring 587250661Sdavidcs */ 588250661Sdavidcs tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS); 589250661Sdavidcs size = (tx_ring_size * ha->hw.num_tx_rings); 590250661Sdavidcs 591250661Sdavidcs hw->dma_buf.tx_ring.alignment = 8; 592250661Sdavidcs hw->dma_buf.tx_ring.size = size + PAGE_SIZE; 593250661Sdavidcs 594250661Sdavidcs if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) { 595250661Sdavidcs device_printf(dev, "%s: tx ring alloc failed\n", __func__); 596250661Sdavidcs goto ql_alloc_dma_exit; 597250661Sdavidcs } 598250661Sdavidcs 599250661Sdavidcs vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b; 600250661Sdavidcs paddr = hw->dma_buf.tx_ring.dma_addr; 601250661Sdavidcs 602250661Sdavidcs for (i = 0; i < ha->hw.num_tx_rings; i++) { 603250661Sdavidcs tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i]; 604250661Sdavidcs 605250661Sdavidcs tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr; 606250661Sdavidcs tx_cntxt->tx_ring_paddr = paddr; 607250661Sdavidcs 608250661Sdavidcs vaddr += tx_ring_size; 609250661Sdavidcs paddr += tx_ring_size; 610250661Sdavidcs } 611250661Sdavidcs 612250661Sdavidcs for (i = 0; i < ha->hw.num_tx_rings; i++) { 613250661Sdavidcs tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i]; 614250661Sdavidcs 615250661Sdavidcs tx_cntxt->tx_cons = (uint32_t *)vaddr; 616250661Sdavidcs tx_cntxt->tx_cons_paddr = paddr; 617250661Sdavidcs 618250661Sdavidcs vaddr += sizeof (uint32_t); 
619250661Sdavidcs paddr += sizeof (uint32_t); 620250661Sdavidcs } 621250661Sdavidcs 622250661Sdavidcs ha->hw.dma_buf.flags.tx_ring = 1; 623250661Sdavidcs 624250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n", 625250661Sdavidcs __func__, (void *)(hw->dma_buf.tx_ring.dma_addr), 626250661Sdavidcs hw->dma_buf.tx_ring.dma_b)); 627250661Sdavidcs /* 628250661Sdavidcs * Allocate Receive Descriptor Rings 629250661Sdavidcs */ 630250661Sdavidcs 631250661Sdavidcs for (i = 0; i < hw->num_rds_rings; i++) { 632250661Sdavidcs 633250661Sdavidcs hw->dma_buf.rds_ring[i].alignment = 8; 634250661Sdavidcs hw->dma_buf.rds_ring[i].size = 635250661Sdavidcs (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS; 636250661Sdavidcs 637250661Sdavidcs if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) { 638250661Sdavidcs device_printf(dev, "%s: rds ring[%d] alloc failed\n", 639250661Sdavidcs __func__, i); 640250661Sdavidcs 641250661Sdavidcs for (j = 0; j < i; j++) 642250661Sdavidcs ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]); 643250661Sdavidcs 644250661Sdavidcs goto ql_alloc_dma_exit; 645250661Sdavidcs } 646250661Sdavidcs QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n", 647250661Sdavidcs __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr), 648250661Sdavidcs hw->dma_buf.rds_ring[i].dma_b)); 649250661Sdavidcs } 650250661Sdavidcs 651250661Sdavidcs hw->dma_buf.flags.rds_ring = 1; 652250661Sdavidcs 653250661Sdavidcs /* 654250661Sdavidcs * Allocate Status Descriptor Rings 655250661Sdavidcs */ 656250661Sdavidcs 657250661Sdavidcs for (i = 0; i < hw->num_sds_rings; i++) { 658250661Sdavidcs hw->dma_buf.sds_ring[i].alignment = 8; 659250661Sdavidcs hw->dma_buf.sds_ring[i].size = 660250661Sdavidcs (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS; 661250661Sdavidcs 662250661Sdavidcs if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) { 663250661Sdavidcs device_printf(dev, "%s: sds ring alloc failed\n", 664250661Sdavidcs __func__); 665250661Sdavidcs 666250661Sdavidcs for (j = 0; j < i; 
j++) 667250661Sdavidcs ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]); 668250661Sdavidcs 669250661Sdavidcs goto ql_alloc_dma_exit; 670250661Sdavidcs } 671250661Sdavidcs QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n", 672250661Sdavidcs __func__, i, 673250661Sdavidcs (void *)(hw->dma_buf.sds_ring[i].dma_addr), 674250661Sdavidcs hw->dma_buf.sds_ring[i].dma_b)); 675250661Sdavidcs } 676250661Sdavidcs for (i = 0; i < hw->num_sds_rings; i++) { 677250661Sdavidcs hw->sds[i].sds_ring_base = 678250661Sdavidcs (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b; 679250661Sdavidcs } 680250661Sdavidcs 681250661Sdavidcs hw->dma_buf.flags.sds_ring = 1; 682250661Sdavidcs 683250661Sdavidcs return 0; 684250661Sdavidcs 685250661Sdavidcsql_alloc_dma_exit: 686250661Sdavidcs ql_free_dma(ha); 687250661Sdavidcs return -1; 688250661Sdavidcs} 689250661Sdavidcs 690250661Sdavidcs#define Q8_MBX_MSEC_DELAY 5000 691250661Sdavidcs 692250661Sdavidcsstatic int 693250661Sdavidcsqla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, 694250661Sdavidcs uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause) 695250661Sdavidcs{ 696250661Sdavidcs uint32_t i; 697250661Sdavidcs uint32_t data; 698250661Sdavidcs int ret = 0; 699250661Sdavidcs 700250661Sdavidcs if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) { 701250661Sdavidcs ret = -3; 702250661Sdavidcs ha->qla_initiate_recovery = 1; 703250661Sdavidcs goto exit_qla_mbx_cmd; 704250661Sdavidcs } 705250661Sdavidcs 706250661Sdavidcs if (no_pause) 707250661Sdavidcs i = 1000; 708250661Sdavidcs else 709250661Sdavidcs i = Q8_MBX_MSEC_DELAY; 710250661Sdavidcs 711250661Sdavidcs while (i) { 712250661Sdavidcs data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL); 713250661Sdavidcs if (data == 0) 714250661Sdavidcs break; 715250661Sdavidcs if (no_pause) { 716250661Sdavidcs DELAY(1000); 717250661Sdavidcs } else { 718250661Sdavidcs qla_mdelay(__func__, 1); 719250661Sdavidcs } 720250661Sdavidcs i--; 721250661Sdavidcs } 722250661Sdavidcs 723250661Sdavidcs if (i == 0) { 
724250661Sdavidcs device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n", 725250661Sdavidcs __func__, data); 726250661Sdavidcs ret = -1; 727250661Sdavidcs ha->qla_initiate_recovery = 1; 728250661Sdavidcs goto exit_qla_mbx_cmd; 729250661Sdavidcs } 730250661Sdavidcs 731250661Sdavidcs for (i = 0; i < n_hmbox; i++) { 732250661Sdavidcs WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox); 733250661Sdavidcs h_mbox++; 734250661Sdavidcs } 735250661Sdavidcs 736250661Sdavidcs WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1); 737250661Sdavidcs 738250661Sdavidcs 739250661Sdavidcs i = Q8_MBX_MSEC_DELAY; 740250661Sdavidcs while (i) { 741250661Sdavidcs data = READ_REG32(ha, Q8_FW_MBOX_CNTRL); 742250661Sdavidcs 743250661Sdavidcs if ((data & 0x3) == 1) { 744250661Sdavidcs data = READ_REG32(ha, Q8_FW_MBOX0); 745250661Sdavidcs if ((data & 0xF000) != 0x8000) 746250661Sdavidcs break; 747250661Sdavidcs } 748250661Sdavidcs if (no_pause) { 749250661Sdavidcs DELAY(1000); 750250661Sdavidcs } else { 751250661Sdavidcs qla_mdelay(__func__, 1); 752250661Sdavidcs } 753250661Sdavidcs i--; 754250661Sdavidcs } 755250661Sdavidcs if (i == 0) { 756250661Sdavidcs device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n", 757250661Sdavidcs __func__, data); 758250661Sdavidcs ret = -2; 759250661Sdavidcs ha->qla_initiate_recovery = 1; 760250661Sdavidcs goto exit_qla_mbx_cmd; 761250661Sdavidcs } 762250661Sdavidcs 763250661Sdavidcs for (i = 0; i < n_fwmbox; i++) { 764250661Sdavidcs *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2))); 765250661Sdavidcs } 766250661Sdavidcs 767250661Sdavidcs WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0); 768250661Sdavidcs WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); 769250661Sdavidcs 770250661Sdavidcsexit_qla_mbx_cmd: 771250661Sdavidcs return (ret); 772250661Sdavidcs} 773250661Sdavidcs 774284982Sdavidcsint 775284982Sdavidcsqla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb, 776284982Sdavidcs uint32_t *num_rcvq) 777250661Sdavidcs{ 778250661Sdavidcs uint32_t *mbox, err; 
779250661Sdavidcs device_t dev = ha->pci_dev; 780250661Sdavidcs 781250661Sdavidcs bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX)); 782250661Sdavidcs 783250661Sdavidcs mbox = ha->hw.mbox; 784250661Sdavidcs 785250661Sdavidcs mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29); 786250661Sdavidcs 787250661Sdavidcs if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) { 788250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 789250661Sdavidcs return (-1); 790250661Sdavidcs } 791250661Sdavidcs err = mbox[0] >> 25; 792250661Sdavidcs 793284982Sdavidcs if (supports_9kb != NULL) { 794284982Sdavidcs if (mbox[16] & 0x80) /* bit 7 of mbox 16 */ 795284982Sdavidcs *supports_9kb = 1; 796284982Sdavidcs else 797284982Sdavidcs *supports_9kb = 0; 798284982Sdavidcs } 799284982Sdavidcs 800284982Sdavidcs if (num_rcvq != NULL) 801284982Sdavidcs *num_rcvq = ((mbox[6] >> 16) & 0xFFFF); 802284982Sdavidcs 803250661Sdavidcs if ((err != 1) && (err != 0)) { 804250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 805250661Sdavidcs return (-1); 806250661Sdavidcs } 807250661Sdavidcs return 0; 808250661Sdavidcs} 809250661Sdavidcs 810250661Sdavidcsstatic int 811284982Sdavidcsqla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs, 812284982Sdavidcs uint32_t create) 813250661Sdavidcs{ 814250661Sdavidcs uint32_t i, err; 815250661Sdavidcs device_t dev = ha->pci_dev; 816250661Sdavidcs q80_config_intr_t *c_intr; 817250661Sdavidcs q80_config_intr_rsp_t *c_intr_rsp; 818250661Sdavidcs 819250661Sdavidcs c_intr = (q80_config_intr_t *)ha->hw.mbox; 820250661Sdavidcs bzero(c_intr, (sizeof (q80_config_intr_t))); 821250661Sdavidcs 822250661Sdavidcs c_intr->opcode = Q8_MBX_CONFIG_INTR; 823250661Sdavidcs 824250661Sdavidcs c_intr->count_version = (sizeof (q80_config_intr_t) >> 2); 825250661Sdavidcs c_intr->count_version |= Q8_MBX_CMD_VERSION; 826250661Sdavidcs 827250661Sdavidcs c_intr->nentries = num_intrs; 828250661Sdavidcs 829250661Sdavidcs for (i = 0; i < 
num_intrs; i++) { 830250661Sdavidcs if (create) { 831250661Sdavidcs c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE; 832284982Sdavidcs c_intr->intr[i].msix_index = start_idx + 1 + i; 833250661Sdavidcs } else { 834250661Sdavidcs c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE; 835284982Sdavidcs c_intr->intr[i].msix_index = 836284982Sdavidcs ha->hw.intr_id[(start_idx + i)]; 837250661Sdavidcs } 838250661Sdavidcs 839250661Sdavidcs c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X; 840250661Sdavidcs } 841250661Sdavidcs 842250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)c_intr, 843250661Sdavidcs (sizeof (q80_config_intr_t) >> 2), 844250661Sdavidcs ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) { 845250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 846250661Sdavidcs return (-1); 847250661Sdavidcs } 848250661Sdavidcs 849250661Sdavidcs c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox; 850250661Sdavidcs 851250661Sdavidcs err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status); 852250661Sdavidcs 853250661Sdavidcs if (err) { 854250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err, 855250661Sdavidcs c_intr_rsp->nentries); 856250661Sdavidcs 857250661Sdavidcs for (i = 0; i < c_intr_rsp->nentries; i++) { 858250661Sdavidcs device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n", 859250661Sdavidcs __func__, i, 860250661Sdavidcs c_intr_rsp->intr[i].status, 861250661Sdavidcs c_intr_rsp->intr[i].intr_id, 862250661Sdavidcs c_intr_rsp->intr[i].intr_src); 863250661Sdavidcs } 864250661Sdavidcs 865250661Sdavidcs return (-1); 866250661Sdavidcs } 867250661Sdavidcs 868250661Sdavidcs for (i = 0; ((i < num_intrs) && create); i++) { 869250661Sdavidcs if (!c_intr_rsp->intr[i].status) { 870284982Sdavidcs ha->hw.intr_id[(start_idx + i)] = 871284982Sdavidcs c_intr_rsp->intr[i].intr_id; 872284982Sdavidcs ha->hw.intr_src[(start_idx + i)] = 873284982Sdavidcs c_intr_rsp->intr[i].intr_src; 874250661Sdavidcs } 875250661Sdavidcs } 876250661Sdavidcs 
877250661Sdavidcs return (0); 878250661Sdavidcs} 879250661Sdavidcs 880250661Sdavidcs/* 881250661Sdavidcs * Name: qla_config_rss 882250661Sdavidcs * Function: Configure RSS for the context/interface. 883250661Sdavidcs */ 884250661Sdavidcsstatic const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL, 885250661Sdavidcs 0x8030f20c77cb2da3ULL, 886250661Sdavidcs 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 887250661Sdavidcs 0x255b0ec26d5a56daULL }; 888250661Sdavidcs 889250661Sdavidcsstatic int 890250661Sdavidcsqla_config_rss(qla_host_t *ha, uint16_t cntxt_id) 891250661Sdavidcs{ 892250661Sdavidcs q80_config_rss_t *c_rss; 893250661Sdavidcs q80_config_rss_rsp_t *c_rss_rsp; 894250661Sdavidcs uint32_t err, i; 895250661Sdavidcs device_t dev = ha->pci_dev; 896250661Sdavidcs 897250661Sdavidcs c_rss = (q80_config_rss_t *)ha->hw.mbox; 898250661Sdavidcs bzero(c_rss, (sizeof (q80_config_rss_t))); 899250661Sdavidcs 900250661Sdavidcs c_rss->opcode = Q8_MBX_CONFIG_RSS; 901250661Sdavidcs 902250661Sdavidcs c_rss->count_version = (sizeof (q80_config_rss_t) >> 2); 903250661Sdavidcs c_rss->count_version |= Q8_MBX_CMD_VERSION; 904250661Sdavidcs 905250661Sdavidcs c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP | 906250661Sdavidcs Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP); 907284982Sdavidcs //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP | 908284982Sdavidcs // Q8_MBX_RSS_HASH_TYPE_IPV6_TCP); 909250661Sdavidcs 910250661Sdavidcs c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS; 911250661Sdavidcs c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE; 912250661Sdavidcs 913250661Sdavidcs c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK; 914250661Sdavidcs 915250661Sdavidcs c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID; 916250661Sdavidcs c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS; 917250661Sdavidcs 918250661Sdavidcs c_rss->cntxt_id = cntxt_id; 919250661Sdavidcs 920250661Sdavidcs for (i = 0; i < 5; i++) { 921250661Sdavidcs c_rss->rss_key[i] = rss_key[i]; 922250661Sdavidcs } 923250661Sdavidcs 924250661Sdavidcs if 
(qla_mbx_cmd(ha, (uint32_t *)c_rss, 925250661Sdavidcs (sizeof (q80_config_rss_t) >> 2), 926250661Sdavidcs ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) { 927250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 928250661Sdavidcs return (-1); 929250661Sdavidcs } 930250661Sdavidcs c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox; 931250661Sdavidcs 932250661Sdavidcs err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status); 933250661Sdavidcs 934250661Sdavidcs if (err) { 935250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 936250661Sdavidcs return (-1); 937250661Sdavidcs } 938250661Sdavidcs return 0; 939250661Sdavidcs} 940250661Sdavidcs 941250661Sdavidcsstatic int 942250661Sdavidcsqla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count, 943250661Sdavidcs uint16_t cntxt_id, uint8_t *ind_table) 944250661Sdavidcs{ 945250661Sdavidcs q80_config_rss_ind_table_t *c_rss_ind; 946250661Sdavidcs q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp; 947250661Sdavidcs uint32_t err; 948250661Sdavidcs device_t dev = ha->pci_dev; 949250661Sdavidcs 950250661Sdavidcs if ((count > Q8_RSS_IND_TBL_SIZE) || 951250661Sdavidcs ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) { 952250661Sdavidcs device_printf(dev, "%s: illegal count [%d, %d]\n", __func__, 953250661Sdavidcs start_idx, count); 954250661Sdavidcs return (-1); 955250661Sdavidcs } 956250661Sdavidcs 957250661Sdavidcs c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox; 958250661Sdavidcs bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t)); 959250661Sdavidcs 960250661Sdavidcs c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE; 961250661Sdavidcs c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2); 962250661Sdavidcs c_rss_ind->count_version |= Q8_MBX_CMD_VERSION; 963250661Sdavidcs 964250661Sdavidcs c_rss_ind->start_idx = start_idx; 965250661Sdavidcs c_rss_ind->end_idx = start_idx + count - 1; 966250661Sdavidcs c_rss_ind->cntxt_id = cntxt_id; 967250661Sdavidcs bcopy(ind_table, 
c_rss_ind->ind_table, count); 968250661Sdavidcs 969250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind, 970250661Sdavidcs (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox, 971250661Sdavidcs (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) { 972250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 973250661Sdavidcs return (-1); 974250661Sdavidcs } 975250661Sdavidcs 976250661Sdavidcs c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox; 977250661Sdavidcs err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status); 978250661Sdavidcs 979250661Sdavidcs if (err) { 980250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 981250661Sdavidcs return (-1); 982250661Sdavidcs } 983250661Sdavidcs return 0; 984250661Sdavidcs} 985250661Sdavidcs 986250661Sdavidcs/* 987250661Sdavidcs * Name: qla_config_intr_coalesce 988250661Sdavidcs * Function: Configure Interrupt Coalescing. 989250661Sdavidcs */ 990250661Sdavidcsstatic int 991284982Sdavidcsqla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable, 992284982Sdavidcs int rcv) 993250661Sdavidcs{ 994250661Sdavidcs q80_config_intr_coalesc_t *intrc; 995250661Sdavidcs q80_config_intr_coalesc_rsp_t *intrc_rsp; 996250661Sdavidcs uint32_t err, i; 997250661Sdavidcs device_t dev = ha->pci_dev; 998250661Sdavidcs 999250661Sdavidcs intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox; 1000250661Sdavidcs bzero(intrc, (sizeof (q80_config_intr_coalesc_t))); 1001250661Sdavidcs 1002250661Sdavidcs intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE; 1003250661Sdavidcs intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2); 1004250661Sdavidcs intrc->count_version |= Q8_MBX_CMD_VERSION; 1005250661Sdavidcs 1006284982Sdavidcs if (rcv) { 1007284982Sdavidcs intrc->flags = Q8_MBX_INTRC_FLAGS_RCV; 1008284982Sdavidcs intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF; 1009284982Sdavidcs intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF; 1010284982Sdavidcs } else { 1011284982Sdavidcs 
intrc->flags = Q8_MBX_INTRC_FLAGS_XMT; 1012284982Sdavidcs intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF; 1013284982Sdavidcs intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF; 1014284982Sdavidcs } 1015284982Sdavidcs 1016250661Sdavidcs intrc->cntxt_id = cntxt_id; 1017250661Sdavidcs 1018250661Sdavidcs if (tenable) { 1019250661Sdavidcs intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC; 1020250661Sdavidcs intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC; 1021250661Sdavidcs 1022250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) { 1023250661Sdavidcs intrc->sds_ring_mask |= (1 << i); 1024250661Sdavidcs } 1025250661Sdavidcs intrc->ms_timeout = 1000; 1026250661Sdavidcs } 1027250661Sdavidcs 1028250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)intrc, 1029250661Sdavidcs (sizeof (q80_config_intr_coalesc_t) >> 2), 1030250661Sdavidcs ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) { 1031250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 1032250661Sdavidcs return (-1); 1033250661Sdavidcs } 1034250661Sdavidcs intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox; 1035250661Sdavidcs 1036250661Sdavidcs err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status); 1037250661Sdavidcs 1038250661Sdavidcs if (err) { 1039250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 1040250661Sdavidcs return (-1); 1041250661Sdavidcs } 1042250661Sdavidcs 1043250661Sdavidcs return 0; 1044250661Sdavidcs} 1045250661Sdavidcs 1046250661Sdavidcs 1047250661Sdavidcs/* 1048250661Sdavidcs * Name: qla_config_mac_addr 1049250661Sdavidcs * Function: binds a MAC address to the context/interface. 1050250661Sdavidcs * Can be unicast, multicast or broadcast. 
1051250661Sdavidcs */ 1052250661Sdavidcsstatic int 1053250661Sdavidcsqla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 1054250661Sdavidcs{ 1055250661Sdavidcs q80_config_mac_addr_t *cmac; 1056250661Sdavidcs q80_config_mac_addr_rsp_t *cmac_rsp; 1057250661Sdavidcs uint32_t err; 1058250661Sdavidcs device_t dev = ha->pci_dev; 1059250661Sdavidcs 1060250661Sdavidcs cmac = (q80_config_mac_addr_t *)ha->hw.mbox; 1061250661Sdavidcs bzero(cmac, (sizeof (q80_config_mac_addr_t))); 1062250661Sdavidcs 1063250661Sdavidcs cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR; 1064250661Sdavidcs cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2; 1065250661Sdavidcs cmac->count_version |= Q8_MBX_CMD_VERSION; 1066250661Sdavidcs 1067250661Sdavidcs if (add_mac) 1068250661Sdavidcs cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR; 1069250661Sdavidcs else 1070250661Sdavidcs cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR; 1071250661Sdavidcs 1072250661Sdavidcs cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS; 1073250661Sdavidcs 1074250661Sdavidcs cmac->nmac_entries = 1; 1075250661Sdavidcs cmac->cntxt_id = ha->hw.rcv_cntxt_id; 1076250661Sdavidcs bcopy(mac_addr, cmac->mac_addr[0].addr, 6); 1077250661Sdavidcs 1078250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)cmac, 1079250661Sdavidcs (sizeof (q80_config_mac_addr_t) >> 2), 1080250661Sdavidcs ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) { 1081250661Sdavidcs device_printf(dev, "%s: %s failed0\n", __func__, 1082250661Sdavidcs (add_mac ? "Add" : "Del")); 1083250661Sdavidcs return (-1); 1084250661Sdavidcs } 1085250661Sdavidcs cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox; 1086250661Sdavidcs 1087250661Sdavidcs err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status); 1088250661Sdavidcs 1089250661Sdavidcs if (err) { 1090250661Sdavidcs device_printf(dev, "%s: %s " 1091250661Sdavidcs "%02x:%02x:%02x:%02x:%02x:%02x failed1 [0x%08x]\n", 1092250661Sdavidcs __func__, (add_mac ? 
"Add" : "Del"), 1093250661Sdavidcs mac_addr[0], mac_addr[1], mac_addr[2], 1094250661Sdavidcs mac_addr[3], mac_addr[4], mac_addr[5], err); 1095250661Sdavidcs return (-1); 1096250661Sdavidcs } 1097250661Sdavidcs 1098250661Sdavidcs return 0; 1099250661Sdavidcs} 1100250661Sdavidcs 1101250661Sdavidcs 1102250661Sdavidcs/* 1103250661Sdavidcs * Name: qla_set_mac_rcv_mode 1104250661Sdavidcs * Function: Enable/Disable AllMulticast and Promiscous Modes. 1105250661Sdavidcs */ 1106250661Sdavidcsstatic int 1107250661Sdavidcsqla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode) 1108250661Sdavidcs{ 1109250661Sdavidcs q80_config_mac_rcv_mode_t *rcv_mode; 1110250661Sdavidcs uint32_t err; 1111250661Sdavidcs q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp; 1112250661Sdavidcs device_t dev = ha->pci_dev; 1113250661Sdavidcs 1114250661Sdavidcs rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox; 1115250661Sdavidcs bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t))); 1116250661Sdavidcs 1117250661Sdavidcs rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE; 1118250661Sdavidcs rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2; 1119250661Sdavidcs rcv_mode->count_version |= Q8_MBX_CMD_VERSION; 1120250661Sdavidcs 1121250661Sdavidcs rcv_mode->mode = mode; 1122250661Sdavidcs 1123250661Sdavidcs rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id; 1124250661Sdavidcs 1125250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode, 1126250661Sdavidcs (sizeof (q80_config_mac_rcv_mode_t) >> 2), 1127250661Sdavidcs ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) { 1128250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 1129250661Sdavidcs return (-1); 1130250661Sdavidcs } 1131250661Sdavidcs rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox; 1132250661Sdavidcs 1133250661Sdavidcs err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status); 1134250661Sdavidcs 1135250661Sdavidcs if (err) { 1136250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 1137250661Sdavidcs 
return (-1); 1138250661Sdavidcs } 1139250661Sdavidcs 1140250661Sdavidcs return 0; 1141250661Sdavidcs} 1142250661Sdavidcs 1143250661Sdavidcsint 1144250661Sdavidcsql_set_promisc(qla_host_t *ha) 1145250661Sdavidcs{ 1146250661Sdavidcs int ret; 1147250661Sdavidcs 1148250661Sdavidcs ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE; 1149250661Sdavidcs ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1150250661Sdavidcs return (ret); 1151250661Sdavidcs} 1152250661Sdavidcs 1153284982Sdavidcsvoid 1154284982Sdavidcsqla_reset_promisc(qla_host_t *ha) 1155284982Sdavidcs{ 1156284982Sdavidcs ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE; 1157284982Sdavidcs (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1158284982Sdavidcs} 1159284982Sdavidcs 1160250661Sdavidcsint 1161250661Sdavidcsql_set_allmulti(qla_host_t *ha) 1162250661Sdavidcs{ 1163250661Sdavidcs int ret; 1164250661Sdavidcs 1165250661Sdavidcs ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE; 1166250661Sdavidcs ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1167250661Sdavidcs return (ret); 1168250661Sdavidcs} 1169250661Sdavidcs 1170284982Sdavidcsvoid 1171284982Sdavidcsqla_reset_allmulti(qla_host_t *ha) 1172284982Sdavidcs{ 1173284982Sdavidcs ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE; 1174284982Sdavidcs (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode); 1175284982Sdavidcs} 1176250661Sdavidcs 1177250661Sdavidcs/* 1178250661Sdavidcs * Name: ql_set_max_mtu 1179250661Sdavidcs * Function: 1180250661Sdavidcs * Sets the maximum transfer unit size for the specified rcv context. 
1181250661Sdavidcs */ 1182250661Sdavidcsint 1183250661Sdavidcsql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id) 1184250661Sdavidcs{ 1185250661Sdavidcs device_t dev; 1186250661Sdavidcs q80_set_max_mtu_t *max_mtu; 1187250661Sdavidcs q80_set_max_mtu_rsp_t *max_mtu_rsp; 1188250661Sdavidcs uint32_t err; 1189250661Sdavidcs 1190250661Sdavidcs dev = ha->pci_dev; 1191250661Sdavidcs 1192250661Sdavidcs max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox; 1193250661Sdavidcs bzero(max_mtu, (sizeof (q80_set_max_mtu_t))); 1194250661Sdavidcs 1195250661Sdavidcs max_mtu->opcode = Q8_MBX_SET_MAX_MTU; 1196250661Sdavidcs max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2); 1197250661Sdavidcs max_mtu->count_version |= Q8_MBX_CMD_VERSION; 1198250661Sdavidcs 1199250661Sdavidcs max_mtu->cntxt_id = cntxt_id; 1200250661Sdavidcs max_mtu->mtu = mtu; 1201250661Sdavidcs 1202250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)max_mtu, 1203250661Sdavidcs (sizeof (q80_set_max_mtu_t) >> 2), 1204250661Sdavidcs ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) { 1205250661Sdavidcs device_printf(dev, "%s: failed\n", __func__); 1206250661Sdavidcs return -1; 1207250661Sdavidcs } 1208250661Sdavidcs 1209250661Sdavidcs max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox; 1210250661Sdavidcs 1211250661Sdavidcs err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status); 1212250661Sdavidcs 1213250661Sdavidcs if (err) { 1214250661Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1215250661Sdavidcs } 1216250661Sdavidcs 1217250661Sdavidcs return 0; 1218250661Sdavidcs} 1219250661Sdavidcs 1220250661Sdavidcsstatic int 1221250661Sdavidcsqla_link_event_req(qla_host_t *ha, uint16_t cntxt_id) 1222250661Sdavidcs{ 1223250661Sdavidcs device_t dev; 1224250661Sdavidcs q80_link_event_t *lnk; 1225250661Sdavidcs q80_link_event_rsp_t *lnk_rsp; 1226250661Sdavidcs uint32_t err; 1227250661Sdavidcs 1228250661Sdavidcs dev = ha->pci_dev; 1229250661Sdavidcs 1230250661Sdavidcs lnk = (q80_link_event_t 
*)ha->hw.mbox; 1231250661Sdavidcs bzero(lnk, (sizeof (q80_link_event_t))); 1232250661Sdavidcs 1233250661Sdavidcs lnk->opcode = Q8_MBX_LINK_EVENT_REQ; 1234250661Sdavidcs lnk->count_version = (sizeof (q80_link_event_t) >> 2); 1235250661Sdavidcs lnk->count_version |= Q8_MBX_CMD_VERSION; 1236250661Sdavidcs 1237250661Sdavidcs lnk->cntxt_id = cntxt_id; 1238250661Sdavidcs lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC; 1239250661Sdavidcs 1240250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2), 1241250661Sdavidcs ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) { 1242250661Sdavidcs device_printf(dev, "%s: failed\n", __func__); 1243250661Sdavidcs return -1; 1244250661Sdavidcs } 1245250661Sdavidcs 1246250661Sdavidcs lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox; 1247250661Sdavidcs 1248250661Sdavidcs err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status); 1249250661Sdavidcs 1250250661Sdavidcs if (err) { 1251250661Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1252250661Sdavidcs } 1253250661Sdavidcs 1254250661Sdavidcs return 0; 1255250661Sdavidcs} 1256250661Sdavidcs 1257250661Sdavidcsstatic int 1258250661Sdavidcsqla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id) 1259250661Sdavidcs{ 1260250661Sdavidcs device_t dev; 1261250661Sdavidcs q80_config_fw_lro_t *fw_lro; 1262250661Sdavidcs q80_config_fw_lro_rsp_t *fw_lro_rsp; 1263250661Sdavidcs uint32_t err; 1264250661Sdavidcs 1265250661Sdavidcs dev = ha->pci_dev; 1266250661Sdavidcs 1267250661Sdavidcs fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox; 1268250661Sdavidcs bzero(fw_lro, sizeof(q80_config_fw_lro_t)); 1269250661Sdavidcs 1270250661Sdavidcs fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO; 1271250661Sdavidcs fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2); 1272250661Sdavidcs fw_lro->count_version |= Q8_MBX_CMD_VERSION; 1273250661Sdavidcs 1274250661Sdavidcs fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK; 1275284982Sdavidcs fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 
| Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK; 1276250661Sdavidcs 1277250661Sdavidcs fw_lro->cntxt_id = cntxt_id; 1278250661Sdavidcs 1279250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)fw_lro, 1280250661Sdavidcs (sizeof (q80_config_fw_lro_t) >> 2), 1281250661Sdavidcs ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) { 1282250661Sdavidcs device_printf(dev, "%s: failed\n", __func__); 1283250661Sdavidcs return -1; 1284250661Sdavidcs } 1285250661Sdavidcs 1286250661Sdavidcs fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox; 1287250661Sdavidcs 1288250661Sdavidcs err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status); 1289250661Sdavidcs 1290250661Sdavidcs if (err) { 1291250661Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 1292250661Sdavidcs } 1293250661Sdavidcs 1294250661Sdavidcs return 0; 1295250661Sdavidcs} 1296250661Sdavidcs 1297250661Sdavidcsstatic void 1298284982Sdavidcsqla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i) 1299250661Sdavidcs{ 1300250661Sdavidcs device_t dev = ha->pci_dev; 1301250661Sdavidcs 1302284982Sdavidcs if (i < ha->hw.num_tx_rings) { 1303284982Sdavidcs device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n", 1304284982Sdavidcs __func__, i, xstat->total_bytes); 1305284982Sdavidcs device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n", 1306284982Sdavidcs __func__, i, xstat->total_pkts); 1307284982Sdavidcs device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n", 1308284982Sdavidcs __func__, i, xstat->errors); 1309284982Sdavidcs device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n", 1310284982Sdavidcs __func__, i, xstat->pkts_dropped); 1311284982Sdavidcs device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n", 1312284982Sdavidcs __func__, i, xstat->switch_pkts); 1313284982Sdavidcs device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n", 1314284982Sdavidcs __func__, i, xstat->num_buffers); 1315284982Sdavidcs } else { 1316284982Sdavidcs device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", 1317284982Sdavidcs 
__func__, xstat->total_bytes); 1318284982Sdavidcs device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", 1319284982Sdavidcs __func__, xstat->total_pkts); 1320284982Sdavidcs device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n", 1321284982Sdavidcs __func__, xstat->errors); 1322284982Sdavidcs device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n", 1323284982Sdavidcs __func__, xstat->pkts_dropped); 1324284982Sdavidcs device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n", 1325284982Sdavidcs __func__, xstat->switch_pkts); 1326284982Sdavidcs device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n", 1327284982Sdavidcs __func__, xstat->num_buffers); 1328284982Sdavidcs } 1329250661Sdavidcs} 1330250661Sdavidcs 1331250661Sdavidcsstatic void 1332250661Sdavidcsqla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat) 1333250661Sdavidcs{ 1334250661Sdavidcs device_t dev = ha->pci_dev; 1335250661Sdavidcs 1336250661Sdavidcs device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__, 1337250661Sdavidcs rstat->total_bytes); 1338250661Sdavidcs device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__, 1339250661Sdavidcs rstat->total_pkts); 1340250661Sdavidcs device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__, 1341250661Sdavidcs rstat->lro_pkt_count); 1342284982Sdavidcs device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__, 1343250661Sdavidcs rstat->sw_pkt_count); 1344250661Sdavidcs device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__, 1345250661Sdavidcs rstat->ip_chksum_err); 1346250661Sdavidcs device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__, 1347250661Sdavidcs rstat->pkts_wo_acntxts); 1348250661Sdavidcs device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n", 1349250661Sdavidcs __func__, rstat->pkts_dropped_no_sds_card); 1350250661Sdavidcs device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n", 1351250661Sdavidcs __func__, rstat->pkts_dropped_no_sds_host); 1352250661Sdavidcs device_printf(dev, 
"%s: oversized_pkts\t\t%" PRIu64 "\n", __func__, 1353250661Sdavidcs rstat->oversized_pkts); 1354250661Sdavidcs device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n", 1355250661Sdavidcs __func__, rstat->pkts_dropped_no_rds); 1356250661Sdavidcs device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n", 1357250661Sdavidcs __func__, rstat->unxpctd_mcast_pkts); 1358250661Sdavidcs device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__, 1359250661Sdavidcs rstat->re1_fbq_error); 1360250661Sdavidcs device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__, 1361250661Sdavidcs rstat->invalid_mac_addr); 1362250661Sdavidcs device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__, 1363250661Sdavidcs rstat->rds_prime_trys); 1364250661Sdavidcs device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__, 1365250661Sdavidcs rstat->rds_prime_success); 1366250661Sdavidcs device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__, 1367250661Sdavidcs rstat->lro_flows_added); 1368250661Sdavidcs device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__, 1369250661Sdavidcs rstat->lro_flows_deleted); 1370250661Sdavidcs device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__, 1371250661Sdavidcs rstat->lro_flows_active); 1372250661Sdavidcs device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n", 1373250661Sdavidcs __func__, rstat->pkts_droped_unknown); 1374250661Sdavidcs} 1375250661Sdavidcs 1376250661Sdavidcsstatic void 1377250661Sdavidcsqla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat) 1378250661Sdavidcs{ 1379250661Sdavidcs device_t dev = ha->pci_dev; 1380250661Sdavidcs 1381250661Sdavidcs device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__, 1382250661Sdavidcs mstat->xmt_frames); 1383250661Sdavidcs device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__, 1384250661Sdavidcs mstat->xmt_bytes); 1385250661Sdavidcs device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__, 
1386250661Sdavidcs mstat->xmt_mcast_pkts); 1387250661Sdavidcs device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__, 1388250661Sdavidcs mstat->xmt_bcast_pkts); 1389250661Sdavidcs device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__, 1390250661Sdavidcs mstat->xmt_pause_frames); 1391250661Sdavidcs device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__, 1392250661Sdavidcs mstat->xmt_cntrl_pkts); 1393250661Sdavidcs device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n", 1394250661Sdavidcs __func__, mstat->xmt_pkt_lt_64bytes); 1395250661Sdavidcs device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n", 1396250661Sdavidcs __func__, mstat->xmt_pkt_lt_127bytes); 1397250661Sdavidcs device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n", 1398250661Sdavidcs __func__, mstat->xmt_pkt_lt_255bytes); 1399250661Sdavidcs device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n", 1400250661Sdavidcs __func__, mstat->xmt_pkt_lt_511bytes); 1401284982Sdavidcs device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n", 1402250661Sdavidcs __func__, mstat->xmt_pkt_lt_1023bytes); 1403284982Sdavidcs device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n", 1404250661Sdavidcs __func__, mstat->xmt_pkt_lt_1518bytes); 1405284982Sdavidcs device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n", 1406250661Sdavidcs __func__, mstat->xmt_pkt_gt_1518bytes); 1407250661Sdavidcs 1408250661Sdavidcs device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__, 1409250661Sdavidcs mstat->rcv_frames); 1410250661Sdavidcs device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__, 1411250661Sdavidcs mstat->rcv_bytes); 1412250661Sdavidcs device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__, 1413250661Sdavidcs mstat->rcv_mcast_pkts); 1414250661Sdavidcs device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__, 1415250661Sdavidcs mstat->rcv_bcast_pkts); 1416250661Sdavidcs device_printf(dev, "%s: 
rcv_pause_frames\t\t%" PRIu64 "\n", __func__, 1417250661Sdavidcs mstat->rcv_pause_frames); 1418250661Sdavidcs device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__, 1419250661Sdavidcs mstat->rcv_cntrl_pkts); 1420250661Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n", 1421250661Sdavidcs __func__, mstat->rcv_pkt_lt_64bytes); 1422250661Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n", 1423250661Sdavidcs __func__, mstat->rcv_pkt_lt_127bytes); 1424250661Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n", 1425250661Sdavidcs __func__, mstat->rcv_pkt_lt_255bytes); 1426250661Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n", 1427250661Sdavidcs __func__, mstat->rcv_pkt_lt_511bytes); 1428284982Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n", 1429250661Sdavidcs __func__, mstat->rcv_pkt_lt_1023bytes); 1430284982Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n", 1431250661Sdavidcs __func__, mstat->rcv_pkt_lt_1518bytes); 1432284982Sdavidcs device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n", 1433250661Sdavidcs __func__, mstat->rcv_pkt_gt_1518bytes); 1434250661Sdavidcs 1435250661Sdavidcs device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__, 1436250661Sdavidcs mstat->rcv_len_error); 1437250661Sdavidcs device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__, 1438250661Sdavidcs mstat->rcv_len_small); 1439250661Sdavidcs device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__, 1440250661Sdavidcs mstat->rcv_len_large); 1441250661Sdavidcs device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__, 1442250661Sdavidcs mstat->rcv_jabber); 1443250661Sdavidcs device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__, 1444250661Sdavidcs mstat->rcv_dropped); 1445250661Sdavidcs device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__, 1446250661Sdavidcs mstat->fcs_error); 1447250661Sdavidcs 
device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__, 1448250661Sdavidcs mstat->align_error); 1449250661Sdavidcs} 1450250661Sdavidcs 1451250661Sdavidcs 1452250661Sdavidcsstatic int 1453284982Sdavidcsqla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size) 1454250661Sdavidcs{ 1455250661Sdavidcs device_t dev; 1456250661Sdavidcs q80_get_stats_t *stat; 1457250661Sdavidcs q80_get_stats_rsp_t *stat_rsp; 1458250661Sdavidcs uint32_t err; 1459250661Sdavidcs 1460250661Sdavidcs dev = ha->pci_dev; 1461250661Sdavidcs 1462250661Sdavidcs stat = (q80_get_stats_t *)ha->hw.mbox; 1463250661Sdavidcs bzero(stat, (sizeof (q80_get_stats_t))); 1464250661Sdavidcs 1465250661Sdavidcs stat->opcode = Q8_MBX_GET_STATS; 1466250661Sdavidcs stat->count_version = 2; 1467250661Sdavidcs stat->count_version |= Q8_MBX_CMD_VERSION; 1468250661Sdavidcs 1469250661Sdavidcs stat->cmd = cmd; 1470250661Sdavidcs 1471250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)stat, 2, 1472284982Sdavidcs ha->hw.mbox, (rsp_size >> 2), 0)) { 1473250661Sdavidcs device_printf(dev, "%s: failed\n", __func__); 1474250661Sdavidcs return -1; 1475250661Sdavidcs } 1476250661Sdavidcs 1477250661Sdavidcs stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; 1478250661Sdavidcs 1479250661Sdavidcs err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status); 1480250661Sdavidcs 1481250661Sdavidcs if (err) { 1482250661Sdavidcs return -1; 1483250661Sdavidcs } 1484250661Sdavidcs 1485250661Sdavidcs return 0; 1486250661Sdavidcs} 1487250661Sdavidcs 1488250661Sdavidcsvoid 1489250661Sdavidcsql_get_stats(qla_host_t *ha) 1490250661Sdavidcs{ 1491250661Sdavidcs q80_get_stats_rsp_t *stat_rsp; 1492250661Sdavidcs q80_mac_stats_t *mstat; 1493250661Sdavidcs q80_xmt_stats_t *xstat; 1494250661Sdavidcs q80_rcv_stats_t *rstat; 1495250661Sdavidcs uint32_t cmd; 1496284982Sdavidcs int i; 1497250661Sdavidcs 1498250661Sdavidcs stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; 1499250661Sdavidcs /* 1500250661Sdavidcs * Get MAC Statistics 1501250661Sdavidcs */ 
1502250661Sdavidcs cmd = Q8_GET_STATS_CMD_TYPE_MAC; 1503284982Sdavidcs// cmd |= Q8_GET_STATS_CMD_CLEAR; 1504250661Sdavidcs 1505250661Sdavidcs cmd |= ((ha->pci_func & 0x1) << 16); 1506250661Sdavidcs 1507284982Sdavidcs if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { 1508250661Sdavidcs mstat = (q80_mac_stats_t *)&stat_rsp->u.mac; 1509250661Sdavidcs qla_mac_stats(ha, mstat); 1510250661Sdavidcs } else { 1511250661Sdavidcs device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n", 1512250661Sdavidcs __func__, ha->hw.mbox[0]); 1513250661Sdavidcs } 1514250661Sdavidcs /* 1515250661Sdavidcs * Get RCV Statistics 1516250661Sdavidcs */ 1517250661Sdavidcs cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT; 1518284982Sdavidcs// cmd |= Q8_GET_STATS_CMD_CLEAR; 1519250661Sdavidcs cmd |= (ha->hw.rcv_cntxt_id << 16); 1520250661Sdavidcs 1521284982Sdavidcs if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { 1522250661Sdavidcs rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv; 1523250661Sdavidcs qla_rcv_stats(ha, rstat); 1524250661Sdavidcs } else { 1525250661Sdavidcs device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n", 1526250661Sdavidcs __func__, ha->hw.mbox[0]); 1527250661Sdavidcs } 1528250661Sdavidcs /* 1529250661Sdavidcs * Get XMT Statistics 1530250661Sdavidcs */ 1531284982Sdavidcs for (i = 0 ; i < ha->hw.num_tx_rings; i++) { 1532284982Sdavidcs cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT; 1533284982Sdavidcs// cmd |= Q8_GET_STATS_CMD_CLEAR; 1534284982Sdavidcs cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16); 1535250661Sdavidcs 1536284982Sdavidcs if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t)) 1537284982Sdavidcs == 0) { 1538284982Sdavidcs xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt; 1539284982Sdavidcs qla_xmt_stats(ha, xstat, i); 1540284982Sdavidcs } else { 1541284982Sdavidcs device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n", 1542284982Sdavidcs __func__, ha->hw.mbox[0]); 1543284982Sdavidcs } 1544284982Sdavidcs } 
1545284982Sdavidcs return; 1546284982Sdavidcs} 1547250661Sdavidcs 1548284982Sdavidcsstatic void 1549284982Sdavidcsqla_get_quick_stats(qla_host_t *ha) 1550284982Sdavidcs{ 1551284982Sdavidcs q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp; 1552284982Sdavidcs q80_mac_stats_t *mstat; 1553284982Sdavidcs q80_xmt_stats_t *xstat; 1554284982Sdavidcs q80_rcv_stats_t *rstat; 1555284982Sdavidcs uint32_t cmd; 1556284982Sdavidcs 1557284982Sdavidcs stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox; 1558284982Sdavidcs 1559284982Sdavidcs cmd = Q8_GET_STATS_CMD_TYPE_ALL; 1560284982Sdavidcs// cmd |= Q8_GET_STATS_CMD_CLEAR; 1561284982Sdavidcs 1562284982Sdavidcs// cmd |= ((ha->pci_func & 0x3) << 16); 1563284982Sdavidcs cmd |= (0xFFFF << 16); 1564284982Sdavidcs 1565284982Sdavidcs if (qla_get_hw_stats(ha, cmd, 1566284982Sdavidcs sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) { 1567284982Sdavidcs 1568284982Sdavidcs mstat = (q80_mac_stats_t *)&stat_rsp->mac; 1569284982Sdavidcs rstat = (q80_rcv_stats_t *)&stat_rsp->rcv; 1570284982Sdavidcs xstat = (q80_xmt_stats_t *)&stat_rsp->xmt; 1571284982Sdavidcs qla_mac_stats(ha, mstat); 1572284982Sdavidcs qla_rcv_stats(ha, rstat); 1573284982Sdavidcs qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings); 1574250661Sdavidcs } else { 1575284982Sdavidcs device_printf(ha->pci_dev, "%s: failed [0x%08x]\n", 1576250661Sdavidcs __func__, ha->hw.mbox[0]); 1577250661Sdavidcs } 1578284982Sdavidcs return; 1579250661Sdavidcs} 1580250661Sdavidcs 1581250661Sdavidcs/* 1582250661Sdavidcs * Name: qla_tx_tso 1583250661Sdavidcs * Function: Checks if the packet to be transmitted is a candidate for 1584250661Sdavidcs * Large TCP Segment Offload. If yes, the appropriate fields in the Tx 1585250661Sdavidcs * Ring Structure are plugged in. 
1586250661Sdavidcs */ 1587250661Sdavidcsstatic int 1588250661Sdavidcsqla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr) 1589250661Sdavidcs{ 1590250661Sdavidcs struct ether_vlan_header *eh; 1591250661Sdavidcs struct ip *ip = NULL; 1592250661Sdavidcs struct ip6_hdr *ip6 = NULL; 1593250661Sdavidcs struct tcphdr *th = NULL; 1594250661Sdavidcs uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off; 1595250661Sdavidcs uint16_t etype, opcode, offload = 1; 1596250661Sdavidcs device_t dev; 1597250661Sdavidcs 1598250661Sdavidcs dev = ha->pci_dev; 1599250661Sdavidcs 1600250661Sdavidcs 1601250661Sdavidcs eh = mtod(mp, struct ether_vlan_header *); 1602250661Sdavidcs 1603250661Sdavidcs if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1604250661Sdavidcs ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1605250661Sdavidcs etype = ntohs(eh->evl_proto); 1606250661Sdavidcs } else { 1607250661Sdavidcs ehdrlen = ETHER_HDR_LEN; 1608250661Sdavidcs etype = ntohs(eh->evl_encap_proto); 1609250661Sdavidcs } 1610250661Sdavidcs 1611250661Sdavidcs hdrlen = 0; 1612250661Sdavidcs 1613250661Sdavidcs switch (etype) { 1614250661Sdavidcs case ETHERTYPE_IP: 1615250661Sdavidcs 1616250661Sdavidcs tcp_opt_off = ehdrlen + sizeof(struct ip) + 1617250661Sdavidcs sizeof(struct tcphdr); 1618250661Sdavidcs 1619250661Sdavidcs if (mp->m_len < tcp_opt_off) { 1620250661Sdavidcs m_copydata(mp, 0, tcp_opt_off, hdr); 1621250661Sdavidcs ip = (struct ip *)(hdr + ehdrlen); 1622250661Sdavidcs } else { 1623250661Sdavidcs ip = (struct ip *)(mp->m_data + ehdrlen); 1624250661Sdavidcs } 1625250661Sdavidcs 1626250661Sdavidcs ip_hlen = ip->ip_hl << 2; 1627250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_TCP_LSO; 1628250661Sdavidcs 1629250661Sdavidcs 1630250661Sdavidcs if ((ip->ip_p != IPPROTO_TCP) || 1631250661Sdavidcs (ip_hlen != sizeof (struct ip))){ 1632250661Sdavidcs /* IP Options are not supported */ 1633250661Sdavidcs 1634250661Sdavidcs offload = 0; 1635250661Sdavidcs } else 1636250661Sdavidcs th = 
(struct tcphdr *)((caddr_t)ip + ip_hlen); 1637250661Sdavidcs 1638250661Sdavidcs break; 1639250661Sdavidcs 1640250661Sdavidcs case ETHERTYPE_IPV6: 1641250661Sdavidcs 1642250661Sdavidcs tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) + 1643250661Sdavidcs sizeof (struct tcphdr); 1644250661Sdavidcs 1645250661Sdavidcs if (mp->m_len < tcp_opt_off) { 1646250661Sdavidcs m_copydata(mp, 0, tcp_opt_off, hdr); 1647250661Sdavidcs ip6 = (struct ip6_hdr *)(hdr + ehdrlen); 1648250661Sdavidcs } else { 1649250661Sdavidcs ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 1650250661Sdavidcs } 1651250661Sdavidcs 1652250661Sdavidcs ip_hlen = sizeof(struct ip6_hdr); 1653250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6; 1654250661Sdavidcs 1655250661Sdavidcs if (ip6->ip6_nxt != IPPROTO_TCP) { 1656250661Sdavidcs //device_printf(dev, "%s: ipv6\n", __func__); 1657250661Sdavidcs offload = 0; 1658250661Sdavidcs } else 1659250661Sdavidcs th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); 1660250661Sdavidcs break; 1661250661Sdavidcs 1662250661Sdavidcs default: 1663250661Sdavidcs QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__)); 1664250661Sdavidcs offload = 0; 1665250661Sdavidcs break; 1666250661Sdavidcs } 1667250661Sdavidcs 1668250661Sdavidcs if (!offload) 1669250661Sdavidcs return (-1); 1670250661Sdavidcs 1671250661Sdavidcs tcp_hlen = th->th_off << 2; 1672250661Sdavidcs hdrlen = ehdrlen + ip_hlen + tcp_hlen; 1673250661Sdavidcs 1674250661Sdavidcs if (mp->m_len < hdrlen) { 1675250661Sdavidcs if (mp->m_len < tcp_opt_off) { 1676250661Sdavidcs if (tcp_hlen > sizeof(struct tcphdr)) { 1677250661Sdavidcs m_copydata(mp, tcp_opt_off, 1678250661Sdavidcs (tcp_hlen - sizeof(struct tcphdr)), 1679250661Sdavidcs &hdr[tcp_opt_off]); 1680250661Sdavidcs } 1681250661Sdavidcs } else { 1682250661Sdavidcs m_copydata(mp, 0, hdrlen, hdr); 1683250661Sdavidcs } 1684250661Sdavidcs } 1685250661Sdavidcs 1686250661Sdavidcs tx_cmd->mss = mp->m_pkthdr.tso_segsz; 1687250661Sdavidcs 1688250661Sdavidcs tx_cmd->flags_opcode = 
opcode ; 1689250661Sdavidcs tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen; 1690250661Sdavidcs tx_cmd->total_hdr_len = hdrlen; 1691250661Sdavidcs 1692250661Sdavidcs /* Check for Multicast least significant bit of MSB == 1 */ 1693250661Sdavidcs if (eh->evl_dhost[0] & 0x01) { 1694250661Sdavidcs tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST; 1695250661Sdavidcs } 1696250661Sdavidcs 1697250661Sdavidcs if (mp->m_len < hdrlen) { 1698250661Sdavidcs printf("%d\n", hdrlen); 1699250661Sdavidcs return (1); 1700250661Sdavidcs } 1701250661Sdavidcs 1702250661Sdavidcs return (0); 1703250661Sdavidcs} 1704250661Sdavidcs 1705250661Sdavidcs/* 1706250661Sdavidcs * Name: qla_tx_chksum 1707250661Sdavidcs * Function: Checks if the packet to be transmitted is a candidate for 1708250661Sdavidcs * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx 1709250661Sdavidcs * Ring Structure are plugged in. 1710250661Sdavidcs */ 1711250661Sdavidcsstatic int 1712250661Sdavidcsqla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code, 1713250661Sdavidcs uint32_t *tcp_hdr_off) 1714250661Sdavidcs{ 1715250661Sdavidcs struct ether_vlan_header *eh; 1716250661Sdavidcs struct ip *ip; 1717250661Sdavidcs struct ip6_hdr *ip6; 1718250661Sdavidcs uint32_t ehdrlen, ip_hlen; 1719250661Sdavidcs uint16_t etype, opcode, offload = 1; 1720250661Sdavidcs device_t dev; 1721250661Sdavidcs uint8_t buf[sizeof(struct ip6_hdr)]; 1722250661Sdavidcs 1723250661Sdavidcs dev = ha->pci_dev; 1724250661Sdavidcs 1725250661Sdavidcs *op_code = 0; 1726250661Sdavidcs 1727250661Sdavidcs if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0) 1728250661Sdavidcs return (-1); 1729250661Sdavidcs 1730250661Sdavidcs eh = mtod(mp, struct ether_vlan_header *); 1731250661Sdavidcs 1732250661Sdavidcs if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1733250661Sdavidcs ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1734250661Sdavidcs etype = ntohs(eh->evl_proto); 1735250661Sdavidcs } else { 1736250661Sdavidcs ehdrlen = 
ETHER_HDR_LEN; 1737250661Sdavidcs etype = ntohs(eh->evl_encap_proto); 1738250661Sdavidcs } 1739250661Sdavidcs 1740250661Sdavidcs 1741250661Sdavidcs switch (etype) { 1742250661Sdavidcs case ETHERTYPE_IP: 1743250661Sdavidcs ip = (struct ip *)(mp->m_data + ehdrlen); 1744250661Sdavidcs 1745250661Sdavidcs ip_hlen = sizeof (struct ip); 1746250661Sdavidcs 1747250661Sdavidcs if (mp->m_len < (ehdrlen + ip_hlen)) { 1748250661Sdavidcs m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 1749250661Sdavidcs ip = (struct ip *)buf; 1750250661Sdavidcs } 1751250661Sdavidcs 1752250661Sdavidcs if (ip->ip_p == IPPROTO_TCP) 1753250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM; 1754250661Sdavidcs else if (ip->ip_p == IPPROTO_UDP) 1755250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM; 1756250661Sdavidcs else { 1757250661Sdavidcs //device_printf(dev, "%s: ipv4\n", __func__); 1758250661Sdavidcs offload = 0; 1759250661Sdavidcs } 1760250661Sdavidcs break; 1761250661Sdavidcs 1762250661Sdavidcs case ETHERTYPE_IPV6: 1763250661Sdavidcs ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 1764250661Sdavidcs 1765250661Sdavidcs ip_hlen = sizeof(struct ip6_hdr); 1766250661Sdavidcs 1767250661Sdavidcs if (mp->m_len < (ehdrlen + ip_hlen)) { 1768250661Sdavidcs m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 1769250661Sdavidcs buf); 1770250661Sdavidcs ip6 = (struct ip6_hdr *)buf; 1771250661Sdavidcs } 1772250661Sdavidcs 1773250661Sdavidcs if (ip6->ip6_nxt == IPPROTO_TCP) 1774250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6; 1775250661Sdavidcs else if (ip6->ip6_nxt == IPPROTO_UDP) 1776250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6; 1777250661Sdavidcs else { 1778250661Sdavidcs //device_printf(dev, "%s: ipv6\n", __func__); 1779250661Sdavidcs offload = 0; 1780250661Sdavidcs } 1781250661Sdavidcs break; 1782250661Sdavidcs 1783250661Sdavidcs default: 1784250661Sdavidcs offload = 0; 1785250661Sdavidcs break; 1786250661Sdavidcs } 1787250661Sdavidcs if (!offload) 1788250661Sdavidcs return (-1); 
#define QLA_TX_MIN_FREE 2
/*
 * Name: ql_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 * offload. If either of these creteria are not met, it is transmitted
 * as a regular ethernet frame.
 *
 * Parameters:
 *   segs/nsegs  - DMA segment list describing the mbuf's payload
 *   tx_idx      - ring slot for the first command descriptor
 *   mp          - the packet
 *   txr_idx     - which transmit ring/context to use
 *   iscsi_pdu   - non-zero if this is an iSCSI PDU (gets priority tag)
 *
 * Returns 0 on success; -1 if the frame is too large or the ring is
 * full; EINVAL if TSO parsing fails.
 */
int
ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
{
	struct ether_vlan_header *eh;
	qla_hw_t *hw = &ha->hw;
	q80_tx_cmd_t *tx_cmd, tso_cmd;
	bus_dma_segment_t *c_seg;
	uint32_t num_tx_cmds, hdr_len = 0;
	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
	device_t dev;
	int i, ret;
	uint8_t *src = NULL, *dst = NULL;
	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
	uint32_t op_code = 0;
	uint32_t tcp_hdr_off = 0;

	dev = ha->pci_dev;

	/*
	 * Always make sure there is atleast one empty slot in the tx_ring
	 * tx_ring is considered full when there only one entry available
	 */
	/* one command descriptor carries Q8_TX_CMD_MAX_SEGMENTS (4) buffers */
	num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;

	total_length = mp->m_pkthdr.len;
	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
			__func__, total_length);
		return (-1);
	}
	eh = mtod(mp, struct ether_vlan_header *);

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));

		src = frame_hdr;
		ret = qla_tx_tso(ha, mp, &tso_cmd, src);

		/* ret is 0 (header in mbuf) or 1 (header copied to
		 * frame_hdr); anything else is a parse failure */
		if (!(ret & ~1)) {
			/* find the additional tx_cmd descriptors required */

			if (mp->m_flags & M_VLANTAG)
				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;

			hdr_len = tso_cmd.total_hdr_len;

			/* first spill descriptor holds sizeof(cmd) -
			 * Q8_TX_CMD_TSO_ALIGN header bytes; later ones a
			 * full descriptor's worth each */
			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
			bytes = QL_MIN(bytes, hdr_len);

			num_tx_cmds++;
			hdr_len -= bytes;

			while (hdr_len) {
				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
				hdr_len -= bytes;
				num_tx_cmds++;
			}
			hdr_len = tso_cmd.total_hdr_len;

			if (ret == 0)
				src = (uint8_t *)eh;
		} else
			return (EINVAL);
	} else {
		/* best-effort: falls back to plain ether xmit if not
		 * offloadable (op_code stays 0) */
		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
	}

	if (iscsi_pdu)
		ha->hw.iscsi_pkt_count++;

	/* reclaim completed slots if we are about to run out */
	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
		qla_hw_tx_done_locked(ha, txr_idx);
		if (hw->tx_cntxt[txr_idx].txr_free <=
			(num_tx_cmds + QLA_TX_MIN_FREE)) {
			QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
				__func__));
			return (-1);
		}
	}

	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];

	if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {

		if (nsegs > ha->hw.max_tx_segs)
			ha->hw.max_tx_segs = nsegs;

		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

		if (op_code) {
			tx_cmd->flags_opcode = op_code;
			tx_cmd->tcp_hdr_off = tcp_hdr_off;

		} else {
			tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
		}
	} else {
		/* first descriptor was pre-built by qla_tx_tso() */
		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
		ha->tx_tso_frames++;
	}

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* frame already carries a VLAN header in the payload */
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;

		/* PCP field is bits 13..15 of the tag */
		if (iscsi_pdu)
			eh->evl_tag |= ha->hw.user_pri_iscsi << 13;

	} else if (mp->m_flags & M_VLANTAG) {

		if (hdr_len) { /* TSO */
			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
				Q8_TX_CMD_FLAGS_HW_VLAN_ID);
			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
		} else
			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;

		ha->hw_vlan_tx_frames++;
		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;

		if (iscsi_pdu) {
			tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
			mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
		}
	}


	tx_cmd->n_bufs = (uint8_t)nsegs;
	tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
	tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);

	c_seg = segs;

	/* scatter the DMA segments, 4 buffer slots per command descriptor */
	while (1) {
		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {

			switch (i) {
			case 0:
				tx_cmd->buf1_addr = c_seg->ds_addr;
				tx_cmd->buf1_len = c_seg->ds_len;
				break;

			case 1:
				tx_cmd->buf2_addr = c_seg->ds_addr;
				tx_cmd->buf2_len = c_seg->ds_len;
				break;

			case 2:
				tx_cmd->buf3_addr = c_seg->ds_addr;
				tx_cmd->buf3_len = c_seg->ds_len;
				break;

			case 3:
				tx_cmd->buf4_addr = c_seg->ds_addr;
				tx_cmd->buf4_len = c_seg->ds_len;
				break;
			}

			c_seg++;
			nsegs--;
		}

		/* ring size is a power of two, so masking wraps the index */
		txr_next = hw->tx_cntxt[txr_idx].txr_next =
			(hw->tx_cntxt[txr_idx].txr_next + 1) &
			(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		if (!nsegs)
			break;

		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
	}

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		/* TSO : Copy the header in the following tx cmd descriptors */

		txr_next = hw->tx_cntxt[txr_idx].txr_next;

		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
		bytes = QL_MIN(bytes, hdr_len);

		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;

		if (mp->m_flags & M_VLANTAG) {
			/* first copy the src/dst MAC addresses */
			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
			dst += (ETHER_ADDR_LEN * 2);
			src += (ETHER_ADDR_LEN * 2);

			/* synthesize the 802.1Q tag the mbuf carries
			 * out-of-band in m_pkthdr.ether_vtag */
			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
			dst += 2;
			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
			dst += 2;

			/* bytes left in src header */
			hdr_len -= ((ETHER_ADDR_LEN * 2) +
				ETHER_VLAN_ENCAP_LEN);

			/* bytes left in TxCmd Entry */
			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);


			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		} else {
			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		}

		txr_next = hw->tx_cntxt[txr_idx].txr_next =
			(hw->tx_cntxt[txr_idx].txr_next + 1) &
			(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		/* remaining header bytes fill whole descriptors */
		while (hdr_len) {
			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);

			bcopy(src, tx_cmd, bytes);
			src += bytes;
			hdr_len -= bytes;

			txr_next = hw->tx_cntxt[txr_idx].txr_next =
				(hw->tx_cntxt[txr_idx].txr_next + 1) &
				(NUM_TX_DESCRIPTORS - 1);
			tx_cmd_count++;
		}
	}

	hw->tx_cntxt[txr_idx].txr_free =
		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;

	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
		txr_idx);
	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));

	return (0);
}
&hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; 2030250661Sdavidcs bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2031250661Sdavidcs 2032250661Sdavidcs bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); 2033250661Sdavidcs 2034250661Sdavidcs bcopy(src, tx_cmd, bytes); 2035250661Sdavidcs src += bytes; 2036250661Sdavidcs hdr_len -= bytes; 2037250661Sdavidcs 2038250661Sdavidcs txr_next = hw->tx_cntxt[txr_idx].txr_next = 2039250661Sdavidcs (hw->tx_cntxt[txr_idx].txr_next + 1) & 2040250661Sdavidcs (NUM_TX_DESCRIPTORS - 1); 2041250661Sdavidcs tx_cmd_count++; 2042250661Sdavidcs } 2043250661Sdavidcs } 2044250661Sdavidcs 2045250661Sdavidcs hw->tx_cntxt[txr_idx].txr_free = 2046250661Sdavidcs hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count; 2047250661Sdavidcs 2048250661Sdavidcs QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\ 2049250661Sdavidcs txr_idx); 2050250661Sdavidcs QL_DPRINT8(ha, (dev, "%s: return\n", __func__)); 2051250661Sdavidcs 2052250661Sdavidcs return (0); 2053250661Sdavidcs} 2054250661Sdavidcs 2055250661Sdavidcs 2056284982Sdavidcs 2057284982Sdavidcs#define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */ 2058250661Sdavidcsstatic int 2059250661Sdavidcsqla_config_rss_ind_table(qla_host_t *ha) 2060250661Sdavidcs{ 2061250661Sdavidcs uint32_t i, count; 2062284982Sdavidcs uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE]; 2063250661Sdavidcs 2064250661Sdavidcs 2065284982Sdavidcs for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) { 2066250661Sdavidcs rss_ind_tbl[i] = i % ha->hw.num_sds_rings; 2067250661Sdavidcs } 2068250661Sdavidcs 2069284982Sdavidcs for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ; 2070284982Sdavidcs i = i + Q8_CONFIG_IND_TBL_SIZE) { 2071250661Sdavidcs 2072284982Sdavidcs if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) { 2073250661Sdavidcs count = Q8_RSS_IND_TBL_MAX_IDX - i + 1; 2074250661Sdavidcs } else { 2075284982Sdavidcs count = Q8_CONFIG_IND_TBL_SIZE; 2076250661Sdavidcs } 2077250661Sdavidcs 2078250661Sdavidcs if 
(qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id, 2079250661Sdavidcs rss_ind_tbl)) 2080250661Sdavidcs return (-1); 2081250661Sdavidcs } 2082250661Sdavidcs 2083250661Sdavidcs return (0); 2084250661Sdavidcs} 2085250661Sdavidcs 2086250661Sdavidcs/* 2087250661Sdavidcs * Name: ql_del_hw_if 2088250661Sdavidcs * Function: Destroys the hardware specific entities corresponding to an 2089250661Sdavidcs * Ethernet Interface 2090250661Sdavidcs */ 2091250661Sdavidcsvoid 2092250661Sdavidcsql_del_hw_if(qla_host_t *ha) 2093250661Sdavidcs{ 2094284982Sdavidcs uint32_t i; 2095284982Sdavidcs uint32_t num_msix; 2096250661Sdavidcs 2097284982Sdavidcs (void)qla_stop_nic_func(ha); 2098284982Sdavidcs 2099250661Sdavidcs qla_del_rcv_cntxt(ha); 2100250661Sdavidcs qla_del_xmt_cntxt(ha); 2101250661Sdavidcs 2102250661Sdavidcs if (ha->hw.flags.init_intr_cnxt) { 2103284982Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; ) { 2104284982Sdavidcs 2105284982Sdavidcs if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) 2106284982Sdavidcs num_msix = Q8_MAX_INTR_VECTORS; 2107284982Sdavidcs else 2108284982Sdavidcs num_msix = ha->hw.num_sds_rings - i; 2109284982Sdavidcs qla_config_intr_cntxt(ha, i, num_msix, 0); 2110284982Sdavidcs 2111284982Sdavidcs i += num_msix; 2112284982Sdavidcs } 2113284982Sdavidcs 2114250661Sdavidcs ha->hw.flags.init_intr_cnxt = 0; 2115250661Sdavidcs } 2116284982Sdavidcs return; 2117250661Sdavidcs} 2118250661Sdavidcs 2119284982Sdavidcsvoid 2120284982Sdavidcsqla_confirm_9kb_enable(qla_host_t *ha) 2121284982Sdavidcs{ 2122284982Sdavidcs uint32_t supports_9kb = 0; 2123284982Sdavidcs 2124284982Sdavidcs ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX); 2125284982Sdavidcs 2126284982Sdavidcs /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */ 2127284982Sdavidcs WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2); 2128284982Sdavidcs WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); 2129284982Sdavidcs 2130284982Sdavidcs qla_get_nic_partition(ha, &supports_9kb, NULL); 
/*
 * Name: ql_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 * corresponding to the interface. Enables LRO if allowed.
 *
 * Returns 0 on success, -1 on any failure (partially created interrupt
 * contexts are torn down; a created receive context is deleted if the
 * transmit context fails).
 */
int
ql_init_hw_if(qla_host_t *ha)
{
	device_t dev;
	uint32_t i;
	uint8_t bcast_mac[6];
	qla_rdesc_t *rdesc;
	uint32_t num_msix;

	dev = ha->pci_dev;

	/* clear all status descriptor rings before the firmware uses them */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
			ha->hw.dma_buf.sds_ring[i].size);
	}

	/* create interrupt contexts in batches of Q8_MAX_INTR_VECTORS */
	for (i = 0; i < ha->hw.num_sds_rings; ) {

		if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
			num_msix = Q8_MAX_INTR_VECTORS;
		else
			num_msix = ha->hw.num_sds_rings - i;

		if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {

			/* unwind: destroy the i contexts created so far
			 * (i is always a multiple of Q8_MAX_INTR_VECTORS
			 * here, since only full batches succeeded) */
			if (i > 0) {

				num_msix = i;

				for (i = 0; i < num_msix; ) {
					qla_config_intr_cntxt(ha, i,
						Q8_MAX_INTR_VECTORS, 0);
					i += Q8_MAX_INTR_VECTORS;
				}
			}
			return (-1);
		}

		i = i + num_msix;
	}

	ha->hw.flags.init_intr_cnxt = 1;

	if (ha->hw.mdump_init == 0) {
		qla_minidump_init(ha);
	}

	/*
	 * Create Receive Context
	 */
	if (qla_init_rcv_cntxt(ha)) {
		return (-1);
	}

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		rdesc = &ha->hw.rds[i];
		/* NOTE(review): producer starts at NUM_RX_DESCRIPTORS - 2,
		 * presumably keeping a gap from the consumer — confirm
		 * against the rx replenish logic */
		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
		rdesc->rx_in = 0;
		/* Update the RDS Producer Indices */
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
			rdesc->rx_next);
	}


	/*
	 * Create Transmit Context
	 */
	if (qla_init_xmt_cntxt(ha)) {
		qla_del_rcv_cntxt(ha);
		return (-1);
	}
	ha->hw.max_tx_segs = 0;

	/* program the interface unicast MAC address */
	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1))
		return(-1);

	ha->hw.flags.unicast_mac = 1;

	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

	if (qla_config_mac_addr(ha, bcast_mac, 1))
		return (-1);

	ha->hw.flags.bcast_mac = 1;

	/*
	 * program any cached multicast addresses
	 */
	if (qla_hw_add_all_mcast(ha))
		return (-1);

	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_rss_ind_table(ha))
		return (-1);

	/* coalescing enabled on receive (tenable=0, rcv=1) */
	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
		return (-1);

	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_init_nic_func(ha))
		return (-1);

	if (qla_query_fw_dcbx_caps(ha))
		return (-1);

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		QL_ENABLE_INTERRUPTS(ha, i);

	return (0);
}
2240250661Sdavidcs if (qla_config_rss_ind_table(ha)) 2241250661Sdavidcs return (-1); 2242250661Sdavidcs 2243284982Sdavidcs if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1)) 2244250661Sdavidcs return (-1); 2245250661Sdavidcs 2246250661Sdavidcs if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id)) 2247250661Sdavidcs return (-1); 2248250661Sdavidcs 2249250661Sdavidcs if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id)) 2250250661Sdavidcs return (-1); 2251250661Sdavidcs 2252284982Sdavidcs if (qla_init_nic_func(ha)) 2253284982Sdavidcs return (-1); 2254284982Sdavidcs 2255284982Sdavidcs if (qla_query_fw_dcbx_caps(ha)) 2256284982Sdavidcs return (-1); 2257284982Sdavidcs 2258250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) 2259250661Sdavidcs QL_ENABLE_INTERRUPTS(ha, i); 2260250661Sdavidcs 2261250661Sdavidcs return (0); 2262250661Sdavidcs} 2263250661Sdavidcs 2264250661Sdavidcsstatic int 2265284982Sdavidcsqla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx) 2266250661Sdavidcs{ 2267250661Sdavidcs device_t dev = ha->pci_dev; 2268250661Sdavidcs q80_rq_map_sds_to_rds_t *map_rings; 2269284982Sdavidcs q80_rsp_map_sds_to_rds_t *map_rings_rsp; 2270250661Sdavidcs uint32_t i, err; 2271250661Sdavidcs qla_hw_t *hw = &ha->hw; 2272250661Sdavidcs 2273250661Sdavidcs map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox; 2274250661Sdavidcs bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t)); 2275250661Sdavidcs 2276250661Sdavidcs map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS; 2277250661Sdavidcs map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2); 2278250661Sdavidcs map_rings->count_version |= Q8_MBX_CMD_VERSION; 2279250661Sdavidcs 2280250661Sdavidcs map_rings->cntxt_id = hw->rcv_cntxt_id; 2281284982Sdavidcs map_rings->num_rings = num_idx; 2282250661Sdavidcs 2283284982Sdavidcs for (i = 0; i < num_idx; i++) { 2284284982Sdavidcs map_rings->sds_rds[i].sds_ring = i + start_idx; 2285284982Sdavidcs map_rings->sds_rds[i].rds_ring = i + start_idx; 2286284982Sdavidcs } 
2287250661Sdavidcs 2288250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)map_rings, 2289250661Sdavidcs (sizeof (q80_rq_map_sds_to_rds_t) >> 2), 2290250661Sdavidcs ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { 2291250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 2292250661Sdavidcs return (-1); 2293250661Sdavidcs } 2294250661Sdavidcs 2295284982Sdavidcs map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox; 2296250661Sdavidcs 2297250661Sdavidcs err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status); 2298250661Sdavidcs 2299250661Sdavidcs if (err) { 2300250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2301250661Sdavidcs return (-1); 2302250661Sdavidcs } 2303250661Sdavidcs 2304250661Sdavidcs return (0); 2305250661Sdavidcs} 2306250661Sdavidcs 2307250661Sdavidcs/* 2308250661Sdavidcs * Name: qla_init_rcv_cntxt 2309250661Sdavidcs * Function: Creates the Receive Context. 2310250661Sdavidcs */ 2311250661Sdavidcsstatic int 2312250661Sdavidcsqla_init_rcv_cntxt(qla_host_t *ha) 2313250661Sdavidcs{ 2314250661Sdavidcs q80_rq_rcv_cntxt_t *rcntxt; 2315250661Sdavidcs q80_rsp_rcv_cntxt_t *rcntxt_rsp; 2316250661Sdavidcs q80_stat_desc_t *sdesc; 2317250661Sdavidcs int i, j; 2318250661Sdavidcs qla_hw_t *hw = &ha->hw; 2319250661Sdavidcs device_t dev; 2320250661Sdavidcs uint32_t err; 2321250661Sdavidcs uint32_t rcntxt_sds_rings; 2322250661Sdavidcs uint32_t rcntxt_rds_rings; 2323284982Sdavidcs uint32_t max_idx; 2324250661Sdavidcs 2325250661Sdavidcs dev = ha->pci_dev; 2326250661Sdavidcs 2327250661Sdavidcs /* 2328250661Sdavidcs * Create Receive Context 2329250661Sdavidcs */ 2330250661Sdavidcs 2331250661Sdavidcs for (i = 0; i < hw->num_sds_rings; i++) { 2332250661Sdavidcs sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0]; 2333250661Sdavidcs 2334250661Sdavidcs for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) { 2335250661Sdavidcs sdesc->data[0] = 1ULL; 2336250661Sdavidcs sdesc->data[1] = 1ULL; 2337250661Sdavidcs } 2338250661Sdavidcs } 
2339250661Sdavidcs 2340250661Sdavidcs rcntxt_sds_rings = hw->num_sds_rings; 2341250661Sdavidcs if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) 2342250661Sdavidcs rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS; 2343250661Sdavidcs 2344250661Sdavidcs rcntxt_rds_rings = hw->num_rds_rings; 2345250661Sdavidcs 2346250661Sdavidcs if (hw->num_rds_rings > MAX_RDS_RING_SETS) 2347250661Sdavidcs rcntxt_rds_rings = MAX_RDS_RING_SETS; 2348250661Sdavidcs 2349250661Sdavidcs rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox; 2350250661Sdavidcs bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t))); 2351250661Sdavidcs 2352250661Sdavidcs rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT; 2353250661Sdavidcs rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2); 2354250661Sdavidcs rcntxt->count_version |= Q8_MBX_CMD_VERSION; 2355250661Sdavidcs 2356250661Sdavidcs rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW | 2357250661Sdavidcs Q8_RCV_CNTXT_CAP0_LRO | 2358250661Sdavidcs Q8_RCV_CNTXT_CAP0_HW_LRO | 2359250661Sdavidcs Q8_RCV_CNTXT_CAP0_RSS | 2360250661Sdavidcs Q8_RCV_CNTXT_CAP0_SGL_LRO; 2361250661Sdavidcs 2362284982Sdavidcs if (ha->hw.enable_9kb) 2363284982Sdavidcs rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO; 2364284982Sdavidcs else 2365284982Sdavidcs rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO; 2366284982Sdavidcs 2367250661Sdavidcs if (ha->hw.num_rds_rings > 1) { 2368250661Sdavidcs rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5); 2369250661Sdavidcs rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS; 2370250661Sdavidcs } else 2371250661Sdavidcs rcntxt->nrds_sets_rings = 0x1 | (1 << 5); 2372250661Sdavidcs 2373250661Sdavidcs rcntxt->nsds_rings = rcntxt_sds_rings; 2374250661Sdavidcs 2375250661Sdavidcs rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE; 2376250661Sdavidcs 2377250661Sdavidcs rcntxt->rcv_vpid = 0; 2378250661Sdavidcs 2379250661Sdavidcs for (i = 0; i < rcntxt_sds_rings; i++) { 2380250661Sdavidcs rcntxt->sds[i].paddr = 2381250661Sdavidcs qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr); 
2382250661Sdavidcs rcntxt->sds[i].size = 2383250661Sdavidcs qla_host_to_le32(NUM_STATUS_DESCRIPTORS); 2384250661Sdavidcs if (ha->msix_count == 2) { 2385250661Sdavidcs rcntxt->sds[i].intr_id = 2386250661Sdavidcs qla_host_to_le16(hw->intr_id[0]); 2387250661Sdavidcs rcntxt->sds[i].intr_src_bit = qla_host_to_le16((i)); 2388250661Sdavidcs } else { 2389250661Sdavidcs rcntxt->sds[i].intr_id = 2390250661Sdavidcs qla_host_to_le16(hw->intr_id[i]); 2391250661Sdavidcs rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0); 2392250661Sdavidcs } 2393250661Sdavidcs } 2394250661Sdavidcs 2395250661Sdavidcs for (i = 0; i < rcntxt_rds_rings; i++) { 2396250661Sdavidcs rcntxt->rds[i].paddr_std = 2397250661Sdavidcs qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr); 2398284982Sdavidcs 2399284982Sdavidcs if (ha->hw.enable_9kb) 2400284982Sdavidcs rcntxt->rds[i].std_bsize = 2401284982Sdavidcs qla_host_to_le64(MJUM9BYTES); 2402284982Sdavidcs else 2403284982Sdavidcs rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); 2404284982Sdavidcs 2405250661Sdavidcs rcntxt->rds[i].std_nentries = 2406250661Sdavidcs qla_host_to_le32(NUM_RX_DESCRIPTORS); 2407250661Sdavidcs } 2408250661Sdavidcs 2409250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, 2410250661Sdavidcs (sizeof (q80_rq_rcv_cntxt_t) >> 2), 2411250661Sdavidcs ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) { 2412250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 2413250661Sdavidcs return (-1); 2414250661Sdavidcs } 2415250661Sdavidcs 2416250661Sdavidcs rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox; 2417250661Sdavidcs 2418250661Sdavidcs err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); 2419250661Sdavidcs 2420250661Sdavidcs if (err) { 2421250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2422250661Sdavidcs return (-1); 2423250661Sdavidcs } 2424250661Sdavidcs 2425250661Sdavidcs for (i = 0; i < rcntxt_sds_rings; i++) { 2426250661Sdavidcs hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i]; 
2427250661Sdavidcs } 2428250661Sdavidcs 2429250661Sdavidcs for (i = 0; i < rcntxt_rds_rings; i++) { 2430250661Sdavidcs hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std; 2431250661Sdavidcs } 2432250661Sdavidcs 2433250661Sdavidcs hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id; 2434250661Sdavidcs 2435250661Sdavidcs ha->hw.flags.init_rx_cnxt = 1; 2436250661Sdavidcs 2437250661Sdavidcs if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) { 2438284982Sdavidcs 2439284982Sdavidcs for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) { 2440284982Sdavidcs 2441284982Sdavidcs if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings) 2442284982Sdavidcs max_idx = MAX_RCNTXT_SDS_RINGS; 2443284982Sdavidcs else 2444284982Sdavidcs max_idx = hw->num_sds_rings - i; 2445284982Sdavidcs 2446284982Sdavidcs err = qla_add_rcv_rings(ha, i, max_idx); 2447284982Sdavidcs if (err) 2448284982Sdavidcs return -1; 2449284982Sdavidcs 2450284982Sdavidcs i += max_idx; 2451284982Sdavidcs } 2452250661Sdavidcs } 2453250661Sdavidcs 2454284982Sdavidcs if (hw->num_rds_rings > 1) { 2455284982Sdavidcs 2456284982Sdavidcs for (i = 0; i < hw->num_rds_rings; ) { 2457284982Sdavidcs 2458284982Sdavidcs if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings) 2459284982Sdavidcs max_idx = MAX_SDS_TO_RDS_MAP; 2460284982Sdavidcs else 2461284982Sdavidcs max_idx = hw->num_rds_rings - i; 2462284982Sdavidcs 2463284982Sdavidcs err = qla_map_sds_to_rds(ha, i, max_idx); 2464284982Sdavidcs if (err) 2465284982Sdavidcs return -1; 2466284982Sdavidcs 2467284982Sdavidcs i += max_idx; 2468284982Sdavidcs } 2469250661Sdavidcs } 2470250661Sdavidcs 2471250661Sdavidcs return (0); 2472250661Sdavidcs} 2473250661Sdavidcs 2474250661Sdavidcsstatic int 2475284982Sdavidcsqla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds) 2476250661Sdavidcs{ 2477250661Sdavidcs device_t dev = ha->pci_dev; 2478250661Sdavidcs q80_rq_add_rcv_rings_t *add_rcv; 2479250661Sdavidcs q80_rsp_add_rcv_rings_t *add_rcv_rsp; 2480250661Sdavidcs uint32_t i,j, err; 2481250661Sdavidcs 
qla_hw_t *hw = &ha->hw; 2482250661Sdavidcs 2483250661Sdavidcs add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox; 2484250661Sdavidcs bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t)); 2485250661Sdavidcs 2486250661Sdavidcs add_rcv->opcode = Q8_MBX_ADD_RX_RINGS; 2487250661Sdavidcs add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2); 2488250661Sdavidcs add_rcv->count_version |= Q8_MBX_CMD_VERSION; 2489250661Sdavidcs 2490284982Sdavidcs add_rcv->nrds_sets_rings = nsds | (1 << 5); 2491250661Sdavidcs add_rcv->nsds_rings = nsds; 2492250661Sdavidcs add_rcv->cntxt_id = hw->rcv_cntxt_id; 2493250661Sdavidcs 2494250661Sdavidcs for (i = 0; i < nsds; i++) { 2495250661Sdavidcs 2496250661Sdavidcs j = i + sds_idx; 2497250661Sdavidcs 2498250661Sdavidcs add_rcv->sds[i].paddr = 2499250661Sdavidcs qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr); 2500250661Sdavidcs 2501250661Sdavidcs add_rcv->sds[i].size = 2502250661Sdavidcs qla_host_to_le32(NUM_STATUS_DESCRIPTORS); 2503250661Sdavidcs 2504250661Sdavidcs if (ha->msix_count == 2) { 2505250661Sdavidcs add_rcv->sds[i].intr_id = 2506250661Sdavidcs qla_host_to_le16(hw->intr_id[0]); 2507250661Sdavidcs add_rcv->sds[i].intr_src_bit = qla_host_to_le16(j); 2508250661Sdavidcs } else { 2509250661Sdavidcs add_rcv->sds[i].intr_id = 2510250661Sdavidcs qla_host_to_le16(hw->intr_id[j]); 2511250661Sdavidcs add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0); 2512250661Sdavidcs } 2513250661Sdavidcs 2514250661Sdavidcs } 2515284982Sdavidcs for (i = 0; (i < nsds); i++) { 2516250661Sdavidcs j = i + sds_idx; 2517284982Sdavidcs 2518250661Sdavidcs add_rcv->rds[i].paddr_std = 2519250661Sdavidcs qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr); 2520284982Sdavidcs 2521284982Sdavidcs if (ha->hw.enable_9kb) 2522284982Sdavidcs add_rcv->rds[i].std_bsize = 2523284982Sdavidcs qla_host_to_le64(MJUM9BYTES); 2524284982Sdavidcs else 2525284982Sdavidcs add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); 2526284982Sdavidcs 2527250661Sdavidcs 
add_rcv->rds[i].std_nentries = 2528250661Sdavidcs qla_host_to_le32(NUM_RX_DESCRIPTORS); 2529250661Sdavidcs } 2530250661Sdavidcs 2531250661Sdavidcs 2532250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)add_rcv, 2533250661Sdavidcs (sizeof (q80_rq_add_rcv_rings_t) >> 2), 2534250661Sdavidcs ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { 2535250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 2536250661Sdavidcs return (-1); 2537250661Sdavidcs } 2538250661Sdavidcs 2539250661Sdavidcs add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox; 2540250661Sdavidcs 2541250661Sdavidcs err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status); 2542250661Sdavidcs 2543250661Sdavidcs if (err) { 2544250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2545250661Sdavidcs return (-1); 2546250661Sdavidcs } 2547250661Sdavidcs 2548284982Sdavidcs for (i = 0; i < nsds; i++) { 2549284982Sdavidcs hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i]; 2550250661Sdavidcs } 2551284982Sdavidcs 2552284982Sdavidcs for (i = 0; i < nsds; i++) { 2553284982Sdavidcs hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std; 2554250661Sdavidcs } 2555284982Sdavidcs 2556250661Sdavidcs return (0); 2557250661Sdavidcs} 2558250661Sdavidcs 2559250661Sdavidcs/* 2560250661Sdavidcs * Name: qla_del_rcv_cntxt 2561250661Sdavidcs * Function: Destroys the Receive Context. 
2562250661Sdavidcs */ 2563250661Sdavidcsstatic void 2564250661Sdavidcsqla_del_rcv_cntxt(qla_host_t *ha) 2565250661Sdavidcs{ 2566250661Sdavidcs device_t dev = ha->pci_dev; 2567250661Sdavidcs q80_rcv_cntxt_destroy_t *rcntxt; 2568250661Sdavidcs q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp; 2569250661Sdavidcs uint32_t err; 2570250661Sdavidcs uint8_t bcast_mac[6]; 2571250661Sdavidcs 2572250661Sdavidcs if (!ha->hw.flags.init_rx_cnxt) 2573250661Sdavidcs return; 2574250661Sdavidcs 2575250661Sdavidcs if (qla_hw_del_all_mcast(ha)) 2576250661Sdavidcs return; 2577250661Sdavidcs 2578250661Sdavidcs if (ha->hw.flags.bcast_mac) { 2579250661Sdavidcs 2580250661Sdavidcs bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; 2581250661Sdavidcs bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; 2582250661Sdavidcs 2583250661Sdavidcs if (qla_config_mac_addr(ha, bcast_mac, 0)) 2584250661Sdavidcs return; 2585250661Sdavidcs ha->hw.flags.bcast_mac = 0; 2586250661Sdavidcs 2587250661Sdavidcs } 2588250661Sdavidcs 2589250661Sdavidcs if (ha->hw.flags.unicast_mac) { 2590250661Sdavidcs if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0)) 2591250661Sdavidcs return; 2592250661Sdavidcs ha->hw.flags.unicast_mac = 0; 2593250661Sdavidcs } 2594250661Sdavidcs 2595250661Sdavidcs rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox; 2596250661Sdavidcs bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t))); 2597250661Sdavidcs 2598250661Sdavidcs rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT; 2599250661Sdavidcs rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2); 2600250661Sdavidcs rcntxt->count_version |= Q8_MBX_CMD_VERSION; 2601250661Sdavidcs 2602250661Sdavidcs rcntxt->cntxt_id = ha->hw.rcv_cntxt_id; 2603250661Sdavidcs 2604250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, 2605250661Sdavidcs (sizeof (q80_rcv_cntxt_destroy_t) >> 2), 2606250661Sdavidcs ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) { 2607250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 2608250661Sdavidcs 
return; 2609250661Sdavidcs } 2610250661Sdavidcs rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox; 2611250661Sdavidcs 2612250661Sdavidcs err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); 2613250661Sdavidcs 2614250661Sdavidcs if (err) { 2615250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2616250661Sdavidcs } 2617250661Sdavidcs 2618250661Sdavidcs ha->hw.flags.init_rx_cnxt = 0; 2619250661Sdavidcs return; 2620250661Sdavidcs} 2621250661Sdavidcs 2622250661Sdavidcs/* 2623250661Sdavidcs * Name: qla_init_xmt_cntxt 2624250661Sdavidcs * Function: Creates the Transmit Context. 2625250661Sdavidcs */ 2626250661Sdavidcsstatic int 2627250661Sdavidcsqla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) 2628250661Sdavidcs{ 2629250661Sdavidcs device_t dev; 2630250661Sdavidcs qla_hw_t *hw = &ha->hw; 2631250661Sdavidcs q80_rq_tx_cntxt_t *tcntxt; 2632250661Sdavidcs q80_rsp_tx_cntxt_t *tcntxt_rsp; 2633250661Sdavidcs uint32_t err; 2634250661Sdavidcs qla_hw_tx_cntxt_t *hw_tx_cntxt; 2635250661Sdavidcs 2636250661Sdavidcs hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; 2637250661Sdavidcs 2638250661Sdavidcs dev = ha->pci_dev; 2639250661Sdavidcs 2640250661Sdavidcs /* 2641250661Sdavidcs * Create Transmit Context 2642250661Sdavidcs */ 2643250661Sdavidcs tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox; 2644250661Sdavidcs bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t))); 2645250661Sdavidcs 2646250661Sdavidcs tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT; 2647250661Sdavidcs tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2); 2648250661Sdavidcs tcntxt->count_version |= Q8_MBX_CMD_VERSION; 2649250661Sdavidcs 2650284982Sdavidcs#ifdef QL_ENABLE_ISCSI_TLV 2651284982Sdavidcs 2652284982Sdavidcs tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO | 2653284982Sdavidcs Q8_TX_CNTXT_CAP0_TC; 2654284982Sdavidcs 2655284982Sdavidcs if (txr_idx >= (ha->hw.num_tx_rings >> 1)) { 2656284982Sdavidcs tcntxt->traffic_class = 1; 2657284982Sdavidcs } 2658284982Sdavidcs 2659284982Sdavidcs#else 
2660284982Sdavidcs 2661250661Sdavidcs tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO; 2662250661Sdavidcs 2663284982Sdavidcs#endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 2664284982Sdavidcs 2665250661Sdavidcs tcntxt->ntx_rings = 1; 2666250661Sdavidcs 2667250661Sdavidcs tcntxt->tx_ring[0].paddr = 2668250661Sdavidcs qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr); 2669250661Sdavidcs tcntxt->tx_ring[0].tx_consumer = 2670250661Sdavidcs qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr); 2671250661Sdavidcs tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS); 2672250661Sdavidcs 2673250661Sdavidcs tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[0]); 2674250661Sdavidcs tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0); 2675250661Sdavidcs 2676250661Sdavidcs 2677250661Sdavidcs hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS; 2678250661Sdavidcs hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0; 2679250661Sdavidcs 2680250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, 2681250661Sdavidcs (sizeof (q80_rq_tx_cntxt_t) >> 2), 2682250661Sdavidcs ha->hw.mbox, 2683250661Sdavidcs (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) { 2684250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 2685250661Sdavidcs return (-1); 2686250661Sdavidcs } 2687250661Sdavidcs tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox; 2688250661Sdavidcs 2689250661Sdavidcs err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); 2690250661Sdavidcs 2691250661Sdavidcs if (err) { 2692250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2693250661Sdavidcs return -1; 2694250661Sdavidcs } 2695250661Sdavidcs 2696250661Sdavidcs hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index; 2697250661Sdavidcs hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id; 2698250661Sdavidcs 2699284982Sdavidcs if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0)) 2700284982Sdavidcs return (-1); 2701284982Sdavidcs 2702250661Sdavidcs return (0); 2703250661Sdavidcs} 
2704250661Sdavidcs 2705250661Sdavidcs 2706250661Sdavidcs/* 2707250661Sdavidcs * Name: qla_del_xmt_cntxt 2708250661Sdavidcs * Function: Destroys the Transmit Context. 2709250661Sdavidcs */ 2710250661Sdavidcsstatic int 2711250661Sdavidcsqla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) 2712250661Sdavidcs{ 2713250661Sdavidcs device_t dev = ha->pci_dev; 2714250661Sdavidcs q80_tx_cntxt_destroy_t *tcntxt; 2715250661Sdavidcs q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp; 2716250661Sdavidcs uint32_t err; 2717250661Sdavidcs 2718250661Sdavidcs tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox; 2719250661Sdavidcs bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t))); 2720250661Sdavidcs 2721250661Sdavidcs tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT; 2722250661Sdavidcs tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2); 2723250661Sdavidcs tcntxt->count_version |= Q8_MBX_CMD_VERSION; 2724250661Sdavidcs 2725250661Sdavidcs tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id; 2726250661Sdavidcs 2727250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, 2728250661Sdavidcs (sizeof (q80_tx_cntxt_destroy_t) >> 2), 2729250661Sdavidcs ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) { 2730250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 2731250661Sdavidcs return (-1); 2732250661Sdavidcs } 2733250661Sdavidcs tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox; 2734250661Sdavidcs 2735250661Sdavidcs err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); 2736250661Sdavidcs 2737250661Sdavidcs if (err) { 2738250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2739250661Sdavidcs return (-1); 2740250661Sdavidcs } 2741250661Sdavidcs 2742250661Sdavidcs return (0); 2743250661Sdavidcs} 2744250661Sdavidcsstatic void 2745250661Sdavidcsqla_del_xmt_cntxt(qla_host_t *ha) 2746250661Sdavidcs{ 2747250661Sdavidcs uint32_t i; 2748250661Sdavidcs 2749250661Sdavidcs if (!ha->hw.flags.init_tx_cnxt) 2750250661Sdavidcs return; 2751250661Sdavidcs 2752250661Sdavidcs 
for (i = 0; i < ha->hw.num_tx_rings; i++) { 2753250661Sdavidcs if (qla_del_xmt_cntxt_i(ha, i)) 2754250661Sdavidcs break; 2755250661Sdavidcs } 2756250661Sdavidcs ha->hw.flags.init_tx_cnxt = 0; 2757250661Sdavidcs} 2758250661Sdavidcs 2759250661Sdavidcsstatic int 2760250661Sdavidcsqla_init_xmt_cntxt(qla_host_t *ha) 2761250661Sdavidcs{ 2762250661Sdavidcs uint32_t i, j; 2763250661Sdavidcs 2764250661Sdavidcs for (i = 0; i < ha->hw.num_tx_rings; i++) { 2765250661Sdavidcs if (qla_init_xmt_cntxt_i(ha, i) != 0) { 2766250661Sdavidcs for (j = 0; j < i; j++) 2767250661Sdavidcs qla_del_xmt_cntxt_i(ha, j); 2768250661Sdavidcs return (-1); 2769250661Sdavidcs } 2770250661Sdavidcs } 2771250661Sdavidcs ha->hw.flags.init_tx_cnxt = 1; 2772250661Sdavidcs return (0); 2773250661Sdavidcs} 2774250661Sdavidcs 2775250661Sdavidcsstatic int 2776250661Sdavidcsqla_hw_add_all_mcast(qla_host_t *ha) 2777250661Sdavidcs{ 2778250661Sdavidcs int i, nmcast; 2779250661Sdavidcs 2780250661Sdavidcs nmcast = ha->hw.nmcast; 2781250661Sdavidcs 2782250661Sdavidcs for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) { 2783250661Sdavidcs if ((ha->hw.mcast[i].addr[0] != 0) || 2784250661Sdavidcs (ha->hw.mcast[i].addr[1] != 0) || 2785250661Sdavidcs (ha->hw.mcast[i].addr[2] != 0) || 2786250661Sdavidcs (ha->hw.mcast[i].addr[3] != 0) || 2787250661Sdavidcs (ha->hw.mcast[i].addr[4] != 0) || 2788250661Sdavidcs (ha->hw.mcast[i].addr[5] != 0)) { 2789250661Sdavidcs 2790250661Sdavidcs if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 1)) { 2791250661Sdavidcs device_printf(ha->pci_dev, "%s: failed\n", 2792250661Sdavidcs __func__); 2793250661Sdavidcs return (-1); 2794250661Sdavidcs } 2795250661Sdavidcs 2796250661Sdavidcs nmcast--; 2797250661Sdavidcs } 2798250661Sdavidcs } 2799250661Sdavidcs return 0; 2800250661Sdavidcs} 2801250661Sdavidcs 2802250661Sdavidcsstatic int 2803250661Sdavidcsqla_hw_del_all_mcast(qla_host_t *ha) 2804250661Sdavidcs{ 2805250661Sdavidcs int i, nmcast; 2806250661Sdavidcs 2807250661Sdavidcs nmcast = 
ha->hw.nmcast; 2808250661Sdavidcs 2809250661Sdavidcs for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) { 2810250661Sdavidcs if ((ha->hw.mcast[i].addr[0] != 0) || 2811250661Sdavidcs (ha->hw.mcast[i].addr[1] != 0) || 2812250661Sdavidcs (ha->hw.mcast[i].addr[2] != 0) || 2813250661Sdavidcs (ha->hw.mcast[i].addr[3] != 0) || 2814250661Sdavidcs (ha->hw.mcast[i].addr[4] != 0) || 2815250661Sdavidcs (ha->hw.mcast[i].addr[5] != 0)) { 2816250661Sdavidcs 2817250661Sdavidcs if (qla_config_mac_addr(ha, ha->hw.mcast[i].addr, 0)) 2818250661Sdavidcs return (-1); 2819250661Sdavidcs 2820250661Sdavidcs nmcast--; 2821250661Sdavidcs } 2822250661Sdavidcs } 2823250661Sdavidcs return 0; 2824250661Sdavidcs} 2825250661Sdavidcs 2826250661Sdavidcsstatic int 2827250661Sdavidcsqla_hw_add_mcast(qla_host_t *ha, uint8_t *mta) 2828250661Sdavidcs{ 2829250661Sdavidcs int i; 2830250661Sdavidcs 2831250661Sdavidcs for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 2832250661Sdavidcs 2833250661Sdavidcs if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) 2834250661Sdavidcs return 0; /* its been already added */ 2835250661Sdavidcs } 2836250661Sdavidcs 2837250661Sdavidcs for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 2838250661Sdavidcs 2839250661Sdavidcs if ((ha->hw.mcast[i].addr[0] == 0) && 2840250661Sdavidcs (ha->hw.mcast[i].addr[1] == 0) && 2841250661Sdavidcs (ha->hw.mcast[i].addr[2] == 0) && 2842250661Sdavidcs (ha->hw.mcast[i].addr[3] == 0) && 2843250661Sdavidcs (ha->hw.mcast[i].addr[4] == 0) && 2844250661Sdavidcs (ha->hw.mcast[i].addr[5] == 0)) { 2845250661Sdavidcs 2846250661Sdavidcs if (qla_config_mac_addr(ha, mta, 1)) 2847250661Sdavidcs return (-1); 2848250661Sdavidcs 2849250661Sdavidcs bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN); 2850250661Sdavidcs ha->hw.nmcast++; 2851250661Sdavidcs 2852250661Sdavidcs return 0; 2853250661Sdavidcs } 2854250661Sdavidcs } 2855250661Sdavidcs return 0; 2856250661Sdavidcs} 2857250661Sdavidcs 2858250661Sdavidcsstatic int 
2859250661Sdavidcsqla_hw_del_mcast(qla_host_t *ha, uint8_t *mta) 2860250661Sdavidcs{ 2861250661Sdavidcs int i; 2862250661Sdavidcs 2863250661Sdavidcs for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 2864250661Sdavidcs if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) { 2865250661Sdavidcs 2866250661Sdavidcs if (qla_config_mac_addr(ha, mta, 0)) 2867250661Sdavidcs return (-1); 2868250661Sdavidcs 2869250661Sdavidcs ha->hw.mcast[i].addr[0] = 0; 2870250661Sdavidcs ha->hw.mcast[i].addr[1] = 0; 2871250661Sdavidcs ha->hw.mcast[i].addr[2] = 0; 2872250661Sdavidcs ha->hw.mcast[i].addr[3] = 0; 2873250661Sdavidcs ha->hw.mcast[i].addr[4] = 0; 2874250661Sdavidcs ha->hw.mcast[i].addr[5] = 0; 2875250661Sdavidcs 2876250661Sdavidcs ha->hw.nmcast--; 2877250661Sdavidcs 2878250661Sdavidcs return 0; 2879250661Sdavidcs } 2880250661Sdavidcs } 2881250661Sdavidcs return 0; 2882250661Sdavidcs} 2883250661Sdavidcs 2884250661Sdavidcs/* 2885250661Sdavidcs * Name: ql_hw_set_multi 2886250661Sdavidcs * Function: Sets the Multicast Addresses provided the host O.S into the 2887250661Sdavidcs * hardware (for the given interface) 2888250661Sdavidcs */ 2889250661Sdavidcsint 2890250661Sdavidcsql_hw_set_multi(qla_host_t *ha, uint8_t *mcast, uint32_t mcnt, 2891250661Sdavidcs uint32_t add_mac) 2892250661Sdavidcs{ 2893250661Sdavidcs int i; 2894250661Sdavidcs uint8_t *mta = mcast; 2895250661Sdavidcs int ret = 0; 2896250661Sdavidcs 2897250661Sdavidcs for (i = 0; i < mcnt; i++) { 2898250661Sdavidcs if (add_mac) { 2899250661Sdavidcs ret = qla_hw_add_mcast(ha, mta); 2900250661Sdavidcs if (ret) 2901250661Sdavidcs break; 2902250661Sdavidcs } else { 2903250661Sdavidcs ret = qla_hw_del_mcast(ha, mta); 2904250661Sdavidcs if (ret) 2905250661Sdavidcs break; 2906250661Sdavidcs } 2907250661Sdavidcs 2908250661Sdavidcs mta += Q8_MAC_ADDR_LEN; 2909250661Sdavidcs } 2910250661Sdavidcs return (ret); 2911250661Sdavidcs} 2912250661Sdavidcs 2913250661Sdavidcs/* 2914250661Sdavidcs * Name: qla_hw_tx_done_locked 2915250661Sdavidcs * 
Function: Handle Transmit Completions 2916250661Sdavidcs */ 2917250661Sdavidcsstatic void 2918250661Sdavidcsqla_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx) 2919250661Sdavidcs{ 2920250661Sdavidcs qla_tx_buf_t *txb; 2921250661Sdavidcs qla_hw_t *hw = &ha->hw; 2922250661Sdavidcs uint32_t comp_idx, comp_count = 0; 2923250661Sdavidcs qla_hw_tx_cntxt_t *hw_tx_cntxt; 2924250661Sdavidcs 2925250661Sdavidcs hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; 2926250661Sdavidcs 2927250661Sdavidcs /* retrieve index of last entry in tx ring completed */ 2928250661Sdavidcs comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons)); 2929250661Sdavidcs 2930250661Sdavidcs while (comp_idx != hw_tx_cntxt->txr_comp) { 2931250661Sdavidcs 2932250661Sdavidcs txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp]; 2933250661Sdavidcs 2934250661Sdavidcs hw_tx_cntxt->txr_comp++; 2935250661Sdavidcs if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS) 2936250661Sdavidcs hw_tx_cntxt->txr_comp = 0; 2937250661Sdavidcs 2938250661Sdavidcs comp_count++; 2939250661Sdavidcs 2940250661Sdavidcs if (txb->m_head) { 2941250661Sdavidcs ha->ifp->if_opackets++; 2942250661Sdavidcs 2943250661Sdavidcs bus_dmamap_sync(ha->tx_tag, txb->map, 2944250661Sdavidcs BUS_DMASYNC_POSTWRITE); 2945250661Sdavidcs bus_dmamap_unload(ha->tx_tag, txb->map); 2946250661Sdavidcs m_freem(txb->m_head); 2947250661Sdavidcs 2948250661Sdavidcs txb->m_head = NULL; 2949250661Sdavidcs } 2950250661Sdavidcs } 2951250661Sdavidcs 2952250661Sdavidcs hw_tx_cntxt->txr_free += comp_count; 2953250661Sdavidcs return; 2954250661Sdavidcs} 2955250661Sdavidcs 2956250661Sdavidcs/* 2957250661Sdavidcs * Name: ql_hw_tx_done 2958250661Sdavidcs * Function: Handle Transmit Completions 2959250661Sdavidcs */ 2960250661Sdavidcsvoid 2961250661Sdavidcsql_hw_tx_done(qla_host_t *ha) 2962250661Sdavidcs{ 2963250661Sdavidcs int i; 2964250661Sdavidcs uint32_t flag = 0; 2965250661Sdavidcs 2966250661Sdavidcs if (!mtx_trylock(&ha->tx_lock)) { 2967250661Sdavidcs QL_DPRINT8(ha, (ha->pci_dev, 
2968250661Sdavidcs "%s: !mtx_trylock(&ha->tx_lock)\n", __func__)); 2969250661Sdavidcs return; 2970250661Sdavidcs } 2971250661Sdavidcs for (i = 0; i < ha->hw.num_tx_rings; i++) { 2972250661Sdavidcs qla_hw_tx_done_locked(ha, i); 2973250661Sdavidcs if (ha->hw.tx_cntxt[i].txr_free <= (NUM_TX_DESCRIPTORS >> 1)) 2974250661Sdavidcs flag = 1; 2975250661Sdavidcs } 2976250661Sdavidcs 2977250661Sdavidcs if (!flag) 2978250661Sdavidcs ha->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2979250661Sdavidcs 2980250661Sdavidcs QLA_TX_UNLOCK(ha); 2981250661Sdavidcs return; 2982250661Sdavidcs} 2983250661Sdavidcs 2984250661Sdavidcsvoid 2985250661Sdavidcsql_update_link_state(qla_host_t *ha) 2986250661Sdavidcs{ 2987250661Sdavidcs uint32_t link_state; 2988250661Sdavidcs uint32_t prev_link_state; 2989250661Sdavidcs 2990250661Sdavidcs if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2991250661Sdavidcs ha->hw.link_up = 0; 2992250661Sdavidcs return; 2993250661Sdavidcs } 2994250661Sdavidcs link_state = READ_REG32(ha, Q8_LINK_STATE); 2995250661Sdavidcs 2996250661Sdavidcs prev_link_state = ha->hw.link_up; 2997250661Sdavidcs 2998250661Sdavidcs if (ha->pci_func == 0) 2999250661Sdavidcs ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0); 3000250661Sdavidcs else 3001250661Sdavidcs ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)? 
1 : 0); 3002250661Sdavidcs 3003250661Sdavidcs if (prev_link_state != ha->hw.link_up) { 3004250661Sdavidcs if (ha->hw.link_up) { 3005250661Sdavidcs if_link_state_change(ha->ifp, LINK_STATE_UP); 3006250661Sdavidcs } else { 3007250661Sdavidcs if_link_state_change(ha->ifp, LINK_STATE_DOWN); 3008250661Sdavidcs } 3009250661Sdavidcs } 3010250661Sdavidcs return; 3011250661Sdavidcs} 3012250661Sdavidcs 3013250661Sdavidcsvoid 3014250661Sdavidcsql_hw_stop_rcv(qla_host_t *ha) 3015250661Sdavidcs{ 3016250661Sdavidcs int i, done, count = 100; 3017250661Sdavidcs 3018284982Sdavidcs while (count) { 3019250661Sdavidcs done = 1; 3020250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) { 3021250661Sdavidcs if (ha->hw.sds[i].rcv_active) 3022250661Sdavidcs done = 0; 3023250661Sdavidcs } 3024250661Sdavidcs if (done) 3025250661Sdavidcs break; 3026250661Sdavidcs else 3027250661Sdavidcs qla_mdelay(__func__, 10); 3028284982Sdavidcs count--; 3029250661Sdavidcs } 3030250661Sdavidcs if (!count) 3031250661Sdavidcs device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__); 3032250661Sdavidcs 3033250661Sdavidcs return; 3034250661Sdavidcs} 3035250661Sdavidcs 3036250661Sdavidcsint 3037250661Sdavidcsql_hw_check_health(qla_host_t *ha) 3038250661Sdavidcs{ 3039250661Sdavidcs uint32_t val; 3040250661Sdavidcs 3041250661Sdavidcs ha->hw.health_count++; 3042250661Sdavidcs 3043250661Sdavidcs if (ha->hw.health_count < 1000) 3044250661Sdavidcs return 0; 3045250661Sdavidcs 3046250661Sdavidcs ha->hw.health_count = 0; 3047250661Sdavidcs 3048250661Sdavidcs val = READ_REG32(ha, Q8_ASIC_TEMPERATURE); 3049250661Sdavidcs 3050250661Sdavidcs if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) || 3051250661Sdavidcs (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) { 3052250661Sdavidcs device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n", 3053250661Sdavidcs __func__, val); 3054250661Sdavidcs return -1; 3055250661Sdavidcs } 3056250661Sdavidcs 3057250661Sdavidcs val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT); 
3058250661Sdavidcs 3059250661Sdavidcs if ((val != ha->hw.hbeat_value) && 3060250661Sdavidcs (!(QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE)))) { 3061250661Sdavidcs ha->hw.hbeat_value = val; 3062250661Sdavidcs return 0; 3063250661Sdavidcs } 3064250661Sdavidcs device_printf(ha->pci_dev, "%s: Heartbeat Failue [0x%08x]\n", 3065250661Sdavidcs __func__, val); 3066250661Sdavidcs 3067250661Sdavidcs return -1; 3068250661Sdavidcs} 3069250661Sdavidcs 3070250661Sdavidcsstatic int 3071284982Sdavidcsqla_init_nic_func(qla_host_t *ha) 3072284982Sdavidcs{ 3073284982Sdavidcs device_t dev; 3074284982Sdavidcs q80_init_nic_func_t *init_nic; 3075284982Sdavidcs q80_init_nic_func_rsp_t *init_nic_rsp; 3076284982Sdavidcs uint32_t err; 3077284982Sdavidcs 3078284982Sdavidcs dev = ha->pci_dev; 3079284982Sdavidcs 3080284982Sdavidcs init_nic = (q80_init_nic_func_t *)ha->hw.mbox; 3081284982Sdavidcs bzero(init_nic, sizeof(q80_init_nic_func_t)); 3082284982Sdavidcs 3083284982Sdavidcs init_nic->opcode = Q8_MBX_INIT_NIC_FUNC; 3084284982Sdavidcs init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2); 3085284982Sdavidcs init_nic->count_version |= Q8_MBX_CMD_VERSION; 3086284982Sdavidcs 3087284982Sdavidcs init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN; 3088284982Sdavidcs init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN; 3089284982Sdavidcs init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN; 3090284982Sdavidcs 3091284982Sdavidcs//qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t)); 3092284982Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)init_nic, 3093284982Sdavidcs (sizeof (q80_init_nic_func_t) >> 2), 3094284982Sdavidcs ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) { 3095284982Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3096284982Sdavidcs return -1; 3097284982Sdavidcs } 3098284982Sdavidcs 3099284982Sdavidcs init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox; 3100284982Sdavidcs// qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t)); 
3101284982Sdavidcs 3102284982Sdavidcs err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status); 3103284982Sdavidcs 3104284982Sdavidcs if (err) { 3105284982Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3106284982Sdavidcs } 3107284982Sdavidcs 3108284982Sdavidcs return 0; 3109284982Sdavidcs} 3110284982Sdavidcs 3111284982Sdavidcsstatic int 3112284982Sdavidcsqla_stop_nic_func(qla_host_t *ha) 3113284982Sdavidcs{ 3114284982Sdavidcs device_t dev; 3115284982Sdavidcs q80_stop_nic_func_t *stop_nic; 3116284982Sdavidcs q80_stop_nic_func_rsp_t *stop_nic_rsp; 3117284982Sdavidcs uint32_t err; 3118284982Sdavidcs 3119284982Sdavidcs dev = ha->pci_dev; 3120284982Sdavidcs 3121284982Sdavidcs stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox; 3122284982Sdavidcs bzero(stop_nic, sizeof(q80_stop_nic_func_t)); 3123284982Sdavidcs 3124284982Sdavidcs stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC; 3125284982Sdavidcs stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2); 3126284982Sdavidcs stop_nic->count_version |= Q8_MBX_CMD_VERSION; 3127284982Sdavidcs 3128284982Sdavidcs stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN; 3129284982Sdavidcs stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN; 3130284982Sdavidcs 3131284982Sdavidcs//qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t)); 3132284982Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)stop_nic, 3133284982Sdavidcs (sizeof (q80_stop_nic_func_t) >> 2), 3134284982Sdavidcs ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) { 3135284982Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3136284982Sdavidcs return -1; 3137284982Sdavidcs } 3138284982Sdavidcs 3139284982Sdavidcs stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox; 3140284982Sdavidcs//qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_ t)); 3141284982Sdavidcs 3142284982Sdavidcs err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status); 3143284982Sdavidcs 3144284982Sdavidcs if (err) { 3145284982Sdavidcs device_printf(dev, "%s: 
failed [0x%08x]\n", __func__, err); 3146284982Sdavidcs } 3147284982Sdavidcs 3148284982Sdavidcs return 0; 3149284982Sdavidcs} 3150284982Sdavidcs 3151284982Sdavidcsstatic int 3152284982Sdavidcsqla_query_fw_dcbx_caps(qla_host_t *ha) 3153284982Sdavidcs{ 3154284982Sdavidcs device_t dev; 3155284982Sdavidcs q80_query_fw_dcbx_caps_t *fw_dcbx; 3156284982Sdavidcs q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp; 3157284982Sdavidcs uint32_t err; 3158284982Sdavidcs 3159284982Sdavidcs dev = ha->pci_dev; 3160284982Sdavidcs 3161284982Sdavidcs fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox; 3162284982Sdavidcs bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t)); 3163284982Sdavidcs 3164284982Sdavidcs fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS; 3165284982Sdavidcs fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2); 3166284982Sdavidcs fw_dcbx->count_version |= Q8_MBX_CMD_VERSION; 3167284982Sdavidcs 3168284982Sdavidcs ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t)); 3169284982Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx, 3170284982Sdavidcs (sizeof (q80_query_fw_dcbx_caps_t) >> 2), 3171284982Sdavidcs ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) { 3172284982Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3173284982Sdavidcs return -1; 3174284982Sdavidcs } 3175284982Sdavidcs 3176284982Sdavidcs fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox; 3177284982Sdavidcs ql_dump_buf8(ha, __func__, fw_dcbx_rsp, 3178284982Sdavidcs sizeof (q80_query_fw_dcbx_caps_rsp_t)); 3179284982Sdavidcs 3180284982Sdavidcs err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status); 3181284982Sdavidcs 3182284982Sdavidcs if (err) { 3183284982Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3184284982Sdavidcs } 3185284982Sdavidcs 3186284982Sdavidcs return 0; 3187284982Sdavidcs} 3188284982Sdavidcs 3189284982Sdavidcsstatic int 3190284982Sdavidcsqla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2, 3191284982Sdavidcs uint32_t aen_mb3, 
uint32_t aen_mb4) 3192284982Sdavidcs{ 3193284982Sdavidcs device_t dev; 3194284982Sdavidcs q80_idc_ack_t *idc_ack; 3195284982Sdavidcs q80_idc_ack_rsp_t *idc_ack_rsp; 3196284982Sdavidcs uint32_t err; 3197284982Sdavidcs int count = 300; 3198284982Sdavidcs 3199284982Sdavidcs dev = ha->pci_dev; 3200284982Sdavidcs 3201284982Sdavidcs idc_ack = (q80_idc_ack_t *)ha->hw.mbox; 3202284982Sdavidcs bzero(idc_ack, sizeof(q80_idc_ack_t)); 3203284982Sdavidcs 3204284982Sdavidcs idc_ack->opcode = Q8_MBX_IDC_ACK; 3205284982Sdavidcs idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2); 3206284982Sdavidcs idc_ack->count_version |= Q8_MBX_CMD_VERSION; 3207284982Sdavidcs 3208284982Sdavidcs idc_ack->aen_mb1 = aen_mb1; 3209284982Sdavidcs idc_ack->aen_mb2 = aen_mb2; 3210284982Sdavidcs idc_ack->aen_mb3 = aen_mb3; 3211284982Sdavidcs idc_ack->aen_mb4 = aen_mb4; 3212284982Sdavidcs 3213284982Sdavidcs ha->hw.imd_compl= 0; 3214284982Sdavidcs 3215284982Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)idc_ack, 3216284982Sdavidcs (sizeof (q80_idc_ack_t) >> 2), 3217284982Sdavidcs ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) { 3218284982Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3219284982Sdavidcs return -1; 3220284982Sdavidcs } 3221284982Sdavidcs 3222284982Sdavidcs idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox; 3223284982Sdavidcs 3224284982Sdavidcs err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status); 3225284982Sdavidcs 3226284982Sdavidcs if (err) { 3227284982Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3228284982Sdavidcs return(-1); 3229284982Sdavidcs } 3230284982Sdavidcs 3231284982Sdavidcs while (count && !ha->hw.imd_compl) { 3232284982Sdavidcs qla_mdelay(__func__, 100); 3233284982Sdavidcs count--; 3234284982Sdavidcs } 3235284982Sdavidcs 3236284982Sdavidcs if (!count) 3237284982Sdavidcs return -1; 3238284982Sdavidcs else 3239284982Sdavidcs device_printf(dev, "%s: count %d\n", __func__, count); 3240284982Sdavidcs 3241284982Sdavidcs return (0); 3242284982Sdavidcs} 
3243284982Sdavidcs 3244284982Sdavidcsstatic int 3245284982Sdavidcsqla_set_port_config(qla_host_t *ha, uint32_t cfg_bits) 3246284982Sdavidcs{ 3247284982Sdavidcs device_t dev; 3248284982Sdavidcs q80_set_port_cfg_t *pcfg; 3249284982Sdavidcs q80_set_port_cfg_rsp_t *pfg_rsp; 3250284982Sdavidcs uint32_t err; 3251284982Sdavidcs int count = 300; 3252284982Sdavidcs 3253284982Sdavidcs dev = ha->pci_dev; 3254284982Sdavidcs 3255284982Sdavidcs pcfg = (q80_set_port_cfg_t *)ha->hw.mbox; 3256284982Sdavidcs bzero(pcfg, sizeof(q80_set_port_cfg_t)); 3257284982Sdavidcs 3258284982Sdavidcs pcfg->opcode = Q8_MBX_SET_PORT_CONFIG; 3259284982Sdavidcs pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2); 3260284982Sdavidcs pcfg->count_version |= Q8_MBX_CMD_VERSION; 3261284982Sdavidcs 3262284982Sdavidcs pcfg->cfg_bits = cfg_bits; 3263284982Sdavidcs 3264284982Sdavidcs device_printf(dev, "%s: cfg_bits" 3265284982Sdavidcs " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" 3266284982Sdavidcs " [0x%x, 0x%x, 0x%x]\n", __func__, 3267284982Sdavidcs ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), 3268284982Sdavidcs ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), 3269284982Sdavidcs ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 
1: 0)); 3270284982Sdavidcs 3271284982Sdavidcs ha->hw.imd_compl= 0; 3272284982Sdavidcs 3273284982Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)pcfg, 3274284982Sdavidcs (sizeof (q80_set_port_cfg_t) >> 2), 3275284982Sdavidcs ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) { 3276284982Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3277284982Sdavidcs return -1; 3278284982Sdavidcs } 3279284982Sdavidcs 3280284982Sdavidcs pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox; 3281284982Sdavidcs 3282284982Sdavidcs err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status); 3283284982Sdavidcs 3284284982Sdavidcs if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) { 3285284982Sdavidcs while (count && !ha->hw.imd_compl) { 3286284982Sdavidcs qla_mdelay(__func__, 100); 3287284982Sdavidcs count--; 3288284982Sdavidcs } 3289284982Sdavidcs if (count) { 3290284982Sdavidcs device_printf(dev, "%s: count %d\n", __func__, count); 3291284982Sdavidcs 3292284982Sdavidcs err = 0; 3293284982Sdavidcs } 3294284982Sdavidcs } 3295284982Sdavidcs 3296284982Sdavidcs if (err) { 3297284982Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3298284982Sdavidcs return(-1); 3299284982Sdavidcs } 3300284982Sdavidcs 3301284982Sdavidcs return (0); 3302284982Sdavidcs} 3303284982Sdavidcs 3304284982Sdavidcs 3305284982Sdavidcsstatic int 3306250661Sdavidcsqla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size) 3307250661Sdavidcs{ 3308250661Sdavidcs uint32_t err; 3309250661Sdavidcs device_t dev = ha->pci_dev; 3310250661Sdavidcs q80_config_md_templ_size_t *md_size; 3311250661Sdavidcs q80_config_md_templ_size_rsp_t *md_size_rsp; 3312250661Sdavidcs 3313284982Sdavidcs#ifdef QL_LDFLASH_FW 3314284982Sdavidcs 3315284982Sdavidcs *size = ql83xx_minidump_len; 3316284982Sdavidcs return (0); 3317284982Sdavidcs 3318284982Sdavidcs#endif /* #ifdef QL_LDFLASH_FW */ 3319284982Sdavidcs 3320250661Sdavidcs md_size = (q80_config_md_templ_size_t *) ha->hw.mbox; 3321250661Sdavidcs bzero(md_size, sizeof(q80_config_md_templ_size_t)); 
3322250661Sdavidcs 3323250661Sdavidcs md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE; 3324250661Sdavidcs md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2); 3325250661Sdavidcs md_size->count_version |= Q8_MBX_CMD_VERSION; 3326250661Sdavidcs 3327250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *) md_size, 3328250661Sdavidcs (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox, 3329250661Sdavidcs (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) { 3330250661Sdavidcs 3331250661Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3332250661Sdavidcs 3333250661Sdavidcs return (-1); 3334250661Sdavidcs } 3335250661Sdavidcs 3336250661Sdavidcs md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox; 3337250661Sdavidcs 3338250661Sdavidcs err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status); 3339250661Sdavidcs 3340250661Sdavidcs if (err) { 3341250661Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3342250661Sdavidcs return(-1); 3343250661Sdavidcs } 3344250661Sdavidcs 3345250661Sdavidcs *size = md_size_rsp->templ_size; 3346250661Sdavidcs 3347250661Sdavidcs return (0); 3348250661Sdavidcs} 3349250661Sdavidcs 3350250661Sdavidcsstatic int 3351284982Sdavidcsqla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits) 3352284982Sdavidcs{ 3353284982Sdavidcs device_t dev; 3354284982Sdavidcs q80_get_port_cfg_t *pcfg; 3355284982Sdavidcs q80_get_port_cfg_rsp_t *pcfg_rsp; 3356284982Sdavidcs uint32_t err; 3357284982Sdavidcs 3358284982Sdavidcs dev = ha->pci_dev; 3359284982Sdavidcs 3360284982Sdavidcs pcfg = (q80_get_port_cfg_t *)ha->hw.mbox; 3361284982Sdavidcs bzero(pcfg, sizeof(q80_get_port_cfg_t)); 3362284982Sdavidcs 3363284982Sdavidcs pcfg->opcode = Q8_MBX_GET_PORT_CONFIG; 3364284982Sdavidcs pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2); 3365284982Sdavidcs pcfg->count_version |= Q8_MBX_CMD_VERSION; 3366284982Sdavidcs 3367284982Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)pcfg, 3368284982Sdavidcs (sizeof (q80_get_port_cfg_t) >> 2), 
3369284982Sdavidcs ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) { 3370284982Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3371284982Sdavidcs return -1; 3372284982Sdavidcs } 3373284982Sdavidcs 3374284982Sdavidcs pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox; 3375284982Sdavidcs 3376284982Sdavidcs err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status); 3377284982Sdavidcs 3378284982Sdavidcs if (err) { 3379284982Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3380284982Sdavidcs return(-1); 3381284982Sdavidcs } 3382284982Sdavidcs 3383284982Sdavidcs device_printf(dev, "%s: [cfg_bits, port type]" 3384284982Sdavidcs " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" 3385284982Sdavidcs " [0x%x, 0x%x, 0x%x]\n", __func__, 3386284982Sdavidcs pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type, 3387284982Sdavidcs ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), 3388284982Sdavidcs ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), 3389284982Sdavidcs ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 
1: 0) 3390284982Sdavidcs ); 3391284982Sdavidcs 3392284982Sdavidcs *cfg_bits = pcfg_rsp->cfg_bits; 3393284982Sdavidcs 3394284982Sdavidcs return (0); 3395284982Sdavidcs} 3396284982Sdavidcs 3397284982Sdavidcsint 3398284982Sdavidcsqla_iscsi_pdu(qla_host_t *ha, struct mbuf *mp) 3399284982Sdavidcs{ 3400284982Sdavidcs struct ether_vlan_header *eh; 3401284982Sdavidcs uint16_t etype; 3402284982Sdavidcs struct ip *ip = NULL; 3403284982Sdavidcs struct ip6_hdr *ip6 = NULL; 3404284982Sdavidcs struct tcphdr *th = NULL; 3405284982Sdavidcs uint32_t hdrlen; 3406284982Sdavidcs uint32_t offset; 3407284982Sdavidcs uint8_t buf[sizeof(struct ip6_hdr)]; 3408284982Sdavidcs 3409284982Sdavidcs eh = mtod(mp, struct ether_vlan_header *); 3410284982Sdavidcs 3411284982Sdavidcs if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3412284982Sdavidcs hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3413284982Sdavidcs etype = ntohs(eh->evl_proto); 3414284982Sdavidcs } else { 3415284982Sdavidcs hdrlen = ETHER_HDR_LEN; 3416284982Sdavidcs etype = ntohs(eh->evl_encap_proto); 3417284982Sdavidcs } 3418284982Sdavidcs 3419284982Sdavidcs if (etype == ETHERTYPE_IP) { 3420284982Sdavidcs 3421284982Sdavidcs offset = (hdrlen + sizeof (struct ip)); 3422284982Sdavidcs 3423284982Sdavidcs if (mp->m_len >= offset) { 3424284982Sdavidcs ip = (struct ip *)(mp->m_data + hdrlen); 3425284982Sdavidcs } else { 3426284982Sdavidcs m_copydata(mp, hdrlen, sizeof (struct ip), buf); 3427284982Sdavidcs ip = (struct ip *)buf; 3428284982Sdavidcs } 3429284982Sdavidcs 3430284982Sdavidcs if (ip->ip_p == IPPROTO_TCP) { 3431284982Sdavidcs 3432284982Sdavidcs hdrlen += ip->ip_hl << 2; 3433284982Sdavidcs offset = hdrlen + 4; 3434284982Sdavidcs 3435284982Sdavidcs if (mp->m_len >= offset) { 3436284982Sdavidcs th = (struct tcphdr *)(mp->m_data + hdrlen);; 3437284982Sdavidcs } else { 3438284982Sdavidcs m_copydata(mp, hdrlen, 4, buf); 3439284982Sdavidcs th = (struct tcphdr *)buf; 3440284982Sdavidcs } 3441284982Sdavidcs } 3442284982Sdavidcs 
3443284982Sdavidcs } else if (etype == ETHERTYPE_IPV6) { 3444284982Sdavidcs 3445284982Sdavidcs offset = (hdrlen + sizeof (struct ip6_hdr)); 3446284982Sdavidcs 3447284982Sdavidcs if (mp->m_len >= offset) { 3448284982Sdavidcs ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen); 3449284982Sdavidcs } else { 3450284982Sdavidcs m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf); 3451284982Sdavidcs ip6 = (struct ip6_hdr *)buf; 3452284982Sdavidcs } 3453284982Sdavidcs 3454284982Sdavidcs if (ip6->ip6_nxt == IPPROTO_TCP) { 3455284982Sdavidcs 3456284982Sdavidcs hdrlen += sizeof(struct ip6_hdr); 3457284982Sdavidcs offset = hdrlen + 4; 3458284982Sdavidcs 3459284982Sdavidcs if (mp->m_len >= offset) { 3460284982Sdavidcs th = (struct tcphdr *)(mp->m_data + hdrlen);; 3461284982Sdavidcs } else { 3462284982Sdavidcs m_copydata(mp, hdrlen, 4, buf); 3463284982Sdavidcs th = (struct tcphdr *)buf; 3464284982Sdavidcs } 3465284982Sdavidcs } 3466284982Sdavidcs } 3467284982Sdavidcs 3468284982Sdavidcs if (th != NULL) { 3469284982Sdavidcs if ((th->th_sport == htons(3260)) || 3470284982Sdavidcs (th->th_dport == htons(3260))) 3471284982Sdavidcs return 0; 3472284982Sdavidcs } 3473284982Sdavidcs return (-1); 3474284982Sdavidcs} 3475284982Sdavidcs 3476284982Sdavidcsvoid 3477284982Sdavidcsqla_hw_async_event(qla_host_t *ha) 3478284982Sdavidcs{ 3479284982Sdavidcs switch (ha->hw.aen_mb0) { 3480284982Sdavidcs case 0x8101: 3481284982Sdavidcs (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2, 3482284982Sdavidcs ha->hw.aen_mb3, ha->hw.aen_mb4); 3483284982Sdavidcs 3484284982Sdavidcs break; 3485284982Sdavidcs 3486284982Sdavidcs default: 3487284982Sdavidcs break; 3488284982Sdavidcs } 3489284982Sdavidcs 3490284982Sdavidcs return; 3491284982Sdavidcs} 3492284982Sdavidcs 3493284982Sdavidcs#ifdef QL_LDFLASH_FW 3494284982Sdavidcsstatic int 3495250661Sdavidcsqla_get_minidump_template(qla_host_t *ha) 3496250661Sdavidcs{ 3497250661Sdavidcs uint32_t err; 3498250661Sdavidcs device_t dev = ha->pci_dev; 3499250661Sdavidcs 
q80_config_md_templ_cmd_t *md_templ; 3500250661Sdavidcs q80_config_md_templ_cmd_rsp_t *md_templ_rsp; 3501250661Sdavidcs 3502250661Sdavidcs md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox; 3503250661Sdavidcs bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t))); 3504250661Sdavidcs 3505250661Sdavidcs md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT; 3506250661Sdavidcs md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2); 3507250661Sdavidcs md_templ->count_version |= Q8_MBX_CMD_VERSION; 3508250661Sdavidcs 3509250661Sdavidcs md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr; 3510250661Sdavidcs md_templ->buff_size = ha->hw.dma_buf.minidump.size; 3511250661Sdavidcs 3512250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *) md_templ, 3513250661Sdavidcs (sizeof(q80_config_md_templ_cmd_t) >> 2), 3514250661Sdavidcs ha->hw.mbox, 3515250661Sdavidcs (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) { 3516250661Sdavidcs 3517250661Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3518250661Sdavidcs 3519250661Sdavidcs return (-1); 3520250661Sdavidcs } 3521250661Sdavidcs 3522250661Sdavidcs md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox; 3523250661Sdavidcs 3524250661Sdavidcs err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status); 3525250661Sdavidcs 3526250661Sdavidcs if (err) { 3527250661Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3528250661Sdavidcs return (-1); 3529250661Sdavidcs } 3530250661Sdavidcs 3531250661Sdavidcs return (0); 3532250661Sdavidcs 3533250661Sdavidcs} 3534284982Sdavidcs#endif /* #ifdef QL_LDFLASH_FW */ 3535250661Sdavidcs 3536250661Sdavidcsstatic int 3537250661Sdavidcsqla_minidump_init(qla_host_t *ha) 3538250661Sdavidcs{ 3539284982Sdavidcs int ret = 0; 3540250661Sdavidcs uint32_t template_size = 0; 3541250661Sdavidcs device_t dev = ha->pci_dev; 3542250661Sdavidcs 3543250661Sdavidcs /* 3544250661Sdavidcs * Get Minidump Template Size 3545250661Sdavidcs */ 3546250661Sdavidcs ret = qla_get_minidump_tmplt_size(ha, 
&template_size); 3547250661Sdavidcs 3548250661Sdavidcs if (ret || (template_size == 0)) { 3549250661Sdavidcs device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret, 3550250661Sdavidcs template_size); 3551250661Sdavidcs return (-1); 3552250661Sdavidcs } 3553250661Sdavidcs 3554250661Sdavidcs /* 3555250661Sdavidcs * Allocate Memory for Minidump Template 3556250661Sdavidcs */ 3557250661Sdavidcs 3558250661Sdavidcs ha->hw.dma_buf.minidump.alignment = 8; 3559250661Sdavidcs ha->hw.dma_buf.minidump.size = template_size; 3560250661Sdavidcs 3561284982Sdavidcs#ifdef QL_LDFLASH_FW 3562250661Sdavidcs if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) { 3563250661Sdavidcs 3564250661Sdavidcs device_printf(dev, "%s: minidump dma alloc failed\n", __func__); 3565250661Sdavidcs 3566250661Sdavidcs return (-1); 3567250661Sdavidcs } 3568250661Sdavidcs ha->hw.dma_buf.flags.minidump = 1; 3569250661Sdavidcs 3570250661Sdavidcs /* 3571250661Sdavidcs * Retrieve Minidump Template 3572250661Sdavidcs */ 3573250661Sdavidcs ret = qla_get_minidump_template(ha); 3574284982Sdavidcs#else 3575284982Sdavidcs ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump; 3576284982Sdavidcs#endif /* #ifdef QL_LDFLASH_FW */ 3577250661Sdavidcs 3578250661Sdavidcs if (ret) { 3579250661Sdavidcs qla_minidump_free(ha); 3580250661Sdavidcs } else { 3581250661Sdavidcs ha->hw.mdump_init = 1; 3582250661Sdavidcs } 3583250661Sdavidcs 3584250661Sdavidcs return (ret); 3585250661Sdavidcs} 3586250661Sdavidcs 3587250661Sdavidcs 3588250661Sdavidcsstatic void 3589250661Sdavidcsqla_minidump_free(qla_host_t *ha) 3590250661Sdavidcs{ 3591250661Sdavidcs ha->hw.mdump_init = 0; 3592250661Sdavidcs if (ha->hw.dma_buf.flags.minidump) { 3593250661Sdavidcs ha->hw.dma_buf.flags.minidump = 0; 3594250661Sdavidcs ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump); 3595250661Sdavidcs } 3596250661Sdavidcs return; 3597250661Sdavidcs} 3598250661Sdavidcs 3599250661Sdavidcsvoid 3600250661Sdavidcsql_minidump(qla_host_t *ha) 3601250661Sdavidcs{ 3602250661Sdavidcs 
uint32_t delay = 6000; 3603250661Sdavidcs 3604250661Sdavidcs if (!ha->hw.mdump_init) 3605250661Sdavidcs return; 3606250661Sdavidcs 3607250661Sdavidcs if (!ha->hw.mdump_active) 3608250661Sdavidcs return; 3609250661Sdavidcs 3610250661Sdavidcs if (ha->hw.mdump_active == 1) { 3611250661Sdavidcs ha->hw.mdump_start_seq_index = ql_stop_sequence(ha); 3612250661Sdavidcs ha->hw.mdump_start = 1; 3613250661Sdavidcs } 3614250661Sdavidcs 3615250661Sdavidcs while (delay-- && ha->hw.mdump_active) { 3616250661Sdavidcs qla_mdelay(__func__, 100); 3617250661Sdavidcs } 3618250661Sdavidcs ha->hw.mdump_start = 0; 3619250661Sdavidcs ql_start_sequence(ha, ha->hw.mdump_start_seq_index); 3620250661Sdavidcs 3621250661Sdavidcs return; 3622250661Sdavidcs} 3623