/* ql_hw.c revision 313070 */
1250661Sdavidcs/* 2284741Sdavidcs * Copyright (c) 2013-2016 Qlogic Corporation 3250661Sdavidcs * All rights reserved. 4250661Sdavidcs * 5250661Sdavidcs * Redistribution and use in source and binary forms, with or without 6250661Sdavidcs * modification, are permitted provided that the following conditions 7250661Sdavidcs * are met: 8250661Sdavidcs * 9250661Sdavidcs * 1. Redistributions of source code must retain the above copyright 10250661Sdavidcs * notice, this list of conditions and the following disclaimer. 11250661Sdavidcs * 2. Redistributions in binary form must reproduce the above copyright 12250661Sdavidcs * notice, this list of conditions and the following disclaimer in the 13250661Sdavidcs * documentation and/or other materials provided with the distribution. 14250661Sdavidcs * 15250661Sdavidcs * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16250661Sdavidcs * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17250661Sdavidcs * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18250661Sdavidcs * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19250661Sdavidcs * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20250661Sdavidcs * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21250661Sdavidcs * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22250661Sdavidcs * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23250661Sdavidcs * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24250661Sdavidcs * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25250661Sdavidcs * POSSIBILITY OF SUCH DAMAGE. 26250661Sdavidcs */ 27250661Sdavidcs 28250661Sdavidcs/* 29250661Sdavidcs * File: ql_hw.c 30250661Sdavidcs * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
31298955Spfg * Content: Contains Hardware dependent functions 32250661Sdavidcs */ 33250661Sdavidcs 34250661Sdavidcs#include <sys/cdefs.h> 35250661Sdavidcs__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_hw.c 313070 2017-02-02 00:12:24Z davidcs $"); 36250661Sdavidcs 37250661Sdavidcs#include "ql_os.h" 38250661Sdavidcs#include "ql_hw.h" 39250661Sdavidcs#include "ql_def.h" 40250661Sdavidcs#include "ql_inline.h" 41250661Sdavidcs#include "ql_ver.h" 42250661Sdavidcs#include "ql_glbl.h" 43250661Sdavidcs#include "ql_dbg.h" 44305487Sdavidcs#include "ql_minidump.h" 45250661Sdavidcs 46250661Sdavidcs/* 47250661Sdavidcs * Static Functions 48250661Sdavidcs */ 49250661Sdavidcs 50250661Sdavidcsstatic void qla_del_rcv_cntxt(qla_host_t *ha); 51250661Sdavidcsstatic int qla_init_rcv_cntxt(qla_host_t *ha); 52250661Sdavidcsstatic void qla_del_xmt_cntxt(qla_host_t *ha); 53250661Sdavidcsstatic int qla_init_xmt_cntxt(qla_host_t *ha); 54250661Sdavidcsstatic int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, 55250661Sdavidcs uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause); 56284741Sdavidcsstatic int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, 57284741Sdavidcs uint32_t num_intrs, uint32_t create); 58250661Sdavidcsstatic int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id); 59250661Sdavidcsstatic int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, 60284741Sdavidcs int tenable, int rcv); 61250661Sdavidcsstatic int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode); 62250661Sdavidcsstatic int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id); 63250661Sdavidcs 64250661Sdavidcsstatic int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, 65250661Sdavidcs uint8_t *hdr); 66250661Sdavidcsstatic int qla_hw_add_all_mcast(qla_host_t *ha); 67250661Sdavidcsstatic int qla_hw_del_all_mcast(qla_host_t *ha); 68284741Sdavidcsstatic int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds); 69250661Sdavidcs 
70284741Sdavidcsstatic int qla_init_nic_func(qla_host_t *ha); 71284741Sdavidcsstatic int qla_stop_nic_func(qla_host_t *ha); 72284741Sdavidcsstatic int qla_query_fw_dcbx_caps(qla_host_t *ha); 73284741Sdavidcsstatic int qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits); 74284741Sdavidcsstatic int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits); 75284741Sdavidcsstatic void qla_get_quick_stats(qla_host_t *ha); 76305488Sdavidcsstatic int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode); 77305488Sdavidcsstatic int qla_get_cam_search_mode(qla_host_t *ha); 78284741Sdavidcs 79305487Sdavidcsstatic void ql_minidump_free(qla_host_t *ha); 80250661Sdavidcs 81250661Sdavidcs 82250661Sdavidcsstatic int 83250661Sdavidcsqla_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS) 84250661Sdavidcs{ 85250661Sdavidcs int err = 0, ret; 86250661Sdavidcs qla_host_t *ha; 87250661Sdavidcs uint32_t i; 88250661Sdavidcs 89250661Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 90250661Sdavidcs 91250661Sdavidcs if (err || !req->newptr) 92250661Sdavidcs return (err); 93250661Sdavidcs 94250661Sdavidcs if (ret == 1) { 95250661Sdavidcs 96250661Sdavidcs ha = (qla_host_t *)arg1; 97250661Sdavidcs 98305488Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) { 99305488Sdavidcs 100250661Sdavidcs device_printf(ha->pci_dev, 101250661Sdavidcs "%s: sds_ring[%d] = %p\n", __func__,i, 102250661Sdavidcs (void *)ha->hw.sds[i].intr_count); 103250661Sdavidcs 104305488Sdavidcs device_printf(ha->pci_dev, 105305488Sdavidcs "%s: sds_ring[%d].spurious_intr_count = %p\n", 106305488Sdavidcs __func__, 107305488Sdavidcs i, (void *)ha->hw.sds[i].spurious_intr_count); 108305488Sdavidcs 109305488Sdavidcs device_printf(ha->pci_dev, 110305488Sdavidcs "%s: sds_ring[%d].rx_free = %d\n", __func__,i, 111305488Sdavidcs ha->hw.sds[i].rx_free); 112305488Sdavidcs } 113305488Sdavidcs 114250661Sdavidcs for (i = 0; i < ha->hw.num_tx_rings; i++) 115250661Sdavidcs device_printf(ha->pci_dev, 116250661Sdavidcs "%s: tx[%d] = 
%p\n", __func__,i, 117250661Sdavidcs (void *)ha->tx_ring[i].count); 118250661Sdavidcs 119250661Sdavidcs for (i = 0; i < ha->hw.num_rds_rings; i++) 120250661Sdavidcs device_printf(ha->pci_dev, 121250661Sdavidcs "%s: rds_ring[%d] = %p\n", __func__,i, 122250661Sdavidcs (void *)ha->hw.rds[i].count); 123250661Sdavidcs 124250661Sdavidcs device_printf(ha->pci_dev, "%s: lro_pkt_count = %p\n", __func__, 125250661Sdavidcs (void *)ha->lro_pkt_count); 126250661Sdavidcs 127250661Sdavidcs device_printf(ha->pci_dev, "%s: lro_bytes = %p\n", __func__, 128250661Sdavidcs (void *)ha->lro_bytes); 129284741Sdavidcs 130284741Sdavidcs#ifdef QL_ENABLE_ISCSI_TLV 131284741Sdavidcs device_printf(ha->pci_dev, "%s: iscsi_pkts = %p\n", __func__, 132284741Sdavidcs (void *)ha->hw.iscsi_pkt_count); 133284741Sdavidcs#endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 134284741Sdavidcs 135250661Sdavidcs } 136250661Sdavidcs return (err); 137250661Sdavidcs} 138250661Sdavidcs 139284741Sdavidcsstatic int 140284741Sdavidcsqla_sysctl_get_quick_stats(SYSCTL_HANDLER_ARGS) 141284741Sdavidcs{ 142284741Sdavidcs int err, ret = 0; 143284741Sdavidcs qla_host_t *ha; 144284741Sdavidcs 145284741Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 146284741Sdavidcs 147284741Sdavidcs if (err || !req->newptr) 148284741Sdavidcs return (err); 149284741Sdavidcs 150284741Sdavidcs if (ret == 1) { 151284741Sdavidcs ha = (qla_host_t *)arg1; 152284741Sdavidcs qla_get_quick_stats(ha); 153284741Sdavidcs } 154284741Sdavidcs return (err); 155284741Sdavidcs} 156284741Sdavidcs 157250661Sdavidcs#ifdef QL_DBG 158250661Sdavidcs 159250661Sdavidcsstatic void 160250661Sdavidcsqla_stop_pegs(qla_host_t *ha) 161250661Sdavidcs{ 162250661Sdavidcs uint32_t val = 1; 163250661Sdavidcs 164250661Sdavidcs ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0); 165250661Sdavidcs ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0); 166250661Sdavidcs ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0); 167250661Sdavidcs ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0); 168250661Sdavidcs 
ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0); 169250661Sdavidcs device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__); 170250661Sdavidcs} 171250661Sdavidcs 172250661Sdavidcsstatic int 173250661Sdavidcsqla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS) 174250661Sdavidcs{ 175250661Sdavidcs int err, ret = 0; 176250661Sdavidcs qla_host_t *ha; 177250661Sdavidcs 178250661Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 179250661Sdavidcs 180250661Sdavidcs 181250661Sdavidcs if (err || !req->newptr) 182250661Sdavidcs return (err); 183250661Sdavidcs 184250661Sdavidcs if (ret == 1) { 185250661Sdavidcs ha = (qla_host_t *)arg1; 186250661Sdavidcs (void)QLA_LOCK(ha, __func__, 0); 187250661Sdavidcs qla_stop_pegs(ha); 188250661Sdavidcs QLA_UNLOCK(ha, __func__); 189250661Sdavidcs } 190250661Sdavidcs 191250661Sdavidcs return err; 192250661Sdavidcs} 193250661Sdavidcs#endif /* #ifdef QL_DBG */ 194250661Sdavidcs 195284741Sdavidcsstatic int 196284741Sdavidcsqla_validate_set_port_cfg_bit(uint32_t bits) 197284741Sdavidcs{ 198284741Sdavidcs if ((bits & 0xF) > 1) 199284741Sdavidcs return (-1); 200284741Sdavidcs 201284741Sdavidcs if (((bits >> 4) & 0xF) > 2) 202284741Sdavidcs return (-1); 203284741Sdavidcs 204284741Sdavidcs if (((bits >> 8) & 0xF) > 2) 205284741Sdavidcs return (-1); 206284741Sdavidcs 207284741Sdavidcs return (0); 208284741Sdavidcs} 209284741Sdavidcs 210284741Sdavidcsstatic int 211284741Sdavidcsqla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS) 212284741Sdavidcs{ 213284741Sdavidcs int err, ret = 0; 214284741Sdavidcs qla_host_t *ha; 215284741Sdavidcs uint32_t cfg_bits; 216284741Sdavidcs 217284741Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 218284741Sdavidcs 219284741Sdavidcs if (err || !req->newptr) 220284741Sdavidcs return (err); 221284741Sdavidcs 222284741Sdavidcs if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) { 223284741Sdavidcs 224284741Sdavidcs ha = (qla_host_t *)arg1; 225284741Sdavidcs 226284741Sdavidcs err = qla_get_port_config(ha, &cfg_bits); 
227284741Sdavidcs 228284741Sdavidcs if (err) 229284741Sdavidcs goto qla_sysctl_set_port_cfg_exit; 230284741Sdavidcs 231284741Sdavidcs if (ret & 0x1) { 232284741Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE; 233284741Sdavidcs } else { 234284741Sdavidcs cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE; 235284741Sdavidcs } 236284741Sdavidcs 237284741Sdavidcs ret = ret >> 4; 238284741Sdavidcs cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK; 239284741Sdavidcs 240284741Sdavidcs if ((ret & 0xF) == 0) { 241284741Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED; 242284741Sdavidcs } else if ((ret & 0xF) == 1){ 243284741Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD; 244284741Sdavidcs } else { 245284741Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM; 246284741Sdavidcs } 247284741Sdavidcs 248284741Sdavidcs ret = ret >> 4; 249284741Sdavidcs cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK; 250284741Sdavidcs 251284741Sdavidcs if (ret == 0) { 252284741Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV; 253284741Sdavidcs } else if (ret == 1){ 254284741Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT; 255284741Sdavidcs } else { 256284741Sdavidcs cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV; 257284741Sdavidcs } 258284741Sdavidcs 259284741Sdavidcs err = qla_set_port_config(ha, cfg_bits); 260284741Sdavidcs } else { 261284741Sdavidcs ha = (qla_host_t *)arg1; 262284741Sdavidcs 263284741Sdavidcs err = qla_get_port_config(ha, &cfg_bits); 264284741Sdavidcs } 265284741Sdavidcs 266284741Sdavidcsqla_sysctl_set_port_cfg_exit: 267284741Sdavidcs return err; 268284741Sdavidcs} 269284741Sdavidcs 270305488Sdavidcsstatic int 271305488Sdavidcsqla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS) 272305488Sdavidcs{ 273305488Sdavidcs int err, ret = 0; 274305488Sdavidcs qla_host_t *ha; 275305488Sdavidcs 276305488Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 277305488Sdavidcs 278305488Sdavidcs if (err || !req->newptr) 279305488Sdavidcs return (err); 280305488Sdavidcs 281305488Sdavidcs ha = 
(qla_host_t *)arg1; 282305488Sdavidcs 283305488Sdavidcs if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) || 284305488Sdavidcs (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) { 285305488Sdavidcs err = qla_set_cam_search_mode(ha, (uint32_t)ret); 286305488Sdavidcs } else { 287305488Sdavidcs device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret); 288305488Sdavidcs } 289305488Sdavidcs 290305488Sdavidcs return (err); 291305488Sdavidcs} 292305488Sdavidcs 293305488Sdavidcsstatic int 294305488Sdavidcsqla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS) 295305488Sdavidcs{ 296305488Sdavidcs int err, ret = 0; 297305488Sdavidcs qla_host_t *ha; 298305488Sdavidcs 299305488Sdavidcs err = sysctl_handle_int(oidp, &ret, 0, req); 300305488Sdavidcs 301305488Sdavidcs if (err || !req->newptr) 302305488Sdavidcs return (err); 303305488Sdavidcs 304305488Sdavidcs ha = (qla_host_t *)arg1; 305305488Sdavidcs err = qla_get_cam_search_mode(ha); 306305488Sdavidcs 307305488Sdavidcs return (err); 308305488Sdavidcs} 309305488Sdavidcs 310305488Sdavidcs 311250661Sdavidcs/* 312250661Sdavidcs * Name: ql_hw_add_sysctls 313250661Sdavidcs * Function: Add P3Plus specific sysctls 314250661Sdavidcs */ 315250661Sdavidcsvoid 316250661Sdavidcsql_hw_add_sysctls(qla_host_t *ha) 317250661Sdavidcs{ 318250661Sdavidcs device_t dev; 319250661Sdavidcs 320250661Sdavidcs dev = ha->pci_dev; 321250661Sdavidcs 322250661Sdavidcs ha->hw.num_sds_rings = MAX_SDS_RINGS; 323250661Sdavidcs ha->hw.num_rds_rings = MAX_RDS_RINGS; 324250661Sdavidcs ha->hw.num_tx_rings = NUM_TX_RINGS; 325250661Sdavidcs 326250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 327250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 328250661Sdavidcs OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings, 329250661Sdavidcs ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings"); 330250661Sdavidcs 331250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 332250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 
333250661Sdavidcs OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings, 334250661Sdavidcs ha->hw.num_sds_rings, "Number of Status Descriptor Rings"); 335250661Sdavidcs 336250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 337250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 338250661Sdavidcs OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings, 339250661Sdavidcs ha->hw.num_tx_rings, "Number of Transmit Rings"); 340250661Sdavidcs 341250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 342250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 343250661Sdavidcs OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx, 344250661Sdavidcs ha->txr_idx, "Tx Ring Used"); 345250661Sdavidcs 346250661Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 347250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 348250661Sdavidcs OID_AUTO, "drvr_stats", CTLTYPE_INT | CTLFLAG_RW, 349250661Sdavidcs (void *)ha, 0, 350250661Sdavidcs qla_sysctl_get_drvr_stats, "I", "Driver Maintained Statistics"); 351250661Sdavidcs 352284741Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 353284741Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 354284741Sdavidcs OID_AUTO, "quick_stats", CTLTYPE_INT | CTLFLAG_RW, 355284741Sdavidcs (void *)ha, 0, 356284741Sdavidcs qla_sysctl_get_quick_stats, "I", "Quick Statistics"); 357284741Sdavidcs 358250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 359250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 360250661Sdavidcs OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs, 361250661Sdavidcs ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt"); 362250661Sdavidcs 363250661Sdavidcs ha->hw.sds_cidx_thres = 32; 364250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 365250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 366250661Sdavidcs OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres, 367250661Sdavidcs ha->hw.sds_cidx_thres, 368250661Sdavidcs "Number of SDS 
entries to process before updating" 369250661Sdavidcs " SDS Ring Consumer Index"); 370250661Sdavidcs 371250661Sdavidcs ha->hw.rds_pidx_thres = 32; 372250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 373250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 374250661Sdavidcs OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres, 375250661Sdavidcs ha->hw.rds_pidx_thres, 376250661Sdavidcs "Number of Rcv Rings Entries to post before updating" 377250661Sdavidcs " RDS Ring Producer Index"); 378250661Sdavidcs 379284741Sdavidcs ha->hw.rcv_intr_coalesce = (3 << 16) | 256; 380284741Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 381284741Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 382284741Sdavidcs OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW, 383284741Sdavidcs &ha->hw.rcv_intr_coalesce, 384284741Sdavidcs ha->hw.rcv_intr_coalesce, 385284741Sdavidcs "Rcv Intr Coalescing Parameters\n" 386284741Sdavidcs "\tbits 15:0 max packets\n" 387284741Sdavidcs "\tbits 31:16 max micro-seconds to wait\n" 388284741Sdavidcs "\tplease run\n" 389284741Sdavidcs "\tifconfig <if> down && ifconfig <if> up\n" 390284741Sdavidcs "\tto take effect \n"); 391258155Sdavidcs 392284741Sdavidcs ha->hw.xmt_intr_coalesce = (64 << 16) | 64; 393284741Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 394284741Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 395284741Sdavidcs OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW, 396284741Sdavidcs &ha->hw.xmt_intr_coalesce, 397284741Sdavidcs ha->hw.xmt_intr_coalesce, 398284741Sdavidcs "Xmt Intr Coalescing Parameters\n" 399284741Sdavidcs "\tbits 15:0 max packets\n" 400284741Sdavidcs "\tbits 31:16 max micro-seconds to wait\n" 401284741Sdavidcs "\tplease run\n" 402284741Sdavidcs "\tifconfig <if> down && ifconfig <if> up\n" 403284741Sdavidcs "\tto take effect \n"); 404284741Sdavidcs 405284741Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 406284741Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 407284741Sdavidcs OID_AUTO, 
"port_cfg", CTLTYPE_INT | CTLFLAG_RW, 408284741Sdavidcs (void *)ha, 0, 409284741Sdavidcs qla_sysctl_port_cfg, "I", 410284741Sdavidcs "Set Port Configuration if values below " 411284741Sdavidcs "otherwise Get Port Configuration\n" 412284741Sdavidcs "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n" 413284741Sdavidcs "\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n" 414284741Sdavidcs "\tBits 8-11: std pause cfg; 0 = xmt and rcv;" 415284741Sdavidcs " 1 = xmt only; 2 = rcv only;\n" 416284741Sdavidcs ); 417284741Sdavidcs 418305488Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 419305488Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 420305488Sdavidcs OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW, 421305488Sdavidcs (void *)ha, 0, 422305488Sdavidcs qla_sysctl_set_cam_search_mode, "I", 423305488Sdavidcs "Set CAM Search Mode" 424305488Sdavidcs "\t 1 = search mode internal\n" 425305488Sdavidcs "\t 2 = search mode auto\n"); 426305488Sdavidcs 427305488Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 428305488Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 429305488Sdavidcs OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW, 430305488Sdavidcs (void *)ha, 0, 431305488Sdavidcs qla_sysctl_get_cam_search_mode, "I", 432305488Sdavidcs "Get CAM Search Mode" 433305488Sdavidcs "\t 1 = search mode internal\n" 434305488Sdavidcs "\t 2 = search mode auto\n"); 435305488Sdavidcs 436284741Sdavidcs ha->hw.enable_9kb = 1; 437284741Sdavidcs 438284741Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 439284741Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 440284741Sdavidcs OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb, 441284741Sdavidcs ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000"); 442284741Sdavidcs 443250661Sdavidcs ha->hw.mdump_active = 0; 444250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 445250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 446250661Sdavidcs OID_AUTO, "minidump_active", CTLFLAG_RW, 
&ha->hw.mdump_active, 447250661Sdavidcs ha->hw.mdump_active, 448305487Sdavidcs "Minidump retrieval is Active"); 449250661Sdavidcs 450305487Sdavidcs ha->hw.mdump_done = 0; 451250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 452250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 453305487Sdavidcs OID_AUTO, "mdump_done", CTLFLAG_RW, 454305487Sdavidcs &ha->hw.mdump_done, ha->hw.mdump_done, 455305487Sdavidcs "Minidump has been done and available for retrieval"); 456305487Sdavidcs 457305487Sdavidcs ha->hw.mdump_capture_mask = 0xF; 458305487Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 459305487Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 460305487Sdavidcs OID_AUTO, "minidump_capture_mask", CTLFLAG_RW, 461305487Sdavidcs &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask, 462305487Sdavidcs "Minidump capture mask"); 463250661Sdavidcs#ifdef QL_DBG 464250661Sdavidcs 465289635Sdavidcs ha->err_inject = 0; 466250661Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 467250661Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 468250661Sdavidcs OID_AUTO, "err_inject", 469250661Sdavidcs CTLFLAG_RW, &ha->err_inject, ha->err_inject, 470250661Sdavidcs "Error to be injected\n" 471250661Sdavidcs "\t\t\t 0: No Errors\n" 472250661Sdavidcs "\t\t\t 1: rcv: rxb struct invalid\n" 473250661Sdavidcs "\t\t\t 2: rcv: mp == NULL\n" 474250661Sdavidcs "\t\t\t 3: lro: rxb struct invalid\n" 475250661Sdavidcs "\t\t\t 4: lro: mp == NULL\n" 476250661Sdavidcs "\t\t\t 5: rcv: num handles invalid\n" 477250661Sdavidcs "\t\t\t 6: reg: indirect reg rd_wr failure\n" 478250661Sdavidcs "\t\t\t 7: ocm: offchip memory rd_wr failure\n" 479250661Sdavidcs "\t\t\t 8: mbx: mailbox command failure\n" 480250661Sdavidcs "\t\t\t 9: heartbeat failure\n" 481305488Sdavidcs "\t\t\t A: temperature failure\n" 482305488Sdavidcs "\t\t\t 11: m_getcl or m_getjcl failure\n" ); 483250661Sdavidcs 484250661Sdavidcs SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 485250661Sdavidcs 
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 486250661Sdavidcs OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW, 487250661Sdavidcs (void *)ha, 0, 488250661Sdavidcs qla_sysctl_stop_pegs, "I", "Peg Stop"); 489250661Sdavidcs 490250661Sdavidcs#endif /* #ifdef QL_DBG */ 491250661Sdavidcs 492284741Sdavidcs ha->hw.user_pri_nic = 0; 493284741Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 494284741Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 495284741Sdavidcs OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic, 496284741Sdavidcs ha->hw.user_pri_nic, 497284741Sdavidcs "VLAN Tag User Priority for Normal Ethernet Packets"); 498284741Sdavidcs 499284741Sdavidcs ha->hw.user_pri_iscsi = 4; 500284741Sdavidcs SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 501284741Sdavidcs SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 502284741Sdavidcs OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi, 503284741Sdavidcs ha->hw.user_pri_iscsi, 504284741Sdavidcs "VLAN Tag User Priority for iSCSI Packets"); 505284741Sdavidcs 506250661Sdavidcs} 507250661Sdavidcs 508250661Sdavidcsvoid 509250661Sdavidcsql_hw_link_status(qla_host_t *ha) 510250661Sdavidcs{ 511250661Sdavidcs device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui); 512250661Sdavidcs 513250661Sdavidcs if (ha->hw.link_up) { 514250661Sdavidcs device_printf(ha->pci_dev, "link Up\n"); 515250661Sdavidcs } else { 516250661Sdavidcs device_printf(ha->pci_dev, "link Down\n"); 517250661Sdavidcs } 518250661Sdavidcs 519250661Sdavidcs if (ha->hw.flags.fduplex) { 520250661Sdavidcs device_printf(ha->pci_dev, "Full Duplex\n"); 521250661Sdavidcs } else { 522250661Sdavidcs device_printf(ha->pci_dev, "Half Duplex\n"); 523250661Sdavidcs } 524250661Sdavidcs 525250661Sdavidcs if (ha->hw.flags.autoneg) { 526250661Sdavidcs device_printf(ha->pci_dev, "Auto Negotiation Enabled\n"); 527250661Sdavidcs } else { 528250661Sdavidcs device_printf(ha->pci_dev, "Auto Negotiation Disabled\n"); 529250661Sdavidcs } 530250661Sdavidcs 
531250661Sdavidcs switch (ha->hw.link_speed) { 532250661Sdavidcs case 0x710: 533250661Sdavidcs device_printf(ha->pci_dev, "link speed\t\t 10Gps\n"); 534250661Sdavidcs break; 535250661Sdavidcs 536250661Sdavidcs case 0x3E8: 537250661Sdavidcs device_printf(ha->pci_dev, "link speed\t\t 1Gps\n"); 538250661Sdavidcs break; 539250661Sdavidcs 540250661Sdavidcs case 0x64: 541250661Sdavidcs device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n"); 542250661Sdavidcs break; 543250661Sdavidcs 544250661Sdavidcs default: 545250661Sdavidcs device_printf(ha->pci_dev, "link speed\t\t Unknown\n"); 546250661Sdavidcs break; 547250661Sdavidcs } 548250661Sdavidcs 549250661Sdavidcs switch (ha->hw.module_type) { 550250661Sdavidcs 551250661Sdavidcs case 0x01: 552250661Sdavidcs device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n"); 553250661Sdavidcs break; 554250661Sdavidcs 555250661Sdavidcs case 0x02: 556250661Sdavidcs device_printf(ha->pci_dev, "Module Type 10GBase-LR\n"); 557250661Sdavidcs break; 558250661Sdavidcs 559250661Sdavidcs case 0x03: 560250661Sdavidcs device_printf(ha->pci_dev, "Module Type 10GBase-SR\n"); 561250661Sdavidcs break; 562250661Sdavidcs 563250661Sdavidcs case 0x04: 564250661Sdavidcs device_printf(ha->pci_dev, 565250661Sdavidcs "Module Type 10GE Passive Copper(Compliant)[%d m]\n", 566250661Sdavidcs ha->hw.cable_length); 567250661Sdavidcs break; 568250661Sdavidcs 569250661Sdavidcs case 0x05: 570250661Sdavidcs device_printf(ha->pci_dev, "Module Type 10GE Active" 571250661Sdavidcs " Limiting Copper(Compliant)[%d m]\n", 572250661Sdavidcs ha->hw.cable_length); 573250661Sdavidcs break; 574250661Sdavidcs 575250661Sdavidcs case 0x06: 576250661Sdavidcs device_printf(ha->pci_dev, 577250661Sdavidcs "Module Type 10GE Passive Copper" 578250661Sdavidcs " (Legacy, Best Effort)[%d m]\n", 579250661Sdavidcs ha->hw.cable_length); 580250661Sdavidcs break; 581250661Sdavidcs 582250661Sdavidcs case 0x07: 583250661Sdavidcs device_printf(ha->pci_dev, "Module Type 1000Base-SX\n"); 
584250661Sdavidcs break; 585250661Sdavidcs 586250661Sdavidcs case 0x08: 587250661Sdavidcs device_printf(ha->pci_dev, "Module Type 1000Base-LX\n"); 588250661Sdavidcs break; 589250661Sdavidcs 590250661Sdavidcs case 0x09: 591250661Sdavidcs device_printf(ha->pci_dev, "Module Type 1000Base-CX\n"); 592250661Sdavidcs break; 593250661Sdavidcs 594250661Sdavidcs case 0x0A: 595250661Sdavidcs device_printf(ha->pci_dev, "Module Type 1000Base-T\n"); 596250661Sdavidcs break; 597250661Sdavidcs 598250661Sdavidcs case 0x0B: 599250661Sdavidcs device_printf(ha->pci_dev, "Module Type 1GE Passive Copper" 600250661Sdavidcs "(Legacy, Best Effort)\n"); 601250661Sdavidcs break; 602250661Sdavidcs 603250661Sdavidcs default: 604250661Sdavidcs device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n", 605250661Sdavidcs ha->hw.module_type); 606250661Sdavidcs break; 607250661Sdavidcs } 608250661Sdavidcs 609250661Sdavidcs if (ha->hw.link_faults == 1) 610250661Sdavidcs device_printf(ha->pci_dev, "SFP Power Fault\n"); 611250661Sdavidcs} 612250661Sdavidcs 613250661Sdavidcs/* 614250661Sdavidcs * Name: ql_free_dma 615250661Sdavidcs * Function: Frees the DMA'able memory allocated in ql_alloc_dma() 616250661Sdavidcs */ 617250661Sdavidcsvoid 618250661Sdavidcsql_free_dma(qla_host_t *ha) 619250661Sdavidcs{ 620250661Sdavidcs uint32_t i; 621250661Sdavidcs 622250661Sdavidcs if (ha->hw.dma_buf.flags.sds_ring) { 623250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) { 624250661Sdavidcs ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]); 625250661Sdavidcs } 626250661Sdavidcs ha->hw.dma_buf.flags.sds_ring = 0; 627250661Sdavidcs } 628250661Sdavidcs 629250661Sdavidcs if (ha->hw.dma_buf.flags.rds_ring) { 630250661Sdavidcs for (i = 0; i < ha->hw.num_rds_rings; i++) { 631250661Sdavidcs ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]); 632250661Sdavidcs } 633250661Sdavidcs ha->hw.dma_buf.flags.rds_ring = 0; 634250661Sdavidcs } 635250661Sdavidcs 636250661Sdavidcs if (ha->hw.dma_buf.flags.tx_ring) { 637250661Sdavidcs 
ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring); 638250661Sdavidcs ha->hw.dma_buf.flags.tx_ring = 0; 639250661Sdavidcs } 640305487Sdavidcs ql_minidump_free(ha); 641250661Sdavidcs} 642250661Sdavidcs 643250661Sdavidcs/* 644250661Sdavidcs * Name: ql_alloc_dma 645250661Sdavidcs * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts. 646250661Sdavidcs */ 647250661Sdavidcsint 648250661Sdavidcsql_alloc_dma(qla_host_t *ha) 649250661Sdavidcs{ 650250661Sdavidcs device_t dev; 651250661Sdavidcs uint32_t i, j, size, tx_ring_size; 652250661Sdavidcs qla_hw_t *hw; 653250661Sdavidcs qla_hw_tx_cntxt_t *tx_cntxt; 654250661Sdavidcs uint8_t *vaddr; 655250661Sdavidcs bus_addr_t paddr; 656250661Sdavidcs 657250661Sdavidcs dev = ha->pci_dev; 658250661Sdavidcs 659250661Sdavidcs QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 660250661Sdavidcs 661250661Sdavidcs hw = &ha->hw; 662250661Sdavidcs /* 663250661Sdavidcs * Allocate Transmit Ring 664250661Sdavidcs */ 665250661Sdavidcs tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS); 666250661Sdavidcs size = (tx_ring_size * ha->hw.num_tx_rings); 667250661Sdavidcs 668250661Sdavidcs hw->dma_buf.tx_ring.alignment = 8; 669250661Sdavidcs hw->dma_buf.tx_ring.size = size + PAGE_SIZE; 670250661Sdavidcs 671250661Sdavidcs if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) { 672250661Sdavidcs device_printf(dev, "%s: tx ring alloc failed\n", __func__); 673250661Sdavidcs goto ql_alloc_dma_exit; 674250661Sdavidcs } 675250661Sdavidcs 676250661Sdavidcs vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b; 677250661Sdavidcs paddr = hw->dma_buf.tx_ring.dma_addr; 678250661Sdavidcs 679250661Sdavidcs for (i = 0; i < ha->hw.num_tx_rings; i++) { 680250661Sdavidcs tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i]; 681250661Sdavidcs 682250661Sdavidcs tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr; 683250661Sdavidcs tx_cntxt->tx_ring_paddr = paddr; 684250661Sdavidcs 685250661Sdavidcs vaddr += tx_ring_size; 686250661Sdavidcs paddr += tx_ring_size; 687250661Sdavidcs } 
/*
 * NOTE(review): this chunk opens mid-way through ql_alloc_dma(); the
 * function prologue (locals i, j, vaddr, paddr, hw, dev and the earlier
 * tx-ring buffer allocation) precedes this view.
 */
        for (i = 0; i < ha->hw.num_tx_rings; i++) {
                tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

                /* each tx ring gets one 32-bit consumer-index slot */
                tx_cntxt->tx_cons = (uint32_t *)vaddr;
                tx_cntxt->tx_cons_paddr = paddr;

                vaddr += sizeof (uint32_t);
                paddr += sizeof (uint32_t);
        }

        ha->hw.dma_buf.flags.tx_ring = 1;

        QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
                __func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
                hw->dma_buf.tx_ring.dma_b));
        /*
         * Allocate Receive Descriptor Rings
         */

        for (i = 0; i < hw->num_rds_rings; i++) {

                hw->dma_buf.rds_ring[i].alignment = 8;
                hw->dma_buf.rds_ring[i].size =
                        (sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
                        device_printf(dev, "%s: rds ring[%d] alloc failed\n",
                                __func__, i);

                        /* unwind the rds rings allocated so far */
                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
                        __func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
                        hw->dma_buf.rds_ring[i].dma_b));
        }

        hw->dma_buf.flags.rds_ring = 1;

        /*
         * Allocate Status Descriptor Rings
         */

        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->dma_buf.sds_ring[i].alignment = 8;
                hw->dma_buf.sds_ring[i].size =
                        (sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

                if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
                        device_printf(dev, "%s: sds ring alloc failed\n",
                                __func__);

                        /* unwind the sds rings allocated so far */
                        for (j = 0; j < i; j++)
                                ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

                        goto ql_alloc_dma_exit;
                }
                QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
                        __func__, i,
                        (void *)(hw->dma_buf.sds_ring[i].dma_addr),
                        hw->dma_buf.sds_ring[i].dma_b));
        }
        for (i = 0; i < hw->num_sds_rings; i++) {
                hw->sds[i].sds_ring_base =
                        (q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
        }

        hw->dma_buf.flags.sds_ring = 1;

        return 0;

ql_alloc_dma_exit:
        /* free whatever was allocated; the flags track what succeeded */
        ql_free_dma(ha);
        return -1;
}

/* upper bound, in 1ms polling steps, on each mailbox handshake wait */
#define Q8_MBX_MSEC_DELAY	5000

/*
 * Name: qla_mbx_cmd
 * Function: Issue a firmware mailbox command and collect its response.
 *
 *	h_mbox / n_hmbox   - command words written to Q8_HOST_MBOX0..n
 *	fw_mbox / n_fwmbox - buffer receiving the Q8_FW_MBOX0..n response
 *	no_pause           - busy-wait with DELAY() instead of sleeping via
 *	                     qla_mdelay() (for contexts that must not sleep)
 *
 * Returns 0 on success; -1 if the host mailbox never frees up, -2 if the
 * firmware response times out, -3 on an injected (test) failure.  Every
 * failure path also sets ha->qla_initiate_recovery.
 */
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
        uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
        uint32_t i;
        uint32_t data;
        int ret = 0;

        if (QL_ERR_INJECT(ha, INJCT_MBX_CMD_FAILURE)) {
                ret = -3;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        if (no_pause)
                i = 1000;
        else
                i = Q8_MBX_MSEC_DELAY;

        /* wait for the host mailbox to become free (control reg reads 0) */
        while (i) {
                data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL);
                if (data == 0)
                        break;
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }

        if (i == 0) {
                device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -1;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        /* write the command words, then ring the host mailbox doorbell */
        for (i = 0; i < n_hmbox; i++) {
                WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox);
                h_mbox++;
        }

        WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1);


        /*
         * Poll for the firmware response.  An 0x8xxx value in FW_MBOX0 is
         * treated as "still in progress" and polling continues.
         */
        i = Q8_MBX_MSEC_DELAY;
        while (i) {
                data = READ_REG32(ha, Q8_FW_MBOX_CNTRL);

                if ((data & 0x3) == 1) {
                        data = READ_REG32(ha, Q8_FW_MBOX0);
                        if ((data & 0xF000) != 0x8000)
                                break;
                }
                if (no_pause) {
                        DELAY(1000);
                } else {
                        qla_mdelay(__func__, 1);
                }
                i--;
        }
        if (i == 0) {
                device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n",
                        __func__, data);
                ret = -2;
                ha->qla_initiate_recovery = 1;
                goto exit_qla_mbx_cmd;
        }

        for (i = 0; i < n_fwmbox; i++) {
                *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2)));
        }

        /* release the fw mailbox and unmask the mailbox interrupt */
        WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0);
        WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

exit_qla_mbx_cmd:
        return (ret);
}

/*
 * Name: qla_get_nic_partition
 * Function: Query the NIC partition configuration via the
 *	Q8_MBX_GET_NIC_PARTITION mailbox command.
 *
 *	supports_9kb - if non-NULL, set to 1 when bit 7 of response word 16
 *	               is set, else 0
 *	num_rcvq     - if non-NULL, receives the upper 16 bits of response
 *	               word 6
 *
 * Returns 0 when the response status (word 0 >> 25) is 0 or 1; -1 on a
 * mailbox transport failure or any other status.
 */
int
qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb,
        uint32_t *num_rcvq)
{
        uint32_t *mbox, err;
        device_t dev = ha->pci_dev;

        bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX));

        mbox = ha->hw.mbox;

        mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29);

        if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        err = mbox[0] >> 25;

        if (supports_9kb != NULL) {
                if (mbox[16] & 0x80) /* bit 7 of mbox 16 */
                        *supports_9kb = 1;
                else
                        *supports_9kb = 0;
        }

        if (num_rcvq != NULL)
                *num_rcvq = ((mbox[6] >> 16) & 0xFFFF);

        if ((err != 1) && (err != 0)) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

/*
 * Name: qla_config_intr_cntxt
 * Function: Create (create != 0) or delete num_intrs MSI-X interrupt
 *	contexts starting at index start_idx.  On delete, the previously
 *	recorded firmware interrupt ids from ha->hw.intr_id[] are used.
 * Returns 0 on success, -1 on mailbox or per-entry failure.
 */
static int
qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs,
        uint32_t create)
{
        uint32_t i, err;
        device_t dev = ha->pci_dev;
        q80_config_intr_t *c_intr;
        q80_config_intr_rsp_t *c_intr_rsp;

        c_intr = (q80_config_intr_t *)ha->hw.mbox;
        bzero(c_intr, (sizeof (q80_config_intr_t)));

        c_intr->opcode = Q8_MBX_CONFIG_INTR;

        c_intr->count_version = (sizeof (q80_config_intr_t) >> 2);
        c_intr->count_version |= Q8_MBX_CMD_VERSION;

        c_intr->nentries = num_intrs;

        for (i = 0; i < num_intrs; i++) {
                if (create) {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE;
                        c_intr->intr[i].msix_index = start_idx + 1 + i;
                } else {
                        c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE;
                        c_intr->intr[i].msix_index =
                                ha->hw.intr_id[(start_idx + i)];
                }

                c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_intr,
                (sizeof (q80_config_intr_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x, %d]\n", __func__, err,
                        c_intr_rsp->nentries);

                /* dump each entry's status for diagnosis */
                for (i = 0; i < c_intr_rsp->nentries; i++) {
                        device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
                                __func__, i,
                                c_intr_rsp->intr[i].status,
                                c_intr_rsp->intr[i].intr_id,
                                c_intr_rsp->intr[i].intr_src);
                }

                return (-1);
        }
/* NOTE(review): continuation of qla_config_intr_cntxt() from above. */
        /* on create, record the fw-assigned id/source for each interrupt */
        for (i = 0; ((i < num_intrs) && create); i++) {
                if (!c_intr_rsp->intr[i].status) {
                        ha->hw.intr_id[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_id;
                        ha->hw.intr_src[(start_idx + i)] =
                                c_intr_rsp->intr[i].intr_src;
                }
        }

        return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
/* 40-byte RSS hash key, stored as five 64-bit words */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
                        0x8030f20c77cb2da3ULL,
                        0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
                        0x255b0ec26d5a56daULL };

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
        q80_config_rss_t *c_rss;
        q80_config_rss_rsp_t *c_rss_rsp;
        uint32_t err, i;
        device_t dev = ha->pci_dev;

        c_rss = (q80_config_rss_t *)ha->hw.mbox;
        bzero(c_rss, (sizeof (q80_config_rss_t)));

        c_rss->opcode = Q8_MBX_CONFIG_RSS;

        c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
        c_rss->count_version |= Q8_MBX_CMD_VERSION;

        /* hash over both IP header and TCP segment, for v4 and v6 */
        c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
                                Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
        //c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
        //                      Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);

        c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

        c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;
/* NOTE(review): continuation of qla_config_rss() from above. */
        c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
        c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

        c_rss->cntxt_id = cntxt_id;

        /* copy in the five 64-bit words of the RSS hash key */
        for (i = 0; i < 5; i++) {
                c_rss->rss_key[i] = rss_key[i];
        }

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
                (sizeof (q80_config_rss_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

/*
 * Name: qla_set_rss_ind_table
 * Function: Program `count` entries of the RSS indirection table for the
 *	given context, starting at start_idx.  Rejects spans longer than
 *	Q8_RSS_IND_TBL_SIZE or running past Q8_RSS_IND_TBL_MAX_IDX.
 * Returns 0 on success, -1 on failure.
 */
static int
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
        uint16_t cntxt_id, uint8_t *ind_table)
{
        q80_config_rss_ind_table_t *c_rss_ind;
        q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp;
        uint32_t err;
        device_t dev = ha->pci_dev;

        if ((count > Q8_RSS_IND_TBL_SIZE) ||
                ((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
                device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
                        start_idx, count);
                return (-1);
        }

        c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
        bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));
/* NOTE(review): continuation of qla_set_rss_ind_table() from above. */
        c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
        c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
        c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

        /* inclusive range of indirection-table slots being written */
        c_rss_ind->start_idx = start_idx;
        c_rss_ind->end_idx = start_idx + count - 1;
        c_rss_ind->cntxt_id = cntxt_id;
        bcopy(ind_table, c_rss_ind->ind_table, count);

        if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
                (sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
                (sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
        err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }
        return 0;
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 *	rcv != 0 programs the receive side from ha->hw.rcv_intr_coalesce,
 *	otherwise the transmit side from ha->hw.xmt_intr_coalesce; in both
 *	cases the low 16 bits are max packets and the upper 16 bits the max
 *	wait in ms.  tenable != 0 additionally arms a 1000ms periodic timer
 *	across all sds rings.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
        int rcv)
{
        q80_config_intr_coalesc_t *intrc;
        q80_config_intr_coalesc_rsp_t *intrc_rsp;
        uint32_t err, i;
        device_t dev = ha->pci_dev;

        intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
        bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

        intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
        intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
        intrc->count_version |= Q8_MBX_CMD_VERSION;

        if (rcv) {
                intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
                intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
                intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
        } else {
                intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
                intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
                intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
        }

        intrc->cntxt_id = cntxt_id;

        if (tenable) {
                intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
                intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

                /* apply the periodic timer to every sds ring */
                for (i = 0; i < ha->hw.num_sds_rings; i++) {
                        intrc->sds_ring_mask |= (1 << i);
                }
                intrc->ms_timeout = 1000;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)intrc,
                (sizeof (q80_config_intr_coalesc_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
/* NOTE(review): continuation of qla_config_intr_coalesce() from above. */
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return 0;
}


/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 *	Can be unicast, multicast or broadcast.
 *	mac_addr points at num_mac consecutive 6-byte addresses; all are
 *	added to (add_mac != 0) or removed from (add_mac == 0) the ingress
 *	CAM of the receive context.
 * Returns 0 on success, -1 on failure.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
        uint32_t num_mac)
{
        q80_config_mac_addr_t *cmac;
        q80_config_mac_addr_rsp_t *cmac_rsp;
        uint32_t err;
        device_t dev = ha->pci_dev;
        int i;
        uint8_t *mac_cpy = mac_addr;    /* kept for the error-path dump */

        if (num_mac > Q8_MAX_MAC_ADDRS) {
                device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
                        __func__, (add_mac ? "Add" : "Del"), num_mac);
                return (-1);
        }

        cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
        bzero(cmac, (sizeof (q80_config_mac_addr_t)));

        cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
        cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
        cmac->count_version |= Q8_MBX_CMD_VERSION;

        if (add_mac)
                cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
        else
                cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

        cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

        cmac->nmac_entries = num_mac;
        cmac->cntxt_id = ha->hw.rcv_cntxt_id;

        for (i = 0; i < num_mac; i++) {
                bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
                mac_addr = mac_addr + ETHER_ADDR_LEN;
        }

        if (qla_mbx_cmd(ha, (uint32_t *)cmac,
                (sizeof (q80_config_mac_addr_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: %s failed0\n", __func__,
                        (add_mac ? "Add" : "Del"));
                return (-1);
        }
        cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
                        (add_mac ? "Add" : "Del"), err);
                /* dump the addresses the command was asked to program */
                for (i = 0; i < num_mac; i++) {
                        device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
                                __func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
                                mac_cpy[3], mac_cpy[4], mac_cpy[5]);
                        mac_cpy += ETHER_ADDR_LEN;
                }
                return (-1);
        }

        return 0;
}


/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 *	mode is written verbatim to the firmware for the receive context.
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
{
        q80_config_mac_rcv_mode_t *rcv_mode;
        uint32_t err;
        q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp;
        device_t dev = ha->pci_dev;

        rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
        bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

        rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
        rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
        rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

        rcv_mode->mode = mode;

        rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
                (sizeof (q80_config_mac_rcv_mode_t) >> 2),
                ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }
        rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return 0;
}

/* Enable promiscuous mode; ha->hw.mac_rcv_mode caches the mode word. */
int
ql_set_promisc(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}

/* Disable promiscuous mode (best effort: the result is ignored). */
void
qla_reset_promisc(qla_host_t *ha)
{
        ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
        (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

/* Enable all-multicast reception. */
int
ql_set_allmulti(qla_host_t *ha)
{
        int ret;

        ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
        ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
        return (ret);
}

/* Disable all-multicast reception (best effort: the result is ignored). */
void
qla_reset_allmulti(qla_host_t *ha)
{
        ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
        (void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

/*
 * Name: ql_set_max_mtu
 * Function:
 *	Sets the maximum transfer unit size for the specified rcv context.
 * NOTE(review): a mailbox status error is logged but 0 is still returned —
 * confirm callers do not rely on the return value to detect that case.
 */
int
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
        device_t dev;
        q80_set_max_mtu_t *max_mtu;
        q80_set_max_mtu_rsp_t *max_mtu_rsp;
        uint32_t err;

        dev = ha->pci_dev;

        max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
        bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

        max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
        max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
        max_mtu->count_version |= Q8_MBX_CMD_VERSION;

        max_mtu->cntxt_id = cntxt_id;
        max_mtu->mtu = mtu;

        if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
                (sizeof (q80_set_max_mtu_t) >> 2),
                ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

/*
 * Name: qla_link_event_req
 * Function: Ask the firmware to deliver asynchronous link events for the
 *	given receive context.
 * NOTE(review): as in ql_set_max_mtu, a mailbox status error is logged
 * but 0 is still returned.
 */
static int
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t dev;
        q80_link_event_t *lnk;
        q80_link_event_rsp_t *lnk_rsp;
        uint32_t err;

        dev = ha->pci_dev;

        lnk = (q80_link_event_t *)ha->hw.mbox;
        bzero(lnk, (sizeof (q80_link_event_t)));

        lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
        lnk->count_version = (sizeof (q80_link_event_t) >> 2);
        lnk->count_version |= Q8_MBX_CMD_VERSION;

        lnk->cntxt_id = cntxt_id;
        lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

        if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
                ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

/*
 * Name: qla_config_fw_lro
 * Function: Enable firmware LRO for IPv4 and IPv6 (both without the
 *	destination-IP check) on the given receive context.
 */
static int
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
{
        device_t dev;
        q80_config_fw_lro_t *fw_lro;
        q80_config_fw_lro_rsp_t *fw_lro_rsp;
        uint32_t err;

        dev = ha->pci_dev;

        fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
        bzero(fw_lro, sizeof(q80_config_fw_lro_t));

        fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
        fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
        fw_lro->count_version |= Q8_MBX_CMD_VERSION;

        fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
        fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;

        fw_lro->cntxt_id = cntxt_id;

        if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
                (sizeof (q80_config_fw_lro_t) >> 2),
                ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }

        fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

/*
 * Name: qla_set_cam_search_mode
 * Function: Program the CAM search mode via the Q8_MBX_HW_CONFIG command.
 */
static int
qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
{
        device_t dev;
        q80_hw_config_t *hw_config;
        q80_hw_config_rsp_t *hw_config_rsp;
        uint32_t err;

        dev = ha->pci_dev;

        hw_config = (q80_hw_config_t *)ha->hw.mbox;
        bzero(hw_config, sizeof (q80_hw_config_t));

        hw_config->opcode = Q8_MBX_HW_CONFIG;
        hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
        hw_config->count_version |= Q8_MBX_CMD_VERSION;

        hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;

        hw_config->u.set_cam_search_mode.mode = search_mode;

        if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
                (sizeof (q80_hw_config_t) >> 2),
                ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
/* NOTE(review): continuation of qla_set_cam_search_mode() from above. */
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }
        hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        }

        return 0;
}

/*
 * Name: qla_get_cam_search_mode
 * Function: Read back and log the current CAM search mode via the
 *	Q8_MBX_HW_CONFIG command.
 */
static int
qla_get_cam_search_mode(qla_host_t *ha)
{
        device_t dev;
        q80_hw_config_t *hw_config;
        q80_hw_config_rsp_t *hw_config_rsp;
        uint32_t err;

        dev = ha->pci_dev;

        hw_config = (q80_hw_config_t *)ha->hw.mbox;
        bzero(hw_config, sizeof (q80_hw_config_t));

        hw_config->opcode = Q8_MBX_HW_CONFIG;
        hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
        hw_config->count_version |= Q8_MBX_CMD_VERSION;

        hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;

        if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
                (sizeof (q80_hw_config_t) >> 2),
                ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
                device_printf(dev, "%s: failed\n", __func__);
                return -1;
        }
        hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
        } else {
                device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
                        hw_config_rsp->u.get_cam_search_mode.mode);
        }

        return 0;
}



/*
 * Print one transmit statistics block; when i names a valid tx ring the
 * ring index is included in each line, otherwise the aggregate form is
 * printed.
 */
static void
qla_xmt_stats(qla_host_t *ha, q80_xmt_stats_t *xstat, int i)
{
        device_t dev = ha->pci_dev;

        if (i < ha->hw.num_tx_rings) {
                device_printf(dev, "%s[%d]: total_bytes\t\t%" PRIu64 "\n",
                        __func__, i, xstat->total_bytes);
                device_printf(dev, "%s[%d]: total_pkts\t\t%" PRIu64 "\n",
                        __func__, i, xstat->total_pkts);
                device_printf(dev, "%s[%d]: errors\t\t%" PRIu64 "\n",
                        __func__, i, xstat->errors);
                device_printf(dev, "%s[%d]: pkts_dropped\t%" PRIu64 "\n",
                        __func__, i, xstat->pkts_dropped);
                device_printf(dev, "%s[%d]: switch_pkts\t\t%" PRIu64 "\n",
                        __func__, i, xstat->switch_pkts);
                device_printf(dev, "%s[%d]: num_buffers\t\t%" PRIu64 "\n",
                        __func__, i, xstat->num_buffers);
        } else {
                device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n",
                        __func__, xstat->total_bytes);
                device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n",
                        __func__, xstat->total_pkts);
                device_printf(dev, "%s: errors\t\t\t%" PRIu64 "\n",
                        __func__, xstat->errors);
                device_printf(dev, "%s: pkts_dropped\t\t\t%" PRIu64 "\n",
                        __func__, xstat->pkts_dropped);
                device_printf(dev, "%s: switch_pkts\t\t\t%" PRIu64 "\n",
                        __func__, xstat->switch_pkts);
                device_printf(dev, "%s: num_buffers\t\t\t%" PRIu64 "\n",
                        __func__, xstat->num_buffers);
        }
}

/* Print the firmware receive statistics block. */
static void
qla_rcv_stats(qla_host_t *ha, q80_rcv_stats_t *rstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: total_bytes\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_bytes);
        device_printf(dev, "%s: total_pkts\t\t\t%" PRIu64 "\n", __func__,
                rstat->total_pkts);
        device_printf(dev, "%s: lro_pkt_count\t\t%" PRIu64 "\n", __func__,
                rstat->lro_pkt_count);
        device_printf(dev, "%s: sw_pkt_count\t\t\t%" PRIu64 "\n", __func__,
                rstat->sw_pkt_count);
        device_printf(dev, "%s: ip_chksum_err\t\t%" PRIu64 "\n", __func__,
                rstat->ip_chksum_err);
        device_printf(dev, "%s: pkts_wo_acntxts\t\t%" PRIu64 "\n", __func__,
                rstat->pkts_wo_acntxts);
        device_printf(dev, "%s: pkts_dropped_no_sds_card\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_card);
        device_printf(dev, "%s: pkts_dropped_no_sds_host\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_sds_host);
        device_printf(dev, "%s: oversized_pkts\t\t%" PRIu64 "\n", __func__,
                rstat->oversized_pkts);
        device_printf(dev, "%s: pkts_dropped_no_rds\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_dropped_no_rds);
        device_printf(dev, "%s: unxpctd_mcast_pkts\t\t%" PRIu64 "\n",
                __func__, rstat->unxpctd_mcast_pkts);
        device_printf(dev, "%s: re1_fbq_error\t\t%" PRIu64 "\n", __func__,
                rstat->re1_fbq_error);
        device_printf(dev, "%s: invalid_mac_addr\t\t%" PRIu64 "\n", __func__,
                rstat->invalid_mac_addr);
        device_printf(dev, "%s: rds_prime_trys\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_trys);
        device_printf(dev, "%s: rds_prime_success\t\t%" PRIu64 "\n", __func__,
                rstat->rds_prime_success);
        device_printf(dev, "%s: lro_flows_added\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_added);
        device_printf(dev, "%s: lro_flows_deleted\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_deleted);
        device_printf(dev, "%s: lro_flows_active\t\t%" PRIu64 "\n", __func__,
                rstat->lro_flows_active);
        device_printf(dev, "%s: pkts_droped_unknown\t\t%" PRIu64 "\n",
                __func__, rstat->pkts_droped_unknown);
}

/* Print the firmware MAC statistics block (continues past this chunk). */
static void
qla_mac_stats(qla_host_t *ha, q80_mac_stats_t *mstat)
{
        device_t dev = ha->pci_dev;

        device_printf(dev, "%s: xmt_frames\t\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_frames);
        device_printf(dev, "%s: xmt_bytes\t\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_bytes);
        device_printf(dev, "%s: xmt_mcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_mcast_pkts);
        device_printf(dev, "%s: xmt_bcast_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_bcast_pkts);
        device_printf(dev, "%s: xmt_pause_frames\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_pause_frames);
        device_printf(dev, "%s: xmt_cntrl_pkts\t\t%" PRIu64 "\n", __func__,
                mstat->xmt_cntrl_pkts);
        device_printf(dev, "%s: xmt_pkt_lt_64bytes\t\t%" PRIu64 "\n",
                __func__, mstat->xmt_pkt_lt_64bytes);
device_printf(dev, "%s: xmt_pkt_lt_127bytes\t\t%" PRIu64 "\n", 1568250661Sdavidcs __func__, mstat->xmt_pkt_lt_127bytes); 1569250661Sdavidcs device_printf(dev, "%s: xmt_pkt_lt_255bytes\t\t%" PRIu64 "\n", 1570250661Sdavidcs __func__, mstat->xmt_pkt_lt_255bytes); 1571250661Sdavidcs device_printf(dev, "%s: xmt_pkt_lt_511bytes\t\t%" PRIu64 "\n", 1572250661Sdavidcs __func__, mstat->xmt_pkt_lt_511bytes); 1573284741Sdavidcs device_printf(dev, "%s: xmt_pkt_lt_1023bytes\t\t%" PRIu64 "\n", 1574250661Sdavidcs __func__, mstat->xmt_pkt_lt_1023bytes); 1575284741Sdavidcs device_printf(dev, "%s: xmt_pkt_lt_1518bytes\t\t%" PRIu64 "\n", 1576250661Sdavidcs __func__, mstat->xmt_pkt_lt_1518bytes); 1577284741Sdavidcs device_printf(dev, "%s: xmt_pkt_gt_1518bytes\t\t%" PRIu64 "\n", 1578250661Sdavidcs __func__, mstat->xmt_pkt_gt_1518bytes); 1579250661Sdavidcs 1580250661Sdavidcs device_printf(dev, "%s: rcv_frames\t\t\t%" PRIu64 "\n", __func__, 1581250661Sdavidcs mstat->rcv_frames); 1582250661Sdavidcs device_printf(dev, "%s: rcv_bytes\t\t\t%" PRIu64 "\n", __func__, 1583250661Sdavidcs mstat->rcv_bytes); 1584250661Sdavidcs device_printf(dev, "%s: rcv_mcast_pkts\t\t%" PRIu64 "\n", __func__, 1585250661Sdavidcs mstat->rcv_mcast_pkts); 1586250661Sdavidcs device_printf(dev, "%s: rcv_bcast_pkts\t\t%" PRIu64 "\n", __func__, 1587250661Sdavidcs mstat->rcv_bcast_pkts); 1588250661Sdavidcs device_printf(dev, "%s: rcv_pause_frames\t\t%" PRIu64 "\n", __func__, 1589250661Sdavidcs mstat->rcv_pause_frames); 1590250661Sdavidcs device_printf(dev, "%s: rcv_cntrl_pkts\t\t%" PRIu64 "\n", __func__, 1591250661Sdavidcs mstat->rcv_cntrl_pkts); 1592250661Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_64bytes\t\t%" PRIu64 "\n", 1593250661Sdavidcs __func__, mstat->rcv_pkt_lt_64bytes); 1594250661Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_127bytes\t\t%" PRIu64 "\n", 1595250661Sdavidcs __func__, mstat->rcv_pkt_lt_127bytes); 1596250661Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_255bytes\t\t%" PRIu64 "\n", 1597250661Sdavidcs 
__func__, mstat->rcv_pkt_lt_255bytes); 1598250661Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_511bytes\t\t%" PRIu64 "\n", 1599250661Sdavidcs __func__, mstat->rcv_pkt_lt_511bytes); 1600284741Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_1023bytes\t\t%" PRIu64 "\n", 1601250661Sdavidcs __func__, mstat->rcv_pkt_lt_1023bytes); 1602284741Sdavidcs device_printf(dev, "%s: rcv_pkt_lt_1518bytes\t\t%" PRIu64 "\n", 1603250661Sdavidcs __func__, mstat->rcv_pkt_lt_1518bytes); 1604284741Sdavidcs device_printf(dev, "%s: rcv_pkt_gt_1518bytes\t\t%" PRIu64 "\n", 1605250661Sdavidcs __func__, mstat->rcv_pkt_gt_1518bytes); 1606250661Sdavidcs 1607250661Sdavidcs device_printf(dev, "%s: rcv_len_error\t\t%" PRIu64 "\n", __func__, 1608250661Sdavidcs mstat->rcv_len_error); 1609250661Sdavidcs device_printf(dev, "%s: rcv_len_small\t\t%" PRIu64 "\n", __func__, 1610250661Sdavidcs mstat->rcv_len_small); 1611250661Sdavidcs device_printf(dev, "%s: rcv_len_large\t\t%" PRIu64 "\n", __func__, 1612250661Sdavidcs mstat->rcv_len_large); 1613250661Sdavidcs device_printf(dev, "%s: rcv_jabber\t\t\t%" PRIu64 "\n", __func__, 1614250661Sdavidcs mstat->rcv_jabber); 1615250661Sdavidcs device_printf(dev, "%s: rcv_dropped\t\t\t%" PRIu64 "\n", __func__, 1616250661Sdavidcs mstat->rcv_dropped); 1617250661Sdavidcs device_printf(dev, "%s: fcs_error\t\t\t%" PRIu64 "\n", __func__, 1618250661Sdavidcs mstat->fcs_error); 1619250661Sdavidcs device_printf(dev, "%s: align_error\t\t\t%" PRIu64 "\n", __func__, 1620250661Sdavidcs mstat->align_error); 1621250661Sdavidcs} 1622250661Sdavidcs 1623250661Sdavidcs 1624250661Sdavidcsstatic int 1625284741Sdavidcsqla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size) 1626250661Sdavidcs{ 1627250661Sdavidcs device_t dev; 1628250661Sdavidcs q80_get_stats_t *stat; 1629250661Sdavidcs q80_get_stats_rsp_t *stat_rsp; 1630250661Sdavidcs uint32_t err; 1631250661Sdavidcs 1632250661Sdavidcs dev = ha->pci_dev; 1633250661Sdavidcs 1634250661Sdavidcs stat = (q80_get_stats_t *)ha->hw.mbox; 
1635250661Sdavidcs bzero(stat, (sizeof (q80_get_stats_t))); 1636250661Sdavidcs 1637250661Sdavidcs stat->opcode = Q8_MBX_GET_STATS; 1638250661Sdavidcs stat->count_version = 2; 1639250661Sdavidcs stat->count_version |= Q8_MBX_CMD_VERSION; 1640250661Sdavidcs 1641250661Sdavidcs stat->cmd = cmd; 1642250661Sdavidcs 1643250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)stat, 2, 1644284741Sdavidcs ha->hw.mbox, (rsp_size >> 2), 0)) { 1645250661Sdavidcs device_printf(dev, "%s: failed\n", __func__); 1646250661Sdavidcs return -1; 1647250661Sdavidcs } 1648250661Sdavidcs 1649250661Sdavidcs stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; 1650250661Sdavidcs 1651250661Sdavidcs err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status); 1652250661Sdavidcs 1653250661Sdavidcs if (err) { 1654250661Sdavidcs return -1; 1655250661Sdavidcs } 1656250661Sdavidcs 1657250661Sdavidcs return 0; 1658250661Sdavidcs} 1659250661Sdavidcs 1660250661Sdavidcsvoid 1661250661Sdavidcsql_get_stats(qla_host_t *ha) 1662250661Sdavidcs{ 1663250661Sdavidcs q80_get_stats_rsp_t *stat_rsp; 1664250661Sdavidcs q80_mac_stats_t *mstat; 1665250661Sdavidcs q80_xmt_stats_t *xstat; 1666250661Sdavidcs q80_rcv_stats_t *rstat; 1667250661Sdavidcs uint32_t cmd; 1668284741Sdavidcs int i; 1669250661Sdavidcs 1670250661Sdavidcs stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox; 1671250661Sdavidcs /* 1672250661Sdavidcs * Get MAC Statistics 1673250661Sdavidcs */ 1674250661Sdavidcs cmd = Q8_GET_STATS_CMD_TYPE_MAC; 1675284741Sdavidcs// cmd |= Q8_GET_STATS_CMD_CLEAR; 1676250661Sdavidcs 1677250661Sdavidcs cmd |= ((ha->pci_func & 0x1) << 16); 1678250661Sdavidcs 1679284741Sdavidcs if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { 1680250661Sdavidcs mstat = (q80_mac_stats_t *)&stat_rsp->u.mac; 1681250661Sdavidcs qla_mac_stats(ha, mstat); 1682250661Sdavidcs } else { 1683250661Sdavidcs device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n", 1684250661Sdavidcs __func__, ha->hw.mbox[0]); 1685250661Sdavidcs } 1686250661Sdavidcs /* 
1687250661Sdavidcs * Get RCV Statistics 1688250661Sdavidcs */ 1689250661Sdavidcs cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT; 1690284741Sdavidcs// cmd |= Q8_GET_STATS_CMD_CLEAR; 1691250661Sdavidcs cmd |= (ha->hw.rcv_cntxt_id << 16); 1692250661Sdavidcs 1693284741Sdavidcs if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { 1694250661Sdavidcs rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv; 1695250661Sdavidcs qla_rcv_stats(ha, rstat); 1696250661Sdavidcs } else { 1697250661Sdavidcs device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n", 1698250661Sdavidcs __func__, ha->hw.mbox[0]); 1699250661Sdavidcs } 1700250661Sdavidcs /* 1701250661Sdavidcs * Get XMT Statistics 1702250661Sdavidcs */ 1703284741Sdavidcs for (i = 0 ; i < ha->hw.num_tx_rings; i++) { 1704284741Sdavidcs cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT; 1705284741Sdavidcs// cmd |= Q8_GET_STATS_CMD_CLEAR; 1706284741Sdavidcs cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16); 1707250661Sdavidcs 1708284741Sdavidcs if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t)) 1709284741Sdavidcs == 0) { 1710284741Sdavidcs xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt; 1711284741Sdavidcs qla_xmt_stats(ha, xstat, i); 1712284741Sdavidcs } else { 1713284741Sdavidcs device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n", 1714284741Sdavidcs __func__, ha->hw.mbox[0]); 1715284741Sdavidcs } 1716284741Sdavidcs } 1717284741Sdavidcs return; 1718284741Sdavidcs} 1719250661Sdavidcs 1720284741Sdavidcsstatic void 1721284741Sdavidcsqla_get_quick_stats(qla_host_t *ha) 1722284741Sdavidcs{ 1723284741Sdavidcs q80_get_mac_rcv_xmt_stats_rsp_t *stat_rsp; 1724284741Sdavidcs q80_mac_stats_t *mstat; 1725284741Sdavidcs q80_xmt_stats_t *xstat; 1726284741Sdavidcs q80_rcv_stats_t *rstat; 1727284741Sdavidcs uint32_t cmd; 1728284741Sdavidcs 1729284741Sdavidcs stat_rsp = (q80_get_mac_rcv_xmt_stats_rsp_t *)ha->hw.mbox; 1730284741Sdavidcs 1731284741Sdavidcs cmd = Q8_GET_STATS_CMD_TYPE_ALL; 1732284741Sdavidcs// cmd |= 
Q8_GET_STATS_CMD_CLEAR; 1733284741Sdavidcs 1734284741Sdavidcs// cmd |= ((ha->pci_func & 0x3) << 16); 1735284741Sdavidcs cmd |= (0xFFFF << 16); 1736284741Sdavidcs 1737284741Sdavidcs if (qla_get_hw_stats(ha, cmd, 1738284741Sdavidcs sizeof (q80_get_mac_rcv_xmt_stats_rsp_t)) == 0) { 1739284741Sdavidcs 1740284741Sdavidcs mstat = (q80_mac_stats_t *)&stat_rsp->mac; 1741284741Sdavidcs rstat = (q80_rcv_stats_t *)&stat_rsp->rcv; 1742284741Sdavidcs xstat = (q80_xmt_stats_t *)&stat_rsp->xmt; 1743284741Sdavidcs qla_mac_stats(ha, mstat); 1744284741Sdavidcs qla_rcv_stats(ha, rstat); 1745284741Sdavidcs qla_xmt_stats(ha, xstat, ha->hw.num_tx_rings); 1746250661Sdavidcs } else { 1747284741Sdavidcs device_printf(ha->pci_dev, "%s: failed [0x%08x]\n", 1748250661Sdavidcs __func__, ha->hw.mbox[0]); 1749250661Sdavidcs } 1750284741Sdavidcs return; 1751250661Sdavidcs} 1752250661Sdavidcs 1753250661Sdavidcs/* 1754250661Sdavidcs * Name: qla_tx_tso 1755250661Sdavidcs * Function: Checks if the packet to be transmitted is a candidate for 1756250661Sdavidcs * Large TCP Segment Offload. If yes, the appropriate fields in the Tx 1757250661Sdavidcs * Ring Structure are plugged in. 
1758250661Sdavidcs */ 1759250661Sdavidcsstatic int 1760250661Sdavidcsqla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr) 1761250661Sdavidcs{ 1762250661Sdavidcs struct ether_vlan_header *eh; 1763250661Sdavidcs struct ip *ip = NULL; 1764250661Sdavidcs struct ip6_hdr *ip6 = NULL; 1765250661Sdavidcs struct tcphdr *th = NULL; 1766250661Sdavidcs uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off; 1767250661Sdavidcs uint16_t etype, opcode, offload = 1; 1768250661Sdavidcs device_t dev; 1769250661Sdavidcs 1770250661Sdavidcs dev = ha->pci_dev; 1771250661Sdavidcs 1772250661Sdavidcs 1773250661Sdavidcs eh = mtod(mp, struct ether_vlan_header *); 1774250661Sdavidcs 1775250661Sdavidcs if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1776250661Sdavidcs ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1777250661Sdavidcs etype = ntohs(eh->evl_proto); 1778250661Sdavidcs } else { 1779250661Sdavidcs ehdrlen = ETHER_HDR_LEN; 1780250661Sdavidcs etype = ntohs(eh->evl_encap_proto); 1781250661Sdavidcs } 1782250661Sdavidcs 1783250661Sdavidcs hdrlen = 0; 1784250661Sdavidcs 1785250661Sdavidcs switch (etype) { 1786250661Sdavidcs case ETHERTYPE_IP: 1787250661Sdavidcs 1788250661Sdavidcs tcp_opt_off = ehdrlen + sizeof(struct ip) + 1789250661Sdavidcs sizeof(struct tcphdr); 1790250661Sdavidcs 1791250661Sdavidcs if (mp->m_len < tcp_opt_off) { 1792250661Sdavidcs m_copydata(mp, 0, tcp_opt_off, hdr); 1793250661Sdavidcs ip = (struct ip *)(hdr + ehdrlen); 1794250661Sdavidcs } else { 1795250661Sdavidcs ip = (struct ip *)(mp->m_data + ehdrlen); 1796250661Sdavidcs } 1797250661Sdavidcs 1798250661Sdavidcs ip_hlen = ip->ip_hl << 2; 1799250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_TCP_LSO; 1800250661Sdavidcs 1801250661Sdavidcs 1802250661Sdavidcs if ((ip->ip_p != IPPROTO_TCP) || 1803250661Sdavidcs (ip_hlen != sizeof (struct ip))){ 1804250661Sdavidcs /* IP Options are not supported */ 1805250661Sdavidcs 1806250661Sdavidcs offload = 0; 1807250661Sdavidcs } else 1808250661Sdavidcs th = 
(struct tcphdr *)((caddr_t)ip + ip_hlen); 1809250661Sdavidcs 1810250661Sdavidcs break; 1811250661Sdavidcs 1812250661Sdavidcs case ETHERTYPE_IPV6: 1813250661Sdavidcs 1814250661Sdavidcs tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) + 1815250661Sdavidcs sizeof (struct tcphdr); 1816250661Sdavidcs 1817250661Sdavidcs if (mp->m_len < tcp_opt_off) { 1818250661Sdavidcs m_copydata(mp, 0, tcp_opt_off, hdr); 1819250661Sdavidcs ip6 = (struct ip6_hdr *)(hdr + ehdrlen); 1820250661Sdavidcs } else { 1821250661Sdavidcs ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 1822250661Sdavidcs } 1823250661Sdavidcs 1824250661Sdavidcs ip_hlen = sizeof(struct ip6_hdr); 1825250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6; 1826250661Sdavidcs 1827250661Sdavidcs if (ip6->ip6_nxt != IPPROTO_TCP) { 1828250661Sdavidcs //device_printf(dev, "%s: ipv6\n", __func__); 1829250661Sdavidcs offload = 0; 1830250661Sdavidcs } else 1831250661Sdavidcs th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); 1832250661Sdavidcs break; 1833250661Sdavidcs 1834250661Sdavidcs default: 1835250661Sdavidcs QL_DPRINT8(ha, (dev, "%s: type!=ip\n", __func__)); 1836250661Sdavidcs offload = 0; 1837250661Sdavidcs break; 1838250661Sdavidcs } 1839250661Sdavidcs 1840250661Sdavidcs if (!offload) 1841250661Sdavidcs return (-1); 1842250661Sdavidcs 1843250661Sdavidcs tcp_hlen = th->th_off << 2; 1844250661Sdavidcs hdrlen = ehdrlen + ip_hlen + tcp_hlen; 1845250661Sdavidcs 1846250661Sdavidcs if (mp->m_len < hdrlen) { 1847250661Sdavidcs if (mp->m_len < tcp_opt_off) { 1848250661Sdavidcs if (tcp_hlen > sizeof(struct tcphdr)) { 1849250661Sdavidcs m_copydata(mp, tcp_opt_off, 1850250661Sdavidcs (tcp_hlen - sizeof(struct tcphdr)), 1851250661Sdavidcs &hdr[tcp_opt_off]); 1852250661Sdavidcs } 1853250661Sdavidcs } else { 1854250661Sdavidcs m_copydata(mp, 0, hdrlen, hdr); 1855250661Sdavidcs } 1856250661Sdavidcs } 1857250661Sdavidcs 1858250661Sdavidcs tx_cmd->mss = mp->m_pkthdr.tso_segsz; 1859250661Sdavidcs 1860250661Sdavidcs tx_cmd->flags_opcode = 
opcode ; 1861250661Sdavidcs tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen; 1862250661Sdavidcs tx_cmd->total_hdr_len = hdrlen; 1863250661Sdavidcs 1864250661Sdavidcs /* Check for Multicast least significant bit of MSB == 1 */ 1865250661Sdavidcs if (eh->evl_dhost[0] & 0x01) { 1866250661Sdavidcs tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST; 1867250661Sdavidcs } 1868250661Sdavidcs 1869250661Sdavidcs if (mp->m_len < hdrlen) { 1870250661Sdavidcs printf("%d\n", hdrlen); 1871250661Sdavidcs return (1); 1872250661Sdavidcs } 1873250661Sdavidcs 1874250661Sdavidcs return (0); 1875250661Sdavidcs} 1876250661Sdavidcs 1877250661Sdavidcs/* 1878250661Sdavidcs * Name: qla_tx_chksum 1879250661Sdavidcs * Function: Checks if the packet to be transmitted is a candidate for 1880250661Sdavidcs * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx 1881250661Sdavidcs * Ring Structure are plugged in. 1882250661Sdavidcs */ 1883250661Sdavidcsstatic int 1884250661Sdavidcsqla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code, 1885250661Sdavidcs uint32_t *tcp_hdr_off) 1886250661Sdavidcs{ 1887250661Sdavidcs struct ether_vlan_header *eh; 1888250661Sdavidcs struct ip *ip; 1889250661Sdavidcs struct ip6_hdr *ip6; 1890250661Sdavidcs uint32_t ehdrlen, ip_hlen; 1891250661Sdavidcs uint16_t etype, opcode, offload = 1; 1892250661Sdavidcs device_t dev; 1893250661Sdavidcs uint8_t buf[sizeof(struct ip6_hdr)]; 1894250661Sdavidcs 1895250661Sdavidcs dev = ha->pci_dev; 1896250661Sdavidcs 1897250661Sdavidcs *op_code = 0; 1898250661Sdavidcs 1899250661Sdavidcs if ((mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) == 0) 1900250661Sdavidcs return (-1); 1901250661Sdavidcs 1902250661Sdavidcs eh = mtod(mp, struct ether_vlan_header *); 1903250661Sdavidcs 1904250661Sdavidcs if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1905250661Sdavidcs ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 1906250661Sdavidcs etype = ntohs(eh->evl_proto); 1907250661Sdavidcs } else { 1908250661Sdavidcs ehdrlen = 
ETHER_HDR_LEN; 1909250661Sdavidcs etype = ntohs(eh->evl_encap_proto); 1910250661Sdavidcs } 1911250661Sdavidcs 1912250661Sdavidcs 1913250661Sdavidcs switch (etype) { 1914250661Sdavidcs case ETHERTYPE_IP: 1915250661Sdavidcs ip = (struct ip *)(mp->m_data + ehdrlen); 1916250661Sdavidcs 1917250661Sdavidcs ip_hlen = sizeof (struct ip); 1918250661Sdavidcs 1919250661Sdavidcs if (mp->m_len < (ehdrlen + ip_hlen)) { 1920250661Sdavidcs m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 1921250661Sdavidcs ip = (struct ip *)buf; 1922250661Sdavidcs } 1923250661Sdavidcs 1924250661Sdavidcs if (ip->ip_p == IPPROTO_TCP) 1925250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM; 1926250661Sdavidcs else if (ip->ip_p == IPPROTO_UDP) 1927250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM; 1928250661Sdavidcs else { 1929250661Sdavidcs //device_printf(dev, "%s: ipv4\n", __func__); 1930250661Sdavidcs offload = 0; 1931250661Sdavidcs } 1932250661Sdavidcs break; 1933250661Sdavidcs 1934250661Sdavidcs case ETHERTYPE_IPV6: 1935250661Sdavidcs ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 1936250661Sdavidcs 1937250661Sdavidcs ip_hlen = sizeof(struct ip6_hdr); 1938250661Sdavidcs 1939250661Sdavidcs if (mp->m_len < (ehdrlen + ip_hlen)) { 1940250661Sdavidcs m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 1941250661Sdavidcs buf); 1942250661Sdavidcs ip6 = (struct ip6_hdr *)buf; 1943250661Sdavidcs } 1944250661Sdavidcs 1945250661Sdavidcs if (ip6->ip6_nxt == IPPROTO_TCP) 1946250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6; 1947250661Sdavidcs else if (ip6->ip6_nxt == IPPROTO_UDP) 1948250661Sdavidcs opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6; 1949250661Sdavidcs else { 1950250661Sdavidcs //device_printf(dev, "%s: ipv6\n", __func__); 1951250661Sdavidcs offload = 0; 1952250661Sdavidcs } 1953250661Sdavidcs break; 1954250661Sdavidcs 1955250661Sdavidcs default: 1956250661Sdavidcs offload = 0; 1957250661Sdavidcs break; 1958250661Sdavidcs } 1959250661Sdavidcs if (!offload) 1960250661Sdavidcs return (-1); 
1961250661Sdavidcs 1962250661Sdavidcs *op_code = opcode; 1963250661Sdavidcs *tcp_hdr_off = (ip_hlen + ehdrlen); 1964250661Sdavidcs 1965250661Sdavidcs return (0); 1966250661Sdavidcs} 1967250661Sdavidcs 1968250661Sdavidcs#define QLA_TX_MIN_FREE 2 1969250661Sdavidcs/* 1970250661Sdavidcs * Name: ql_hw_send 1971250661Sdavidcs * Function: Transmits a packet. It first checks if the packet is a 1972250661Sdavidcs * candidate for Large TCP Segment Offload and then for UDP/TCP checksum 1973250661Sdavidcs * offload. If either of these creteria are not met, it is transmitted 1974250661Sdavidcs * as a regular ethernet frame. 1975250661Sdavidcs */ 1976250661Sdavidcsint 1977250661Sdavidcsql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs, 1978284741Sdavidcs uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu) 1979250661Sdavidcs{ 1980250661Sdavidcs struct ether_vlan_header *eh; 1981250661Sdavidcs qla_hw_t *hw = &ha->hw; 1982250661Sdavidcs q80_tx_cmd_t *tx_cmd, tso_cmd; 1983250661Sdavidcs bus_dma_segment_t *c_seg; 1984250661Sdavidcs uint32_t num_tx_cmds, hdr_len = 0; 1985250661Sdavidcs uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next; 1986250661Sdavidcs device_t dev; 1987250661Sdavidcs int i, ret; 1988250661Sdavidcs uint8_t *src = NULL, *dst = NULL; 1989250661Sdavidcs uint8_t frame_hdr[QL_FRAME_HDR_SIZE]; 1990250661Sdavidcs uint32_t op_code = 0; 1991250661Sdavidcs uint32_t tcp_hdr_off = 0; 1992250661Sdavidcs 1993250661Sdavidcs dev = ha->pci_dev; 1994250661Sdavidcs 1995250661Sdavidcs /* 1996250661Sdavidcs * Always make sure there is atleast one empty slot in the tx_ring 1997250661Sdavidcs * tx_ring is considered full when there only one entry available 1998250661Sdavidcs */ 1999250661Sdavidcs num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2; 2000250661Sdavidcs 2001250661Sdavidcs total_length = mp->m_pkthdr.len; 2002250661Sdavidcs if (total_length > QLA_MAX_TSO_FRAME_SIZE) { 2003250661Sdavidcs device_printf(dev, "%s: total length 
exceeds maxlen(%d)\n", 2004250661Sdavidcs __func__, total_length); 2005250661Sdavidcs return (-1); 2006250661Sdavidcs } 2007250661Sdavidcs eh = mtod(mp, struct ether_vlan_header *); 2008250661Sdavidcs 2009250661Sdavidcs if (mp->m_pkthdr.csum_flags & CSUM_TSO) { 2010250661Sdavidcs 2011250661Sdavidcs bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t)); 2012250661Sdavidcs 2013250661Sdavidcs src = frame_hdr; 2014250661Sdavidcs ret = qla_tx_tso(ha, mp, &tso_cmd, src); 2015250661Sdavidcs 2016250661Sdavidcs if (!(ret & ~1)) { 2017250661Sdavidcs /* find the additional tx_cmd descriptors required */ 2018250661Sdavidcs 2019250661Sdavidcs if (mp->m_flags & M_VLANTAG) 2020250661Sdavidcs tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN; 2021250661Sdavidcs 2022250661Sdavidcs hdr_len = tso_cmd.total_hdr_len; 2023250661Sdavidcs 2024250661Sdavidcs bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; 2025250661Sdavidcs bytes = QL_MIN(bytes, hdr_len); 2026250661Sdavidcs 2027250661Sdavidcs num_tx_cmds++; 2028250661Sdavidcs hdr_len -= bytes; 2029250661Sdavidcs 2030250661Sdavidcs while (hdr_len) { 2031250661Sdavidcs bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); 2032250661Sdavidcs hdr_len -= bytes; 2033250661Sdavidcs num_tx_cmds++; 2034250661Sdavidcs } 2035250661Sdavidcs hdr_len = tso_cmd.total_hdr_len; 2036250661Sdavidcs 2037250661Sdavidcs if (ret == 0) 2038250661Sdavidcs src = (uint8_t *)eh; 2039250661Sdavidcs } else 2040250661Sdavidcs return (EINVAL); 2041250661Sdavidcs } else { 2042250661Sdavidcs (void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off); 2043250661Sdavidcs } 2044250661Sdavidcs 2045284741Sdavidcs if (iscsi_pdu) 2046284741Sdavidcs ha->hw.iscsi_pkt_count++; 2047284741Sdavidcs 2048250661Sdavidcs if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) { 2049313070Sdavidcs ql_hw_tx_done_locked(ha, txr_idx); 2050250661Sdavidcs if (hw->tx_cntxt[txr_idx].txr_free <= 2051250661Sdavidcs (num_tx_cmds + QLA_TX_MIN_FREE)) { 2052250661Sdavidcs QL_DPRINT8(ha, (dev, "%s: 
(hw->txr_free <= " 2053250661Sdavidcs "(num_tx_cmds + QLA_TX_MIN_FREE))\n", 2054250661Sdavidcs __func__)); 2055250661Sdavidcs return (-1); 2056250661Sdavidcs } 2057250661Sdavidcs } 2058250661Sdavidcs 2059250661Sdavidcs tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx]; 2060250661Sdavidcs 2061250661Sdavidcs if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) { 2062250661Sdavidcs 2063250661Sdavidcs if (nsegs > ha->hw.max_tx_segs) 2064250661Sdavidcs ha->hw.max_tx_segs = nsegs; 2065250661Sdavidcs 2066250661Sdavidcs bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2067250661Sdavidcs 2068250661Sdavidcs if (op_code) { 2069250661Sdavidcs tx_cmd->flags_opcode = op_code; 2070250661Sdavidcs tx_cmd->tcp_hdr_off = tcp_hdr_off; 2071250661Sdavidcs 2072250661Sdavidcs } else { 2073250661Sdavidcs tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER; 2074250661Sdavidcs } 2075250661Sdavidcs } else { 2076250661Sdavidcs bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t)); 2077250661Sdavidcs ha->tx_tso_frames++; 2078250661Sdavidcs } 2079250661Sdavidcs 2080250661Sdavidcs if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2081250661Sdavidcs tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED; 2082284741Sdavidcs 2083284741Sdavidcs if (iscsi_pdu) 2084284741Sdavidcs eh->evl_tag |= ha->hw.user_pri_iscsi << 13; 2085284741Sdavidcs 2086250661Sdavidcs } else if (mp->m_flags & M_VLANTAG) { 2087250661Sdavidcs 2088250661Sdavidcs if (hdr_len) { /* TSO */ 2089250661Sdavidcs tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED | 2090250661Sdavidcs Q8_TX_CMD_FLAGS_HW_VLAN_ID); 2091250661Sdavidcs tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN; 2092250661Sdavidcs } else 2093250661Sdavidcs tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID; 2094250661Sdavidcs 2095250661Sdavidcs ha->hw_vlan_tx_frames++; 2096250661Sdavidcs tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag; 2097284741Sdavidcs 2098284741Sdavidcs if (iscsi_pdu) { 2099284741Sdavidcs tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13; 2100284741Sdavidcs mp->m_pkthdr.ether_vtag = 
tx_cmd->vlan_tci; 2101284741Sdavidcs } 2102250661Sdavidcs } 2103250661Sdavidcs 2104250661Sdavidcs 2105250661Sdavidcs tx_cmd->n_bufs = (uint8_t)nsegs; 2106250661Sdavidcs tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF); 2107250661Sdavidcs tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8))); 2108250661Sdavidcs tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func); 2109250661Sdavidcs 2110250661Sdavidcs c_seg = segs; 2111250661Sdavidcs 2112250661Sdavidcs while (1) { 2113250661Sdavidcs for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) { 2114250661Sdavidcs 2115250661Sdavidcs switch (i) { 2116250661Sdavidcs case 0: 2117250661Sdavidcs tx_cmd->buf1_addr = c_seg->ds_addr; 2118250661Sdavidcs tx_cmd->buf1_len = c_seg->ds_len; 2119250661Sdavidcs break; 2120250661Sdavidcs 2121250661Sdavidcs case 1: 2122250661Sdavidcs tx_cmd->buf2_addr = c_seg->ds_addr; 2123250661Sdavidcs tx_cmd->buf2_len = c_seg->ds_len; 2124250661Sdavidcs break; 2125250661Sdavidcs 2126250661Sdavidcs case 2: 2127250661Sdavidcs tx_cmd->buf3_addr = c_seg->ds_addr; 2128250661Sdavidcs tx_cmd->buf3_len = c_seg->ds_len; 2129250661Sdavidcs break; 2130250661Sdavidcs 2131250661Sdavidcs case 3: 2132250661Sdavidcs tx_cmd->buf4_addr = c_seg->ds_addr; 2133250661Sdavidcs tx_cmd->buf4_len = c_seg->ds_len; 2134250661Sdavidcs break; 2135250661Sdavidcs } 2136250661Sdavidcs 2137250661Sdavidcs c_seg++; 2138250661Sdavidcs nsegs--; 2139250661Sdavidcs } 2140250661Sdavidcs 2141250661Sdavidcs txr_next = hw->tx_cntxt[txr_idx].txr_next = 2142250661Sdavidcs (hw->tx_cntxt[txr_idx].txr_next + 1) & 2143250661Sdavidcs (NUM_TX_DESCRIPTORS - 1); 2144250661Sdavidcs tx_cmd_count++; 2145250661Sdavidcs 2146250661Sdavidcs if (!nsegs) 2147250661Sdavidcs break; 2148250661Sdavidcs 2149250661Sdavidcs tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; 2150250661Sdavidcs bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2151250661Sdavidcs } 2152250661Sdavidcs 2153250661Sdavidcs if (mp->m_pkthdr.csum_flags & CSUM_TSO) { 
2154250661Sdavidcs 2155250661Sdavidcs /* TSO : Copy the header in the following tx cmd descriptors */ 2156250661Sdavidcs 2157250661Sdavidcs txr_next = hw->tx_cntxt[txr_idx].txr_next; 2158250661Sdavidcs 2159250661Sdavidcs tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; 2160250661Sdavidcs bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2161250661Sdavidcs 2162250661Sdavidcs bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN; 2163250661Sdavidcs bytes = QL_MIN(bytes, hdr_len); 2164250661Sdavidcs 2165250661Sdavidcs dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN; 2166250661Sdavidcs 2167250661Sdavidcs if (mp->m_flags & M_VLANTAG) { 2168250661Sdavidcs /* first copy the src/dst MAC addresses */ 2169250661Sdavidcs bcopy(src, dst, (ETHER_ADDR_LEN * 2)); 2170250661Sdavidcs dst += (ETHER_ADDR_LEN * 2); 2171250661Sdavidcs src += (ETHER_ADDR_LEN * 2); 2172250661Sdavidcs 2173250661Sdavidcs *((uint16_t *)dst) = htons(ETHERTYPE_VLAN); 2174250661Sdavidcs dst += 2; 2175250661Sdavidcs *((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag); 2176250661Sdavidcs dst += 2; 2177250661Sdavidcs 2178250661Sdavidcs /* bytes left in src header */ 2179250661Sdavidcs hdr_len -= ((ETHER_ADDR_LEN * 2) + 2180250661Sdavidcs ETHER_VLAN_ENCAP_LEN); 2181250661Sdavidcs 2182250661Sdavidcs /* bytes left in TxCmd Entry */ 2183250661Sdavidcs bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN); 2184250661Sdavidcs 2185250661Sdavidcs 2186250661Sdavidcs bcopy(src, dst, bytes); 2187250661Sdavidcs src += bytes; 2188250661Sdavidcs hdr_len -= bytes; 2189250661Sdavidcs } else { 2190250661Sdavidcs bcopy(src, dst, bytes); 2191250661Sdavidcs src += bytes; 2192250661Sdavidcs hdr_len -= bytes; 2193250661Sdavidcs } 2194250661Sdavidcs 2195250661Sdavidcs txr_next = hw->tx_cntxt[txr_idx].txr_next = 2196250661Sdavidcs (hw->tx_cntxt[txr_idx].txr_next + 1) & 2197250661Sdavidcs (NUM_TX_DESCRIPTORS - 1); 2198250661Sdavidcs tx_cmd_count++; 2199250661Sdavidcs 2200250661Sdavidcs while (hdr_len) { 2201250661Sdavidcs tx_cmd = 
&hw->tx_cntxt[txr_idx].tx_ring_base[txr_next]; 2202250661Sdavidcs bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t)); 2203250661Sdavidcs 2204250661Sdavidcs bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len); 2205250661Sdavidcs 2206250661Sdavidcs bcopy(src, tx_cmd, bytes); 2207250661Sdavidcs src += bytes; 2208250661Sdavidcs hdr_len -= bytes; 2209250661Sdavidcs 2210250661Sdavidcs txr_next = hw->tx_cntxt[txr_idx].txr_next = 2211250661Sdavidcs (hw->tx_cntxt[txr_idx].txr_next + 1) & 2212250661Sdavidcs (NUM_TX_DESCRIPTORS - 1); 2213250661Sdavidcs tx_cmd_count++; 2214250661Sdavidcs } 2215250661Sdavidcs } 2216250661Sdavidcs 2217250661Sdavidcs hw->tx_cntxt[txr_idx].txr_free = 2218250661Sdavidcs hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count; 2219250661Sdavidcs 2220250661Sdavidcs QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\ 2221250661Sdavidcs txr_idx); 2222250661Sdavidcs QL_DPRINT8(ha, (dev, "%s: return\n", __func__)); 2223250661Sdavidcs 2224250661Sdavidcs return (0); 2225250661Sdavidcs} 2226250661Sdavidcs 2227250661Sdavidcs 2228284741Sdavidcs 2229284741Sdavidcs#define Q8_CONFIG_IND_TBL_SIZE 32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */ 2230250661Sdavidcsstatic int 2231250661Sdavidcsqla_config_rss_ind_table(qla_host_t *ha) 2232250661Sdavidcs{ 2233250661Sdavidcs uint32_t i, count; 2234284741Sdavidcs uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE]; 2235250661Sdavidcs 2236250661Sdavidcs 2237284741Sdavidcs for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) { 2238250661Sdavidcs rss_ind_tbl[i] = i % ha->hw.num_sds_rings; 2239250661Sdavidcs } 2240250661Sdavidcs 2241284741Sdavidcs for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ; 2242284741Sdavidcs i = i + Q8_CONFIG_IND_TBL_SIZE) { 2243250661Sdavidcs 2244284741Sdavidcs if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) { 2245250661Sdavidcs count = Q8_RSS_IND_TBL_MAX_IDX - i + 1; 2246250661Sdavidcs } else { 2247284741Sdavidcs count = Q8_CONFIG_IND_TBL_SIZE; 2248250661Sdavidcs } 2249250661Sdavidcs 2250250661Sdavidcs if 
(qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id, 2251250661Sdavidcs rss_ind_tbl)) 2252250661Sdavidcs return (-1); 2253250661Sdavidcs } 2254250661Sdavidcs 2255250661Sdavidcs return (0); 2256250661Sdavidcs} 2257250661Sdavidcs 2258250661Sdavidcs/* 2259250661Sdavidcs * Name: ql_del_hw_if 2260250661Sdavidcs * Function: Destroys the hardware specific entities corresponding to an 2261250661Sdavidcs * Ethernet Interface 2262250661Sdavidcs */ 2263250661Sdavidcsvoid 2264250661Sdavidcsql_del_hw_if(qla_host_t *ha) 2265250661Sdavidcs{ 2266284741Sdavidcs uint32_t i; 2267284741Sdavidcs uint32_t num_msix; 2268250661Sdavidcs 2269284741Sdavidcs (void)qla_stop_nic_func(ha); 2270284741Sdavidcs 2271250661Sdavidcs qla_del_rcv_cntxt(ha); 2272307524Sdavidcs 2273250661Sdavidcs qla_del_xmt_cntxt(ha); 2274250661Sdavidcs 2275250661Sdavidcs if (ha->hw.flags.init_intr_cnxt) { 2276284741Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; ) { 2277284741Sdavidcs 2278284741Sdavidcs if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) 2279284741Sdavidcs num_msix = Q8_MAX_INTR_VECTORS; 2280284741Sdavidcs else 2281284741Sdavidcs num_msix = ha->hw.num_sds_rings - i; 2282284741Sdavidcs qla_config_intr_cntxt(ha, i, num_msix, 0); 2283284741Sdavidcs 2284284741Sdavidcs i += num_msix; 2285284741Sdavidcs } 2286284741Sdavidcs 2287250661Sdavidcs ha->hw.flags.init_intr_cnxt = 0; 2288250661Sdavidcs } 2289307524Sdavidcs 2290284741Sdavidcs return; 2291250661Sdavidcs} 2292250661Sdavidcs 2293284741Sdavidcsvoid 2294284741Sdavidcsqla_confirm_9kb_enable(qla_host_t *ha) 2295284741Sdavidcs{ 2296284741Sdavidcs uint32_t supports_9kb = 0; 2297284741Sdavidcs 2298284741Sdavidcs ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX); 2299284741Sdavidcs 2300284741Sdavidcs /* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */ 2301284741Sdavidcs WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2); 2302284741Sdavidcs WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); 2303284741Sdavidcs 2304284741Sdavidcs 
qla_get_nic_partition(ha, &supports_9kb, NULL); 2305284741Sdavidcs 2306284741Sdavidcs if (!supports_9kb) 2307284741Sdavidcs ha->hw.enable_9kb = 0; 2308284741Sdavidcs 2309284741Sdavidcs return; 2310284741Sdavidcs} 2311284741Sdavidcs 2312284741Sdavidcs 2313250661Sdavidcs/* 2314250661Sdavidcs * Name: ql_init_hw_if 2315250661Sdavidcs * Function: Creates the hardware specific entities corresponding to an 2316250661Sdavidcs * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address 2317250661Sdavidcs * corresponding to the interface. Enables LRO if allowed. 2318250661Sdavidcs */ 2319250661Sdavidcsint 2320250661Sdavidcsql_init_hw_if(qla_host_t *ha) 2321250661Sdavidcs{ 2322250661Sdavidcs device_t dev; 2323250661Sdavidcs uint32_t i; 2324250661Sdavidcs uint8_t bcast_mac[6]; 2325250661Sdavidcs qla_rdesc_t *rdesc; 2326284741Sdavidcs uint32_t num_msix; 2327250661Sdavidcs 2328250661Sdavidcs dev = ha->pci_dev; 2329250661Sdavidcs 2330250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) { 2331250661Sdavidcs bzero(ha->hw.dma_buf.sds_ring[i].dma_b, 2332250661Sdavidcs ha->hw.dma_buf.sds_ring[i].size); 2333250661Sdavidcs } 2334250661Sdavidcs 2335284741Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; ) { 2336250661Sdavidcs 2337284741Sdavidcs if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings) 2338284741Sdavidcs num_msix = Q8_MAX_INTR_VECTORS; 2339284741Sdavidcs else 2340284741Sdavidcs num_msix = ha->hw.num_sds_rings - i; 2341250661Sdavidcs 2342284741Sdavidcs if (qla_config_intr_cntxt(ha, i, num_msix, 1)) { 2343250661Sdavidcs 2344284741Sdavidcs if (i > 0) { 2345284741Sdavidcs 2346284741Sdavidcs num_msix = i; 2347284741Sdavidcs 2348284741Sdavidcs for (i = 0; i < num_msix; ) { 2349284741Sdavidcs qla_config_intr_cntxt(ha, i, 2350284741Sdavidcs Q8_MAX_INTR_VECTORS, 0); 2351284741Sdavidcs i += Q8_MAX_INTR_VECTORS; 2352284741Sdavidcs } 2353284741Sdavidcs } 2354284741Sdavidcs return (-1); 2355284741Sdavidcs } 2356284741Sdavidcs 2357284741Sdavidcs i = i + num_msix; 
2358284741Sdavidcs } 2359284741Sdavidcs 2360284741Sdavidcs ha->hw.flags.init_intr_cnxt = 1; 2361284741Sdavidcs 2362250661Sdavidcs /* 2363250661Sdavidcs * Create Receive Context 2364250661Sdavidcs */ 2365250661Sdavidcs if (qla_init_rcv_cntxt(ha)) { 2366250661Sdavidcs return (-1); 2367250661Sdavidcs } 2368250661Sdavidcs 2369250661Sdavidcs for (i = 0; i < ha->hw.num_rds_rings; i++) { 2370250661Sdavidcs rdesc = &ha->hw.rds[i]; 2371250661Sdavidcs rdesc->rx_next = NUM_RX_DESCRIPTORS - 2; 2372250661Sdavidcs rdesc->rx_in = 0; 2373250661Sdavidcs /* Update the RDS Producer Indices */ 2374250661Sdavidcs QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\ 2375250661Sdavidcs rdesc->rx_next); 2376250661Sdavidcs } 2377250661Sdavidcs 2378250661Sdavidcs 2379250661Sdavidcs /* 2380250661Sdavidcs * Create Transmit Context 2381250661Sdavidcs */ 2382250661Sdavidcs if (qla_init_xmt_cntxt(ha)) { 2383250661Sdavidcs qla_del_rcv_cntxt(ha); 2384250661Sdavidcs return (-1); 2385250661Sdavidcs } 2386250661Sdavidcs ha->hw.max_tx_segs = 0; 2387250661Sdavidcs 2388307524Sdavidcs if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1)) 2389250661Sdavidcs return(-1); 2390250661Sdavidcs 2391250661Sdavidcs ha->hw.flags.unicast_mac = 1; 2392250661Sdavidcs 2393250661Sdavidcs bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; 2394250661Sdavidcs bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; 2395250661Sdavidcs 2396307524Sdavidcs if (qla_config_mac_addr(ha, bcast_mac, 1, 1)) 2397250661Sdavidcs return (-1); 2398250661Sdavidcs 2399250661Sdavidcs ha->hw.flags.bcast_mac = 1; 2400250661Sdavidcs 2401250661Sdavidcs /* 2402250661Sdavidcs * program any cached multicast addresses 2403250661Sdavidcs */ 2404250661Sdavidcs if (qla_hw_add_all_mcast(ha)) 2405250661Sdavidcs return (-1); 2406250661Sdavidcs 2407250661Sdavidcs if (qla_config_rss(ha, ha->hw.rcv_cntxt_id)) 2408250661Sdavidcs return (-1); 2409250661Sdavidcs 2410250661Sdavidcs if (qla_config_rss_ind_table(ha)) 2411250661Sdavidcs return (-1); 
2412250661Sdavidcs 2413284741Sdavidcs if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1)) 2414250661Sdavidcs return (-1); 2415250661Sdavidcs 2416250661Sdavidcs if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id)) 2417250661Sdavidcs return (-1); 2418250661Sdavidcs 2419250661Sdavidcs if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id)) 2420250661Sdavidcs return (-1); 2421250661Sdavidcs 2422284741Sdavidcs if (qla_init_nic_func(ha)) 2423284741Sdavidcs return (-1); 2424284741Sdavidcs 2425284741Sdavidcs if (qla_query_fw_dcbx_caps(ha)) 2426284741Sdavidcs return (-1); 2427284741Sdavidcs 2428250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) 2429250661Sdavidcs QL_ENABLE_INTERRUPTS(ha, i); 2430250661Sdavidcs 2431250661Sdavidcs return (0); 2432250661Sdavidcs} 2433250661Sdavidcs 2434250661Sdavidcsstatic int 2435284741Sdavidcsqla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx) 2436250661Sdavidcs{ 2437250661Sdavidcs device_t dev = ha->pci_dev; 2438250661Sdavidcs q80_rq_map_sds_to_rds_t *map_rings; 2439284741Sdavidcs q80_rsp_map_sds_to_rds_t *map_rings_rsp; 2440250661Sdavidcs uint32_t i, err; 2441250661Sdavidcs qla_hw_t *hw = &ha->hw; 2442250661Sdavidcs 2443250661Sdavidcs map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox; 2444250661Sdavidcs bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t)); 2445250661Sdavidcs 2446250661Sdavidcs map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS; 2447250661Sdavidcs map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2); 2448250661Sdavidcs map_rings->count_version |= Q8_MBX_CMD_VERSION; 2449250661Sdavidcs 2450250661Sdavidcs map_rings->cntxt_id = hw->rcv_cntxt_id; 2451284741Sdavidcs map_rings->num_rings = num_idx; 2452250661Sdavidcs 2453284741Sdavidcs for (i = 0; i < num_idx; i++) { 2454284741Sdavidcs map_rings->sds_rds[i].sds_ring = i + start_idx; 2455284741Sdavidcs map_rings->sds_rds[i].rds_ring = i + start_idx; 2456284741Sdavidcs } 2457250661Sdavidcs 2458250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)map_rings, 
2459250661Sdavidcs (sizeof (q80_rq_map_sds_to_rds_t) >> 2), 2460250661Sdavidcs ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { 2461250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 2462250661Sdavidcs return (-1); 2463250661Sdavidcs } 2464250661Sdavidcs 2465284741Sdavidcs map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox; 2466250661Sdavidcs 2467250661Sdavidcs err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status); 2468250661Sdavidcs 2469250661Sdavidcs if (err) { 2470250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2471250661Sdavidcs return (-1); 2472250661Sdavidcs } 2473250661Sdavidcs 2474250661Sdavidcs return (0); 2475250661Sdavidcs} 2476250661Sdavidcs 2477250661Sdavidcs/* 2478250661Sdavidcs * Name: qla_init_rcv_cntxt 2479250661Sdavidcs * Function: Creates the Receive Context. 2480250661Sdavidcs */ 2481250661Sdavidcsstatic int 2482250661Sdavidcsqla_init_rcv_cntxt(qla_host_t *ha) 2483250661Sdavidcs{ 2484250661Sdavidcs q80_rq_rcv_cntxt_t *rcntxt; 2485250661Sdavidcs q80_rsp_rcv_cntxt_t *rcntxt_rsp; 2486250661Sdavidcs q80_stat_desc_t *sdesc; 2487250661Sdavidcs int i, j; 2488250661Sdavidcs qla_hw_t *hw = &ha->hw; 2489250661Sdavidcs device_t dev; 2490250661Sdavidcs uint32_t err; 2491250661Sdavidcs uint32_t rcntxt_sds_rings; 2492250661Sdavidcs uint32_t rcntxt_rds_rings; 2493284741Sdavidcs uint32_t max_idx; 2494250661Sdavidcs 2495250661Sdavidcs dev = ha->pci_dev; 2496250661Sdavidcs 2497250661Sdavidcs /* 2498250661Sdavidcs * Create Receive Context 2499250661Sdavidcs */ 2500250661Sdavidcs 2501250661Sdavidcs for (i = 0; i < hw->num_sds_rings; i++) { 2502250661Sdavidcs sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0]; 2503250661Sdavidcs 2504250661Sdavidcs for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) { 2505250661Sdavidcs sdesc->data[0] = 1ULL; 2506250661Sdavidcs sdesc->data[1] = 1ULL; 2507250661Sdavidcs } 2508250661Sdavidcs } 2509250661Sdavidcs 2510250661Sdavidcs rcntxt_sds_rings = hw->num_sds_rings; 
2511250661Sdavidcs if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) 2512250661Sdavidcs rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS; 2513250661Sdavidcs 2514250661Sdavidcs rcntxt_rds_rings = hw->num_rds_rings; 2515250661Sdavidcs 2516250661Sdavidcs if (hw->num_rds_rings > MAX_RDS_RING_SETS) 2517250661Sdavidcs rcntxt_rds_rings = MAX_RDS_RING_SETS; 2518250661Sdavidcs 2519250661Sdavidcs rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox; 2520250661Sdavidcs bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t))); 2521250661Sdavidcs 2522250661Sdavidcs rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT; 2523250661Sdavidcs rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2); 2524250661Sdavidcs rcntxt->count_version |= Q8_MBX_CMD_VERSION; 2525250661Sdavidcs 2526250661Sdavidcs rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW | 2527250661Sdavidcs Q8_RCV_CNTXT_CAP0_LRO | 2528250661Sdavidcs Q8_RCV_CNTXT_CAP0_HW_LRO | 2529250661Sdavidcs Q8_RCV_CNTXT_CAP0_RSS | 2530250661Sdavidcs Q8_RCV_CNTXT_CAP0_SGL_LRO; 2531250661Sdavidcs 2532284741Sdavidcs if (ha->hw.enable_9kb) 2533284741Sdavidcs rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO; 2534284741Sdavidcs else 2535284741Sdavidcs rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO; 2536284741Sdavidcs 2537250661Sdavidcs if (ha->hw.num_rds_rings > 1) { 2538250661Sdavidcs rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5); 2539250661Sdavidcs rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS; 2540250661Sdavidcs } else 2541250661Sdavidcs rcntxt->nrds_sets_rings = 0x1 | (1 << 5); 2542250661Sdavidcs 2543250661Sdavidcs rcntxt->nsds_rings = rcntxt_sds_rings; 2544250661Sdavidcs 2545250661Sdavidcs rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE; 2546250661Sdavidcs 2547250661Sdavidcs rcntxt->rcv_vpid = 0; 2548250661Sdavidcs 2549250661Sdavidcs for (i = 0; i < rcntxt_sds_rings; i++) { 2550250661Sdavidcs rcntxt->sds[i].paddr = 2551250661Sdavidcs qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr); 2552250661Sdavidcs rcntxt->sds[i].size = 2553250661Sdavidcs 
qla_host_to_le32(NUM_STATUS_DESCRIPTORS); 2554313070Sdavidcs rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]); 2555313070Sdavidcs rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0); 2556250661Sdavidcs } 2557250661Sdavidcs 2558250661Sdavidcs for (i = 0; i < rcntxt_rds_rings; i++) { 2559250661Sdavidcs rcntxt->rds[i].paddr_std = 2560250661Sdavidcs qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr); 2561284741Sdavidcs 2562284741Sdavidcs if (ha->hw.enable_9kb) 2563284741Sdavidcs rcntxt->rds[i].std_bsize = 2564284741Sdavidcs qla_host_to_le64(MJUM9BYTES); 2565284741Sdavidcs else 2566284741Sdavidcs rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); 2567284741Sdavidcs 2568250661Sdavidcs rcntxt->rds[i].std_nentries = 2569250661Sdavidcs qla_host_to_le32(NUM_RX_DESCRIPTORS); 2570250661Sdavidcs } 2571250661Sdavidcs 2572250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, 2573250661Sdavidcs (sizeof (q80_rq_rcv_cntxt_t) >> 2), 2574250661Sdavidcs ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) { 2575250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 2576250661Sdavidcs return (-1); 2577250661Sdavidcs } 2578250661Sdavidcs 2579250661Sdavidcs rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox; 2580250661Sdavidcs 2581250661Sdavidcs err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); 2582250661Sdavidcs 2583250661Sdavidcs if (err) { 2584250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2585250661Sdavidcs return (-1); 2586250661Sdavidcs } 2587250661Sdavidcs 2588250661Sdavidcs for (i = 0; i < rcntxt_sds_rings; i++) { 2589250661Sdavidcs hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i]; 2590250661Sdavidcs } 2591250661Sdavidcs 2592250661Sdavidcs for (i = 0; i < rcntxt_rds_rings; i++) { 2593250661Sdavidcs hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std; 2594250661Sdavidcs } 2595250661Sdavidcs 2596250661Sdavidcs hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id; 2597250661Sdavidcs 2598250661Sdavidcs ha->hw.flags.init_rx_cnxt = 1; 2599250661Sdavidcs 
2600250661Sdavidcs if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) { 2601284741Sdavidcs 2602284741Sdavidcs for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) { 2603284741Sdavidcs 2604284741Sdavidcs if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings) 2605284741Sdavidcs max_idx = MAX_RCNTXT_SDS_RINGS; 2606284741Sdavidcs else 2607284741Sdavidcs max_idx = hw->num_sds_rings - i; 2608284741Sdavidcs 2609284741Sdavidcs err = qla_add_rcv_rings(ha, i, max_idx); 2610284741Sdavidcs if (err) 2611284741Sdavidcs return -1; 2612284741Sdavidcs 2613284741Sdavidcs i += max_idx; 2614284741Sdavidcs } 2615250661Sdavidcs } 2616250661Sdavidcs 2617284741Sdavidcs if (hw->num_rds_rings > 1) { 2618284741Sdavidcs 2619284741Sdavidcs for (i = 0; i < hw->num_rds_rings; ) { 2620284741Sdavidcs 2621284741Sdavidcs if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings) 2622284741Sdavidcs max_idx = MAX_SDS_TO_RDS_MAP; 2623284741Sdavidcs else 2624284741Sdavidcs max_idx = hw->num_rds_rings - i; 2625284741Sdavidcs 2626284741Sdavidcs err = qla_map_sds_to_rds(ha, i, max_idx); 2627284741Sdavidcs if (err) 2628284741Sdavidcs return -1; 2629284741Sdavidcs 2630284741Sdavidcs i += max_idx; 2631284741Sdavidcs } 2632250661Sdavidcs } 2633250661Sdavidcs 2634250661Sdavidcs return (0); 2635250661Sdavidcs} 2636250661Sdavidcs 2637250661Sdavidcsstatic int 2638284741Sdavidcsqla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds) 2639250661Sdavidcs{ 2640250661Sdavidcs device_t dev = ha->pci_dev; 2641250661Sdavidcs q80_rq_add_rcv_rings_t *add_rcv; 2642250661Sdavidcs q80_rsp_add_rcv_rings_t *add_rcv_rsp; 2643250661Sdavidcs uint32_t i,j, err; 2644250661Sdavidcs qla_hw_t *hw = &ha->hw; 2645250661Sdavidcs 2646250661Sdavidcs add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox; 2647250661Sdavidcs bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t)); 2648250661Sdavidcs 2649250661Sdavidcs add_rcv->opcode = Q8_MBX_ADD_RX_RINGS; 2650250661Sdavidcs add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2); 2651250661Sdavidcs 
add_rcv->count_version |= Q8_MBX_CMD_VERSION; 2652250661Sdavidcs 2653284741Sdavidcs add_rcv->nrds_sets_rings = nsds | (1 << 5); 2654250661Sdavidcs add_rcv->nsds_rings = nsds; 2655250661Sdavidcs add_rcv->cntxt_id = hw->rcv_cntxt_id; 2656250661Sdavidcs 2657250661Sdavidcs for (i = 0; i < nsds; i++) { 2658250661Sdavidcs 2659250661Sdavidcs j = i + sds_idx; 2660250661Sdavidcs 2661250661Sdavidcs add_rcv->sds[i].paddr = 2662250661Sdavidcs qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr); 2663250661Sdavidcs 2664250661Sdavidcs add_rcv->sds[i].size = 2665250661Sdavidcs qla_host_to_le32(NUM_STATUS_DESCRIPTORS); 2666250661Sdavidcs 2667313070Sdavidcs add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]); 2668313070Sdavidcs add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0); 2669250661Sdavidcs 2670250661Sdavidcs } 2671313070Sdavidcs 2672284741Sdavidcs for (i = 0; (i < nsds); i++) { 2673250661Sdavidcs j = i + sds_idx; 2674284741Sdavidcs 2675250661Sdavidcs add_rcv->rds[i].paddr_std = 2676250661Sdavidcs qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr); 2677284741Sdavidcs 2678284741Sdavidcs if (ha->hw.enable_9kb) 2679284741Sdavidcs add_rcv->rds[i].std_bsize = 2680284741Sdavidcs qla_host_to_le64(MJUM9BYTES); 2681284741Sdavidcs else 2682284741Sdavidcs add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES); 2683284741Sdavidcs 2684250661Sdavidcs add_rcv->rds[i].std_nentries = 2685250661Sdavidcs qla_host_to_le32(NUM_RX_DESCRIPTORS); 2686250661Sdavidcs } 2687250661Sdavidcs 2688250661Sdavidcs 2689250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)add_rcv, 2690250661Sdavidcs (sizeof (q80_rq_add_rcv_rings_t) >> 2), 2691250661Sdavidcs ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) { 2692250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 2693250661Sdavidcs return (-1); 2694250661Sdavidcs } 2695250661Sdavidcs 2696250661Sdavidcs add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox; 2697250661Sdavidcs 2698250661Sdavidcs err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status); 
2699250661Sdavidcs 2700250661Sdavidcs if (err) { 2701250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2702250661Sdavidcs return (-1); 2703250661Sdavidcs } 2704250661Sdavidcs 2705284741Sdavidcs for (i = 0; i < nsds; i++) { 2706284741Sdavidcs hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i]; 2707250661Sdavidcs } 2708284741Sdavidcs 2709284741Sdavidcs for (i = 0; i < nsds; i++) { 2710284741Sdavidcs hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std; 2711250661Sdavidcs } 2712284741Sdavidcs 2713250661Sdavidcs return (0); 2714250661Sdavidcs} 2715250661Sdavidcs 2716250661Sdavidcs/* 2717250661Sdavidcs * Name: qla_del_rcv_cntxt 2718250661Sdavidcs * Function: Destroys the Receive Context. 2719250661Sdavidcs */ 2720250661Sdavidcsstatic void 2721250661Sdavidcsqla_del_rcv_cntxt(qla_host_t *ha) 2722250661Sdavidcs{ 2723250661Sdavidcs device_t dev = ha->pci_dev; 2724250661Sdavidcs q80_rcv_cntxt_destroy_t *rcntxt; 2725250661Sdavidcs q80_rcv_cntxt_destroy_rsp_t *rcntxt_rsp; 2726250661Sdavidcs uint32_t err; 2727250661Sdavidcs uint8_t bcast_mac[6]; 2728250661Sdavidcs 2729250661Sdavidcs if (!ha->hw.flags.init_rx_cnxt) 2730250661Sdavidcs return; 2731250661Sdavidcs 2732250661Sdavidcs if (qla_hw_del_all_mcast(ha)) 2733250661Sdavidcs return; 2734250661Sdavidcs 2735250661Sdavidcs if (ha->hw.flags.bcast_mac) { 2736250661Sdavidcs 2737250661Sdavidcs bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF; 2738250661Sdavidcs bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF; 2739250661Sdavidcs 2740307524Sdavidcs if (qla_config_mac_addr(ha, bcast_mac, 0, 1)) 2741250661Sdavidcs return; 2742250661Sdavidcs ha->hw.flags.bcast_mac = 0; 2743250661Sdavidcs 2744250661Sdavidcs } 2745250661Sdavidcs 2746250661Sdavidcs if (ha->hw.flags.unicast_mac) { 2747307524Sdavidcs if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1)) 2748250661Sdavidcs return; 2749250661Sdavidcs ha->hw.flags.unicast_mac = 0; 2750250661Sdavidcs } 2751250661Sdavidcs 
2752250661Sdavidcs rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox; 2753250661Sdavidcs bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t))); 2754250661Sdavidcs 2755250661Sdavidcs rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT; 2756250661Sdavidcs rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2); 2757250661Sdavidcs rcntxt->count_version |= Q8_MBX_CMD_VERSION; 2758250661Sdavidcs 2759250661Sdavidcs rcntxt->cntxt_id = ha->hw.rcv_cntxt_id; 2760250661Sdavidcs 2761250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)rcntxt, 2762250661Sdavidcs (sizeof (q80_rcv_cntxt_destroy_t) >> 2), 2763250661Sdavidcs ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) { 2764250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 2765250661Sdavidcs return; 2766250661Sdavidcs } 2767250661Sdavidcs rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox; 2768250661Sdavidcs 2769250661Sdavidcs err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status); 2770250661Sdavidcs 2771250661Sdavidcs if (err) { 2772250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2773250661Sdavidcs } 2774250661Sdavidcs 2775250661Sdavidcs ha->hw.flags.init_rx_cnxt = 0; 2776250661Sdavidcs return; 2777250661Sdavidcs} 2778250661Sdavidcs 2779250661Sdavidcs/* 2780250661Sdavidcs * Name: qla_init_xmt_cntxt 2781250661Sdavidcs * Function: Creates the Transmit Context. 
2782250661Sdavidcs */ 2783250661Sdavidcsstatic int 2784250661Sdavidcsqla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) 2785250661Sdavidcs{ 2786250661Sdavidcs device_t dev; 2787250661Sdavidcs qla_hw_t *hw = &ha->hw; 2788250661Sdavidcs q80_rq_tx_cntxt_t *tcntxt; 2789250661Sdavidcs q80_rsp_tx_cntxt_t *tcntxt_rsp; 2790250661Sdavidcs uint32_t err; 2791250661Sdavidcs qla_hw_tx_cntxt_t *hw_tx_cntxt; 2792313070Sdavidcs uint32_t intr_idx; 2793250661Sdavidcs 2794250661Sdavidcs hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; 2795250661Sdavidcs 2796250661Sdavidcs dev = ha->pci_dev; 2797250661Sdavidcs 2798250661Sdavidcs /* 2799250661Sdavidcs * Create Transmit Context 2800250661Sdavidcs */ 2801250661Sdavidcs tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox; 2802250661Sdavidcs bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t))); 2803250661Sdavidcs 2804250661Sdavidcs tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT; 2805250661Sdavidcs tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2); 2806250661Sdavidcs tcntxt->count_version |= Q8_MBX_CMD_VERSION; 2807250661Sdavidcs 2808313070Sdavidcs intr_idx = txr_idx; 2809313070Sdavidcs 2810284741Sdavidcs#ifdef QL_ENABLE_ISCSI_TLV 2811284741Sdavidcs 2812284741Sdavidcs tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO | 2813284741Sdavidcs Q8_TX_CNTXT_CAP0_TC; 2814284741Sdavidcs 2815284741Sdavidcs if (txr_idx >= (ha->hw.num_tx_rings >> 1)) { 2816284741Sdavidcs tcntxt->traffic_class = 1; 2817284741Sdavidcs } 2818284741Sdavidcs 2819313070Sdavidcs intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1); 2820313070Sdavidcs 2821284741Sdavidcs#else 2822250661Sdavidcs tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO; 2823250661Sdavidcs 2824284741Sdavidcs#endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 2825284741Sdavidcs 2826250661Sdavidcs tcntxt->ntx_rings = 1; 2827250661Sdavidcs 2828250661Sdavidcs tcntxt->tx_ring[0].paddr = 2829250661Sdavidcs qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr); 2830250661Sdavidcs tcntxt->tx_ring[0].tx_consumer = 2831250661Sdavidcs 
qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr); 2832250661Sdavidcs tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS); 2833250661Sdavidcs 2834313070Sdavidcs tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]); 2835250661Sdavidcs tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0); 2836250661Sdavidcs 2837250661Sdavidcs hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS; 2838250661Sdavidcs hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0; 2839250661Sdavidcs 2840250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, 2841250661Sdavidcs (sizeof (q80_rq_tx_cntxt_t) >> 2), 2842250661Sdavidcs ha->hw.mbox, 2843250661Sdavidcs (sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) { 2844250661Sdavidcs device_printf(dev, "%s: failed0\n", __func__); 2845250661Sdavidcs return (-1); 2846250661Sdavidcs } 2847250661Sdavidcs tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox; 2848250661Sdavidcs 2849250661Sdavidcs err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); 2850250661Sdavidcs 2851250661Sdavidcs if (err) { 2852250661Sdavidcs device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 2853250661Sdavidcs return -1; 2854250661Sdavidcs } 2855250661Sdavidcs 2856250661Sdavidcs hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index; 2857250661Sdavidcs hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id; 2858250661Sdavidcs 2859284741Sdavidcs if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0)) 2860284741Sdavidcs return (-1); 2861284741Sdavidcs 2862250661Sdavidcs return (0); 2863250661Sdavidcs} 2864250661Sdavidcs 2865250661Sdavidcs 2866250661Sdavidcs/* 2867250661Sdavidcs * Name: qla_del_xmt_cntxt 2868250661Sdavidcs * Function: Destroys the Transmit Context. 
 */
/*
 * Name: qla_del_xmt_cntxt_i
 * Function: Issues the DESTROY_TX_CNTXT mailbox command for the transmit
 *	context associated with tx ring txr_idx.
 */
static int
qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
{
	device_t dev = ha->pci_dev;
	q80_tx_cntxt_destroy_t *tcntxt;
	q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp;
	uint32_t err;

	tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox;
	bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t)));

	tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT;
	tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2);
	tcntxt->count_version |= Q8_MBX_CMD_VERSION;

	tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
		(sizeof (q80_tx_cntxt_destroy_t) >> 2),
		ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return (0);
}

/*
 * Name: qla_del_xmt_cntxt
 * Function: Destroys all Transmit Contexts; stops at the first ring whose
 *	destroy fails.
 */
static void
qla_del_xmt_cntxt(qla_host_t *ha)
{
	uint32_t i;

	if (!ha->hw.flags.init_tx_cnxt)
		return;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		if (qla_del_xmt_cntxt_i(ha, i))
			break;
	}
	ha->hw.flags.init_tx_cnxt = 0;
}

/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates one Transmit Context per tx ring; on failure, unwinds
 *	the contexts already created and returns -1.
 */
static int
qla_init_xmt_cntxt(qla_host_t *ha)
{
	uint32_t i, j;

	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		if (qla_init_xmt_cntxt_i(ha, i) != 0) {
			for (j = 0; j < i; j++)
				qla_del_xmt_cntxt_i(ha, j);
			return (-1);
		}
	}
	ha->hw.flags.init_tx_cnxt = 1;
	return (0);
}

/*
 * Name: qla_hw_all_mcast
 * Function: Programs (add_mcast != 0) or removes (add_mcast == 0) every
 *	non-zero entry of the cached multicast table ha->hw.mcast[] into the
 *	hardware filter, batching up to Q8_MAX_MAC_ADDRS addresses per
 *	qla_config_mac_addr() call via the ha->hw.mac_addr_arr staging
 *	buffer.
 */
static int
qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast)
{
	int i, nmcast;
	uint32_t count = 0;
	uint8_t *mcast;

	nmcast = ha->hw.nmcast;

	QL_DPRINT2(ha, (ha->pci_dev,
		"%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast));

	mcast = ha->hw.mac_addr_arr;
	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));

	/* scan the cache; stop early once nmcast live entries were seen */
	for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) {
		if ((ha->hw.mcast[i].addr[0] != 0) ||
			(ha->hw.mcast[i].addr[1] != 0) ||
			(ha->hw.mcast[i].addr[2] != 0) ||
			(ha->hw.mcast[i].addr[3] != 0) ||
			(ha->hw.mcast[i].addr[4] != 0) ||
			(ha->hw.mcast[i].addr[5] != 0)) {

			bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN);
			mcast = mcast + ETHER_ADDR_LEN;
			count++;

			/* staging buffer full: flush this batch to hw */
			if (count == Q8_MAX_MAC_ADDRS) {
				if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
					add_mcast, count)) {
					device_printf(ha->pci_dev,
						"%s: failed\n", __func__);
					return (-1);
				}

				count = 0;
				mcast = ha->hw.mac_addr_arr;
				memset(mcast, 0,
					(Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
			}

			nmcast--;
		}
	}

	/* flush any partially-filled final batch */
	if (count) {
		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast,
			count)) {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
			return (-1);
		}
	}
	QL_DPRINT2(ha, (ha->pci_dev,
		"%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast));

	return 0;
}

/*
 * Name: qla_hw_add_all_mcast
 * Function: Programs all cached multicast addresses into the hardware.
 */
static int
qla_hw_add_all_mcast(qla_host_t *ha)
{
	int ret;

	ret = qla_hw_all_mcast(ha, 1);

	return (ret);
}

/*
 * Name: qla_hw_del_all_mcast
 * Function: Removes all cached multicast addresses from the hardware and
 *	clears the cache (even if the hardware removal failed).
 */
static int
qla_hw_del_all_mcast(qla_host_t *ha)
{
	int ret;

	ret = qla_hw_all_mcast(ha, 0);

	bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS));
	ha->hw.nmcast = 0;

	return (ret);
}

/*
 * Name: qla_hw_mac_addr_present
 * Function: Returns 0 if the MAC address mta is already in the cached
 *	multicast table, -1 otherwise.
 */
static int
qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0)
			return (0); /* its been already added */
	}
	return (-1);
}

/*
 * Name: qla_hw_add_mcast
 * Function: Copies nmcast consecutive 6-byte MAC addresses from mta into
 *	free (all-zero) slots of the cached multicast table, updating
 *	ha->hw.nmcast. Addresses beyond the available free slots are
 *	silently dropped.
 *	NOTE(review): mixes Q8_MAC_ADDR_LEN (bcopy) and ETHER_ADDR_LEN
 *	(pointer advance) -- presumably both are 6; confirm in ql_hw.h.
 */
static int
qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {

		if ((ha->hw.mcast[i].addr[0] == 0) &&
			(ha->hw.mcast[i].addr[1] == 0) &&
			(ha->hw.mcast[i].addr[2] == 0) &&
			(ha->hw.mcast[i].addr[3] == 0) &&
			(ha->hw.mcast[i].addr[4] == 0) &&
			(ha->hw.mcast[i].addr[5] == 0)) {

			bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN);
			ha->hw.nmcast++;

			mta = mta + ETHER_ADDR_LEN;
			nmcast--;

			if (nmcast == 0)
				break;
		}

	}
	return 0;
}

/*
 * Name: qla_hw_del_mcast
 * Function: Zeroes out the cached-table entries matching the nmcast
 *	consecutive MAC addresses starting at mta, decrementing
 *	ha->hw.nmcast for each match.
 */
static int
qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast)
{
	int i;

	for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) {
		if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) {

			ha->hw.mcast[i].addr[0] = 0;
			ha->hw.mcast[i].addr[1] = 0;
			ha->hw.mcast[i].addr[2] = 0;
			ha->hw.mcast[i].addr[3] = 0;
			ha->hw.mcast[i].addr[4] = 0;
			ha->hw.mcast[i].addr[5] = 0;

			ha->hw.nmcast--;

			mta = mta + ETHER_ADDR_LEN;
			nmcast--;

			if (nmcast == 0)
				break;
		}
	}
	return 0;
}

/*
 * Name: ql_hw_set_multi
 * Function: Sets the Multicast Addresses provided by the host O.S into the
 * hardware (for the given interface)
 * mcast_addr is a packed array of mcnt 6-byte addresses; add_mac selects
 * add (non-zero) vs delete. Addresses are batched into
 * ha->hw.mac_addr_arr, Q8_MAX_MAC_ADDRS at a time, and the cached table
 * is updated after each successful hardware call.
 */
int
ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt,
	uint32_t add_mac)
{
	uint8_t *mta = mcast_addr;
	int i;
	int ret = 0;
	uint32_t count = 0;
	uint8_t *mcast;

	mcast = ha->hw.mac_addr_arr;
	memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));

	for (i = 0; i < mcnt; i++) {
		/* skip all-zero entries */
		if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) {
			if (add_mac) {
				/* only stage addresses not already cached */
				if (qla_hw_mac_addr_present(ha, mta) != 0) {
					bcopy(mta, mcast, ETHER_ADDR_LEN);
					mcast = mcast + ETHER_ADDR_LEN;
					count++;
				}
			} else {
				/* only stage addresses that are cached */
				if (qla_hw_mac_addr_present(ha, mta) == 0) {
					bcopy(mta, mcast, ETHER_ADDR_LEN);
					mcast = mcast + ETHER_ADDR_LEN;
					count++;
				}
			}
		}
		/* staging buffer full: program hw, then update the cache */
		if (count == Q8_MAX_MAC_ADDRS) {
			if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr,
				add_mac, count)) {
				device_printf(ha->pci_dev, "%s: failed\n",
					__func__);
				return (-1);
			}

			if (add_mac) {
				qla_hw_add_mcast(ha, ha->hw.mac_addr_arr,
					count);
			} else {
				qla_hw_del_mcast(ha, ha->hw.mac_addr_arr,
					count);
			}

			count = 0;
			mcast = ha->hw.mac_addr_arr;
			memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN));
		}

		mta += Q8_MAC_ADDR_LEN;
	}

	/* flush any partially-filled final batch */
	if (count) {
		if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac,
			count)) {
			device_printf(ha->pci_dev, "%s: failed\n", __func__);
			return (-1);
		}
		if (add_mac) {
			qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count);
		} else {
			qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count);
		}
	}

	return (ret);
}

/*
 * Name: ql_hw_tx_done_locked
 * Function: Handle Transmit Completions
 * Walks the tx ring for txr_idx from the last-processed index up to the
 * consumer index reported by firmware, unmapping and freeing each
 * completed mbuf and crediting the freed descriptors back to txr_free.
 * NOTE(review): the _locked suffix suggests the caller holds the tx
 * lock -- confirm against the callers in ql_os.c.
 */
void
ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx)
{
	qla_tx_buf_t *txb;
	qla_hw_t *hw = &ha->hw;
	uint32_t comp_idx, comp_count = 0;
	qla_hw_tx_cntxt_t *hw_tx_cntxt;

	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];

	/* retrieve index of last entry in tx ring completed */
	comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons));

	while (comp_idx != hw_tx_cntxt->txr_comp) {

		txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp];

		/* advance with wrap-around at NUM_TX_DESCRIPTORS */
		hw_tx_cntxt->txr_comp++;
		if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS)
			hw_tx_cntxt->txr_comp = 0;

		comp_count++;

		if (txb->m_head) {
			if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1);

			bus_dmamap_sync(ha->tx_tag, txb->map,
				BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ha->tx_tag, txb->map);
			m_freem(txb->m_head);

			txb->m_head = NULL;
		}
	}

	hw_tx_cntxt->txr_free += comp_count;
	return;
}

void
ql_update_link_state(qla_host_t *ha)
{
	uint32_t link_state;
	uint32_t prev_link_state;

	if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		ha->hw.link_up = 0;
		return;
	}
	link_state = READ_REG32(ha, Q8_LINK_STATE);

	prev_link_state =  ha->hw.link_up;

	/* link-state register carries a nibble per pci function */
	if (ha->pci_func == 0)
		ha->hw.link_up = (((link_state & 0xF) == 1)? 1 : 0);
	else
		ha->hw.link_up = ((((link_state >> 4)& 0xF) == 1)?
1 : 0); 3218250661Sdavidcs 3219250661Sdavidcs if (prev_link_state != ha->hw.link_up) { 3220250661Sdavidcs if (ha->hw.link_up) { 3221250661Sdavidcs if_link_state_change(ha->ifp, LINK_STATE_UP); 3222250661Sdavidcs } else { 3223250661Sdavidcs if_link_state_change(ha->ifp, LINK_STATE_DOWN); 3224250661Sdavidcs } 3225250661Sdavidcs } 3226250661Sdavidcs return; 3227250661Sdavidcs} 3228250661Sdavidcs 3229250661Sdavidcsvoid 3230250661Sdavidcsql_hw_stop_rcv(qla_host_t *ha) 3231250661Sdavidcs{ 3232250661Sdavidcs int i, done, count = 100; 3233250661Sdavidcs 3234305487Sdavidcs ha->flags.stop_rcv = 1; 3235305487Sdavidcs 3236284741Sdavidcs while (count) { 3237250661Sdavidcs done = 1; 3238250661Sdavidcs for (i = 0; i < ha->hw.num_sds_rings; i++) { 3239250661Sdavidcs if (ha->hw.sds[i].rcv_active) 3240250661Sdavidcs done = 0; 3241250661Sdavidcs } 3242250661Sdavidcs if (done) 3243250661Sdavidcs break; 3244250661Sdavidcs else 3245250661Sdavidcs qla_mdelay(__func__, 10); 3246284741Sdavidcs count--; 3247250661Sdavidcs } 3248250661Sdavidcs if (!count) 3249250661Sdavidcs device_printf(ha->pci_dev, "%s: Counter expired.\n", __func__); 3250250661Sdavidcs 3251250661Sdavidcs return; 3252250661Sdavidcs} 3253250661Sdavidcs 3254250661Sdavidcsint 3255250661Sdavidcsql_hw_check_health(qla_host_t *ha) 3256250661Sdavidcs{ 3257250661Sdavidcs uint32_t val; 3258250661Sdavidcs 3259250661Sdavidcs ha->hw.health_count++; 3260250661Sdavidcs 3261250661Sdavidcs if (ha->hw.health_count < 1000) 3262250661Sdavidcs return 0; 3263250661Sdavidcs 3264250661Sdavidcs ha->hw.health_count = 0; 3265250661Sdavidcs 3266250661Sdavidcs val = READ_REG32(ha, Q8_ASIC_TEMPERATURE); 3267250661Sdavidcs 3268250661Sdavidcs if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) || 3269250661Sdavidcs (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) { 3270250661Sdavidcs device_printf(ha->pci_dev, "%s: Temperature Alert [0x%08x]\n", 3271250661Sdavidcs __func__, val); 3272250661Sdavidcs return -1; 3273250661Sdavidcs } 3274250661Sdavidcs 
3275250661Sdavidcs val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT); 3276250661Sdavidcs 3277250661Sdavidcs if ((val != ha->hw.hbeat_value) && 3278289635Sdavidcs (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) { 3279250661Sdavidcs ha->hw.hbeat_value = val; 3280250661Sdavidcs return 0; 3281250661Sdavidcs } 3282250661Sdavidcs device_printf(ha->pci_dev, "%s: Heartbeat Failue [0x%08x]\n", 3283250661Sdavidcs __func__, val); 3284250661Sdavidcs 3285250661Sdavidcs return -1; 3286250661Sdavidcs} 3287250661Sdavidcs 3288250661Sdavidcsstatic int 3289284741Sdavidcsqla_init_nic_func(qla_host_t *ha) 3290284741Sdavidcs{ 3291284741Sdavidcs device_t dev; 3292284741Sdavidcs q80_init_nic_func_t *init_nic; 3293284741Sdavidcs q80_init_nic_func_rsp_t *init_nic_rsp; 3294284741Sdavidcs uint32_t err; 3295284741Sdavidcs 3296284741Sdavidcs dev = ha->pci_dev; 3297284741Sdavidcs 3298284741Sdavidcs init_nic = (q80_init_nic_func_t *)ha->hw.mbox; 3299284741Sdavidcs bzero(init_nic, sizeof(q80_init_nic_func_t)); 3300284741Sdavidcs 3301284741Sdavidcs init_nic->opcode = Q8_MBX_INIT_NIC_FUNC; 3302284741Sdavidcs init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2); 3303284741Sdavidcs init_nic->count_version |= Q8_MBX_CMD_VERSION; 3304284741Sdavidcs 3305284741Sdavidcs init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN; 3306284741Sdavidcs init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN; 3307284741Sdavidcs init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN; 3308284741Sdavidcs 3309284741Sdavidcs//qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t)); 3310284741Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)init_nic, 3311284741Sdavidcs (sizeof (q80_init_nic_func_t) >> 2), 3312284741Sdavidcs ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) { 3313284741Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3314284741Sdavidcs return -1; 3315284741Sdavidcs } 3316284741Sdavidcs 3317284741Sdavidcs init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox; 3318284741Sdavidcs// qla_dump_buf8(ha, 
__func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t)); 3319284741Sdavidcs 3320284741Sdavidcs err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status); 3321284741Sdavidcs 3322284741Sdavidcs if (err) { 3323284741Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3324284741Sdavidcs } 3325284741Sdavidcs 3326284741Sdavidcs return 0; 3327284741Sdavidcs} 3328284741Sdavidcs 3329284741Sdavidcsstatic int 3330284741Sdavidcsqla_stop_nic_func(qla_host_t *ha) 3331284741Sdavidcs{ 3332284741Sdavidcs device_t dev; 3333284741Sdavidcs q80_stop_nic_func_t *stop_nic; 3334284741Sdavidcs q80_stop_nic_func_rsp_t *stop_nic_rsp; 3335284741Sdavidcs uint32_t err; 3336284741Sdavidcs 3337284741Sdavidcs dev = ha->pci_dev; 3338284741Sdavidcs 3339284741Sdavidcs stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox; 3340284741Sdavidcs bzero(stop_nic, sizeof(q80_stop_nic_func_t)); 3341284741Sdavidcs 3342284741Sdavidcs stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC; 3343284741Sdavidcs stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2); 3344284741Sdavidcs stop_nic->count_version |= Q8_MBX_CMD_VERSION; 3345284741Sdavidcs 3346284741Sdavidcs stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN; 3347284741Sdavidcs stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN; 3348284741Sdavidcs 3349284741Sdavidcs//qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t)); 3350284741Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)stop_nic, 3351284741Sdavidcs (sizeof (q80_stop_nic_func_t) >> 2), 3352284741Sdavidcs ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) { 3353284741Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3354284741Sdavidcs return -1; 3355284741Sdavidcs } 3356284741Sdavidcs 3357284741Sdavidcs stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox; 3358284741Sdavidcs//qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_ t)); 3359284741Sdavidcs 3360284741Sdavidcs err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status); 3361284741Sdavidcs 3362284741Sdavidcs 
if (err) { 3363284741Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3364284741Sdavidcs } 3365284741Sdavidcs 3366284741Sdavidcs return 0; 3367284741Sdavidcs} 3368284741Sdavidcs 3369284741Sdavidcsstatic int 3370284741Sdavidcsqla_query_fw_dcbx_caps(qla_host_t *ha) 3371284741Sdavidcs{ 3372284741Sdavidcs device_t dev; 3373284741Sdavidcs q80_query_fw_dcbx_caps_t *fw_dcbx; 3374284741Sdavidcs q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp; 3375284741Sdavidcs uint32_t err; 3376284741Sdavidcs 3377284741Sdavidcs dev = ha->pci_dev; 3378284741Sdavidcs 3379284741Sdavidcs fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox; 3380284741Sdavidcs bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t)); 3381284741Sdavidcs 3382284741Sdavidcs fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS; 3383284741Sdavidcs fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2); 3384284741Sdavidcs fw_dcbx->count_version |= Q8_MBX_CMD_VERSION; 3385284741Sdavidcs 3386284741Sdavidcs ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t)); 3387284741Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx, 3388284741Sdavidcs (sizeof (q80_query_fw_dcbx_caps_t) >> 2), 3389284741Sdavidcs ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) { 3390284741Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3391284741Sdavidcs return -1; 3392284741Sdavidcs } 3393284741Sdavidcs 3394284741Sdavidcs fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox; 3395284741Sdavidcs ql_dump_buf8(ha, __func__, fw_dcbx_rsp, 3396284741Sdavidcs sizeof (q80_query_fw_dcbx_caps_rsp_t)); 3397284741Sdavidcs 3398284741Sdavidcs err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status); 3399284741Sdavidcs 3400284741Sdavidcs if (err) { 3401284741Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3402284741Sdavidcs } 3403284741Sdavidcs 3404284741Sdavidcs return 0; 3405284741Sdavidcs} 3406284741Sdavidcs 3407284741Sdavidcsstatic int 3408284741Sdavidcsqla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, 
uint32_t aen_mb2, 3409284741Sdavidcs uint32_t aen_mb3, uint32_t aen_mb4) 3410284741Sdavidcs{ 3411284741Sdavidcs device_t dev; 3412284741Sdavidcs q80_idc_ack_t *idc_ack; 3413284741Sdavidcs q80_idc_ack_rsp_t *idc_ack_rsp; 3414284741Sdavidcs uint32_t err; 3415284741Sdavidcs int count = 300; 3416284741Sdavidcs 3417284741Sdavidcs dev = ha->pci_dev; 3418284741Sdavidcs 3419284741Sdavidcs idc_ack = (q80_idc_ack_t *)ha->hw.mbox; 3420284741Sdavidcs bzero(idc_ack, sizeof(q80_idc_ack_t)); 3421284741Sdavidcs 3422284741Sdavidcs idc_ack->opcode = Q8_MBX_IDC_ACK; 3423284741Sdavidcs idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2); 3424284741Sdavidcs idc_ack->count_version |= Q8_MBX_CMD_VERSION; 3425284741Sdavidcs 3426284741Sdavidcs idc_ack->aen_mb1 = aen_mb1; 3427284741Sdavidcs idc_ack->aen_mb2 = aen_mb2; 3428284741Sdavidcs idc_ack->aen_mb3 = aen_mb3; 3429284741Sdavidcs idc_ack->aen_mb4 = aen_mb4; 3430284741Sdavidcs 3431284741Sdavidcs ha->hw.imd_compl= 0; 3432284741Sdavidcs 3433284741Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)idc_ack, 3434284741Sdavidcs (sizeof (q80_idc_ack_t) >> 2), 3435284741Sdavidcs ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) { 3436284741Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3437284741Sdavidcs return -1; 3438284741Sdavidcs } 3439284741Sdavidcs 3440284741Sdavidcs idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox; 3441284741Sdavidcs 3442284741Sdavidcs err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status); 3443284741Sdavidcs 3444284741Sdavidcs if (err) { 3445284741Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3446284741Sdavidcs return(-1); 3447284741Sdavidcs } 3448284741Sdavidcs 3449284741Sdavidcs while (count && !ha->hw.imd_compl) { 3450284741Sdavidcs qla_mdelay(__func__, 100); 3451284741Sdavidcs count--; 3452284741Sdavidcs } 3453284741Sdavidcs 3454284741Sdavidcs if (!count) 3455284741Sdavidcs return -1; 3456284741Sdavidcs else 3457284741Sdavidcs device_printf(dev, "%s: count %d\n", __func__, count); 
3458284741Sdavidcs 3459284741Sdavidcs return (0); 3460284741Sdavidcs} 3461284741Sdavidcs 3462284741Sdavidcsstatic int 3463284741Sdavidcsqla_set_port_config(qla_host_t *ha, uint32_t cfg_bits) 3464284741Sdavidcs{ 3465284741Sdavidcs device_t dev; 3466284741Sdavidcs q80_set_port_cfg_t *pcfg; 3467284741Sdavidcs q80_set_port_cfg_rsp_t *pfg_rsp; 3468284741Sdavidcs uint32_t err; 3469284741Sdavidcs int count = 300; 3470284741Sdavidcs 3471284741Sdavidcs dev = ha->pci_dev; 3472284741Sdavidcs 3473284741Sdavidcs pcfg = (q80_set_port_cfg_t *)ha->hw.mbox; 3474284741Sdavidcs bzero(pcfg, sizeof(q80_set_port_cfg_t)); 3475284741Sdavidcs 3476284741Sdavidcs pcfg->opcode = Q8_MBX_SET_PORT_CONFIG; 3477284741Sdavidcs pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2); 3478284741Sdavidcs pcfg->count_version |= Q8_MBX_CMD_VERSION; 3479284741Sdavidcs 3480284741Sdavidcs pcfg->cfg_bits = cfg_bits; 3481284741Sdavidcs 3482284741Sdavidcs device_printf(dev, "%s: cfg_bits" 3483284741Sdavidcs " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" 3484284741Sdavidcs " [0x%x, 0x%x, 0x%x]\n", __func__, 3485284741Sdavidcs ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), 3486284741Sdavidcs ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), 3487284741Sdavidcs ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 
1: 0)); 3488284741Sdavidcs 3489284741Sdavidcs ha->hw.imd_compl= 0; 3490284741Sdavidcs 3491284741Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)pcfg, 3492284741Sdavidcs (sizeof (q80_set_port_cfg_t) >> 2), 3493284741Sdavidcs ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) { 3494284741Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3495284741Sdavidcs return -1; 3496284741Sdavidcs } 3497284741Sdavidcs 3498284741Sdavidcs pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox; 3499284741Sdavidcs 3500284741Sdavidcs err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status); 3501284741Sdavidcs 3502284741Sdavidcs if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) { 3503284741Sdavidcs while (count && !ha->hw.imd_compl) { 3504284741Sdavidcs qla_mdelay(__func__, 100); 3505284741Sdavidcs count--; 3506284741Sdavidcs } 3507284741Sdavidcs if (count) { 3508284741Sdavidcs device_printf(dev, "%s: count %d\n", __func__, count); 3509284741Sdavidcs 3510284741Sdavidcs err = 0; 3511284741Sdavidcs } 3512284741Sdavidcs } 3513284741Sdavidcs 3514284741Sdavidcs if (err) { 3515284741Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3516284741Sdavidcs return(-1); 3517284741Sdavidcs } 3518284741Sdavidcs 3519284741Sdavidcs return (0); 3520284741Sdavidcs} 3521284741Sdavidcs 3522284741Sdavidcs 3523284741Sdavidcsstatic int 3524250661Sdavidcsqla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size) 3525250661Sdavidcs{ 3526250661Sdavidcs uint32_t err; 3527250661Sdavidcs device_t dev = ha->pci_dev; 3528250661Sdavidcs q80_config_md_templ_size_t *md_size; 3529250661Sdavidcs q80_config_md_templ_size_rsp_t *md_size_rsp; 3530250661Sdavidcs 3531305487Sdavidcs#ifndef QL_LDFLASH_FW 3532284741Sdavidcs 3533305487Sdavidcs ql_minidump_template_hdr_t *hdr; 3534305487Sdavidcs 3535305487Sdavidcs hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump; 3536305487Sdavidcs *size = hdr->size_of_template; 3537284741Sdavidcs return (0); 3538284741Sdavidcs 3539284741Sdavidcs#endif /* #ifdef QL_LDFLASH_FW */ 3540284741Sdavidcs 
3541250661Sdavidcs md_size = (q80_config_md_templ_size_t *) ha->hw.mbox; 3542250661Sdavidcs bzero(md_size, sizeof(q80_config_md_templ_size_t)); 3543250661Sdavidcs 3544250661Sdavidcs md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE; 3545250661Sdavidcs md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2); 3546250661Sdavidcs md_size->count_version |= Q8_MBX_CMD_VERSION; 3547250661Sdavidcs 3548250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *) md_size, 3549250661Sdavidcs (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox, 3550250661Sdavidcs (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) { 3551250661Sdavidcs 3552250661Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3553250661Sdavidcs 3554250661Sdavidcs return (-1); 3555250661Sdavidcs } 3556250661Sdavidcs 3557250661Sdavidcs md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox; 3558250661Sdavidcs 3559250661Sdavidcs err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status); 3560250661Sdavidcs 3561250661Sdavidcs if (err) { 3562250661Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3563250661Sdavidcs return(-1); 3564250661Sdavidcs } 3565250661Sdavidcs 3566250661Sdavidcs *size = md_size_rsp->templ_size; 3567250661Sdavidcs 3568250661Sdavidcs return (0); 3569250661Sdavidcs} 3570250661Sdavidcs 3571250661Sdavidcsstatic int 3572284741Sdavidcsqla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits) 3573284741Sdavidcs{ 3574284741Sdavidcs device_t dev; 3575284741Sdavidcs q80_get_port_cfg_t *pcfg; 3576284741Sdavidcs q80_get_port_cfg_rsp_t *pcfg_rsp; 3577284741Sdavidcs uint32_t err; 3578284741Sdavidcs 3579284741Sdavidcs dev = ha->pci_dev; 3580284741Sdavidcs 3581284741Sdavidcs pcfg = (q80_get_port_cfg_t *)ha->hw.mbox; 3582284741Sdavidcs bzero(pcfg, sizeof(q80_get_port_cfg_t)); 3583284741Sdavidcs 3584284741Sdavidcs pcfg->opcode = Q8_MBX_GET_PORT_CONFIG; 3585284741Sdavidcs pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2); 3586284741Sdavidcs pcfg->count_version |= Q8_MBX_CMD_VERSION; 
3587284741Sdavidcs 3588284741Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *)pcfg, 3589284741Sdavidcs (sizeof (q80_get_port_cfg_t) >> 2), 3590284741Sdavidcs ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) { 3591284741Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3592284741Sdavidcs return -1; 3593284741Sdavidcs } 3594284741Sdavidcs 3595284741Sdavidcs pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox; 3596284741Sdavidcs 3597284741Sdavidcs err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status); 3598284741Sdavidcs 3599284741Sdavidcs if (err) { 3600284741Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3601284741Sdavidcs return(-1); 3602284741Sdavidcs } 3603284741Sdavidcs 3604284741Sdavidcs device_printf(dev, "%s: [cfg_bits, port type]" 3605284741Sdavidcs " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" 3606284741Sdavidcs " [0x%x, 0x%x, 0x%x]\n", __func__, 3607284741Sdavidcs pcfg_rsp->cfg_bits, pcfg_rsp->phys_port_type, 3608284741Sdavidcs ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), 3609284741Sdavidcs ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), 3610284741Sdavidcs ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 
1: 0) 3611284741Sdavidcs ); 3612284741Sdavidcs 3613284741Sdavidcs *cfg_bits = pcfg_rsp->cfg_bits; 3614284741Sdavidcs 3615284741Sdavidcs return (0); 3616284741Sdavidcs} 3617284741Sdavidcs 3618284741Sdavidcsint 3619313070Sdavidcsql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp) 3620284741Sdavidcs{ 3621284741Sdavidcs struct ether_vlan_header *eh; 3622284741Sdavidcs uint16_t etype; 3623284741Sdavidcs struct ip *ip = NULL; 3624284741Sdavidcs struct ip6_hdr *ip6 = NULL; 3625284741Sdavidcs struct tcphdr *th = NULL; 3626284741Sdavidcs uint32_t hdrlen; 3627284741Sdavidcs uint32_t offset; 3628284741Sdavidcs uint8_t buf[sizeof(struct ip6_hdr)]; 3629284741Sdavidcs 3630284741Sdavidcs eh = mtod(mp, struct ether_vlan_header *); 3631284741Sdavidcs 3632284741Sdavidcs if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3633284741Sdavidcs hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3634284741Sdavidcs etype = ntohs(eh->evl_proto); 3635284741Sdavidcs } else { 3636284741Sdavidcs hdrlen = ETHER_HDR_LEN; 3637284741Sdavidcs etype = ntohs(eh->evl_encap_proto); 3638284741Sdavidcs } 3639284741Sdavidcs 3640284741Sdavidcs if (etype == ETHERTYPE_IP) { 3641284741Sdavidcs 3642284741Sdavidcs offset = (hdrlen + sizeof (struct ip)); 3643284741Sdavidcs 3644284741Sdavidcs if (mp->m_len >= offset) { 3645284741Sdavidcs ip = (struct ip *)(mp->m_data + hdrlen); 3646284741Sdavidcs } else { 3647284741Sdavidcs m_copydata(mp, hdrlen, sizeof (struct ip), buf); 3648284741Sdavidcs ip = (struct ip *)buf; 3649284741Sdavidcs } 3650284741Sdavidcs 3651284741Sdavidcs if (ip->ip_p == IPPROTO_TCP) { 3652284741Sdavidcs 3653284741Sdavidcs hdrlen += ip->ip_hl << 2; 3654284741Sdavidcs offset = hdrlen + 4; 3655284741Sdavidcs 3656284741Sdavidcs if (mp->m_len >= offset) { 3657305487Sdavidcs th = (struct tcphdr *)(mp->m_data + hdrlen);; 3658284741Sdavidcs } else { 3659284741Sdavidcs m_copydata(mp, hdrlen, 4, buf); 3660284741Sdavidcs th = (struct tcphdr *)buf; 3661284741Sdavidcs } 3662284741Sdavidcs } 3663284741Sdavidcs 
3664284741Sdavidcs } else if (etype == ETHERTYPE_IPV6) { 3665284741Sdavidcs 3666284741Sdavidcs offset = (hdrlen + sizeof (struct ip6_hdr)); 3667284741Sdavidcs 3668284741Sdavidcs if (mp->m_len >= offset) { 3669284741Sdavidcs ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen); 3670284741Sdavidcs } else { 3671284741Sdavidcs m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf); 3672284741Sdavidcs ip6 = (struct ip6_hdr *)buf; 3673284741Sdavidcs } 3674284741Sdavidcs 3675284741Sdavidcs if (ip6->ip6_nxt == IPPROTO_TCP) { 3676284741Sdavidcs 3677284741Sdavidcs hdrlen += sizeof(struct ip6_hdr); 3678284741Sdavidcs offset = hdrlen + 4; 3679284741Sdavidcs 3680284741Sdavidcs if (mp->m_len >= offset) { 3681305487Sdavidcs th = (struct tcphdr *)(mp->m_data + hdrlen);; 3682284741Sdavidcs } else { 3683284741Sdavidcs m_copydata(mp, hdrlen, 4, buf); 3684284741Sdavidcs th = (struct tcphdr *)buf; 3685284741Sdavidcs } 3686284741Sdavidcs } 3687284741Sdavidcs } 3688284741Sdavidcs 3689284741Sdavidcs if (th != NULL) { 3690284741Sdavidcs if ((th->th_sport == htons(3260)) || 3691284741Sdavidcs (th->th_dport == htons(3260))) 3692284741Sdavidcs return 0; 3693284741Sdavidcs } 3694284741Sdavidcs return (-1); 3695284741Sdavidcs} 3696284741Sdavidcs 3697284741Sdavidcsvoid 3698284741Sdavidcsqla_hw_async_event(qla_host_t *ha) 3699284741Sdavidcs{ 3700284741Sdavidcs switch (ha->hw.aen_mb0) { 3701284741Sdavidcs case 0x8101: 3702284741Sdavidcs (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2, 3703284741Sdavidcs ha->hw.aen_mb3, ha->hw.aen_mb4); 3704284741Sdavidcs 3705284741Sdavidcs break; 3706284741Sdavidcs 3707284741Sdavidcs default: 3708284741Sdavidcs break; 3709284741Sdavidcs } 3710284741Sdavidcs 3711284741Sdavidcs return; 3712284741Sdavidcs} 3713284741Sdavidcs 3714284741Sdavidcs#ifdef QL_LDFLASH_FW 3715284741Sdavidcsstatic int 3716305487Sdavidcsql_get_minidump_template(qla_host_t *ha) 3717250661Sdavidcs{ 3718250661Sdavidcs uint32_t err; 3719250661Sdavidcs device_t dev = ha->pci_dev; 3720250661Sdavidcs 
q80_config_md_templ_cmd_t *md_templ; 3721250661Sdavidcs q80_config_md_templ_cmd_rsp_t *md_templ_rsp; 3722250661Sdavidcs 3723250661Sdavidcs md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox; 3724250661Sdavidcs bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t))); 3725250661Sdavidcs 3726250661Sdavidcs md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT; 3727250661Sdavidcs md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2); 3728250661Sdavidcs md_templ->count_version |= Q8_MBX_CMD_VERSION; 3729250661Sdavidcs 3730250661Sdavidcs md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr; 3731250661Sdavidcs md_templ->buff_size = ha->hw.dma_buf.minidump.size; 3732250661Sdavidcs 3733250661Sdavidcs if (qla_mbx_cmd(ha, (uint32_t *) md_templ, 3734250661Sdavidcs (sizeof(q80_config_md_templ_cmd_t) >> 2), 3735250661Sdavidcs ha->hw.mbox, 3736250661Sdavidcs (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) { 3737250661Sdavidcs 3738250661Sdavidcs device_printf(dev, "%s: failed\n", __func__); 3739250661Sdavidcs 3740250661Sdavidcs return (-1); 3741250661Sdavidcs } 3742250661Sdavidcs 3743250661Sdavidcs md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox; 3744250661Sdavidcs 3745250661Sdavidcs err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status); 3746250661Sdavidcs 3747250661Sdavidcs if (err) { 3748250661Sdavidcs device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 3749250661Sdavidcs return (-1); 3750250661Sdavidcs } 3751250661Sdavidcs 3752250661Sdavidcs return (0); 3753250661Sdavidcs 3754250661Sdavidcs} 3755284741Sdavidcs#endif /* #ifdef QL_LDFLASH_FW */ 3756250661Sdavidcs 3757305487Sdavidcs/* 3758305487Sdavidcs * Minidump related functionality 3759305487Sdavidcs */ 3760305487Sdavidcs 3761305487Sdavidcsstatic int ql_parse_template(qla_host_t *ha); 3762305487Sdavidcs 3763305487Sdavidcsstatic uint32_t ql_rdcrb(qla_host_t *ha, 3764305487Sdavidcs ql_minidump_entry_rdcrb_t *crb_entry, 3765305487Sdavidcs uint32_t * data_buff); 3766305487Sdavidcs 
3767305487Sdavidcsstatic uint32_t ql_pollrd(qla_host_t *ha, 3768305487Sdavidcs ql_minidump_entry_pollrd_t *entry, 3769305487Sdavidcs uint32_t * data_buff); 3770305487Sdavidcs 3771305487Sdavidcsstatic uint32_t ql_pollrd_modify_write(qla_host_t *ha, 3772305487Sdavidcs ql_minidump_entry_rd_modify_wr_with_poll_t *entry, 3773305487Sdavidcs uint32_t *data_buff); 3774305487Sdavidcs 3775305487Sdavidcsstatic uint32_t ql_L2Cache(qla_host_t *ha, 3776305487Sdavidcs ql_minidump_entry_cache_t *cacheEntry, 3777305487Sdavidcs uint32_t * data_buff); 3778305487Sdavidcs 3779305487Sdavidcsstatic uint32_t ql_L1Cache(qla_host_t *ha, 3780305487Sdavidcs ql_minidump_entry_cache_t *cacheEntry, 3781305487Sdavidcs uint32_t *data_buff); 3782305487Sdavidcs 3783305487Sdavidcsstatic uint32_t ql_rdocm(qla_host_t *ha, 3784305487Sdavidcs ql_minidump_entry_rdocm_t *ocmEntry, 3785305487Sdavidcs uint32_t *data_buff); 3786305487Sdavidcs 3787305487Sdavidcsstatic uint32_t ql_rdmem(qla_host_t *ha, 3788305487Sdavidcs ql_minidump_entry_rdmem_t *mem_entry, 3789305487Sdavidcs uint32_t *data_buff); 3790305487Sdavidcs 3791305487Sdavidcsstatic uint32_t ql_rdrom(qla_host_t *ha, 3792305487Sdavidcs ql_minidump_entry_rdrom_t *romEntry, 3793305487Sdavidcs uint32_t *data_buff); 3794305487Sdavidcs 3795305487Sdavidcsstatic uint32_t ql_rdmux(qla_host_t *ha, 3796305487Sdavidcs ql_minidump_entry_mux_t *muxEntry, 3797305487Sdavidcs uint32_t *data_buff); 3798305487Sdavidcs 3799305487Sdavidcsstatic uint32_t ql_rdmux2(qla_host_t *ha, 3800305487Sdavidcs ql_minidump_entry_mux2_t *muxEntry, 3801305487Sdavidcs uint32_t *data_buff); 3802305487Sdavidcs 3803305487Sdavidcsstatic uint32_t ql_rdqueue(qla_host_t *ha, 3804305487Sdavidcs ql_minidump_entry_queue_t *queueEntry, 3805305487Sdavidcs uint32_t *data_buff); 3806305487Sdavidcs 3807305487Sdavidcsstatic uint32_t ql_cntrl(qla_host_t *ha, 3808305487Sdavidcs ql_minidump_template_hdr_t *template_hdr, 3809305487Sdavidcs ql_minidump_entry_cntrl_t *crbEntry); 3810305487Sdavidcs 
3811305487Sdavidcs 3812305487Sdavidcsstatic uint32_t 3813305487Sdavidcsql_minidump_size(qla_host_t *ha) 3814305487Sdavidcs{ 3815305487Sdavidcs uint32_t i, k; 3816305487Sdavidcs uint32_t size = 0; 3817305487Sdavidcs ql_minidump_template_hdr_t *hdr; 3818305487Sdavidcs 3819305487Sdavidcs hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b; 3820305487Sdavidcs 3821305487Sdavidcs i = 0x2; 3822305487Sdavidcs 3823305487Sdavidcs for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) { 3824305487Sdavidcs if (i & ha->hw.mdump_capture_mask) 3825305487Sdavidcs size += hdr->capture_size_array[k]; 3826305487Sdavidcs i = i << 1; 3827305487Sdavidcs } 3828305487Sdavidcs return (size); 3829305487Sdavidcs} 3830305487Sdavidcs 3831305487Sdavidcsstatic void 3832305487Sdavidcsql_free_minidump_buffer(qla_host_t *ha) 3833305487Sdavidcs{ 3834305487Sdavidcs if (ha->hw.mdump_buffer != NULL) { 3835305487Sdavidcs free(ha->hw.mdump_buffer, M_QLA83XXBUF); 3836305487Sdavidcs ha->hw.mdump_buffer = NULL; 3837305487Sdavidcs ha->hw.mdump_buffer_size = 0; 3838305487Sdavidcs } 3839305487Sdavidcs return; 3840305487Sdavidcs} 3841305487Sdavidcs 3842250661Sdavidcsstatic int 3843305487Sdavidcsql_alloc_minidump_buffer(qla_host_t *ha) 3844250661Sdavidcs{ 3845305487Sdavidcs ha->hw.mdump_buffer_size = ql_minidump_size(ha); 3846305487Sdavidcs 3847305487Sdavidcs if (!ha->hw.mdump_buffer_size) 3848305487Sdavidcs return (-1); 3849305487Sdavidcs 3850305487Sdavidcs ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF, 3851305487Sdavidcs M_NOWAIT); 3852305487Sdavidcs 3853305487Sdavidcs if (ha->hw.mdump_buffer == NULL) 3854305487Sdavidcs return (-1); 3855305487Sdavidcs 3856305487Sdavidcs return (0); 3857305487Sdavidcs} 3858305487Sdavidcs 3859305487Sdavidcsstatic void 3860305487Sdavidcsql_free_minidump_template_buffer(qla_host_t *ha) 3861305487Sdavidcs{ 3862305487Sdavidcs if (ha->hw.mdump_template != NULL) { 3863305487Sdavidcs free(ha->hw.mdump_template, M_QLA83XXBUF); 3864305487Sdavidcs 
ha->hw.mdump_template = NULL; 3865305487Sdavidcs ha->hw.mdump_template_size = 0; 3866305487Sdavidcs } 3867305487Sdavidcs return; 3868305487Sdavidcs} 3869305487Sdavidcs 3870305487Sdavidcsstatic int 3871305487Sdavidcsql_alloc_minidump_template_buffer(qla_host_t *ha) 3872305487Sdavidcs{ 3873305487Sdavidcs ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size; 3874305487Sdavidcs 3875305487Sdavidcs ha->hw.mdump_template = malloc(ha->hw.mdump_template_size, 3876305487Sdavidcs M_QLA83XXBUF, M_NOWAIT); 3877305487Sdavidcs 3878305487Sdavidcs if (ha->hw.mdump_template == NULL) 3879305487Sdavidcs return (-1); 3880305487Sdavidcs 3881305487Sdavidcs return (0); 3882305487Sdavidcs} 3883305487Sdavidcs 3884305487Sdavidcsstatic int 3885305487Sdavidcsql_alloc_minidump_buffers(qla_host_t *ha) 3886305487Sdavidcs{ 3887305487Sdavidcs int ret; 3888305487Sdavidcs 3889305487Sdavidcs ret = ql_alloc_minidump_template_buffer(ha); 3890305487Sdavidcs 3891305487Sdavidcs if (ret) 3892305487Sdavidcs return (ret); 3893305487Sdavidcs 3894305487Sdavidcs ret = ql_alloc_minidump_buffer(ha); 3895305487Sdavidcs 3896305487Sdavidcs if (ret) 3897305487Sdavidcs ql_free_minidump_template_buffer(ha); 3898305487Sdavidcs 3899305487Sdavidcs return (ret); 3900305487Sdavidcs} 3901305487Sdavidcs 3902305487Sdavidcs 3903305487Sdavidcsstatic uint32_t 3904305487Sdavidcsql_validate_minidump_checksum(qla_host_t *ha) 3905305487Sdavidcs{ 3906305487Sdavidcs uint64_t sum = 0; 3907305487Sdavidcs int count; 3908305487Sdavidcs uint32_t *template_buff; 3909305487Sdavidcs 3910305487Sdavidcs count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t); 3911305487Sdavidcs template_buff = ha->hw.dma_buf.minidump.dma_b; 3912305487Sdavidcs 3913305487Sdavidcs while (count-- > 0) { 3914305487Sdavidcs sum += *template_buff++; 3915305487Sdavidcs } 3916305487Sdavidcs 3917305487Sdavidcs while (sum >> 32) { 3918305487Sdavidcs sum = (sum & 0xFFFFFFFF) + (sum >> 32); 3919305487Sdavidcs } 3920305487Sdavidcs 3921305487Sdavidcs return (~sum); 
3922305487Sdavidcs} 3923305487Sdavidcs 3924305487Sdavidcsint 3925305487Sdavidcsql_minidump_init(qla_host_t *ha) 3926305487Sdavidcs{ 3927284741Sdavidcs int ret = 0; 3928250661Sdavidcs uint32_t template_size = 0; 3929250661Sdavidcs device_t dev = ha->pci_dev; 3930250661Sdavidcs 3931250661Sdavidcs /* 3932250661Sdavidcs * Get Minidump Template Size 3933250661Sdavidcs */ 3934250661Sdavidcs ret = qla_get_minidump_tmplt_size(ha, &template_size); 3935250661Sdavidcs 3936250661Sdavidcs if (ret || (template_size == 0)) { 3937250661Sdavidcs device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret, 3938250661Sdavidcs template_size); 3939250661Sdavidcs return (-1); 3940250661Sdavidcs } 3941250661Sdavidcs 3942250661Sdavidcs /* 3943250661Sdavidcs * Allocate Memory for Minidump Template 3944250661Sdavidcs */ 3945250661Sdavidcs 3946250661Sdavidcs ha->hw.dma_buf.minidump.alignment = 8; 3947250661Sdavidcs ha->hw.dma_buf.minidump.size = template_size; 3948250661Sdavidcs 3949284741Sdavidcs#ifdef QL_LDFLASH_FW 3950250661Sdavidcs if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) { 3951250661Sdavidcs 3952250661Sdavidcs device_printf(dev, "%s: minidump dma alloc failed\n", __func__); 3953250661Sdavidcs 3954250661Sdavidcs return (-1); 3955250661Sdavidcs } 3956250661Sdavidcs ha->hw.dma_buf.flags.minidump = 1; 3957250661Sdavidcs 3958250661Sdavidcs /* 3959250661Sdavidcs * Retrieve Minidump Template 3960250661Sdavidcs */ 3961305487Sdavidcs ret = ql_get_minidump_template(ha); 3962284741Sdavidcs#else 3963284741Sdavidcs ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump; 3964305487Sdavidcs 3965284741Sdavidcs#endif /* #ifdef QL_LDFLASH_FW */ 3966250661Sdavidcs 3967305487Sdavidcs if (ret == 0) { 3968305487Sdavidcs 3969305487Sdavidcs ret = ql_validate_minidump_checksum(ha); 3970305487Sdavidcs 3971305487Sdavidcs if (ret == 0) { 3972305487Sdavidcs 3973305487Sdavidcs ret = ql_alloc_minidump_buffers(ha); 3974305487Sdavidcs 3975305487Sdavidcs if (ret == 0) 3976305487Sdavidcs ha->hw.mdump_init = 1; 
3977305487Sdavidcs else 3978305487Sdavidcs device_printf(dev, 3979305487Sdavidcs "%s: ql_alloc_minidump_buffers" 3980305487Sdavidcs " failed\n", __func__); 3981305487Sdavidcs } else { 3982305487Sdavidcs device_printf(dev, "%s: ql_validate_minidump_checksum" 3983305487Sdavidcs " failed\n", __func__); 3984305487Sdavidcs } 3985250661Sdavidcs } else { 3986305487Sdavidcs device_printf(dev, "%s: ql_get_minidump_template failed\n", 3987305487Sdavidcs __func__); 3988250661Sdavidcs } 3989250661Sdavidcs 3990305487Sdavidcs if (ret) 3991305487Sdavidcs ql_minidump_free(ha); 3992305487Sdavidcs 3993250661Sdavidcs return (ret); 3994250661Sdavidcs} 3995250661Sdavidcs 3996250661Sdavidcsstatic void 3997305487Sdavidcsql_minidump_free(qla_host_t *ha) 3998250661Sdavidcs{ 3999250661Sdavidcs ha->hw.mdump_init = 0; 4000250661Sdavidcs if (ha->hw.dma_buf.flags.minidump) { 4001250661Sdavidcs ha->hw.dma_buf.flags.minidump = 0; 4002250661Sdavidcs ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump); 4003250661Sdavidcs } 4004305487Sdavidcs 4005305487Sdavidcs ql_free_minidump_template_buffer(ha); 4006305487Sdavidcs ql_free_minidump_buffer(ha); 4007305487Sdavidcs 4008250661Sdavidcs return; 4009250661Sdavidcs} 4010250661Sdavidcs 4011250661Sdavidcsvoid 4012250661Sdavidcsql_minidump(qla_host_t *ha) 4013250661Sdavidcs{ 4014250661Sdavidcs if (!ha->hw.mdump_init) 4015250661Sdavidcs return; 4016250661Sdavidcs 4017305487Sdavidcs if (ha->hw.mdump_done) 4018250661Sdavidcs return; 4019250661Sdavidcs 4020250661Sdavidcs ha->hw.mdump_start_seq_index = ql_stop_sequence(ha); 4021250661Sdavidcs 4022305487Sdavidcs bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size); 4023305487Sdavidcs bzero(ha->hw.mdump_template, ha->hw.mdump_template_size); 4024305487Sdavidcs 4025305487Sdavidcs bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template, 4026305487Sdavidcs ha->hw.mdump_template_size); 4027305487Sdavidcs 4028305487Sdavidcs ql_parse_template(ha); 4029305487Sdavidcs 4030250661Sdavidcs ql_start_sequence(ha, 
ha->hw.mdump_start_seq_index); 4031250661Sdavidcs 4032305487Sdavidcs ha->hw.mdump_done = 1; 4033305487Sdavidcs 4034250661Sdavidcs return; 4035250661Sdavidcs} 4036305487Sdavidcs 4037305487Sdavidcs 4038305487Sdavidcs/* 4039305487Sdavidcs * helper routines 4040305487Sdavidcs */ 4041305487Sdavidcsstatic void 4042305487Sdavidcsql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize) 4043305487Sdavidcs{ 4044305487Sdavidcs if (esize != entry->hdr.entry_capture_size) { 4045305487Sdavidcs entry->hdr.entry_capture_size = esize; 4046305487Sdavidcs entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG; 4047305487Sdavidcs } 4048305487Sdavidcs return; 4049305487Sdavidcs} 4050305487Sdavidcs 4051305487Sdavidcs 4052305487Sdavidcsstatic int 4053305487Sdavidcsql_parse_template(qla_host_t *ha) 4054305487Sdavidcs{ 4055305487Sdavidcs uint32_t num_of_entries, buff_level, e_cnt, esize; 4056305487Sdavidcs uint32_t end_cnt, rv = 0; 4057305487Sdavidcs char *dump_buff, *dbuff; 4058305487Sdavidcs int sane_start = 0, sane_end = 0; 4059305487Sdavidcs ql_minidump_template_hdr_t *template_hdr; 4060305487Sdavidcs ql_minidump_entry_t *entry; 4061305487Sdavidcs uint32_t capture_mask; 4062305487Sdavidcs uint32_t dump_size; 4063305487Sdavidcs 4064305487Sdavidcs /* Setup parameters */ 4065305487Sdavidcs template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template; 4066305487Sdavidcs 4067305487Sdavidcs if (template_hdr->entry_type == TLHDR) 4068305487Sdavidcs sane_start = 1; 4069305487Sdavidcs 4070305487Sdavidcs dump_buff = (char *) ha->hw.mdump_buffer; 4071305487Sdavidcs 4072305487Sdavidcs num_of_entries = template_hdr->num_of_entries; 4073305487Sdavidcs 4074305487Sdavidcs entry = (ql_minidump_entry_t *) ((char *)template_hdr 4075305487Sdavidcs + template_hdr->first_entry_offset ); 4076305487Sdavidcs 4077305487Sdavidcs template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] = 4078305487Sdavidcs template_hdr->ocm_window_array[ha->pci_func]; 4079305487Sdavidcs 
template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func; 4080305487Sdavidcs 4081305487Sdavidcs capture_mask = ha->hw.mdump_capture_mask; 4082305487Sdavidcs dump_size = ha->hw.mdump_buffer_size; 4083305487Sdavidcs 4084305487Sdavidcs template_hdr->driver_capture_mask = capture_mask; 4085305487Sdavidcs 4086305487Sdavidcs QL_DPRINT80(ha, (ha->pci_dev, 4087305487Sdavidcs "%s: sane_start = %d num_of_entries = %d " 4088305487Sdavidcs "capture_mask = 0x%x dump_size = %d \n", 4089305487Sdavidcs __func__, sane_start, num_of_entries, capture_mask, dump_size)); 4090305487Sdavidcs 4091305487Sdavidcs for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) { 4092305487Sdavidcs 4093305487Sdavidcs /* 4094305487Sdavidcs * If the capture_mask of the entry does not match capture mask 4095305487Sdavidcs * skip the entry after marking the driver_flags indicator. 4096305487Sdavidcs */ 4097305487Sdavidcs 4098305487Sdavidcs if (!(entry->hdr.entry_capture_mask & capture_mask)) { 4099305487Sdavidcs 4100305487Sdavidcs entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4101305487Sdavidcs entry = (ql_minidump_entry_t *) ((char *) entry 4102305487Sdavidcs + entry->hdr.entry_size); 4103305487Sdavidcs continue; 4104305487Sdavidcs } 4105305487Sdavidcs 4106305487Sdavidcs /* 4107305487Sdavidcs * This is ONLY needed in implementations where 4108305487Sdavidcs * the capture buffer allocated is too small to capture 4109305487Sdavidcs * all of the required entries for a given capture mask. 4110305487Sdavidcs * We need to empty the buffer contents to a file 4111305487Sdavidcs * if possible, before processing the next entry 4112305487Sdavidcs * If the buff_full_flag is set, no further capture will happen 4113305487Sdavidcs * and all remaining non-control entries will be skipped. 
4114305487Sdavidcs */ 4115305487Sdavidcs if (entry->hdr.entry_capture_size != 0) { 4116305487Sdavidcs if ((buff_level + entry->hdr.entry_capture_size) > 4117305487Sdavidcs dump_size) { 4118305487Sdavidcs /* Try to recover by emptying buffer to file */ 4119305487Sdavidcs entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4120305487Sdavidcs entry = (ql_minidump_entry_t *) ((char *) entry 4121305487Sdavidcs + entry->hdr.entry_size); 4122305487Sdavidcs continue; 4123305487Sdavidcs } 4124305487Sdavidcs } 4125305487Sdavidcs 4126305487Sdavidcs /* 4127305487Sdavidcs * Decode the entry type and process it accordingly 4128305487Sdavidcs */ 4129305487Sdavidcs 4130305487Sdavidcs switch (entry->hdr.entry_type) { 4131305487Sdavidcs case RDNOP: 4132305487Sdavidcs break; 4133305487Sdavidcs 4134305487Sdavidcs case RDEND: 4135305487Sdavidcs if (sane_end == 0) { 4136305487Sdavidcs end_cnt = e_cnt; 4137305487Sdavidcs } 4138305487Sdavidcs sane_end++; 4139305487Sdavidcs break; 4140305487Sdavidcs 4141305487Sdavidcs case RDCRB: 4142305487Sdavidcs dbuff = dump_buff + buff_level; 4143305487Sdavidcs esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff); 4144305487Sdavidcs ql_entry_err_chk(entry, esize); 4145305487Sdavidcs buff_level += esize; 4146305487Sdavidcs break; 4147305487Sdavidcs 4148305487Sdavidcs case POLLRD: 4149305487Sdavidcs dbuff = dump_buff + buff_level; 4150305487Sdavidcs esize = ql_pollrd(ha, (void *)entry, (void *)dbuff); 4151305487Sdavidcs ql_entry_err_chk(entry, esize); 4152305487Sdavidcs buff_level += esize; 4153305487Sdavidcs break; 4154305487Sdavidcs 4155305487Sdavidcs case POLLRDMWR: 4156305487Sdavidcs dbuff = dump_buff + buff_level; 4157305487Sdavidcs esize = ql_pollrd_modify_write(ha, (void *)entry, 4158305487Sdavidcs (void *)dbuff); 4159305487Sdavidcs ql_entry_err_chk(entry, esize); 4160305487Sdavidcs buff_level += esize; 4161305487Sdavidcs break; 4162305487Sdavidcs 4163305487Sdavidcs case L2ITG: 4164305487Sdavidcs case L2DTG: 4165305487Sdavidcs case L2DAT: 
4166305487Sdavidcs case L2INS: 4167305487Sdavidcs dbuff = dump_buff + buff_level; 4168305487Sdavidcs esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff); 4169305487Sdavidcs if (esize == -1) { 4170305487Sdavidcs entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4171305487Sdavidcs } else { 4172305487Sdavidcs ql_entry_err_chk(entry, esize); 4173305487Sdavidcs buff_level += esize; 4174305487Sdavidcs } 4175305487Sdavidcs break; 4176305487Sdavidcs 4177305487Sdavidcs case L1DAT: 4178305487Sdavidcs case L1INS: 4179305487Sdavidcs dbuff = dump_buff + buff_level; 4180305487Sdavidcs esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff); 4181305487Sdavidcs ql_entry_err_chk(entry, esize); 4182305487Sdavidcs buff_level += esize; 4183305487Sdavidcs break; 4184305487Sdavidcs 4185305487Sdavidcs case RDOCM: 4186305487Sdavidcs dbuff = dump_buff + buff_level; 4187305487Sdavidcs esize = ql_rdocm(ha, (void *)entry, (void *)dbuff); 4188305487Sdavidcs ql_entry_err_chk(entry, esize); 4189305487Sdavidcs buff_level += esize; 4190305487Sdavidcs break; 4191305487Sdavidcs 4192305487Sdavidcs case RDMEM: 4193305487Sdavidcs dbuff = dump_buff + buff_level; 4194305487Sdavidcs esize = ql_rdmem(ha, (void *)entry, (void *)dbuff); 4195305487Sdavidcs ql_entry_err_chk(entry, esize); 4196305487Sdavidcs buff_level += esize; 4197305487Sdavidcs break; 4198305487Sdavidcs 4199305487Sdavidcs case BOARD: 4200305487Sdavidcs case RDROM: 4201305487Sdavidcs dbuff = dump_buff + buff_level; 4202305487Sdavidcs esize = ql_rdrom(ha, (void *)entry, (void *)dbuff); 4203305487Sdavidcs ql_entry_err_chk(entry, esize); 4204305487Sdavidcs buff_level += esize; 4205305487Sdavidcs break; 4206305487Sdavidcs 4207305487Sdavidcs case RDMUX: 4208305487Sdavidcs dbuff = dump_buff + buff_level; 4209305487Sdavidcs esize = ql_rdmux(ha, (void *)entry, (void *)dbuff); 4210305487Sdavidcs ql_entry_err_chk(entry, esize); 4211305487Sdavidcs buff_level += esize; 4212305487Sdavidcs break; 4213305487Sdavidcs 4214305487Sdavidcs case RDMUX2: 
4215305487Sdavidcs dbuff = dump_buff + buff_level; 4216305487Sdavidcs esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff); 4217305487Sdavidcs ql_entry_err_chk(entry, esize); 4218305487Sdavidcs buff_level += esize; 4219305487Sdavidcs break; 4220305487Sdavidcs 4221305487Sdavidcs case QUEUE: 4222305487Sdavidcs dbuff = dump_buff + buff_level; 4223305487Sdavidcs esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff); 4224305487Sdavidcs ql_entry_err_chk(entry, esize); 4225305487Sdavidcs buff_level += esize; 4226305487Sdavidcs break; 4227305487Sdavidcs 4228305487Sdavidcs case CNTRL: 4229305487Sdavidcs if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) { 4230305487Sdavidcs entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4231305487Sdavidcs } 4232305487Sdavidcs break; 4233305487Sdavidcs default: 4234305487Sdavidcs entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG; 4235305487Sdavidcs break; 4236305487Sdavidcs } 4237305487Sdavidcs /* next entry in the template */ 4238305487Sdavidcs entry = (ql_minidump_entry_t *) ((char *) entry 4239305487Sdavidcs + entry->hdr.entry_size); 4240305487Sdavidcs } 4241305487Sdavidcs 4242305487Sdavidcs if (!sane_start || (sane_end > 1)) { 4243305487Sdavidcs device_printf(ha->pci_dev, 4244305487Sdavidcs "\n%s: Template configuration error. Check Template\n", 4245305487Sdavidcs __func__); 4246305487Sdavidcs } 4247305487Sdavidcs 4248305487Sdavidcs QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n", 4249305487Sdavidcs __func__, template_hdr->num_of_entries)); 4250305487Sdavidcs 4251305487Sdavidcs return 0; 4252305487Sdavidcs} 4253305487Sdavidcs 4254305487Sdavidcs/* 4255305487Sdavidcs * Read CRB operation. 
4256305487Sdavidcs */ 4257305487Sdavidcsstatic uint32_t 4258305487Sdavidcsql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry, 4259305487Sdavidcs uint32_t * data_buff) 4260305487Sdavidcs{ 4261305487Sdavidcs int loop_cnt; 4262305487Sdavidcs int ret; 4263305487Sdavidcs uint32_t op_count, addr, stride, value = 0; 4264305487Sdavidcs 4265305487Sdavidcs addr = crb_entry->addr; 4266305487Sdavidcs op_count = crb_entry->op_count; 4267305487Sdavidcs stride = crb_entry->addr_stride; 4268305487Sdavidcs 4269305487Sdavidcs for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { 4270305487Sdavidcs 4271305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr, &value, 1); 4272305487Sdavidcs 4273305487Sdavidcs if (ret) 4274305487Sdavidcs return (0); 4275305487Sdavidcs 4276305487Sdavidcs *data_buff++ = addr; 4277305487Sdavidcs *data_buff++ = value; 4278305487Sdavidcs addr = addr + stride; 4279305487Sdavidcs } 4280305487Sdavidcs 4281305487Sdavidcs /* 4282305487Sdavidcs * for testing purpose we return amount of data written 4283305487Sdavidcs */ 4284305487Sdavidcs return (op_count * (2 * sizeof(uint32_t))); 4285305487Sdavidcs} 4286305487Sdavidcs 4287305487Sdavidcs/* 4288305487Sdavidcs * Handle L2 Cache. 
4289305487Sdavidcs */ 4290305487Sdavidcs 4291305487Sdavidcsstatic uint32_t 4292305487Sdavidcsql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry, 4293305487Sdavidcs uint32_t * data_buff) 4294305487Sdavidcs{ 4295305487Sdavidcs int i, k; 4296305487Sdavidcs int loop_cnt; 4297305487Sdavidcs int ret; 4298305487Sdavidcs 4299305487Sdavidcs uint32_t read_value; 4300305487Sdavidcs uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w; 4301305487Sdavidcs uint32_t tag_value, read_cnt; 4302305487Sdavidcs volatile uint8_t cntl_value_r; 4303305487Sdavidcs long timeout; 4304305487Sdavidcs uint32_t data; 4305305487Sdavidcs 4306305487Sdavidcs loop_cnt = cacheEntry->op_count; 4307305487Sdavidcs 4308305487Sdavidcs read_addr = cacheEntry->read_addr; 4309305487Sdavidcs cntrl_addr = cacheEntry->control_addr; 4310305487Sdavidcs cntl_value_w = (uint32_t) cacheEntry->write_value; 4311305487Sdavidcs 4312305487Sdavidcs tag_reg_addr = cacheEntry->tag_reg_addr; 4313305487Sdavidcs 4314305487Sdavidcs tag_value = cacheEntry->init_tag_value; 4315305487Sdavidcs read_cnt = cacheEntry->read_addr_cnt; 4316305487Sdavidcs 4317305487Sdavidcs for (i = 0; i < loop_cnt; i++) { 4318305487Sdavidcs 4319305487Sdavidcs ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0); 4320305487Sdavidcs if (ret) 4321305487Sdavidcs return (0); 4322305487Sdavidcs 4323305487Sdavidcs if (cacheEntry->write_value != 0) { 4324305487Sdavidcs 4325305487Sdavidcs ret = ql_rdwr_indreg32(ha, cntrl_addr, 4326305487Sdavidcs &cntl_value_w, 0); 4327305487Sdavidcs if (ret) 4328305487Sdavidcs return (0); 4329305487Sdavidcs } 4330305487Sdavidcs 4331305487Sdavidcs if (cacheEntry->poll_mask != 0) { 4332305487Sdavidcs 4333305487Sdavidcs timeout = cacheEntry->poll_wait; 4334305487Sdavidcs 4335305487Sdavidcs ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1); 4336305487Sdavidcs if (ret) 4337305487Sdavidcs return (0); 4338305487Sdavidcs 4339305487Sdavidcs cntl_value_r = (uint8_t)data; 4340305487Sdavidcs 4341305487Sdavidcs 
while ((cntl_value_r & cacheEntry->poll_mask) != 0) { 4342305487Sdavidcs 4343305487Sdavidcs if (timeout) { 4344305487Sdavidcs qla_mdelay(__func__, 1); 4345305487Sdavidcs timeout--; 4346305487Sdavidcs } else 4347305487Sdavidcs break; 4348305487Sdavidcs 4349305487Sdavidcs ret = ql_rdwr_indreg32(ha, cntrl_addr, 4350305487Sdavidcs &data, 1); 4351305487Sdavidcs if (ret) 4352305487Sdavidcs return (0); 4353305487Sdavidcs 4354305487Sdavidcs cntl_value_r = (uint8_t)data; 4355305487Sdavidcs } 4356305487Sdavidcs if (!timeout) { 4357305487Sdavidcs /* Report timeout error. 4358305487Sdavidcs * core dump capture failed 4359305487Sdavidcs * Skip remaining entries. 4360305487Sdavidcs * Write buffer out to file 4361305487Sdavidcs * Use driver specific fields in template header 4362305487Sdavidcs * to report this error. 4363305487Sdavidcs */ 4364305487Sdavidcs return (-1); 4365305487Sdavidcs } 4366305487Sdavidcs } 4367305487Sdavidcs 4368305487Sdavidcs addr = read_addr; 4369305487Sdavidcs for (k = 0; k < read_cnt; k++) { 4370305487Sdavidcs 4371305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); 4372305487Sdavidcs if (ret) 4373305487Sdavidcs return (0); 4374305487Sdavidcs 4375305487Sdavidcs *data_buff++ = read_value; 4376305487Sdavidcs addr += cacheEntry->read_addr_stride; 4377305487Sdavidcs } 4378305487Sdavidcs 4379305487Sdavidcs tag_value += cacheEntry->tag_value_stride; 4380305487Sdavidcs } 4381305487Sdavidcs 4382305487Sdavidcs return (read_cnt * loop_cnt * sizeof(uint32_t)); 4383305487Sdavidcs} 4384305487Sdavidcs 4385305487Sdavidcs/* 4386305487Sdavidcs * Handle L1 Cache. 
4387305487Sdavidcs */ 4388305487Sdavidcs 4389305487Sdavidcsstatic uint32_t 4390305487Sdavidcsql_L1Cache(qla_host_t *ha, 4391305487Sdavidcs ql_minidump_entry_cache_t *cacheEntry, 4392305487Sdavidcs uint32_t *data_buff) 4393305487Sdavidcs{ 4394305487Sdavidcs int ret; 4395305487Sdavidcs int i, k; 4396305487Sdavidcs int loop_cnt; 4397305487Sdavidcs 4398305487Sdavidcs uint32_t read_value; 4399305487Sdavidcs uint32_t addr, read_addr, cntrl_addr, tag_reg_addr; 4400305487Sdavidcs uint32_t tag_value, read_cnt; 4401305487Sdavidcs uint32_t cntl_value_w; 4402305487Sdavidcs 4403305487Sdavidcs loop_cnt = cacheEntry->op_count; 4404305487Sdavidcs 4405305487Sdavidcs read_addr = cacheEntry->read_addr; 4406305487Sdavidcs cntrl_addr = cacheEntry->control_addr; 4407305487Sdavidcs cntl_value_w = (uint32_t) cacheEntry->write_value; 4408305487Sdavidcs 4409305487Sdavidcs tag_reg_addr = cacheEntry->tag_reg_addr; 4410305487Sdavidcs 4411305487Sdavidcs tag_value = cacheEntry->init_tag_value; 4412305487Sdavidcs read_cnt = cacheEntry->read_addr_cnt; 4413305487Sdavidcs 4414305487Sdavidcs for (i = 0; i < loop_cnt; i++) { 4415305487Sdavidcs 4416305487Sdavidcs ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0); 4417305487Sdavidcs if (ret) 4418305487Sdavidcs return (0); 4419305487Sdavidcs 4420305487Sdavidcs ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0); 4421305487Sdavidcs if (ret) 4422305487Sdavidcs return (0); 4423305487Sdavidcs 4424305487Sdavidcs addr = read_addr; 4425305487Sdavidcs for (k = 0; k < read_cnt; k++) { 4426305487Sdavidcs 4427305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); 4428305487Sdavidcs if (ret) 4429305487Sdavidcs return (0); 4430305487Sdavidcs 4431305487Sdavidcs *data_buff++ = read_value; 4432305487Sdavidcs addr += cacheEntry->read_addr_stride; 4433305487Sdavidcs } 4434305487Sdavidcs 4435305487Sdavidcs tag_value += cacheEntry->tag_value_stride; 4436305487Sdavidcs } 4437305487Sdavidcs 4438305487Sdavidcs return (read_cnt * loop_cnt * 
sizeof(uint32_t)); 4439305487Sdavidcs} 4440305487Sdavidcs 4441305487Sdavidcs/* 4442305487Sdavidcs * Reading OCM memory 4443305487Sdavidcs */ 4444305487Sdavidcs 4445305487Sdavidcsstatic uint32_t 4446305487Sdavidcsql_rdocm(qla_host_t *ha, 4447305487Sdavidcs ql_minidump_entry_rdocm_t *ocmEntry, 4448305487Sdavidcs uint32_t *data_buff) 4449305487Sdavidcs{ 4450305487Sdavidcs int i, loop_cnt; 4451305487Sdavidcs volatile uint32_t addr; 4452305487Sdavidcs volatile uint32_t value; 4453305487Sdavidcs 4454305487Sdavidcs addr = ocmEntry->read_addr; 4455305487Sdavidcs loop_cnt = ocmEntry->op_count; 4456305487Sdavidcs 4457305487Sdavidcs for (i = 0; i < loop_cnt; i++) { 4458305487Sdavidcs value = READ_REG32(ha, addr); 4459305487Sdavidcs *data_buff++ = value; 4460305487Sdavidcs addr += ocmEntry->read_addr_stride; 4461305487Sdavidcs } 4462305487Sdavidcs return (loop_cnt * sizeof(value)); 4463305487Sdavidcs} 4464305487Sdavidcs 4465305487Sdavidcs/* 4466305487Sdavidcs * Read memory 4467305487Sdavidcs */ 4468305487Sdavidcs 4469305487Sdavidcsstatic uint32_t 4470305487Sdavidcsql_rdmem(qla_host_t *ha, 4471305487Sdavidcs ql_minidump_entry_rdmem_t *mem_entry, 4472305487Sdavidcs uint32_t *data_buff) 4473305487Sdavidcs{ 4474305487Sdavidcs int ret; 4475305487Sdavidcs int i, loop_cnt; 4476305487Sdavidcs volatile uint32_t addr; 4477305487Sdavidcs q80_offchip_mem_val_t val; 4478305487Sdavidcs 4479305487Sdavidcs addr = mem_entry->read_addr; 4480305487Sdavidcs 4481305487Sdavidcs /* size in bytes / 16 */ 4482305487Sdavidcs loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4); 4483305487Sdavidcs 4484305487Sdavidcs for (i = 0; i < loop_cnt; i++) { 4485305487Sdavidcs 4486305487Sdavidcs ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1); 4487305487Sdavidcs if (ret) 4488305487Sdavidcs return (0); 4489305487Sdavidcs 4490305487Sdavidcs *data_buff++ = val.data_lo; 4491305487Sdavidcs *data_buff++ = val.data_hi; 4492305487Sdavidcs *data_buff++ = val.data_ulo; 4493305487Sdavidcs *data_buff++ 
= val.data_uhi; 4494305487Sdavidcs 4495305487Sdavidcs addr += (sizeof(uint32_t) * 4); 4496305487Sdavidcs } 4497305487Sdavidcs 4498305487Sdavidcs return (loop_cnt * (sizeof(uint32_t) * 4)); 4499305487Sdavidcs} 4500305487Sdavidcs 4501305487Sdavidcs/* 4502305487Sdavidcs * Read Rom 4503305487Sdavidcs */ 4504305487Sdavidcs 4505305487Sdavidcsstatic uint32_t 4506305487Sdavidcsql_rdrom(qla_host_t *ha, 4507305487Sdavidcs ql_minidump_entry_rdrom_t *romEntry, 4508305487Sdavidcs uint32_t *data_buff) 4509305487Sdavidcs{ 4510305487Sdavidcs int ret; 4511305487Sdavidcs int i, loop_cnt; 4512305487Sdavidcs uint32_t addr; 4513305487Sdavidcs uint32_t value; 4514305487Sdavidcs 4515305487Sdavidcs addr = romEntry->read_addr; 4516305487Sdavidcs loop_cnt = romEntry->read_data_size; /* This is size in bytes */ 4517305487Sdavidcs loop_cnt /= sizeof(value); 4518305487Sdavidcs 4519305487Sdavidcs for (i = 0; i < loop_cnt; i++) { 4520305487Sdavidcs 4521305487Sdavidcs ret = ql_rd_flash32(ha, addr, &value); 4522305487Sdavidcs if (ret) 4523305487Sdavidcs return (0); 4524305487Sdavidcs 4525305487Sdavidcs *data_buff++ = value; 4526305487Sdavidcs addr += sizeof(value); 4527305487Sdavidcs } 4528305487Sdavidcs 4529305487Sdavidcs return (loop_cnt * sizeof(value)); 4530305487Sdavidcs} 4531305487Sdavidcs 4532305487Sdavidcs/* 4533305487Sdavidcs * Read MUX data 4534305487Sdavidcs */ 4535305487Sdavidcs 4536305487Sdavidcsstatic uint32_t 4537305487Sdavidcsql_rdmux(qla_host_t *ha, 4538305487Sdavidcs ql_minidump_entry_mux_t *muxEntry, 4539305487Sdavidcs uint32_t *data_buff) 4540305487Sdavidcs{ 4541305487Sdavidcs int ret; 4542305487Sdavidcs int loop_cnt; 4543305487Sdavidcs uint32_t read_value, sel_value; 4544305487Sdavidcs uint32_t read_addr, select_addr; 4545305487Sdavidcs 4546305487Sdavidcs select_addr = muxEntry->select_addr; 4547305487Sdavidcs sel_value = muxEntry->select_value; 4548305487Sdavidcs read_addr = muxEntry->read_addr; 4549305487Sdavidcs 4550305487Sdavidcs for (loop_cnt = 0; loop_cnt < 
muxEntry->op_count; loop_cnt++) { 4551305487Sdavidcs 4552305487Sdavidcs ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0); 4553305487Sdavidcs if (ret) 4554305487Sdavidcs return (0); 4555305487Sdavidcs 4556305487Sdavidcs ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); 4557305487Sdavidcs if (ret) 4558305487Sdavidcs return (0); 4559305487Sdavidcs 4560305487Sdavidcs *data_buff++ = sel_value; 4561305487Sdavidcs *data_buff++ = read_value; 4562305487Sdavidcs 4563305487Sdavidcs sel_value += muxEntry->select_value_stride; 4564305487Sdavidcs } 4565305487Sdavidcs 4566305487Sdavidcs return (loop_cnt * (2 * sizeof(uint32_t))); 4567305487Sdavidcs} 4568305487Sdavidcs 4569305487Sdavidcsstatic uint32_t 4570305487Sdavidcsql_rdmux2(qla_host_t *ha, 4571305487Sdavidcs ql_minidump_entry_mux2_t *muxEntry, 4572305487Sdavidcs uint32_t *data_buff) 4573305487Sdavidcs{ 4574305487Sdavidcs int ret; 4575305487Sdavidcs int loop_cnt; 4576305487Sdavidcs 4577305487Sdavidcs uint32_t select_addr_1, select_addr_2; 4578305487Sdavidcs uint32_t select_value_1, select_value_2; 4579305487Sdavidcs uint32_t select_value_count, select_value_mask; 4580305487Sdavidcs uint32_t read_addr, read_value; 4581305487Sdavidcs 4582305487Sdavidcs select_addr_1 = muxEntry->select_addr_1; 4583305487Sdavidcs select_addr_2 = muxEntry->select_addr_2; 4584305487Sdavidcs select_value_1 = muxEntry->select_value_1; 4585305487Sdavidcs select_value_2 = muxEntry->select_value_2; 4586305487Sdavidcs select_value_count = muxEntry->select_value_count; 4587305487Sdavidcs select_value_mask = muxEntry->select_value_mask; 4588305487Sdavidcs 4589305487Sdavidcs read_addr = muxEntry->read_addr; 4590305487Sdavidcs 4591305487Sdavidcs for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count; 4592305487Sdavidcs loop_cnt++) { 4593305487Sdavidcs 4594305487Sdavidcs uint32_t temp_sel_val; 4595305487Sdavidcs 4596305487Sdavidcs ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0); 4597305487Sdavidcs if (ret) 4598305487Sdavidcs 
return (0); 4599305487Sdavidcs 4600305487Sdavidcs temp_sel_val = select_value_1 & select_value_mask; 4601305487Sdavidcs 4602305487Sdavidcs ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0); 4603305487Sdavidcs if (ret) 4604305487Sdavidcs return (0); 4605305487Sdavidcs 4606305487Sdavidcs ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); 4607305487Sdavidcs if (ret) 4608305487Sdavidcs return (0); 4609305487Sdavidcs 4610305487Sdavidcs *data_buff++ = temp_sel_val; 4611305487Sdavidcs *data_buff++ = read_value; 4612305487Sdavidcs 4613305487Sdavidcs ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0); 4614305487Sdavidcs if (ret) 4615305487Sdavidcs return (0); 4616305487Sdavidcs 4617305487Sdavidcs temp_sel_val = select_value_2 & select_value_mask; 4618305487Sdavidcs 4619305487Sdavidcs ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0); 4620305487Sdavidcs if (ret) 4621305487Sdavidcs return (0); 4622305487Sdavidcs 4623305487Sdavidcs ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); 4624305487Sdavidcs if (ret) 4625305487Sdavidcs return (0); 4626305487Sdavidcs 4627305487Sdavidcs *data_buff++ = temp_sel_val; 4628305487Sdavidcs *data_buff++ = read_value; 4629305487Sdavidcs 4630305487Sdavidcs select_value_1 += muxEntry->select_value_stride; 4631305487Sdavidcs select_value_2 += muxEntry->select_value_stride; 4632305487Sdavidcs } 4633305487Sdavidcs 4634305487Sdavidcs return (loop_cnt * (4 * sizeof(uint32_t))); 4635305487Sdavidcs} 4636305487Sdavidcs 4637305487Sdavidcs/* 4638305487Sdavidcs * Handling Queue State Reads. 
4639305487Sdavidcs */ 4640305487Sdavidcs 4641305487Sdavidcsstatic uint32_t 4642305487Sdavidcsql_rdqueue(qla_host_t *ha, 4643305487Sdavidcs ql_minidump_entry_queue_t *queueEntry, 4644305487Sdavidcs uint32_t *data_buff) 4645305487Sdavidcs{ 4646305487Sdavidcs int ret; 4647305487Sdavidcs int loop_cnt, k; 4648305487Sdavidcs uint32_t read_value; 4649305487Sdavidcs uint32_t read_addr, read_stride, select_addr; 4650305487Sdavidcs uint32_t queue_id, read_cnt; 4651305487Sdavidcs 4652305487Sdavidcs read_cnt = queueEntry->read_addr_cnt; 4653305487Sdavidcs read_stride = queueEntry->read_addr_stride; 4654305487Sdavidcs select_addr = queueEntry->select_addr; 4655305487Sdavidcs 4656305487Sdavidcs for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count; 4657305487Sdavidcs loop_cnt++) { 4658305487Sdavidcs 4659305487Sdavidcs ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0); 4660305487Sdavidcs if (ret) 4661305487Sdavidcs return (0); 4662305487Sdavidcs 4663305487Sdavidcs read_addr = queueEntry->read_addr; 4664305487Sdavidcs 4665305487Sdavidcs for (k = 0; k < read_cnt; k++) { 4666305487Sdavidcs 4667305487Sdavidcs ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1); 4668305487Sdavidcs if (ret) 4669305487Sdavidcs return (0); 4670305487Sdavidcs 4671305487Sdavidcs *data_buff++ = read_value; 4672305487Sdavidcs read_addr += read_stride; 4673305487Sdavidcs } 4674305487Sdavidcs 4675305487Sdavidcs queue_id += queueEntry->queue_id_stride; 4676305487Sdavidcs } 4677305487Sdavidcs 4678305487Sdavidcs return (loop_cnt * (read_cnt * sizeof(uint32_t))); 4679305487Sdavidcs} 4680305487Sdavidcs 4681305487Sdavidcs/* 4682305487Sdavidcs * Handling control entries. 
4683305487Sdavidcs */ 4684305487Sdavidcs 4685305487Sdavidcsstatic uint32_t 4686305487Sdavidcsql_cntrl(qla_host_t *ha, 4687305487Sdavidcs ql_minidump_template_hdr_t *template_hdr, 4688305487Sdavidcs ql_minidump_entry_cntrl_t *crbEntry) 4689305487Sdavidcs{ 4690305487Sdavidcs int ret; 4691305487Sdavidcs int count; 4692305487Sdavidcs uint32_t opcode, read_value, addr, entry_addr; 4693305487Sdavidcs long timeout; 4694305487Sdavidcs 4695305487Sdavidcs entry_addr = crbEntry->addr; 4696305487Sdavidcs 4697305487Sdavidcs for (count = 0; count < crbEntry->op_count; count++) { 4698305487Sdavidcs opcode = crbEntry->opcode; 4699305487Sdavidcs 4700305487Sdavidcs if (opcode & QL_DBG_OPCODE_WR) { 4701305487Sdavidcs 4702305487Sdavidcs ret = ql_rdwr_indreg32(ha, entry_addr, 4703305487Sdavidcs &crbEntry->value_1, 0); 4704305487Sdavidcs if (ret) 4705305487Sdavidcs return (0); 4706305487Sdavidcs 4707305487Sdavidcs opcode &= ~QL_DBG_OPCODE_WR; 4708305487Sdavidcs } 4709305487Sdavidcs 4710305487Sdavidcs if (opcode & QL_DBG_OPCODE_RW) { 4711305487Sdavidcs 4712305487Sdavidcs ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); 4713305487Sdavidcs if (ret) 4714305487Sdavidcs return (0); 4715305487Sdavidcs 4716305487Sdavidcs ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); 4717305487Sdavidcs if (ret) 4718305487Sdavidcs return (0); 4719305487Sdavidcs 4720305487Sdavidcs opcode &= ~QL_DBG_OPCODE_RW; 4721305487Sdavidcs } 4722305487Sdavidcs 4723305487Sdavidcs if (opcode & QL_DBG_OPCODE_AND) { 4724305487Sdavidcs 4725305487Sdavidcs ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); 4726305487Sdavidcs if (ret) 4727305487Sdavidcs return (0); 4728305487Sdavidcs 4729305487Sdavidcs read_value &= crbEntry->value_2; 4730305487Sdavidcs opcode &= ~QL_DBG_OPCODE_AND; 4731305487Sdavidcs 4732305487Sdavidcs if (opcode & QL_DBG_OPCODE_OR) { 4733305487Sdavidcs read_value |= crbEntry->value_3; 4734305487Sdavidcs opcode &= ~QL_DBG_OPCODE_OR; 4735305487Sdavidcs } 4736305487Sdavidcs 4737305487Sdavidcs ret 
= ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); 4738305487Sdavidcs if (ret) 4739305487Sdavidcs return (0); 4740305487Sdavidcs } 4741305487Sdavidcs 4742305487Sdavidcs if (opcode & QL_DBG_OPCODE_OR) { 4743305487Sdavidcs 4744305487Sdavidcs ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1); 4745305487Sdavidcs if (ret) 4746305487Sdavidcs return (0); 4747305487Sdavidcs 4748305487Sdavidcs read_value |= crbEntry->value_3; 4749305487Sdavidcs 4750305487Sdavidcs ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0); 4751305487Sdavidcs if (ret) 4752305487Sdavidcs return (0); 4753305487Sdavidcs 4754305487Sdavidcs opcode &= ~QL_DBG_OPCODE_OR; 4755305487Sdavidcs } 4756305487Sdavidcs 4757305487Sdavidcs if (opcode & QL_DBG_OPCODE_POLL) { 4758305487Sdavidcs 4759305487Sdavidcs opcode &= ~QL_DBG_OPCODE_POLL; 4760305487Sdavidcs timeout = crbEntry->poll_timeout; 4761305487Sdavidcs addr = entry_addr; 4762305487Sdavidcs 4763305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); 4764305487Sdavidcs if (ret) 4765305487Sdavidcs return (0); 4766305487Sdavidcs 4767305487Sdavidcs while ((read_value & crbEntry->value_2) 4768305487Sdavidcs != crbEntry->value_1) { 4769305487Sdavidcs 4770305487Sdavidcs if (timeout) { 4771305487Sdavidcs qla_mdelay(__func__, 1); 4772305487Sdavidcs timeout--; 4773305487Sdavidcs } else 4774305487Sdavidcs break; 4775305487Sdavidcs 4776305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr, 4777305487Sdavidcs &read_value, 1); 4778305487Sdavidcs if (ret) 4779305487Sdavidcs return (0); 4780305487Sdavidcs } 4781305487Sdavidcs 4782305487Sdavidcs if (!timeout) { 4783305487Sdavidcs /* 4784305487Sdavidcs * Report timeout error. 4785305487Sdavidcs * core dump capture failed 4786305487Sdavidcs * Skip remaining entries. 4787305487Sdavidcs * Write buffer out to file 4788305487Sdavidcs * Use driver specific fields in template header 4789305487Sdavidcs * to report this error. 
4790305487Sdavidcs */ 4791305487Sdavidcs return (-1); 4792305487Sdavidcs } 4793305487Sdavidcs } 4794305487Sdavidcs 4795305487Sdavidcs if (opcode & QL_DBG_OPCODE_RDSTATE) { 4796305487Sdavidcs /* 4797305487Sdavidcs * decide which address to use. 4798305487Sdavidcs */ 4799305487Sdavidcs if (crbEntry->state_index_a) { 4800305487Sdavidcs addr = template_hdr->saved_state_array[ 4801305487Sdavidcs crbEntry-> state_index_a]; 4802305487Sdavidcs } else { 4803305487Sdavidcs addr = entry_addr; 4804305487Sdavidcs } 4805305487Sdavidcs 4806305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr, &read_value, 1); 4807305487Sdavidcs if (ret) 4808305487Sdavidcs return (0); 4809305487Sdavidcs 4810305487Sdavidcs template_hdr->saved_state_array[crbEntry->state_index_v] 4811305487Sdavidcs = read_value; 4812305487Sdavidcs opcode &= ~QL_DBG_OPCODE_RDSTATE; 4813305487Sdavidcs } 4814305487Sdavidcs 4815305487Sdavidcs if (opcode & QL_DBG_OPCODE_WRSTATE) { 4816305487Sdavidcs /* 4817305487Sdavidcs * decide which value to use. 4818305487Sdavidcs */ 4819305487Sdavidcs if (crbEntry->state_index_v) { 4820305487Sdavidcs read_value = template_hdr->saved_state_array[ 4821305487Sdavidcs crbEntry->state_index_v]; 4822305487Sdavidcs } else { 4823305487Sdavidcs read_value = crbEntry->value_1; 4824305487Sdavidcs } 4825305487Sdavidcs /* 4826305487Sdavidcs * decide which address to use. 
4827305487Sdavidcs */ 4828305487Sdavidcs if (crbEntry->state_index_a) { 4829305487Sdavidcs addr = template_hdr->saved_state_array[ 4830305487Sdavidcs crbEntry-> state_index_a]; 4831305487Sdavidcs } else { 4832305487Sdavidcs addr = entry_addr; 4833305487Sdavidcs } 4834305487Sdavidcs 4835305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr, &read_value, 0); 4836305487Sdavidcs if (ret) 4837305487Sdavidcs return (0); 4838305487Sdavidcs 4839305487Sdavidcs opcode &= ~QL_DBG_OPCODE_WRSTATE; 4840305487Sdavidcs } 4841305487Sdavidcs 4842305487Sdavidcs if (opcode & QL_DBG_OPCODE_MDSTATE) { 4843305487Sdavidcs /* Read value from saved state using index */ 4844305487Sdavidcs read_value = template_hdr->saved_state_array[ 4845305487Sdavidcs crbEntry->state_index_v]; 4846305487Sdavidcs 4847305487Sdavidcs read_value <<= crbEntry->shl; /*Shift left operation */ 4848305487Sdavidcs read_value >>= crbEntry->shr; /*Shift right operation */ 4849305487Sdavidcs 4850305487Sdavidcs if (crbEntry->value_2) { 4851305487Sdavidcs /* check if AND mask is provided */ 4852305487Sdavidcs read_value &= crbEntry->value_2; 4853305487Sdavidcs } 4854305487Sdavidcs 4855305487Sdavidcs read_value |= crbEntry->value_3; /* OR operation */ 4856305487Sdavidcs read_value += crbEntry->value_1; /* increment op */ 4857305487Sdavidcs 4858305487Sdavidcs /* Write value back to state area. */ 4859305487Sdavidcs 4860305487Sdavidcs template_hdr->saved_state_array[crbEntry->state_index_v] 4861305487Sdavidcs = read_value; 4862305487Sdavidcs opcode &= ~QL_DBG_OPCODE_MDSTATE; 4863305487Sdavidcs } 4864305487Sdavidcs 4865305487Sdavidcs entry_addr += crbEntry->addr_stride; 4866305487Sdavidcs } 4867305487Sdavidcs 4868305487Sdavidcs return (0); 4869305487Sdavidcs} 4870305487Sdavidcs 4871305487Sdavidcs/* 4872305487Sdavidcs * Handling rd poll entry. 
4873305487Sdavidcs */ 4874305487Sdavidcs 4875305487Sdavidcsstatic uint32_t 4876305487Sdavidcsql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry, 4877305487Sdavidcs uint32_t *data_buff) 4878305487Sdavidcs{ 4879305487Sdavidcs int ret; 4880305487Sdavidcs int loop_cnt; 4881305487Sdavidcs uint32_t op_count, select_addr, select_value_stride, select_value; 4882305487Sdavidcs uint32_t read_addr, poll, mask, data_size, data; 4883305487Sdavidcs uint32_t wait_count = 0; 4884305487Sdavidcs 4885305487Sdavidcs select_addr = entry->select_addr; 4886305487Sdavidcs read_addr = entry->read_addr; 4887305487Sdavidcs select_value = entry->select_value; 4888305487Sdavidcs select_value_stride = entry->select_value_stride; 4889305487Sdavidcs op_count = entry->op_count; 4890305487Sdavidcs poll = entry->poll; 4891305487Sdavidcs mask = entry->mask; 4892305487Sdavidcs data_size = entry->data_size; 4893305487Sdavidcs 4894305487Sdavidcs for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { 4895305487Sdavidcs 4896305487Sdavidcs ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0); 4897305487Sdavidcs if (ret) 4898305487Sdavidcs return (0); 4899305487Sdavidcs 4900305487Sdavidcs wait_count = 0; 4901305487Sdavidcs 4902305487Sdavidcs while (wait_count < poll) { 4903305487Sdavidcs 4904305487Sdavidcs uint32_t temp; 4905305487Sdavidcs 4906305487Sdavidcs ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1); 4907305487Sdavidcs if (ret) 4908305487Sdavidcs return (0); 4909305487Sdavidcs 4910305487Sdavidcs if ( (temp & mask) != 0 ) { 4911305487Sdavidcs break; 4912305487Sdavidcs } 4913305487Sdavidcs wait_count++; 4914305487Sdavidcs } 4915305487Sdavidcs 4916305487Sdavidcs if (wait_count == poll) { 4917305487Sdavidcs device_printf(ha->pci_dev, 4918305487Sdavidcs "%s: Error in processing entry\n", __func__); 4919305487Sdavidcs device_printf(ha->pci_dev, 4920305487Sdavidcs "%s: wait_count <0x%x> poll <0x%x>\n", 4921305487Sdavidcs __func__, wait_count, poll); 4922305487Sdavidcs return 0; 
4923305487Sdavidcs } 4924305487Sdavidcs 4925305487Sdavidcs ret = ql_rdwr_indreg32(ha, read_addr, &data, 1); 4926305487Sdavidcs if (ret) 4927305487Sdavidcs return (0); 4928305487Sdavidcs 4929305487Sdavidcs *data_buff++ = select_value; 4930305487Sdavidcs *data_buff++ = data; 4931305487Sdavidcs select_value = select_value + select_value_stride; 4932305487Sdavidcs } 4933305487Sdavidcs 4934305487Sdavidcs /* 4935305487Sdavidcs * for testing purpose we return amount of data written 4936305487Sdavidcs */ 4937305487Sdavidcs return (loop_cnt * (2 * sizeof(uint32_t))); 4938305487Sdavidcs} 4939305487Sdavidcs 4940305487Sdavidcs 4941305487Sdavidcs/* 4942305487Sdavidcs * Handling rd modify write poll entry. 4943305487Sdavidcs */ 4944305487Sdavidcs 4945305487Sdavidcsstatic uint32_t 4946305487Sdavidcsql_pollrd_modify_write(qla_host_t *ha, 4947305487Sdavidcs ql_minidump_entry_rd_modify_wr_with_poll_t *entry, 4948305487Sdavidcs uint32_t *data_buff) 4949305487Sdavidcs{ 4950305487Sdavidcs int ret; 4951305487Sdavidcs uint32_t addr_1, addr_2, value_1, value_2, data; 4952305487Sdavidcs uint32_t poll, mask, data_size, modify_mask; 4953305487Sdavidcs uint32_t wait_count = 0; 4954305487Sdavidcs 4955305487Sdavidcs addr_1 = entry->addr_1; 4956305487Sdavidcs addr_2 = entry->addr_2; 4957305487Sdavidcs value_1 = entry->value_1; 4958305487Sdavidcs value_2 = entry->value_2; 4959305487Sdavidcs 4960305487Sdavidcs poll = entry->poll; 4961305487Sdavidcs mask = entry->mask; 4962305487Sdavidcs modify_mask = entry->modify_mask; 4963305487Sdavidcs data_size = entry->data_size; 4964305487Sdavidcs 4965305487Sdavidcs 4966305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0); 4967305487Sdavidcs if (ret) 4968305487Sdavidcs return (0); 4969305487Sdavidcs 4970305487Sdavidcs wait_count = 0; 4971305487Sdavidcs while (wait_count < poll) { 4972305487Sdavidcs 4973305487Sdavidcs uint32_t temp; 4974305487Sdavidcs 4975305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1); 4976305487Sdavidcs if (ret) 
4977305487Sdavidcs return (0); 4978305487Sdavidcs 4979305487Sdavidcs if ( (temp & mask) != 0 ) { 4980305487Sdavidcs break; 4981305487Sdavidcs } 4982305487Sdavidcs wait_count++; 4983305487Sdavidcs } 4984305487Sdavidcs 4985305487Sdavidcs if (wait_count == poll) { 4986305487Sdavidcs device_printf(ha->pci_dev, "%s Error in processing entry\n", 4987305487Sdavidcs __func__); 4988305487Sdavidcs } else { 4989305487Sdavidcs 4990305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr_2, &data, 1); 4991305487Sdavidcs if (ret) 4992305487Sdavidcs return (0); 4993305487Sdavidcs 4994305487Sdavidcs data = (data & modify_mask); 4995305487Sdavidcs 4996305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr_2, &data, 0); 4997305487Sdavidcs if (ret) 4998305487Sdavidcs return (0); 4999305487Sdavidcs 5000305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0); 5001305487Sdavidcs if (ret) 5002305487Sdavidcs return (0); 5003305487Sdavidcs 5004305487Sdavidcs /* Poll again */ 5005305487Sdavidcs wait_count = 0; 5006305487Sdavidcs while (wait_count < poll) { 5007305487Sdavidcs 5008305487Sdavidcs uint32_t temp; 5009305487Sdavidcs 5010305487Sdavidcs ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1); 5011305487Sdavidcs if (ret) 5012305487Sdavidcs return (0); 5013305487Sdavidcs 5014305487Sdavidcs if ( (temp & mask) != 0 ) { 5015305487Sdavidcs break; 5016305487Sdavidcs } 5017305487Sdavidcs wait_count++; 5018305487Sdavidcs } 5019305487Sdavidcs *data_buff++ = addr_2; 5020305487Sdavidcs *data_buff++ = data; 5021305487Sdavidcs } 5022305487Sdavidcs 5023305487Sdavidcs /* 5024305487Sdavidcs * for testing purpose we return amount of data written 5025305487Sdavidcs */ 5026305487Sdavidcs return (2 * sizeof(uint32_t)); 5027305487Sdavidcs} 5028305487Sdavidcs 5029305487Sdavidcs 5030