ql_hw.c revision 330555
1295367Sdes/* 2285031Sdes * Copyright (c) 2013-2016 Qlogic Corporation 3285031Sdes * All rights reserved. 4285031Sdes * 5285031Sdes * Redistribution and use in source and binary forms, with or without 6285031Sdes * modification, are permitted provided that the following conditions 7285031Sdes * are met: 8285031Sdes * 9285031Sdes * 1. Redistributions of source code must retain the above copyright 10285031Sdes * notice, this list of conditions and the following disclaimer. 11285031Sdes * 2. Redistributions in binary form must reproduce the above copyright 12285031Sdes * notice, this list of conditions and the following disclaimer in the 13285031Sdes * documentation and/or other materials provided with the distribution. 14285031Sdes * 15285031Sdes * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16285031Sdes * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17285031Sdes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18285031Sdes * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19285031Sdes * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20285031Sdes * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21285031Sdes * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22285031Sdes * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23285031Sdes * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24285031Sdes * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25285031Sdes * POSSIBILITY OF SUCH DAMAGE. 26285031Sdes */ 27285031Sdes 28285031Sdes/* 29285031Sdes * File: ql_hw.c 30285031Sdes * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
31285031Sdes * Content: Contains Hardware dependent functions 32285031Sdes */ 33285031Sdes 34285031Sdes#include <sys/cdefs.h> 35285031Sdes__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgbe/ql_hw.c 330555 2018-03-06 23:12:32Z davidcs $"); 36285031Sdes 37285031Sdes#include "ql_os.h" 38285031Sdes#include "ql_hw.h" 39285031Sdes#include "ql_def.h" 40285031Sdes#include "ql_inline.h" 41285031Sdes#include "ql_ver.h" 42285031Sdes#include "ql_glbl.h" 43285031Sdes#include "ql_dbg.h" 44285031Sdes#include "ql_minidump.h" 45285031Sdes 46285031Sdes/* 47285031Sdes * Static Functions 48285031Sdes */ 49285031Sdes 50285031Sdesstatic void qla_del_rcv_cntxt(qla_host_t *ha); 51285031Sdesstatic int qla_init_rcv_cntxt(qla_host_t *ha); 52285031Sdesstatic int qla_del_xmt_cntxt(qla_host_t *ha); 53285031Sdesstatic int qla_init_xmt_cntxt(qla_host_t *ha); 54285031Sdesstatic int qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox, 55285031Sdes uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause); 56285031Sdesstatic int qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, 57285031Sdes uint32_t num_intrs, uint32_t create); 58285031Sdesstatic int qla_config_rss(qla_host_t *ha, uint16_t cntxt_id); 59285031Sdesstatic int qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, 60285031Sdes int tenable, int rcv); 61285031Sdesstatic int qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode); 62285031Sdesstatic int qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id); 63285031Sdes 64285031Sdesstatic int qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, 65285031Sdes uint8_t *hdr); 66285031Sdesstatic int qla_hw_add_all_mcast(qla_host_t *ha); 67285031Sdesstatic int qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds); 68285031Sdes 69285031Sdesstatic int qla_init_nic_func(qla_host_t *ha); 70285031Sdesstatic int qla_stop_nic_func(qla_host_t *ha); 71285031Sdesstatic int qla_query_fw_dcbx_caps(qla_host_t *ha); 72285031Sdesstatic int qla_set_port_config(qla_host_t 
*ha, uint32_t cfg_bits); 73285031Sdesstatic int qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits); 74285031Sdesstatic int qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode); 75285031Sdesstatic int qla_get_cam_search_mode(qla_host_t *ha); 76285031Sdes 77285031Sdesstatic void ql_minidump_free(qla_host_t *ha); 78285031Sdes 79285031Sdes#ifdef QL_DBG 80285031Sdes 81285031Sdesstatic void 82285031Sdesqla_stop_pegs(qla_host_t *ha) 83285031Sdes{ 84285031Sdes uint32_t val = 1; 85285031Sdes 86285031Sdes ql_rdwr_indreg32(ha, Q8_CRB_PEG_0, &val, 0); 87285031Sdes ql_rdwr_indreg32(ha, Q8_CRB_PEG_1, &val, 0); 88295367Sdes ql_rdwr_indreg32(ha, Q8_CRB_PEG_2, &val, 0); 89285031Sdes ql_rdwr_indreg32(ha, Q8_CRB_PEG_3, &val, 0); 90285031Sdes ql_rdwr_indreg32(ha, Q8_CRB_PEG_4, &val, 0); 91285031Sdes device_printf(ha->pci_dev, "%s PEGS HALTED!!!!!\n", __func__); 92285031Sdes} 93285031Sdes 94285031Sdesstatic int 95285031Sdesqla_sysctl_stop_pegs(SYSCTL_HANDLER_ARGS) 96285031Sdes{ 97285031Sdes int err, ret = 0; 98285031Sdes qla_host_t *ha; 99285031Sdes 100285031Sdes err = sysctl_handle_int(oidp, &ret, 0, req); 101285031Sdes 102295367Sdes 103295367Sdes if (err || !req->newptr) 104295367Sdes return (err); 105285031Sdes 106285031Sdes if (ret == 1) { 107285031Sdes ha = (qla_host_t *)arg1; 108285031Sdes if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { 109285031Sdes qla_stop_pegs(ha); 110285031Sdes QLA_UNLOCK(ha, __func__); 111285031Sdes } 112285031Sdes } 113285031Sdes 114285031Sdes return err; 115285031Sdes} 116285031Sdes#endif /* #ifdef QL_DBG */ 117285031Sdes 118285031Sdesstatic int 119285031Sdesqla_validate_set_port_cfg_bit(uint32_t bits) 120285031Sdes{ 121285031Sdes if ((bits & 0xF) > 1) 122285031Sdes return (-1); 123285031Sdes 124285031Sdes if (((bits >> 4) & 0xF) > 2) 125285031Sdes return (-1); 126285031Sdes 127285031Sdes if (((bits >> 8) & 0xF) > 2) 128285031Sdes return (-1); 129285031Sdes 130285031Sdes return (0); 131285031Sdes} 132285031Sdes 
133285031Sdesstatic int 134285031Sdesqla_sysctl_port_cfg(SYSCTL_HANDLER_ARGS) 135285031Sdes{ 136285031Sdes int err, ret = 0; 137285031Sdes qla_host_t *ha; 138285031Sdes uint32_t cfg_bits; 139285031Sdes 140285031Sdes err = sysctl_handle_int(oidp, &ret, 0, req); 141285031Sdes 142285031Sdes if (err || !req->newptr) 143285031Sdes return (err); 144285031Sdes 145285031Sdes ha = (qla_host_t *)arg1; 146285031Sdes 147285031Sdes if ((qla_validate_set_port_cfg_bit((uint32_t)ret) == 0)) { 148285031Sdes 149285031Sdes err = qla_get_port_config(ha, &cfg_bits); 150285031Sdes 151285031Sdes if (err) 152285031Sdes goto qla_sysctl_set_port_cfg_exit; 153285031Sdes 154285031Sdes if (ret & 0x1) { 155285031Sdes cfg_bits |= Q8_PORT_CFG_BITS_DCBX_ENABLE; 156285031Sdes } else { 157285031Sdes cfg_bits &= ~Q8_PORT_CFG_BITS_DCBX_ENABLE; 158285031Sdes } 159285031Sdes 160285031Sdes ret = ret >> 4; 161285031Sdes cfg_bits &= ~Q8_PORT_CFG_BITS_PAUSE_CFG_MASK; 162285031Sdes 163285031Sdes if ((ret & 0xF) == 0) { 164285031Sdes cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_DISABLED; 165285031Sdes } else if ((ret & 0xF) == 1){ 166285031Sdes cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_STD; 167285031Sdes } else { 168285031Sdes cfg_bits |= Q8_PORT_CFG_BITS_PAUSE_PPM; 169285031Sdes } 170285031Sdes 171285031Sdes ret = ret >> 4; 172285031Sdes cfg_bits &= ~Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK; 173285031Sdes 174295367Sdes if (ret == 0) { 175285031Sdes cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT_RCV; 176285031Sdes } else if (ret == 1){ 177285031Sdes cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_XMT; 178285031Sdes } else { 179285031Sdes cfg_bits |= Q8_PORT_CFG_BITS_STDPAUSE_RCV; 180285031Sdes } 181285031Sdes 182285031Sdes if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { 183285031Sdes err = qla_set_port_config(ha, cfg_bits); 184285031Sdes QLA_UNLOCK(ha, __func__); 185285031Sdes } else { 186285031Sdes device_printf(ha->pci_dev, "%s: failed\n", __func__); 187285031Sdes } 188285031Sdes } else { 189285031Sdes if (QLA_LOCK(ha, 
__func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { 190285031Sdes err = qla_get_port_config(ha, &cfg_bits); 191285031Sdes QLA_UNLOCK(ha, __func__); 192285031Sdes } else { 193285031Sdes device_printf(ha->pci_dev, "%s: failed\n", __func__); 194285031Sdes } 195285031Sdes } 196285031Sdes 197285031Sdesqla_sysctl_set_port_cfg_exit: 198285031Sdes return err; 199285031Sdes} 200285031Sdes 201285031Sdesstatic int 202285031Sdesqla_sysctl_set_cam_search_mode(SYSCTL_HANDLER_ARGS) 203{ 204 int err, ret = 0; 205 qla_host_t *ha; 206 207 err = sysctl_handle_int(oidp, &ret, 0, req); 208 209 if (err || !req->newptr) 210 return (err); 211 212 ha = (qla_host_t *)arg1; 213 214 if ((ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_INTERNAL) || 215 (ret == Q8_HW_CONFIG_CAM_SEARCH_MODE_AUTO)) { 216 217 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { 218 err = qla_set_cam_search_mode(ha, (uint32_t)ret); 219 QLA_UNLOCK(ha, __func__); 220 } else { 221 device_printf(ha->pci_dev, "%s: failed\n", __func__); 222 } 223 224 } else { 225 device_printf(ha->pci_dev, "%s: ret = %d\n", __func__, ret); 226 } 227 228 return (err); 229} 230 231static int 232qla_sysctl_get_cam_search_mode(SYSCTL_HANDLER_ARGS) 233{ 234 int err, ret = 0; 235 qla_host_t *ha; 236 237 err = sysctl_handle_int(oidp, &ret, 0, req); 238 239 if (err || !req->newptr) 240 return (err); 241 242 ha = (qla_host_t *)arg1; 243 if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) == 0) { 244 err = qla_get_cam_search_mode(ha); 245 QLA_UNLOCK(ha, __func__); 246 } else { 247 device_printf(ha->pci_dev, "%s: failed\n", __func__); 248 } 249 250 return (err); 251} 252 253static void 254qlnx_add_hw_mac_stats_sysctls(qla_host_t *ha) 255{ 256 struct sysctl_ctx_list *ctx; 257 struct sysctl_oid_list *children; 258 struct sysctl_oid *ctx_oid; 259 260 ctx = device_get_sysctl_ctx(ha->pci_dev); 261 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 262 263 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_mac", 264 
CTLFLAG_RD, NULL, "stats_hw_mac"); 265 children = SYSCTL_CHILDREN(ctx_oid); 266 267 SYSCTL_ADD_QUAD(ctx, children, 268 OID_AUTO, "xmt_frames", 269 CTLFLAG_RD, &ha->hw.mac.xmt_frames, 270 "xmt_frames"); 271 272 SYSCTL_ADD_QUAD(ctx, children, 273 OID_AUTO, "xmt_bytes", 274 CTLFLAG_RD, &ha->hw.mac.xmt_bytes, 275 "xmt_frames"); 276 277 SYSCTL_ADD_QUAD(ctx, children, 278 OID_AUTO, "xmt_mcast_pkts", 279 CTLFLAG_RD, &ha->hw.mac.xmt_mcast_pkts, 280 "xmt_mcast_pkts"); 281 282 SYSCTL_ADD_QUAD(ctx, children, 283 OID_AUTO, "xmt_bcast_pkts", 284 CTLFLAG_RD, &ha->hw.mac.xmt_bcast_pkts, 285 "xmt_bcast_pkts"); 286 287 SYSCTL_ADD_QUAD(ctx, children, 288 OID_AUTO, "xmt_pause_frames", 289 CTLFLAG_RD, &ha->hw.mac.xmt_pause_frames, 290 "xmt_pause_frames"); 291 292 SYSCTL_ADD_QUAD(ctx, children, 293 OID_AUTO, "xmt_cntrl_pkts", 294 CTLFLAG_RD, &ha->hw.mac.xmt_cntrl_pkts, 295 "xmt_cntrl_pkts"); 296 297 SYSCTL_ADD_QUAD(ctx, children, 298 OID_AUTO, "xmt_pkt_lt_64bytes", 299 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_64bytes, 300 "xmt_pkt_lt_64bytes"); 301 302 SYSCTL_ADD_QUAD(ctx, children, 303 OID_AUTO, "xmt_pkt_lt_127bytes", 304 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_127bytes, 305 "xmt_pkt_lt_127bytes"); 306 307 SYSCTL_ADD_QUAD(ctx, children, 308 OID_AUTO, "xmt_pkt_lt_255bytes", 309 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_255bytes, 310 "xmt_pkt_lt_255bytes"); 311 312 SYSCTL_ADD_QUAD(ctx, children, 313 OID_AUTO, "xmt_pkt_lt_511bytes", 314 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_511bytes, 315 "xmt_pkt_lt_511bytes"); 316 317 SYSCTL_ADD_QUAD(ctx, children, 318 OID_AUTO, "xmt_pkt_lt_1023bytes", 319 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1023bytes, 320 "xmt_pkt_lt_1023bytes"); 321 322 SYSCTL_ADD_QUAD(ctx, children, 323 OID_AUTO, "xmt_pkt_lt_1518bytes", 324 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_lt_1518bytes, 325 "xmt_pkt_lt_1518bytes"); 326 327 SYSCTL_ADD_QUAD(ctx, children, 328 OID_AUTO, "xmt_pkt_gt_1518bytes", 329 CTLFLAG_RD, &ha->hw.mac.xmt_pkt_gt_1518bytes, 330 "xmt_pkt_gt_1518bytes"); 331 332 SYSCTL_ADD_QUAD(ctx, children, 
333 OID_AUTO, "rcv_frames", 334 CTLFLAG_RD, &ha->hw.mac.rcv_frames, 335 "rcv_frames"); 336 337 SYSCTL_ADD_QUAD(ctx, children, 338 OID_AUTO, "rcv_bytes", 339 CTLFLAG_RD, &ha->hw.mac.rcv_bytes, 340 "rcv_bytes"); 341 342 SYSCTL_ADD_QUAD(ctx, children, 343 OID_AUTO, "rcv_mcast_pkts", 344 CTLFLAG_RD, &ha->hw.mac.rcv_mcast_pkts, 345 "rcv_mcast_pkts"); 346 347 SYSCTL_ADD_QUAD(ctx, children, 348 OID_AUTO, "rcv_bcast_pkts", 349 CTLFLAG_RD, &ha->hw.mac.rcv_bcast_pkts, 350 "rcv_bcast_pkts"); 351 352 SYSCTL_ADD_QUAD(ctx, children, 353 OID_AUTO, "rcv_pause_frames", 354 CTLFLAG_RD, &ha->hw.mac.rcv_pause_frames, 355 "rcv_pause_frames"); 356 357 SYSCTL_ADD_QUAD(ctx, children, 358 OID_AUTO, "rcv_cntrl_pkts", 359 CTLFLAG_RD, &ha->hw.mac.rcv_cntrl_pkts, 360 "rcv_cntrl_pkts"); 361 362 SYSCTL_ADD_QUAD(ctx, children, 363 OID_AUTO, "rcv_pkt_lt_64bytes", 364 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_64bytes, 365 "rcv_pkt_lt_64bytes"); 366 367 SYSCTL_ADD_QUAD(ctx, children, 368 OID_AUTO, "rcv_pkt_lt_127bytes", 369 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_127bytes, 370 "rcv_pkt_lt_127bytes"); 371 372 SYSCTL_ADD_QUAD(ctx, children, 373 OID_AUTO, "rcv_pkt_lt_255bytes", 374 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_255bytes, 375 "rcv_pkt_lt_255bytes"); 376 377 SYSCTL_ADD_QUAD(ctx, children, 378 OID_AUTO, "rcv_pkt_lt_511bytes", 379 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_511bytes, 380 "rcv_pkt_lt_511bytes"); 381 382 SYSCTL_ADD_QUAD(ctx, children, 383 OID_AUTO, "rcv_pkt_lt_1023bytes", 384 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1023bytes, 385 "rcv_pkt_lt_1023bytes"); 386 387 SYSCTL_ADD_QUAD(ctx, children, 388 OID_AUTO, "rcv_pkt_lt_1518bytes", 389 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_lt_1518bytes, 390 "rcv_pkt_lt_1518bytes"); 391 392 SYSCTL_ADD_QUAD(ctx, children, 393 OID_AUTO, "rcv_pkt_gt_1518bytes", 394 CTLFLAG_RD, &ha->hw.mac.rcv_pkt_gt_1518bytes, 395 "rcv_pkt_gt_1518bytes"); 396 397 SYSCTL_ADD_QUAD(ctx, children, 398 OID_AUTO, "rcv_len_error", 399 CTLFLAG_RD, &ha->hw.mac.rcv_len_error, 400 "rcv_len_error"); 401 402 
SYSCTL_ADD_QUAD(ctx, children, 403 OID_AUTO, "rcv_len_small", 404 CTLFLAG_RD, &ha->hw.mac.rcv_len_small, 405 "rcv_len_small"); 406 407 SYSCTL_ADD_QUAD(ctx, children, 408 OID_AUTO, "rcv_len_large", 409 CTLFLAG_RD, &ha->hw.mac.rcv_len_large, 410 "rcv_len_large"); 411 412 SYSCTL_ADD_QUAD(ctx, children, 413 OID_AUTO, "rcv_jabber", 414 CTLFLAG_RD, &ha->hw.mac.rcv_jabber, 415 "rcv_jabber"); 416 417 SYSCTL_ADD_QUAD(ctx, children, 418 OID_AUTO, "rcv_dropped", 419 CTLFLAG_RD, &ha->hw.mac.rcv_dropped, 420 "rcv_dropped"); 421 422 SYSCTL_ADD_QUAD(ctx, children, 423 OID_AUTO, "fcs_error", 424 CTLFLAG_RD, &ha->hw.mac.fcs_error, 425 "fcs_error"); 426 427 SYSCTL_ADD_QUAD(ctx, children, 428 OID_AUTO, "align_error", 429 CTLFLAG_RD, &ha->hw.mac.align_error, 430 "align_error"); 431 432 SYSCTL_ADD_QUAD(ctx, children, 433 OID_AUTO, "eswitched_frames", 434 CTLFLAG_RD, &ha->hw.mac.eswitched_frames, 435 "eswitched_frames"); 436 437 SYSCTL_ADD_QUAD(ctx, children, 438 OID_AUTO, "eswitched_bytes", 439 CTLFLAG_RD, &ha->hw.mac.eswitched_bytes, 440 "eswitched_bytes"); 441 442 SYSCTL_ADD_QUAD(ctx, children, 443 OID_AUTO, "eswitched_mcast_frames", 444 CTLFLAG_RD, &ha->hw.mac.eswitched_mcast_frames, 445 "eswitched_mcast_frames"); 446 447 SYSCTL_ADD_QUAD(ctx, children, 448 OID_AUTO, "eswitched_bcast_frames", 449 CTLFLAG_RD, &ha->hw.mac.eswitched_bcast_frames, 450 "eswitched_bcast_frames"); 451 452 SYSCTL_ADD_QUAD(ctx, children, 453 OID_AUTO, "eswitched_ucast_frames", 454 CTLFLAG_RD, &ha->hw.mac.eswitched_ucast_frames, 455 "eswitched_ucast_frames"); 456 457 SYSCTL_ADD_QUAD(ctx, children, 458 OID_AUTO, "eswitched_err_free_frames", 459 CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_frames, 460 "eswitched_err_free_frames"); 461 462 SYSCTL_ADD_QUAD(ctx, children, 463 OID_AUTO, "eswitched_err_free_bytes", 464 CTLFLAG_RD, &ha->hw.mac.eswitched_err_free_bytes, 465 "eswitched_err_free_bytes"); 466 467 return; 468} 469 470static void 471qlnx_add_hw_rcv_stats_sysctls(qla_host_t *ha) 472{ 473 struct sysctl_ctx_list 
*ctx; 474 struct sysctl_oid_list *children; 475 struct sysctl_oid *ctx_oid; 476 477 ctx = device_get_sysctl_ctx(ha->pci_dev); 478 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 479 480 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_rcv", 481 CTLFLAG_RD, NULL, "stats_hw_rcv"); 482 children = SYSCTL_CHILDREN(ctx_oid); 483 484 SYSCTL_ADD_QUAD(ctx, children, 485 OID_AUTO, "total_bytes", 486 CTLFLAG_RD, &ha->hw.rcv.total_bytes, 487 "total_bytes"); 488 489 SYSCTL_ADD_QUAD(ctx, children, 490 OID_AUTO, "total_pkts", 491 CTLFLAG_RD, &ha->hw.rcv.total_pkts, 492 "total_pkts"); 493 494 SYSCTL_ADD_QUAD(ctx, children, 495 OID_AUTO, "lro_pkt_count", 496 CTLFLAG_RD, &ha->hw.rcv.lro_pkt_count, 497 "lro_pkt_count"); 498 499 SYSCTL_ADD_QUAD(ctx, children, 500 OID_AUTO, "sw_pkt_count", 501 CTLFLAG_RD, &ha->hw.rcv.sw_pkt_count, 502 "sw_pkt_count"); 503 504 SYSCTL_ADD_QUAD(ctx, children, 505 OID_AUTO, "ip_chksum_err", 506 CTLFLAG_RD, &ha->hw.rcv.ip_chksum_err, 507 "ip_chksum_err"); 508 509 SYSCTL_ADD_QUAD(ctx, children, 510 OID_AUTO, "pkts_wo_acntxts", 511 CTLFLAG_RD, &ha->hw.rcv.pkts_wo_acntxts, 512 "pkts_wo_acntxts"); 513 514 SYSCTL_ADD_QUAD(ctx, children, 515 OID_AUTO, "pkts_dropped_no_sds_card", 516 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_card, 517 "pkts_dropped_no_sds_card"); 518 519 SYSCTL_ADD_QUAD(ctx, children, 520 OID_AUTO, "pkts_dropped_no_sds_host", 521 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_sds_host, 522 "pkts_dropped_no_sds_host"); 523 524 SYSCTL_ADD_QUAD(ctx, children, 525 OID_AUTO, "oversized_pkts", 526 CTLFLAG_RD, &ha->hw.rcv.oversized_pkts, 527 "oversized_pkts"); 528 529 SYSCTL_ADD_QUAD(ctx, children, 530 OID_AUTO, "pkts_dropped_no_rds", 531 CTLFLAG_RD, &ha->hw.rcv.pkts_dropped_no_rds, 532 "pkts_dropped_no_rds"); 533 534 SYSCTL_ADD_QUAD(ctx, children, 535 OID_AUTO, "unxpctd_mcast_pkts", 536 CTLFLAG_RD, &ha->hw.rcv.unxpctd_mcast_pkts, 537 "unxpctd_mcast_pkts"); 538 539 SYSCTL_ADD_QUAD(ctx, children, 540 OID_AUTO, "re1_fbq_error", 541 
CTLFLAG_RD, &ha->hw.rcv.re1_fbq_error, 542 "re1_fbq_error"); 543 544 SYSCTL_ADD_QUAD(ctx, children, 545 OID_AUTO, "invalid_mac_addr", 546 CTLFLAG_RD, &ha->hw.rcv.invalid_mac_addr, 547 "invalid_mac_addr"); 548 549 SYSCTL_ADD_QUAD(ctx, children, 550 OID_AUTO, "rds_prime_trys", 551 CTLFLAG_RD, &ha->hw.rcv.rds_prime_trys, 552 "rds_prime_trys"); 553 554 SYSCTL_ADD_QUAD(ctx, children, 555 OID_AUTO, "rds_prime_success", 556 CTLFLAG_RD, &ha->hw.rcv.rds_prime_success, 557 "rds_prime_success"); 558 559 SYSCTL_ADD_QUAD(ctx, children, 560 OID_AUTO, "lro_flows_added", 561 CTLFLAG_RD, &ha->hw.rcv.lro_flows_added, 562 "lro_flows_added"); 563 564 SYSCTL_ADD_QUAD(ctx, children, 565 OID_AUTO, "lro_flows_deleted", 566 CTLFLAG_RD, &ha->hw.rcv.lro_flows_deleted, 567 "lro_flows_deleted"); 568 569 SYSCTL_ADD_QUAD(ctx, children, 570 OID_AUTO, "lro_flows_active", 571 CTLFLAG_RD, &ha->hw.rcv.lro_flows_active, 572 "lro_flows_active"); 573 574 SYSCTL_ADD_QUAD(ctx, children, 575 OID_AUTO, "pkts_droped_unknown", 576 CTLFLAG_RD, &ha->hw.rcv.pkts_droped_unknown, 577 "pkts_droped_unknown"); 578 579 SYSCTL_ADD_QUAD(ctx, children, 580 OID_AUTO, "pkts_cnt_oversized", 581 CTLFLAG_RD, &ha->hw.rcv.pkts_cnt_oversized, 582 "pkts_cnt_oversized"); 583 584 return; 585} 586 587static void 588qlnx_add_hw_xmt_stats_sysctls(qla_host_t *ha) 589{ 590 struct sysctl_ctx_list *ctx; 591 struct sysctl_oid_list *children; 592 struct sysctl_oid_list *node_children; 593 struct sysctl_oid *ctx_oid; 594 int i; 595 uint8_t name_str[16]; 596 597 ctx = device_get_sysctl_ctx(ha->pci_dev); 598 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 599 600 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_hw_xmt", 601 CTLFLAG_RD, NULL, "stats_hw_xmt"); 602 children = SYSCTL_CHILDREN(ctx_oid); 603 604 for (i = 0; i < ha->hw.num_tx_rings; i++) { 605 606 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 607 snprintf(name_str, sizeof(name_str), "%d", i); 608 609 ctx_oid = SYSCTL_ADD_NODE(ctx, children, 
OID_AUTO, name_str, 610 CTLFLAG_RD, NULL, name_str); 611 node_children = SYSCTL_CHILDREN(ctx_oid); 612 613 /* Tx Related */ 614 615 SYSCTL_ADD_QUAD(ctx, node_children, 616 OID_AUTO, "total_bytes", 617 CTLFLAG_RD, &ha->hw.xmt[i].total_bytes, 618 "total_bytes"); 619 620 SYSCTL_ADD_QUAD(ctx, node_children, 621 OID_AUTO, "total_pkts", 622 CTLFLAG_RD, &ha->hw.xmt[i].total_pkts, 623 "total_pkts"); 624 625 SYSCTL_ADD_QUAD(ctx, node_children, 626 OID_AUTO, "errors", 627 CTLFLAG_RD, &ha->hw.xmt[i].errors, 628 "errors"); 629 630 SYSCTL_ADD_QUAD(ctx, node_children, 631 OID_AUTO, "pkts_dropped", 632 CTLFLAG_RD, &ha->hw.xmt[i].pkts_dropped, 633 "pkts_dropped"); 634 635 SYSCTL_ADD_QUAD(ctx, node_children, 636 OID_AUTO, "switch_pkts", 637 CTLFLAG_RD, &ha->hw.xmt[i].switch_pkts, 638 "switch_pkts"); 639 640 SYSCTL_ADD_QUAD(ctx, node_children, 641 OID_AUTO, "num_buffers", 642 CTLFLAG_RD, &ha->hw.xmt[i].num_buffers, 643 "num_buffers"); 644 } 645 646 return; 647} 648 649static void 650qlnx_add_hw_mbx_cmpl_stats_sysctls(qla_host_t *ha) 651{ 652 struct sysctl_ctx_list *ctx; 653 struct sysctl_oid_list *node_children; 654 655 ctx = device_get_sysctl_ctx(ha->pci_dev); 656 node_children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 657 658 SYSCTL_ADD_QUAD(ctx, node_children, 659 OID_AUTO, "mbx_completion_time_lt_200ms", 660 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[0], 661 "mbx_completion_time_lt_200ms"); 662 663 SYSCTL_ADD_QUAD(ctx, node_children, 664 OID_AUTO, "mbx_completion_time_200ms_400ms", 665 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[1], 666 "mbx_completion_time_200ms_400ms"); 667 668 SYSCTL_ADD_QUAD(ctx, node_children, 669 OID_AUTO, "mbx_completion_time_400ms_600ms", 670 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[2], 671 "mbx_completion_time_400ms_600ms"); 672 673 SYSCTL_ADD_QUAD(ctx, node_children, 674 OID_AUTO, "mbx_completion_time_600ms_800ms", 675 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[3], 676 "mbx_completion_time_600ms_800ms"); 677 678 SYSCTL_ADD_QUAD(ctx, node_children, 679 OID_AUTO, 
"mbx_completion_time_800ms_1000ms", 680 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[4], 681 "mbx_completion_time_800ms_1000ms"); 682 683 SYSCTL_ADD_QUAD(ctx, node_children, 684 OID_AUTO, "mbx_completion_time_1000ms_1200ms", 685 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[5], 686 "mbx_completion_time_1000ms_1200ms"); 687 688 SYSCTL_ADD_QUAD(ctx, node_children, 689 OID_AUTO, "mbx_completion_time_1200ms_1400ms", 690 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[6], 691 "mbx_completion_time_1200ms_1400ms"); 692 693 SYSCTL_ADD_QUAD(ctx, node_children, 694 OID_AUTO, "mbx_completion_time_1400ms_1600ms", 695 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[7], 696 "mbx_completion_time_1400ms_1600ms"); 697 698 SYSCTL_ADD_QUAD(ctx, node_children, 699 OID_AUTO, "mbx_completion_time_1600ms_1800ms", 700 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[8], 701 "mbx_completion_time_1600ms_1800ms"); 702 703 SYSCTL_ADD_QUAD(ctx, node_children, 704 OID_AUTO, "mbx_completion_time_1800ms_2000ms", 705 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[9], 706 "mbx_completion_time_1800ms_2000ms"); 707 708 SYSCTL_ADD_QUAD(ctx, node_children, 709 OID_AUTO, "mbx_completion_time_2000ms_2200ms", 710 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[10], 711 "mbx_completion_time_2000ms_2200ms"); 712 713 SYSCTL_ADD_QUAD(ctx, node_children, 714 OID_AUTO, "mbx_completion_time_2200ms_2400ms", 715 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[11], 716 "mbx_completion_time_2200ms_2400ms"); 717 718 SYSCTL_ADD_QUAD(ctx, node_children, 719 OID_AUTO, "mbx_completion_time_2400ms_2600ms", 720 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[12], 721 "mbx_completion_time_2400ms_2600ms"); 722 723 SYSCTL_ADD_QUAD(ctx, node_children, 724 OID_AUTO, "mbx_completion_time_2600ms_2800ms", 725 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[13], 726 "mbx_completion_time_2600ms_2800ms"); 727 728 SYSCTL_ADD_QUAD(ctx, node_children, 729 OID_AUTO, "mbx_completion_time_2800ms_3000ms", 730 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[14], 731 "mbx_completion_time_2800ms_3000ms"); 732 733 SYSCTL_ADD_QUAD(ctx, node_children, 734 OID_AUTO, 
"mbx_completion_time_3000ms_4000ms", 735 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[15], 736 "mbx_completion_time_3000ms_4000ms"); 737 738 SYSCTL_ADD_QUAD(ctx, node_children, 739 OID_AUTO, "mbx_completion_time_4000ms_5000ms", 740 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[16], 741 "mbx_completion_time_4000ms_5000ms"); 742 743 SYSCTL_ADD_QUAD(ctx, node_children, 744 OID_AUTO, "mbx_completion_host_mbx_cntrl_timeout", 745 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[17], 746 "mbx_completion_host_mbx_cntrl_timeout"); 747 748 SYSCTL_ADD_QUAD(ctx, node_children, 749 OID_AUTO, "mbx_completion_fw_mbx_cntrl_timeout", 750 CTLFLAG_RD, &ha->hw.mbx_comp_msecs[18], 751 "mbx_completion_fw_mbx_cntrl_timeout"); 752 return; 753} 754 755static void 756qlnx_add_hw_stats_sysctls(qla_host_t *ha) 757{ 758 qlnx_add_hw_mac_stats_sysctls(ha); 759 qlnx_add_hw_rcv_stats_sysctls(ha); 760 qlnx_add_hw_xmt_stats_sysctls(ha); 761 qlnx_add_hw_mbx_cmpl_stats_sysctls(ha); 762 763 return; 764} 765 766static void 767qlnx_add_drvr_sds_stats(qla_host_t *ha) 768{ 769 struct sysctl_ctx_list *ctx; 770 struct sysctl_oid_list *children; 771 struct sysctl_oid_list *node_children; 772 struct sysctl_oid *ctx_oid; 773 int i; 774 uint8_t name_str[16]; 775 776 ctx = device_get_sysctl_ctx(ha->pci_dev); 777 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 778 779 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_sds", 780 CTLFLAG_RD, NULL, "stats_drvr_sds"); 781 children = SYSCTL_CHILDREN(ctx_oid); 782 783 for (i = 0; i < ha->hw.num_sds_rings; i++) { 784 785 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 786 snprintf(name_str, sizeof(name_str), "%d", i); 787 788 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 789 CTLFLAG_RD, NULL, name_str); 790 node_children = SYSCTL_CHILDREN(ctx_oid); 791 792 SYSCTL_ADD_QUAD(ctx, node_children, 793 OID_AUTO, "intr_count", 794 CTLFLAG_RD, &ha->hw.sds[i].intr_count, 795 "intr_count"); 796 797 SYSCTL_ADD_UINT(ctx, node_children, 798 OID_AUTO, "rx_free", 799 
CTLFLAG_RD, &ha->hw.sds[i].rx_free, 800 ha->hw.sds[i].rx_free, "rx_free"); 801 } 802 803 return; 804} 805static void 806qlnx_add_drvr_rds_stats(qla_host_t *ha) 807{ 808 struct sysctl_ctx_list *ctx; 809 struct sysctl_oid_list *children; 810 struct sysctl_oid_list *node_children; 811 struct sysctl_oid *ctx_oid; 812 int i; 813 uint8_t name_str[16]; 814 815 ctx = device_get_sysctl_ctx(ha->pci_dev); 816 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 817 818 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_rds", 819 CTLFLAG_RD, NULL, "stats_drvr_rds"); 820 children = SYSCTL_CHILDREN(ctx_oid); 821 822 for (i = 0; i < ha->hw.num_rds_rings; i++) { 823 824 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 825 snprintf(name_str, sizeof(name_str), "%d", i); 826 827 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 828 CTLFLAG_RD, NULL, name_str); 829 node_children = SYSCTL_CHILDREN(ctx_oid); 830 831 SYSCTL_ADD_QUAD(ctx, node_children, 832 OID_AUTO, "count", 833 CTLFLAG_RD, &ha->hw.rds[i].count, 834 "count"); 835 836 SYSCTL_ADD_QUAD(ctx, node_children, 837 OID_AUTO, "lro_pkt_count", 838 CTLFLAG_RD, &ha->hw.rds[i].lro_pkt_count, 839 "lro_pkt_count"); 840 841 SYSCTL_ADD_QUAD(ctx, node_children, 842 OID_AUTO, "lro_bytes", 843 CTLFLAG_RD, &ha->hw.rds[i].lro_bytes, 844 "lro_bytes"); 845 } 846 847 return; 848} 849 850static void 851qlnx_add_drvr_tx_stats(qla_host_t *ha) 852{ 853 struct sysctl_ctx_list *ctx; 854 struct sysctl_oid_list *children; 855 struct sysctl_oid_list *node_children; 856 struct sysctl_oid *ctx_oid; 857 int i; 858 uint8_t name_str[16]; 859 860 ctx = device_get_sysctl_ctx(ha->pci_dev); 861 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 862 863 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats_drvr_xmt", 864 CTLFLAG_RD, NULL, "stats_drvr_xmt"); 865 children = SYSCTL_CHILDREN(ctx_oid); 866 867 for (i = 0; i < ha->hw.num_tx_rings; i++) { 868 869 bzero(name_str, (sizeof(uint8_t) * 
sizeof(name_str))); 870 snprintf(name_str, sizeof(name_str), "%d", i); 871 872 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 873 CTLFLAG_RD, NULL, name_str); 874 node_children = SYSCTL_CHILDREN(ctx_oid); 875 876 SYSCTL_ADD_QUAD(ctx, node_children, 877 OID_AUTO, "count", 878 CTLFLAG_RD, &ha->tx_ring[i].count, 879 "count"); 880 881#ifdef QL_ENABLE_ISCSI_TLV 882 SYSCTL_ADD_QUAD(ctx, node_children, 883 OID_AUTO, "iscsi_pkt_count", 884 CTLFLAG_RD, &ha->tx_ring[i].iscsi_pkt_count, 885 "iscsi_pkt_count"); 886#endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 887 } 888 889 return; 890} 891 892static void 893qlnx_add_drvr_stats_sysctls(qla_host_t *ha) 894{ 895 qlnx_add_drvr_sds_stats(ha); 896 qlnx_add_drvr_rds_stats(ha); 897 qlnx_add_drvr_tx_stats(ha); 898 return; 899} 900 901/* 902 * Name: ql_hw_add_sysctls 903 * Function: Add P3Plus specific sysctls 904 */ 905void 906ql_hw_add_sysctls(qla_host_t *ha) 907{ 908 device_t dev; 909 910 dev = ha->pci_dev; 911 912 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 913 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 914 OID_AUTO, "num_rds_rings", CTLFLAG_RD, &ha->hw.num_rds_rings, 915 ha->hw.num_rds_rings, "Number of Rcv Descriptor Rings"); 916 917 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 918 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 919 OID_AUTO, "num_sds_rings", CTLFLAG_RD, &ha->hw.num_sds_rings, 920 ha->hw.num_sds_rings, "Number of Status Descriptor Rings"); 921 922 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 923 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 924 OID_AUTO, "num_tx_rings", CTLFLAG_RD, &ha->hw.num_tx_rings, 925 ha->hw.num_tx_rings, "Number of Transmit Rings"); 926 927 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 928 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 929 OID_AUTO, "tx_ring_index", CTLFLAG_RW, &ha->txr_idx, 930 ha->txr_idx, "Tx Ring Used"); 931 932 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 933 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 934 OID_AUTO, "max_tx_segs", CTLFLAG_RD, &ha->hw.max_tx_segs, 
935 ha->hw.max_tx_segs, "Max # of Segments in a non-TSO pkt"); 936 937 ha->hw.sds_cidx_thres = 32; 938 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 939 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 940 OID_AUTO, "sds_cidx_thres", CTLFLAG_RW, &ha->hw.sds_cidx_thres, 941 ha->hw.sds_cidx_thres, 942 "Number of SDS entries to process before updating" 943 " SDS Ring Consumer Index"); 944 945 ha->hw.rds_pidx_thres = 32; 946 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 947 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 948 OID_AUTO, "rds_pidx_thres", CTLFLAG_RW, &ha->hw.rds_pidx_thres, 949 ha->hw.rds_pidx_thres, 950 "Number of Rcv Rings Entries to post before updating" 951 " RDS Ring Producer Index"); 952 953 ha->hw.rcv_intr_coalesce = (3 << 16) | 256; 954 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 955 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 956 OID_AUTO, "rcv_intr_coalesce", CTLFLAG_RW, 957 &ha->hw.rcv_intr_coalesce, 958 ha->hw.rcv_intr_coalesce, 959 "Rcv Intr Coalescing Parameters\n" 960 "\tbits 15:0 max packets\n" 961 "\tbits 31:16 max micro-seconds to wait\n" 962 "\tplease run\n" 963 "\tifconfig <if> down && ifconfig <if> up\n" 964 "\tto take effect \n"); 965 966 ha->hw.xmt_intr_coalesce = (64 << 16) | 64; 967 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 968 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 969 OID_AUTO, "xmt_intr_coalesce", CTLFLAG_RW, 970 &ha->hw.xmt_intr_coalesce, 971 ha->hw.xmt_intr_coalesce, 972 "Xmt Intr Coalescing Parameters\n" 973 "\tbits 15:0 max packets\n" 974 "\tbits 31:16 max micro-seconds to wait\n" 975 "\tplease run\n" 976 "\tifconfig <if> down && ifconfig <if> up\n" 977 "\tto take effect \n"); 978 979 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 980 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 981 OID_AUTO, "port_cfg", CTLTYPE_INT | CTLFLAG_RW, 982 (void *)ha, 0, 983 qla_sysctl_port_cfg, "I", 984 "Set Port Configuration if values below " 985 "otherwise Get Port Configuration\n" 986 "\tBits 0-3 ; 1 = DCBX Enable; 0 = DCBX Disable\n" 987 
"\tBits 4-7 : 0 = no pause; 1 = std ; 2 = ppm \n" 988 "\tBits 8-11: std pause cfg; 0 = xmt and rcv;" 989 " 1 = xmt only; 2 = rcv only;\n" 990 ); 991 992 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 993 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 994 OID_AUTO, "set_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW, 995 (void *)ha, 0, 996 qla_sysctl_set_cam_search_mode, "I", 997 "Set CAM Search Mode" 998 "\t 1 = search mode internal\n" 999 "\t 2 = search mode auto\n"); 1000 1001 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 1002 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1003 OID_AUTO, "get_cam_search_mode", CTLTYPE_INT | CTLFLAG_RW, 1004 (void *)ha, 0, 1005 qla_sysctl_get_cam_search_mode, "I", 1006 "Get CAM Search Mode" 1007 "\t 1 = search mode internal\n" 1008 "\t 2 = search mode auto\n"); 1009 1010 ha->hw.enable_9kb = 1; 1011 1012 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 1013 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1014 OID_AUTO, "enable_9kb", CTLFLAG_RW, &ha->hw.enable_9kb, 1015 ha->hw.enable_9kb, "Enable 9Kbyte Buffers when MTU = 9000"); 1016 1017 ha->hw.enable_hw_lro = 1; 1018 1019 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 1020 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1021 OID_AUTO, "enable_hw_lro", CTLFLAG_RW, &ha->hw.enable_hw_lro, 1022 ha->hw.enable_hw_lro, "Enable Hardware LRO; Default is true \n" 1023 "\t 1 : Hardware LRO if LRO is enabled\n" 1024 "\t 0 : Software LRO if LRO is enabled\n" 1025 "\t Any change requires ifconfig down/up to take effect\n" 1026 "\t Note that LRO may be turned off/on via ifconfig\n"); 1027 1028 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 1029 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1030 OID_AUTO, "sp_log_index", CTLFLAG_RW, &ha->hw.sp_log_index, 1031 ha->hw.sp_log_index, "sp_log_index"); 1032 1033 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 1034 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1035 OID_AUTO, "sp_log_stop", CTLFLAG_RW, &ha->hw.sp_log_stop, 1036 ha->hw.sp_log_stop, "sp_log_stop"); 1037 1038 
ha->hw.sp_log_stop_events = 0; 1039 1040 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 1041 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1042 OID_AUTO, "sp_log_stop_events", CTLFLAG_RW, 1043 &ha->hw.sp_log_stop_events, 1044 ha->hw.sp_log_stop_events, "Slow path event log is stopped" 1045 " when OR of the following events occur \n" 1046 "\t 0x01 : Heart beat Failure\n" 1047 "\t 0x02 : Temperature Failure\n" 1048 "\t 0x04 : HW Initialization Failure\n" 1049 "\t 0x08 : Interface Initialization Failure\n" 1050 "\t 0x10 : Error Recovery Failure\n"); 1051 1052 ha->hw.mdump_active = 0; 1053 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 1054 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1055 OID_AUTO, "minidump_active", CTLFLAG_RW, &ha->hw.mdump_active, 1056 ha->hw.mdump_active, 1057 "Minidump retrieval is Active"); 1058 1059 ha->hw.mdump_done = 0; 1060 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 1061 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1062 OID_AUTO, "mdump_done", CTLFLAG_RW, 1063 &ha->hw.mdump_done, ha->hw.mdump_done, 1064 "Minidump has been done and available for retrieval"); 1065 1066 ha->hw.mdump_capture_mask = 0xF; 1067 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 1068 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1069 OID_AUTO, "minidump_capture_mask", CTLFLAG_RW, 1070 &ha->hw.mdump_capture_mask, ha->hw.mdump_capture_mask, 1071 "Minidump capture mask"); 1072#ifdef QL_DBG 1073 1074 ha->err_inject = 0; 1075 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 1076 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1077 OID_AUTO, "err_inject", 1078 CTLFLAG_RW, &ha->err_inject, ha->err_inject, 1079 "Error to be injected\n" 1080 "\t\t\t 0: No Errors\n" 1081 "\t\t\t 1: rcv: rxb struct invalid\n" 1082 "\t\t\t 2: rcv: mp == NULL\n" 1083 "\t\t\t 3: lro: rxb struct invalid\n" 1084 "\t\t\t 4: lro: mp == NULL\n" 1085 "\t\t\t 5: rcv: num handles invalid\n" 1086 "\t\t\t 6: reg: indirect reg rd_wr failure\n" 1087 "\t\t\t 7: ocm: offchip memory rd_wr failure\n" 1088 "\t\t\t 8: mbx: mailbox 
command failure\n" 1089 "\t\t\t 9: heartbeat failure\n" 1090 "\t\t\t A: temperature failure\n" 1091 "\t\t\t 11: m_getcl or m_getjcl failure\n" ); 1092 1093 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 1094 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1095 OID_AUTO, "peg_stop", CTLTYPE_INT | CTLFLAG_RW, 1096 (void *)ha, 0, 1097 qla_sysctl_stop_pegs, "I", "Peg Stop"); 1098 1099#endif /* #ifdef QL_DBG */ 1100 1101 ha->hw.user_pri_nic = 0; 1102 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 1103 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1104 OID_AUTO, "user_pri_nic", CTLFLAG_RW, &ha->hw.user_pri_nic, 1105 ha->hw.user_pri_nic, 1106 "VLAN Tag User Priority for Normal Ethernet Packets"); 1107 1108 ha->hw.user_pri_iscsi = 4; 1109 SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 1110 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 1111 OID_AUTO, "user_pri_iscsi", CTLFLAG_RW, &ha->hw.user_pri_iscsi, 1112 ha->hw.user_pri_iscsi, 1113 "VLAN Tag User Priority for iSCSI Packets"); 1114 1115 qlnx_add_hw_stats_sysctls(ha); 1116 qlnx_add_drvr_stats_sysctls(ha); 1117 1118 return; 1119} 1120 1121void 1122ql_hw_link_status(qla_host_t *ha) 1123{ 1124 device_printf(ha->pci_dev, "cable_oui\t\t 0x%08x\n", ha->hw.cable_oui); 1125 1126 if (ha->hw.link_up) { 1127 device_printf(ha->pci_dev, "link Up\n"); 1128 } else { 1129 device_printf(ha->pci_dev, "link Down\n"); 1130 } 1131 1132 if (ha->hw.fduplex) { 1133 device_printf(ha->pci_dev, "Full Duplex\n"); 1134 } else { 1135 device_printf(ha->pci_dev, "Half Duplex\n"); 1136 } 1137 1138 if (ha->hw.autoneg) { 1139 device_printf(ha->pci_dev, "Auto Negotiation Enabled\n"); 1140 } else { 1141 device_printf(ha->pci_dev, "Auto Negotiation Disabled\n"); 1142 } 1143 1144 switch (ha->hw.link_speed) { 1145 case 0x710: 1146 device_printf(ha->pci_dev, "link speed\t\t 10Gps\n"); 1147 break; 1148 1149 case 0x3E8: 1150 device_printf(ha->pci_dev, "link speed\t\t 1Gps\n"); 1151 break; 1152 1153 case 0x64: 1154 device_printf(ha->pci_dev, "link speed\t\t 100Mbps\n"); 1155 
		break;

	default:
		device_printf(ha->pci_dev, "link speed\t\t Unknown\n");
		break;
	}

	/* Decode the SFP/module type reported by the firmware. */
	switch (ha->hw.module_type) {

	case 0x01:
		device_printf(ha->pci_dev, "Module Type 10GBase-LRM\n");
		break;

	case 0x02:
		device_printf(ha->pci_dev, "Module Type 10GBase-LR\n");
		break;

	case 0x03:
		device_printf(ha->pci_dev, "Module Type 10GBase-SR\n");
		break;

	case 0x04:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x05:
		device_printf(ha->pci_dev, "Module Type 10GE Active"
			" Limiting Copper(Compliant)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x06:
		device_printf(ha->pci_dev,
			"Module Type 10GE Passive Copper"
			" (Legacy, Best Effort)[%d m]\n",
			ha->hw.cable_length);
		break;

	case 0x07:
		device_printf(ha->pci_dev, "Module Type 1000Base-SX\n");
		break;

	case 0x08:
		device_printf(ha->pci_dev, "Module Type 1000Base-LX\n");
		break;

	case 0x09:
		device_printf(ha->pci_dev, "Module Type 1000Base-CX\n");
		break;

	case 0x0A:
		device_printf(ha->pci_dev, "Module Type 1000Base-T\n");
		break;

	case 0x0B:
		device_printf(ha->pci_dev, "Module Type 1GE Passive Copper"
			"(Legacy, Best Effort)\n");
		break;

	default:
		device_printf(ha->pci_dev, "Unknown Module Type 0x%x\n",
			ha->hw.module_type);
		break;
	}

	if (ha->hw.link_faults == 1)
		device_printf(ha->pci_dev, "SFP Power Fault\n");
}

/*
 * Name: ql_free_dma
 * Function: Frees the DMA'able memory allocated in ql_alloc_dma()
 *
 * The flags bits (sds_ring/rds_ring/tx_ring) track which allocations
 * succeeded, so this is safe to call on a partially-allocated state
 * (it is also the error-unwind path of ql_alloc_dma()).
 */
void
ql_free_dma(qla_host_t *ha)
{
	uint32_t i;

	if (ha->hw.dma_buf.flags.sds_ring) {
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i]);
		}
		ha->hw.dma_buf.flags.sds_ring = 0;
	}

	if (ha->hw.dma_buf.flags.rds_ring) {
		for (i = 0; i < ha->hw.num_rds_rings; i++) {
			ql_free_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i]);
		}
		ha->hw.dma_buf.flags.rds_ring = 0;
	}

	if (ha->hw.dma_buf.flags.tx_ring) {
		ql_free_dmabuf(ha, &ha->hw.dma_buf.tx_ring);
		ha->hw.dma_buf.flags.tx_ring = 0;
	}
	ql_minidump_free(ha);
}

/*
 * Name: ql_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 *
 * Returns 0 on success, -1 on failure (all partial allocations are
 * released via ql_free_dma() before returning).
 */
int
ql_alloc_dma(qla_host_t *ha)
{
	device_t dev;
	uint32_t i, j, size, tx_ring_size;
	qla_hw_t *hw;
	qla_hw_tx_cntxt_t *tx_cntxt;
	uint8_t *vaddr;
	bus_addr_t paddr;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	hw = &ha->hw;
	/*
	 * Allocate Transmit Ring
	 */
	tx_ring_size = (sizeof(q80_tx_cmd_t) * NUM_TX_DESCRIPTORS);
	size = (tx_ring_size * ha->hw.num_tx_rings);

	hw->dma_buf.tx_ring.alignment = 8;
	/*
	 * One extra PAGE_SIZE: the per-ring tx consumer-index words are
	 * carved out of the same allocation, right after the rings.
	 */
	hw->dma_buf.tx_ring.size = size + PAGE_SIZE;

	if (ql_alloc_dmabuf(ha, &hw->dma_buf.tx_ring)) {
		device_printf(dev, "%s: tx ring alloc failed\n", __func__);
		goto ql_alloc_dma_exit;
	}

	vaddr = (uint8_t *)hw->dma_buf.tx_ring.dma_b;
	paddr = hw->dma_buf.tx_ring.dma_addr;

	/* Slice the single allocation into per-ring descriptor areas. */
	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_ring_base = (q80_tx_cmd_t *)vaddr;
		tx_cntxt->tx_ring_paddr = paddr;

		vaddr += tx_ring_size;
		paddr += tx_ring_size;
	}

	/* Consumer-index words follow the last ring in the same buffer. */
	for (i = 0; i < ha->hw.num_tx_rings; i++) {
		tx_cntxt = (qla_hw_tx_cntxt_t *)&hw->tx_cntxt[i];

		tx_cntxt->tx_cons = (uint32_t *)vaddr;
		tx_cntxt->tx_cons_paddr = paddr;

		vaddr += sizeof (uint32_t);
		paddr += sizeof (uint32_t);
	}

	ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2(ha, (dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(hw->dma_buf.tx_ring.dma_addr),
		hw->dma_buf.tx_ring.dma_b));
	/*
	 * Allocate Receive Descriptor Rings
	 */

	for (i = 0; i < hw->num_rds_rings; i++) {

		hw->dma_buf.rds_ring[i].alignment = 8;
		hw->dma_buf.rds_ring[i].size =
			(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.rds_ring[i])) {
			device_printf(dev, "%s: rds ring[%d] alloc failed\n",
				__func__, i);

			/* free the rds rings allocated so far */
			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.rds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i, (void *)(hw->dma_buf.rds_ring[i].dma_addr),
			hw->dma_buf.rds_ring[i].dma_b));
	}

	hw->dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */

	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->dma_buf.sds_ring[i].alignment = 8;
		hw->dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (ql_alloc_dmabuf(ha, &hw->dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			/* free the sds rings allocated so far */
			for (j = 0; j < i; j++)
				ql_free_dmabuf(ha, &hw->dma_buf.sds_ring[j]);

			goto ql_alloc_dma_exit;
		}
		QL_DPRINT4(ha, (dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(hw->dma_buf.sds_ring[i].dma_addr),
			hw->dma_buf.sds_ring[i].dma_b));
	}
	for (i = 0; i < hw->num_sds_rings; i++) {
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;
	}

	hw->dma_buf.flags.sds_ring = 1;

	return 0;

ql_alloc_dma_exit:
	ql_free_dma(ha);
	return -1;
}

#define Q8_MBX_MSEC_DELAY	5000

/*
 * Name: qla_mbx_cmd
 * Function: Issues a firmware mailbox command and collects the response.
 *	h_mbox/n_hmbox: command words written to the host mailbox registers.
 *	fw_mbox/n_fwmbox: buffer/count for the firmware response words.
 *	no_pause: when set, busy-wait (DELAY) instead of sleeping;
 *	required when called from contexts that must not sleep.
 * Returns 0 on success; negative on timeout/injected failure (and
 * initiates error recovery).
 */
static int
qla_mbx_cmd(qla_host_t *ha, uint32_t *h_mbox, uint32_t n_hmbox,
	uint32_t *fw_mbox, uint32_t n_fwmbox, uint32_t no_pause)
{
1386 uint32_t i; 1387 uint32_t data; 1388 int ret = 0; 1389 uint64_t start_usecs; 1390 uint64_t end_usecs; 1391 uint64_t msecs_200; 1392 1393 ql_sp_log(ha, 0, 5, no_pause, h_mbox[0], h_mbox[1], h_mbox[2], h_mbox[3]); 1394 1395 if (ha->offline || ha->qla_initiate_recovery) { 1396 ql_sp_log(ha, 1, 2, ha->offline, ha->qla_initiate_recovery, 0, 0, 0); 1397 goto exit_qla_mbx_cmd; 1398 } 1399 1400 if (((ha->err_inject & 0xFFFF) == INJCT_MBX_CMD_FAILURE) && 1401 (((ha->err_inject & ~0xFFFF) == ((h_mbox[0] & 0xFFFF) << 16))|| 1402 !(ha->err_inject & ~0xFFFF))) { 1403 ret = -3; 1404 QL_INITIATE_RECOVERY(ha); 1405 goto exit_qla_mbx_cmd; 1406 } 1407 1408 start_usecs = qla_get_usec_timestamp(); 1409 1410 if (no_pause) 1411 i = 1000; 1412 else 1413 i = Q8_MBX_MSEC_DELAY; 1414 1415 while (i) { 1416 1417 if (ha->qla_initiate_recovery) { 1418 ql_sp_log(ha, 2, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); 1419 return (-1); 1420 } 1421 1422 data = READ_REG32(ha, Q8_HOST_MBOX_CNTRL); 1423 if (data == 0) 1424 break; 1425 if (no_pause) { 1426 DELAY(1000); 1427 } else { 1428 qla_mdelay(__func__, 1); 1429 } 1430 i--; 1431 } 1432 1433 if (i == 0) { 1434 device_printf(ha->pci_dev, "%s: host_mbx_cntrl 0x%08x\n", 1435 __func__, data); 1436 ql_sp_log(ha, 3, 1, data, 0, 0, 0, 0); 1437 ret = -1; 1438 ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 2)]++; 1439 QL_INITIATE_RECOVERY(ha); 1440 goto exit_qla_mbx_cmd; 1441 } 1442 1443 for (i = 0; i < n_hmbox; i++) { 1444 WRITE_REG32(ha, (Q8_HOST_MBOX0 + (i << 2)), *h_mbox); 1445 h_mbox++; 1446 } 1447 1448 WRITE_REG32(ha, Q8_HOST_MBOX_CNTRL, 0x1); 1449 1450 1451 i = Q8_MBX_MSEC_DELAY; 1452 while (i) { 1453 1454 if (ha->qla_initiate_recovery) { 1455 ql_sp_log(ha, 4, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); 1456 return (-1); 1457 } 1458 1459 data = READ_REG32(ha, Q8_FW_MBOX_CNTRL); 1460 1461 if ((data & 0x3) == 1) { 1462 data = READ_REG32(ha, Q8_FW_MBOX0); 1463 if ((data & 0xF000) != 0x8000) 1464 break; 1465 } 1466 if (no_pause) { 1467 DELAY(1000); 1468 } 
else { 1469 qla_mdelay(__func__, 1); 1470 } 1471 i--; 1472 } 1473 if (i == 0) { 1474 device_printf(ha->pci_dev, "%s: fw_mbx_cntrl 0x%08x\n", 1475 __func__, data); 1476 ql_sp_log(ha, 5, 1, data, 0, 0, 0, 0); 1477 ret = -2; 1478 ha->hw.mbx_comp_msecs[(Q8_MBX_COMP_MSECS - 1)]++; 1479 QL_INITIATE_RECOVERY(ha); 1480 goto exit_qla_mbx_cmd; 1481 } 1482 1483 for (i = 0; i < n_fwmbox; i++) { 1484 1485 if (ha->qla_initiate_recovery) { 1486 ql_sp_log(ha, 6, 1, ha->qla_initiate_recovery, 0, 0, 0, 0); 1487 return (-1); 1488 } 1489 1490 *fw_mbox++ = READ_REG32(ha, (Q8_FW_MBOX0 + (i << 2))); 1491 } 1492 1493 WRITE_REG32(ha, Q8_FW_MBOX_CNTRL, 0x0); 1494 WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0); 1495 1496 end_usecs = qla_get_usec_timestamp(); 1497 1498 if (end_usecs > start_usecs) { 1499 msecs_200 = (end_usecs - start_usecs)/(1000 * 200); 1500 1501 if (msecs_200 < 15) 1502 ha->hw.mbx_comp_msecs[msecs_200]++; 1503 else if (msecs_200 < 20) 1504 ha->hw.mbx_comp_msecs[15]++; 1505 else { 1506 device_printf(ha->pci_dev, "%s: [%ld, %ld] %ld\n", __func__, 1507 start_usecs, end_usecs, msecs_200); 1508 ha->hw.mbx_comp_msecs[16]++; 1509 } 1510 } 1511 ql_sp_log(ha, 7, 5, fw_mbox[0], fw_mbox[1], fw_mbox[2], fw_mbox[3], fw_mbox[4]); 1512 1513 1514exit_qla_mbx_cmd: 1515 return (ret); 1516} 1517 1518int 1519qla_get_nic_partition(qla_host_t *ha, uint32_t *supports_9kb, 1520 uint32_t *num_rcvq) 1521{ 1522 uint32_t *mbox, err; 1523 device_t dev = ha->pci_dev; 1524 1525 bzero(ha->hw.mbox, (sizeof (uint32_t) * Q8_NUM_MBOX)); 1526 1527 mbox = ha->hw.mbox; 1528 1529 mbox[0] = Q8_MBX_GET_NIC_PARTITION | (0x2 << 16) | (0x2 << 29); 1530 1531 if (qla_mbx_cmd(ha, mbox, 2, mbox, 19, 0)) { 1532 device_printf(dev, "%s: failed0\n", __func__); 1533 return (-1); 1534 } 1535 err = mbox[0] >> 25; 1536 1537 if (supports_9kb != NULL) { 1538 if (mbox[16] & 0x80) /* bit 7 of mbox 16 */ 1539 *supports_9kb = 1; 1540 else 1541 *supports_9kb = 0; 1542 } 1543 1544 if (num_rcvq != NULL) 1545 *num_rcvq = ((mbox[6] >> 
16) & 0xFFFF); 1546 1547 if ((err != 1) && (err != 0)) { 1548 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 1549 return (-1); 1550 } 1551 return 0; 1552} 1553 1554static int 1555qla_config_intr_cntxt(qla_host_t *ha, uint32_t start_idx, uint32_t num_intrs, 1556 uint32_t create) 1557{ 1558 uint32_t i, err; 1559 device_t dev = ha->pci_dev; 1560 q80_config_intr_t *c_intr; 1561 q80_config_intr_rsp_t *c_intr_rsp; 1562 1563 c_intr = (q80_config_intr_t *)ha->hw.mbox; 1564 bzero(c_intr, (sizeof (q80_config_intr_t))); 1565 1566 c_intr->opcode = Q8_MBX_CONFIG_INTR; 1567 1568 c_intr->count_version = (sizeof (q80_config_intr_t) >> 2); 1569 c_intr->count_version |= Q8_MBX_CMD_VERSION; 1570 1571 c_intr->nentries = num_intrs; 1572 1573 for (i = 0; i < num_intrs; i++) { 1574 if (create) { 1575 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_CREATE; 1576 c_intr->intr[i].msix_index = start_idx + 1 + i; 1577 } else { 1578 c_intr->intr[i].cmd_type = Q8_MBX_CONFIG_INTR_DELETE; 1579 c_intr->intr[i].msix_index = 1580 ha->hw.intr_id[(start_idx + i)]; 1581 } 1582 1583 c_intr->intr[i].cmd_type |= Q8_MBX_CONFIG_INTR_TYPE_MSI_X; 1584 } 1585 1586 if (qla_mbx_cmd(ha, (uint32_t *)c_intr, 1587 (sizeof (q80_config_intr_t) >> 2), 1588 ha->hw.mbox, (sizeof (q80_config_intr_rsp_t) >> 2), 0)) { 1589 device_printf(dev, "%s: %s failed0\n", __func__, 1590 (create ? "create" : "delete")); 1591 return (-1); 1592 } 1593 1594 c_intr_rsp = (q80_config_intr_rsp_t *)ha->hw.mbox; 1595 1596 err = Q8_MBX_RSP_STATUS(c_intr_rsp->regcnt_status); 1597 1598 if (err) { 1599 device_printf(dev, "%s: %s failed1 [0x%08x, %d]\n", __func__, 1600 (create ? 
			"create" : "delete"), err, c_intr_rsp->nentries);

		/* Dump per-entry status to aid firmware debugging. */
		for (i = 0; i < c_intr_rsp->nentries; i++) {
			device_printf(dev, "%s: [%d]:[0x%x 0x%x 0x%x]\n",
				__func__, i,
				c_intr_rsp->intr[i].status,
				c_intr_rsp->intr[i].intr_id,
				c_intr_rsp->intr[i].intr_src);
		}

		return (-1);
	}

	/* On successful create, record firmware-assigned ids/sources. */
	for (i = 0; ((i < num_intrs) && create); i++) {
		if (!c_intr_rsp->intr[i].status) {
			ha->hw.intr_id[(start_idx + i)] =
				c_intr_rsp->intr[i].intr_id;
			ha->hw.intr_src[(start_idx + i)] =
				c_intr_rsp->intr[i].intr_src;
		}
	}

	return (0);
}

/*
 * Name: qla_config_rss
 * Function: Configure RSS for the context/interface.
 */
/* 40-byte (5 x 64-bit) Toeplitz-style hash key handed to the firmware. */
static const uint64_t rss_key[] = { 0xbeac01fa6a42b73bULL,
			0x8030f20c77cb2da3ULL,
			0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
			0x255b0ec26d5a56daULL };

static int
qla_config_rss(qla_host_t *ha, uint16_t cntxt_id)
{
	q80_config_rss_t *c_rss;
	q80_config_rss_rsp_t *c_rss_rsp;
	uint32_t err, i;
	device_t dev = ha->pci_dev;

	c_rss = (q80_config_rss_t *)ha->hw.mbox;
	bzero(c_rss, (sizeof (q80_config_rss_t)));

	c_rss->opcode = Q8_MBX_CONFIG_RSS;

	c_rss->count_version = (sizeof (q80_config_rss_t) >> 2);
	c_rss->count_version |= Q8_MBX_CMD_VERSION;

	/* Hash on TCP+IP header fields for both IPv4 and IPv6. */
	c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP_IP |
				Q8_MBX_RSS_HASH_TYPE_IPV6_TCP_IP);
	//c_rss->hash_type = (Q8_MBX_RSS_HASH_TYPE_IPV4_TCP |
	//			Q8_MBX_RSS_HASH_TYPE_IPV6_TCP);

	c_rss->flags = Q8_MBX_RSS_FLAGS_ENABLE_RSS;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_USE_IND_TABLE;

	c_rss->indtbl_mask = Q8_MBX_RSS_INDTBL_MASK;

	c_rss->indtbl_mask |= Q8_MBX_RSS_FLAGS_MULTI_RSS_VALID;
	c_rss->flags |= Q8_MBX_RSS_FLAGS_TYPE_CRSS;

	c_rss->cntxt_id = cntxt_id;

	for (i = 0; i < 5; i++) {
		c_rss->rss_key[i] = rss_key[i];
	}

	if (qla_mbx_cmd(ha, (uint32_t *)c_rss,
		(sizeof (q80_config_rss_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_rss_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	c_rss_rsp = (q80_config_rss_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(c_rss_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

/*
 * Name: qla_set_rss_ind_table
 * Function: Programs `count` entries of the RSS indirection table for
 *	context cntxt_id, starting at start_idx. ind_table holds one
 *	SDS-ring index per entry.
 */
static int
qla_set_rss_ind_table(qla_host_t *ha, uint32_t start_idx, uint32_t count,
	uint16_t cntxt_id, uint8_t *ind_table)
{
	q80_config_rss_ind_table_t *c_rss_ind;
	q80_config_rss_ind_table_rsp_t *c_rss_ind_rsp;
	uint32_t err;
	device_t dev = ha->pci_dev;

	if ((count > Q8_RSS_IND_TBL_SIZE) ||
		((start_idx + count - 1) > Q8_RSS_IND_TBL_MAX_IDX)) {
		device_printf(dev, "%s: illegal count [%d, %d]\n", __func__,
			start_idx, count);
		return (-1);
	}

	c_rss_ind = (q80_config_rss_ind_table_t *)ha->hw.mbox;
	bzero(c_rss_ind, sizeof (q80_config_rss_ind_table_t));

	c_rss_ind->opcode = Q8_MBX_CONFIG_RSS_TABLE;
	c_rss_ind->count_version = (sizeof (q80_config_rss_ind_table_t) >> 2);
	c_rss_ind->count_version |= Q8_MBX_CMD_VERSION;

	c_rss_ind->start_idx = start_idx;
	c_rss_ind->end_idx = start_idx + count - 1;	/* inclusive */
	c_rss_ind->cntxt_id = cntxt_id;
	bcopy(ind_table, c_rss_ind->ind_table, count);

	if (qla_mbx_cmd(ha, (uint32_t *)c_rss_ind,
		(sizeof (q80_config_rss_ind_table_t) >> 2), ha->hw.mbox,
		(sizeof(q80_config_rss_ind_table_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	c_rss_ind_rsp = (q80_config_rss_ind_table_rsp_t *)ha->hw.mbox;
	err = Q8_MBX_RSP_STATUS(c_rss_ind_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}
	return 0;
}

/*
 * Name: qla_config_intr_coalesce
 * Function: Configure Interrupt Coalescing.
 *	rcv != 0 selects the receive-side parameters (rcv_intr_coalesce),
 *	otherwise the transmit-side (xmt_intr_coalesce); both sysctl values
 *	encode max_pkts in bits 15:0 and max wait (us) in bits 31:16.
 *	tenable != 0 additionally enables the periodic timer across all
 *	SDS rings.
 */
static int
qla_config_intr_coalesce(qla_host_t *ha, uint16_t cntxt_id, int tenable,
	int rcv)
{
	q80_config_intr_coalesc_t *intrc;
	q80_config_intr_coalesc_rsp_t *intrc_rsp;
	uint32_t err, i;
	device_t dev = ha->pci_dev;

	intrc = (q80_config_intr_coalesc_t *)ha->hw.mbox;
	bzero(intrc, (sizeof (q80_config_intr_coalesc_t)));

	intrc->opcode = Q8_MBX_CONFIG_INTR_COALESCE;
	intrc->count_version = (sizeof (q80_config_intr_coalesc_t) >> 2);
	intrc->count_version |= Q8_MBX_CMD_VERSION;

	if (rcv) {
		intrc->flags = Q8_MBX_INTRC_FLAGS_RCV;
		intrc->max_pkts = ha->hw.rcv_intr_coalesce & 0xFFFF;
		intrc->max_mswait = (ha->hw.rcv_intr_coalesce >> 16) & 0xFFFF;
	} else {
		intrc->flags = Q8_MBX_INTRC_FLAGS_XMT;
		intrc->max_pkts = ha->hw.xmt_intr_coalesce & 0xFFFF;
		intrc->max_mswait = (ha->hw.xmt_intr_coalesce >> 16) & 0xFFFF;
	}

	intrc->cntxt_id = cntxt_id;

	if (tenable) {
		intrc->flags |= Q8_MBX_INTRC_FLAGS_PERIODIC;
		intrc->timer_type = Q8_MBX_INTRC_TIMER_PERIODIC;

		/* apply the periodic timer to every status ring */
		for (i = 0; i < ha->hw.num_sds_rings; i++) {
			intrc->sds_ring_mask |= (1 << i);
		}
		intrc->ms_timeout = 1000;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)intrc,
		(sizeof (q80_config_intr_coalesc_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_intr_coalesc_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	intrc_rsp = (q80_config_intr_coalesc_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(intrc_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return 0;
}


/*
 * Name: qla_config_mac_addr
 * Function: binds a MAC address to the context/interface.
 * Can be unicast, multicast or broadcast.
 */
static int
qla_config_mac_addr(qla_host_t *ha, uint8_t *mac_addr, uint32_t add_mac,
	uint32_t num_mac)
{
	q80_config_mac_addr_t *cmac;
	q80_config_mac_addr_rsp_t *cmac_rsp;
	uint32_t err;
	device_t dev = ha->pci_dev;
	int i;
	uint8_t *mac_cpy = mac_addr;	/* saved for the failure dump below */

	if (num_mac > Q8_MAX_MAC_ADDRS) {
		device_printf(dev, "%s: %s num_mac [0x%x] > Q8_MAX_MAC_ADDRS\n",
			__func__, (add_mac ? "Add" : "Del"), num_mac);
		return (-1);
	}

	cmac = (q80_config_mac_addr_t *)ha->hw.mbox;
	bzero(cmac, (sizeof (q80_config_mac_addr_t)));

	cmac->opcode = Q8_MBX_CONFIG_MAC_ADDR;
	cmac->count_version = sizeof (q80_config_mac_addr_t) >> 2;
	cmac->count_version |= Q8_MBX_CMD_VERSION;

	if (add_mac)
		cmac->cmd = Q8_MBX_CMAC_CMD_ADD_MAC_ADDR;
	else
		cmac->cmd = Q8_MBX_CMAC_CMD_DEL_MAC_ADDR;

	cmac->cmd |= Q8_MBX_CMAC_CMD_CAM_INGRESS;

	cmac->nmac_entries = num_mac;
	cmac->cntxt_id = ha->hw.rcv_cntxt_id;

	/* mac_addr is a packed array of num_mac 6-byte addresses */
	for (i = 0; i < num_mac; i++) {
		bcopy(mac_addr, cmac->mac_addr[i].addr, Q8_ETHER_ADDR_LEN);
		mac_addr = mac_addr + ETHER_ADDR_LEN;
	}

	if (qla_mbx_cmd(ha, (uint32_t *)cmac,
		(sizeof (q80_config_mac_addr_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_mac_addr_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: %s failed0\n", __func__,
			(add_mac ? "Add" : "Del"));
		return (-1);
	}
	cmac_rsp = (q80_config_mac_addr_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(cmac_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: %s failed1 [0x%08x]\n", __func__,
			(add_mac ? "Add" : "Del"), err);
		/* dump the addresses that failed to program */
		for (i = 0; i < num_mac; i++) {
			device_printf(dev, "%s: %02x:%02x:%02x:%02x:%02x:%02x\n",
				__func__, mac_cpy[0], mac_cpy[1], mac_cpy[2],
				mac_cpy[3], mac_cpy[4], mac_cpy[5]);
			mac_cpy += ETHER_ADDR_LEN;
		}
		return (-1);
	}

	return 0;
}


/*
 * Name: qla_set_mac_rcv_mode
 * Function: Enable/Disable AllMulticast and Promiscuous Modes.
 *	`mode` is the full receive-mode bitmask (ha->hw.mac_rcv_mode).
 */
static int
qla_set_mac_rcv_mode(qla_host_t *ha, uint32_t mode)
{
	q80_config_mac_rcv_mode_t *rcv_mode;
	uint32_t err;
	q80_config_mac_rcv_mode_rsp_t *rcv_mode_rsp;
	device_t dev = ha->pci_dev;

	rcv_mode = (q80_config_mac_rcv_mode_t *)ha->hw.mbox;
	bzero(rcv_mode, (sizeof (q80_config_mac_rcv_mode_t)));

	rcv_mode->opcode = Q8_MBX_CONFIG_MAC_RX_MODE;
	rcv_mode->count_version = sizeof (q80_config_mac_rcv_mode_t) >> 2;
	rcv_mode->count_version |= Q8_MBX_CMD_VERSION;

	rcv_mode->mode = mode;

	rcv_mode->cntxt_id = ha->hw.rcv_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)rcv_mode,
		(sizeof (q80_config_mac_rcv_mode_t) >> 2),
		ha->hw.mbox, (sizeof(q80_config_mac_rcv_mode_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	rcv_mode_rsp = (q80_config_mac_rcv_mode_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcv_mode_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	return 0;
}

/* Enable promiscuous reception; updates the cached mode mask. */
int
ql_set_promisc(qla_host_t *ha)
{
	int ret;

	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_RCV_PROMISC_ENABLE;
	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
	return (ret);
}

/* Disable promiscuous reception (best-effort; result ignored). */
void
qla_reset_promisc(qla_host_t *ha)
{
	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_RCV_PROMISC_ENABLE;
	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

/* Enable reception of all multicast frames. */
int
ql_set_allmulti(qla_host_t
*ha)
{
	int ret;

	ha->hw.mac_rcv_mode |= Q8_MBX_MAC_ALL_MULTI_ENABLE;
	ret = qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
	return (ret);
}

/* Disable all-multicast reception (best-effort; result ignored). */
void
qla_reset_allmulti(qla_host_t *ha)
{
	ha->hw.mac_rcv_mode &= ~Q8_MBX_MAC_ALL_MULTI_ENABLE;
	(void)qla_set_mac_rcv_mode(ha, ha->hw.mac_rcv_mode);
}

/*
 * Name: ql_set_max_mtu
 * Function:
 *	Sets the maximum transfer unit size for the specified rcv context.
 */
int
ql_set_max_mtu(qla_host_t *ha, uint32_t mtu, uint16_t cntxt_id)
{
	device_t dev;
	q80_set_max_mtu_t *max_mtu;
	q80_set_max_mtu_rsp_t *max_mtu_rsp;
	uint32_t err;

	dev = ha->pci_dev;

	max_mtu = (q80_set_max_mtu_t *)ha->hw.mbox;
	bzero(max_mtu, (sizeof (q80_set_max_mtu_t)));

	max_mtu->opcode = Q8_MBX_SET_MAX_MTU;
	max_mtu->count_version = (sizeof (q80_set_max_mtu_t) >> 2);
	max_mtu->count_version |= Q8_MBX_CMD_VERSION;

	max_mtu->cntxt_id = cntxt_id;
	max_mtu->mtu = mtu;

	if (qla_mbx_cmd(ha, (uint32_t *)max_mtu,
		(sizeof (q80_set_max_mtu_t) >> 2),
		ha->hw.mbox, (sizeof (q80_set_max_mtu_rsp_t) >> 2), 1)) {
		device_printf(dev, "%s: failed\n", __func__);
		return -1;
	}

	max_mtu_rsp = (q80_set_max_mtu_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(max_mtu_rsp->regcnt_status);

	/*
	 * NOTE(review): a non-zero firmware status is only logged here;
	 * the function still returns 0, so callers cannot detect it.
	 * Confirm whether this is intentional (same pattern in
	 * qla_link_event_req/qla_config_fw_lro below).
	 */
	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	}

	return 0;
}

/*
 * Name: qla_link_event_req
 * Function: Asks the firmware to post asynchronous link-state events
 *	for the given receive context. Firmware error status is logged
 *	but not propagated (returns 0 unless the mailbox itself fails).
 */
static int
qla_link_event_req(qla_host_t *ha, uint16_t cntxt_id)
{
	device_t dev;
	q80_link_event_t *lnk;
	q80_link_event_rsp_t *lnk_rsp;
	uint32_t err;

	dev = ha->pci_dev;

	lnk = (q80_link_event_t *)ha->hw.mbox;
	bzero(lnk, (sizeof (q80_link_event_t)));

	lnk->opcode = Q8_MBX_LINK_EVENT_REQ;
	lnk->count_version = (sizeof (q80_link_event_t) >> 2);
	lnk->count_version |= Q8_MBX_CMD_VERSION;

	lnk->cntxt_id = cntxt_id;
	lnk->cmd = Q8_LINK_EVENT_CMD_ENABLE_ASYNC;

	if (qla_mbx_cmd(ha, (uint32_t *)lnk, (sizeof (q80_link_event_t) >> 2),
		ha->hw.mbox, (sizeof (q80_link_event_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return -1;
	}

	lnk_rsp = (q80_link_event_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(lnk_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	}

	return 0;
}

/*
 * Name: qla_config_fw_lro
 * Function: Enables firmware LRO (IPv4 and IPv6, with and without
 *	destination-IP check) on the given receive context.
 *	Firmware error status is logged but not propagated.
 */
static int
qla_config_fw_lro(qla_host_t *ha, uint16_t cntxt_id)
{
	device_t dev;
	q80_config_fw_lro_t *fw_lro;
	q80_config_fw_lro_rsp_t *fw_lro_rsp;
	uint32_t err;

	dev = ha->pci_dev;

	fw_lro = (q80_config_fw_lro_t *)ha->hw.mbox;
	bzero(fw_lro, sizeof(q80_config_fw_lro_t));

	fw_lro->opcode = Q8_MBX_CONFIG_FW_LRO;
	fw_lro->count_version = (sizeof (q80_config_fw_lro_t) >> 2);
	fw_lro->count_version |= Q8_MBX_CMD_VERSION;

	fw_lro->flags |= Q8_MBX_FW_LRO_IPV4 | Q8_MBX_FW_LRO_IPV4_WO_DST_IP_CHK;
	fw_lro->flags |= Q8_MBX_FW_LRO_IPV6 | Q8_MBX_FW_LRO_IPV6_WO_DST_IP_CHK;

	fw_lro->cntxt_id = cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)fw_lro,
		(sizeof (q80_config_fw_lro_t) >> 2),
		ha->hw.mbox, (sizeof (q80_config_fw_lro_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return -1;
	}

	fw_lro_rsp = (q80_config_fw_lro_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(fw_lro_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	}

	return 0;
}

/*
 * Name: qla_set_cam_search_mode
 * Function: Sets the CAM search mode (internal/auto) via the generic
 *	HW_CONFIG mailbox command. Exposed through the
 *	"set_cam_search_mode" sysctl.
 */
static int
qla_set_cam_search_mode(qla_host_t *ha, uint32_t search_mode)
{
	device_t dev;
	q80_hw_config_t *hw_config;
	q80_hw_config_rsp_t *hw_config_rsp;
	uint32_t err;

	dev = ha->pci_dev;

	hw_config = (q80_hw_config_t *)ha->hw.mbox;
	bzero(hw_config, sizeof (q80_hw_config_t));

	hw_config->opcode = Q8_MBX_HW_CONFIG;
	hw_config->count_version = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE_COUNT;
	hw_config->count_version |= Q8_MBX_CMD_VERSION;

	hw_config->cmd = Q8_HW_CONFIG_SET_CAM_SEARCH_MODE;

	hw_config->u.set_cam_search_mode.mode = search_mode;

	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
		(sizeof (q80_hw_config_t) >> 2),
		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return -1;
	}
	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

	/* NOTE(review): firmware error is logged but 0 is returned — confirm. */
	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	}

	return 0;
}

/*
 * Name: qla_get_cam_search_mode
 * Function: Reads back the current CAM search mode and prints it on
 *	the console. Exposed through the "get_cam_search_mode" sysctl.
 */
static int
qla_get_cam_search_mode(qla_host_t *ha)
{
	device_t dev;
	q80_hw_config_t *hw_config;
	q80_hw_config_rsp_t *hw_config_rsp;
	uint32_t err;

	dev = ha->pci_dev;

	hw_config = (q80_hw_config_t *)ha->hw.mbox;
	bzero(hw_config, sizeof (q80_hw_config_t));

	hw_config->opcode = Q8_MBX_HW_CONFIG;
	hw_config->count_version = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE_COUNT;
	hw_config->count_version |= Q8_MBX_CMD_VERSION;

	hw_config->cmd = Q8_HW_CONFIG_GET_CAM_SEARCH_MODE;

	if (qla_mbx_cmd(ha, (uint32_t *)hw_config,
		(sizeof (q80_hw_config_t) >> 2),
		ha->hw.mbox, (sizeof (q80_hw_config_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return -1;
	}
	hw_config_rsp = (q80_hw_config_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(hw_config_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed [0x%08x]\n", __func__, err);
	} else {
		device_printf(dev, "%s: cam search mode [0x%08x]\n", __func__,
			hw_config_rsp->u.get_cam_search_mode.mode);
	}

	return 0;
}

/*
 * Name: qla_get_hw_stats
 * Function: Issues a GET_STATS mailbox command; on success the response
 *	(rsp_size bytes) is left in ha->hw.mbox for the caller to parse.
 *	`cmd` selects MAC/RCV/XMT statistics plus the function/context id.
 */
static int
qla_get_hw_stats(qla_host_t *ha, uint32_t cmd, uint32_t rsp_size)
{
	device_t dev;
	q80_get_stats_t *stat;
	q80_get_stats_rsp_t *stat_rsp;
	uint32_t err;

	dev = ha->pci_dev;

	stat = (q80_get_stats_t *)ha->hw.mbox;
	bzero(stat, (sizeof (q80_get_stats_t)));

	stat->opcode = Q8_MBX_GET_STATS;
	stat->count_version = 2;
	stat->count_version |= Q8_MBX_CMD_VERSION;

	stat->cmd = cmd;

	if (qla_mbx_cmd(ha, (uint32_t *)stat, 2,
		ha->hw.mbox, (rsp_size >> 2), 0)) {
		device_printf(dev, "%s: failed\n", __func__);
		return -1;
	}

	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(stat_rsp->regcnt_status);

	if (err) {
		return -1;
	}

	return 0;
}

/*
 * Name: ql_get_stats
 * Function: Refreshes the cached MAC, receive and per-ring transmit
 *	statistics from the firmware. Quietly bails out whenever the
 *	interface is not running, the watchdog has paused the port, or
 *	the adapter is offline.
 */
void
ql_get_stats(qla_host_t *ha)
{
	q80_get_stats_rsp_t *stat_rsp;
	q80_mac_stats_t *mstat;
	q80_xmt_stats_t *xstat;
	q80_rcv_stats_t *rstat;
	uint32_t cmd;
	int i;
	struct ifnet *ifp = ha->ifp;

	if (ifp == NULL)
		return;

	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, 0) != 0) {
		device_printf(ha->pci_dev, "%s: failed\n", __func__);
		return;
	}

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		QLA_UNLOCK(ha, __func__);
		return;
	}

	stat_rsp = (q80_get_stats_rsp_t *)ha->hw.mbox;
	/*
	 * Get MAC Statistics
	 */
	cmd = Q8_GET_STATS_CMD_TYPE_MAC;
//	cmd |= Q8_GET_STATS_CMD_CLEAR;

	cmd |= ((ha->pci_func & 0x1) << 16);

	if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ||
		ha->offline)
		goto ql_get_stats_exit;

	if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) {
		mstat = (q80_mac_stats_t *)&stat_rsp->u.mac;
		bcopy(mstat, &ha->hw.mac, sizeof(q80_mac_stats_t));
	} else {
		device_printf(ha->pci_dev, "%s: mac failed [0x%08x]\n",
			__func__, ha->hw.mbox[0]);
	}
2215 /* 2216 * Get RCV Statistics 2217 */ 2218 cmd = Q8_GET_STATS_CMD_RCV | Q8_GET_STATS_CMD_TYPE_CNTXT; 2219// cmd |= Q8_GET_STATS_CMD_CLEAR; 2220 cmd |= (ha->hw.rcv_cntxt_id << 16); 2221 2222 if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || 2223 ha->offline) 2224 goto ql_get_stats_exit; 2225 2226 if (qla_get_hw_stats(ha, cmd, sizeof (q80_get_stats_rsp_t)) == 0) { 2227 rstat = (q80_rcv_stats_t *)&stat_rsp->u.rcv; 2228 bcopy(rstat, &ha->hw.rcv, sizeof(q80_rcv_stats_t)); 2229 } else { 2230 device_printf(ha->pci_dev, "%s: rcv failed [0x%08x]\n", 2231 __func__, ha->hw.mbox[0]); 2232 } 2233 2234 if (ha->qla_watchdog_pause || (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || 2235 ha->offline) 2236 goto ql_get_stats_exit; 2237 /* 2238 * Get XMT Statistics 2239 */ 2240 for (i = 0 ; (i < ha->hw.num_tx_rings); i++) { 2241 if (ha->qla_watchdog_pause || 2242 (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || 2243 ha->offline) 2244 goto ql_get_stats_exit; 2245 2246 cmd = Q8_GET_STATS_CMD_XMT | Q8_GET_STATS_CMD_TYPE_CNTXT; 2247// cmd |= Q8_GET_STATS_CMD_CLEAR; 2248 cmd |= (ha->hw.tx_cntxt[i].tx_cntxt_id << 16); 2249 2250 if (qla_get_hw_stats(ha, cmd, sizeof(q80_get_stats_rsp_t)) 2251 == 0) { 2252 xstat = (q80_xmt_stats_t *)&stat_rsp->u.xmt; 2253 bcopy(xstat, &ha->hw.xmt[i], sizeof(q80_xmt_stats_t)); 2254 } else { 2255 device_printf(ha->pci_dev, "%s: xmt failed [0x%08x]\n", 2256 __func__, ha->hw.mbox[0]); 2257 } 2258 } 2259 2260ql_get_stats_exit: 2261 QLA_UNLOCK(ha, __func__); 2262 2263 return; 2264} 2265 2266/* 2267 * Name: qla_tx_tso 2268 * Function: Checks if the packet to be transmitted is a candidate for 2269 * Large TCP Segment Offload. If yes, the appropriate fields in the Tx 2270 * Ring Structure are plugged in. 
2271 */ 2272static int 2273qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr) 2274{ 2275 struct ether_vlan_header *eh; 2276 struct ip *ip = NULL; 2277 struct ip6_hdr *ip6 = NULL; 2278 struct tcphdr *th = NULL; 2279 uint32_t ehdrlen, hdrlen, ip_hlen, tcp_hlen, tcp_opt_off; 2280 uint16_t etype, opcode, offload = 1; 2281 device_t dev; 2282 2283 dev = ha->pci_dev; 2284 2285 2286 eh = mtod(mp, struct ether_vlan_header *); 2287 2288 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2289 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2290 etype = ntohs(eh->evl_proto); 2291 } else { 2292 ehdrlen = ETHER_HDR_LEN; 2293 etype = ntohs(eh->evl_encap_proto); 2294 } 2295 2296 hdrlen = 0; 2297 2298 switch (etype) { 2299 case ETHERTYPE_IP: 2300 2301 tcp_opt_off = ehdrlen + sizeof(struct ip) + 2302 sizeof(struct tcphdr); 2303 2304 if (mp->m_len < tcp_opt_off) { 2305 m_copydata(mp, 0, tcp_opt_off, hdr); 2306 ip = (struct ip *)(hdr + ehdrlen); 2307 } else { 2308 ip = (struct ip *)(mp->m_data + ehdrlen); 2309 } 2310 2311 ip_hlen = ip->ip_hl << 2; 2312 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO; 2313 2314 2315 if ((ip->ip_p != IPPROTO_TCP) || 2316 (ip_hlen != sizeof (struct ip))){ 2317 /* IP Options are not supported */ 2318 2319 offload = 0; 2320 } else 2321 th = (struct tcphdr *)((caddr_t)ip + ip_hlen); 2322 2323 break; 2324 2325 case ETHERTYPE_IPV6: 2326 2327 tcp_opt_off = ehdrlen + sizeof(struct ip6_hdr) + 2328 sizeof (struct tcphdr); 2329 2330 if (mp->m_len < tcp_opt_off) { 2331 m_copydata(mp, 0, tcp_opt_off, hdr); 2332 ip6 = (struct ip6_hdr *)(hdr + ehdrlen); 2333 } else { 2334 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 2335 } 2336 2337 ip_hlen = sizeof(struct ip6_hdr); 2338 opcode = Q8_TX_CMD_OP_XMT_TCP_LSO_IPV6; 2339 2340 if (ip6->ip6_nxt != IPPROTO_TCP) { 2341 //device_printf(dev, "%s: ipv6\n", __func__); 2342 offload = 0; 2343 } else 2344 th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); 2345 break; 2346 2347 default: 2348 QL_DPRINT8(ha, (dev, "%s: 
type!=ip\n", __func__)); 2349 offload = 0; 2350 break; 2351 } 2352 2353 if (!offload) 2354 return (-1); 2355 2356 tcp_hlen = th->th_off << 2; 2357 hdrlen = ehdrlen + ip_hlen + tcp_hlen; 2358 2359 if (mp->m_len < hdrlen) { 2360 if (mp->m_len < tcp_opt_off) { 2361 if (tcp_hlen > sizeof(struct tcphdr)) { 2362 m_copydata(mp, tcp_opt_off, 2363 (tcp_hlen - sizeof(struct tcphdr)), 2364 &hdr[tcp_opt_off]); 2365 } 2366 } else { 2367 m_copydata(mp, 0, hdrlen, hdr); 2368 } 2369 } 2370 2371 tx_cmd->mss = mp->m_pkthdr.tso_segsz; 2372 2373 tx_cmd->flags_opcode = opcode ; 2374 tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen; 2375 tx_cmd->total_hdr_len = hdrlen; 2376 2377 /* Check for Multicast least significant bit of MSB == 1 */ 2378 if (eh->evl_dhost[0] & 0x01) { 2379 tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST; 2380 } 2381 2382 if (mp->m_len < hdrlen) { 2383 printf("%d\n", hdrlen); 2384 return (1); 2385 } 2386 2387 return (0); 2388} 2389 2390/* 2391 * Name: qla_tx_chksum 2392 * Function: Checks if the packet to be transmitted is a candidate for 2393 * TCP/UDP Checksum offload. If yes, the appropriate fields in the Tx 2394 * Ring Structure are plugged in. 
 */
/*
 * Returns 0 and sets *op_code / *tcp_hdr_off when checksum offload applies;
 * returns -1 when the packet does not request or qualify for offload.
 */
static int
qla_tx_chksum(qla_host_t *ha, struct mbuf *mp, uint32_t *op_code,
	uint32_t *tcp_hdr_off)
{
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	uint32_t ehdrlen, ip_hlen;
	uint16_t etype, opcode, offload = 1;
	device_t dev;
	uint8_t buf[sizeof(struct ip6_hdr)];

	dev = ha->pci_dev;

	*op_code = 0;

	/* Nothing to do unless the stack asked for TCP/UDP csum offload. */
	if ((mp->m_pkthdr.csum_flags &
		(CSUM_TCP|CSUM_UDP|CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) == 0)
		return (-1);

	eh = mtod(mp, struct ether_vlan_header *);

	/* Account for an 802.1Q tag in the L2 header length. */
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}


	switch (etype) {
		case ETHERTYPE_IP:
			ip = (struct ip *)(mp->m_data + ehdrlen);

			ip_hlen = sizeof (struct ip);

			/*
			 * If the IP header is not contiguous in the first
			 * mbuf, copy it to the local buffer before reading.
			 */
			if (mp->m_len < (ehdrlen + ip_hlen)) {
				m_copydata(mp, ehdrlen, sizeof(struct ip), buf);
				ip = (struct ip *)buf;
			}

			if (ip->ip_p == IPPROTO_TCP)
				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM;
			else if (ip->ip_p == IPPROTO_UDP)
				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM;
			else {
				//device_printf(dev, "%s: ipv4\n", __func__);
				offload = 0;
			}
			break;

		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);

			ip_hlen = sizeof(struct ip6_hdr);

			if (mp->m_len < (ehdrlen + ip_hlen)) {
				m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr),
					buf);
				ip6 = (struct ip6_hdr *)buf;
			}

			if (ip6->ip6_nxt == IPPROTO_TCP)
				opcode = Q8_TX_CMD_OP_XMT_TCP_CHKSUM_IPV6;
			else if (ip6->ip6_nxt == IPPROTO_UDP)
				opcode = Q8_TX_CMD_OP_XMT_UDP_CHKSUM_IPV6;
			else {
				//device_printf(dev, "%s: ipv6\n", __func__);
				offload = 0;
			}
			break;

		default:
			offload = 0;
			break;
	}
	if (!offload)
		return (-1);

	*op_code = opcode;
	*tcp_hdr_off = (ip_hlen + ehdrlen);

	return (0);
}

#define QLA_TX_MIN_FREE 2
/*
 * Name: ql_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 * candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 * offload. If either of these creteria are not met, it is transmitted
 * as a regular ethernet frame.
 */
int
ql_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t tx_idx, struct mbuf *mp, uint32_t txr_idx, uint32_t iscsi_pdu)
{
	struct ether_vlan_header *eh;
	qla_hw_t *hw = &ha->hw;
	q80_tx_cmd_t *tx_cmd, tso_cmd;
	bus_dma_segment_t *c_seg;
	uint32_t num_tx_cmds, hdr_len = 0;
	uint32_t total_length = 0, bytes, tx_cmd_count = 0, txr_next;
	device_t dev;
	int i, ret;
	uint8_t *src = NULL, *dst = NULL;
	uint8_t frame_hdr[QL_FRAME_HDR_SIZE];
	uint32_t op_code = 0;
	uint32_t tcp_hdr_off = 0;

	dev = ha->pci_dev;

	/*
	 * Always make sure there is atleast one empty slot in the tx_ring
	 * tx_ring is considered full when there only one entry available
	 */
	/* Each tx command descriptor carries up to 4 DMA segments. */
	num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;

	total_length = mp->m_pkthdr.len;
	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
			__func__, total_length);
		return (EINVAL);
	}
	eh = mtod(mp, struct ether_vlan_header *);

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));

		src = frame_hdr;
		ret = qla_tx_tso(ha, mp, &tso_cmd, src);

		/* ret is 0 (header in mbuf) or 1 (header in frame_hdr) */
		if (!(ret & ~1)) {
			/* find the additional tx_cmd descriptors required */

			if (mp->m_flags & M_VLANTAG)
				tso_cmd.total_hdr_len += ETHER_VLAN_ENCAP_LEN;

			hdr_len = tso_cmd.total_hdr_len;

			/*
			 * The frame header is replicated after the DMA
			 * descriptors: the first extra descriptor holds
			 * (sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN) bytes,
			 * each subsequent one a full descriptor's worth.
			 */
			bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
			bytes = QL_MIN(bytes, hdr_len);

			num_tx_cmds++;
			hdr_len -= bytes;

			while (hdr_len) {
				bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
				hdr_len -= bytes;
				num_tx_cmds++;
			}
			hdr_len = tso_cmd.total_hdr_len;

			if (ret == 0)
				src = (uint8_t *)eh;
		} else
			return (EINVAL);
	} else {
		/* Not TSO; see if plain checksum offload applies. */
		(void)qla_tx_chksum(ha, mp, &op_code, &tcp_hdr_off);
	}

	/* Reclaim completed descriptors if the ring looks too full. */
	if (hw->tx_cntxt[txr_idx].txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
		ql_hw_tx_done_locked(ha, txr_idx);
		if (hw->tx_cntxt[txr_idx].txr_free <=
			(num_tx_cmds + QLA_TX_MIN_FREE)) {
			QL_DPRINT8(ha, (dev, "%s: (hw->txr_free <= "
				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
				__func__));
			return (-1);
		}
	}

	/* Sanity: every slot we are about to use must be free. */
	for (i = 0; i < num_tx_cmds; i++) {
		int j;

		j = (tx_idx+i) & (NUM_TX_DESCRIPTORS - 1);

		if (NULL != ha->tx_ring[txr_idx].tx_buf[j].m_head) {
			QL_ASSERT(ha, 0, \
				("%s [%d]: txr_idx = %d tx_idx = %d mbuf = %p\n",\
				__func__, __LINE__, txr_idx, j,\
				ha->tx_ring[txr_idx].tx_buf[j].m_head));
			return (EINVAL);
		}
	}

	tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[tx_idx];

	if (!(mp->m_pkthdr.csum_flags & CSUM_TSO)) {

		if (nsegs > ha->hw.max_tx_segs)
			ha->hw.max_tx_segs = nsegs;

		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

		if (op_code) {
			tx_cmd->flags_opcode = op_code;
			tx_cmd->tcp_hdr_off = tcp_hdr_off;

		} else {
			tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
		}
	} else {
		/* TSO: qla_tx_tso() already filled tso_cmd. */
		bcopy(&tso_cmd, tx_cmd, sizeof(q80_tx_cmd_t));
		ha->tx_tso_frames++;
	}

	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* Frame already carries an 802.1Q tag. */
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_VLAN_TAGGED;

		if (iscsi_pdu)
			eh->evl_tag |= ha->hw.user_pri_iscsi << 13;

	} else if (mp->m_flags & M_VLANTAG) {
		/* Ask hardware to insert the tag from vlan_tci. */

		if (hdr_len) { /* TSO */
			tx_cmd->flags_opcode |= (Q8_TX_CMD_FLAGS_VLAN_TAGGED |
				Q8_TX_CMD_FLAGS_HW_VLAN_ID);
			tx_cmd->tcp_hdr_off += ETHER_VLAN_ENCAP_LEN;
		} else
			tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_HW_VLAN_ID;

		ha->hw_vlan_tx_frames++;
		tx_cmd->vlan_tci = mp->m_pkthdr.ether_vtag;

		if (iscsi_pdu) {
			/* user_pri_iscsi becomes the 802.1p priority bits */
			tx_cmd->vlan_tci |= ha->hw.user_pri_iscsi << 13;
			mp->m_pkthdr.ether_vtag = tx_cmd->vlan_tci;
		}
	}


	tx_cmd->n_bufs = (uint8_t)nsegs;
	tx_cmd->data_len_lo = (uint8_t)(total_length & 0xFF);
	tx_cmd->data_len_hi = qla_host_to_le16(((uint16_t)(total_length >> 8)));
	tx_cmd->cntxtid = Q8_TX_CMD_PORT_CNXTID(ha->pci_func);

	c_seg = segs;

	/* Fill DMA segments, 4 per descriptor, chaining as needed. */
	while (1) {
		for (i = 0; ((i < Q8_TX_CMD_MAX_SEGMENTS) && nsegs); i++) {

			switch (i) {
			case 0:
				tx_cmd->buf1_addr = c_seg->ds_addr;
				tx_cmd->buf1_len = c_seg->ds_len;
				break;

			case 1:
				tx_cmd->buf2_addr = c_seg->ds_addr;
				tx_cmd->buf2_len = c_seg->ds_len;
				break;

			case 2:
				tx_cmd->buf3_addr = c_seg->ds_addr;
				tx_cmd->buf3_len = c_seg->ds_len;
				break;

			case 3:
				tx_cmd->buf4_addr = c_seg->ds_addr;
				tx_cmd->buf4_len = c_seg->ds_len;
				break;
			}

			c_seg++;
			nsegs--;
		}

		txr_next = hw->tx_cntxt[txr_idx].txr_next =
			(hw->tx_cntxt[txr_idx].txr_next + 1) &
				(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		if (!nsegs)
			break;

		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
	}

	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {

		/* TSO : Copy the header in the following tx cmd descriptors */

		txr_next = hw->tx_cntxt[txr_idx].txr_next;

		tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

		/* First header descriptor: payload starts after the
		 * Q8_TX_CMD_TSO_ALIGN-byte prefix. */
		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
		bytes = QL_MIN(bytes, hdr_len);

		dst = (uint8_t *)tx_cmd + Q8_TX_CMD_TSO_ALIGN;

		if (mp->m_flags & M_VLANTAG) {
			/* first copy the src/dst MAC addresses */
			bcopy(src, dst, (ETHER_ADDR_LEN * 2));
			dst += (ETHER_ADDR_LEN * 2);
			src += (ETHER_ADDR_LEN * 2);

			/* Synthesize the 802.1Q tag in the copied header. */
			*((uint16_t *)dst) = htons(ETHERTYPE_VLAN);
			dst += 2;
			*((uint16_t *)dst) = htons(mp->m_pkthdr.ether_vtag);
			dst += 2;

			/* bytes left in src header */
			hdr_len -= ((ETHER_ADDR_LEN * 2) +
					ETHER_VLAN_ENCAP_LEN);

			/* bytes left in TxCmd Entry */
			bytes -= ((ETHER_ADDR_LEN * 2) + ETHER_VLAN_ENCAP_LEN);


			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		} else {
			bcopy(src, dst, bytes);
			src += bytes;
			hdr_len -= bytes;
		}

		txr_next = hw->tx_cntxt[txr_idx].txr_next =
			(hw->tx_cntxt[txr_idx].txr_next + 1) &
				(NUM_TX_DESCRIPTORS - 1);
		tx_cmd_count++;

		/* Remaining header bytes fill whole descriptors. */
		while (hdr_len) {
			tx_cmd = &hw->tx_cntxt[txr_idx].tx_ring_base[txr_next];
			bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));

			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);

			bcopy(src, tx_cmd, bytes);
			src += bytes;
			hdr_len -= bytes;

			txr_next = hw->tx_cntxt[txr_idx].txr_next =
				(hw->tx_cntxt[txr_idx].txr_next + 1) &
					(NUM_TX_DESCRIPTORS - 1);
			tx_cmd_count++;
		}
	}

	hw->tx_cntxt[txr_idx].txr_free =
		hw->tx_cntxt[txr_idx].txr_free - tx_cmd_count;

	/* Ring the doorbell so hardware picks up the new descriptors. */
	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->tx_cntxt[txr_idx].txr_next,\
		txr_idx);
	QL_DPRINT8(ha, (dev, "%s: return\n", __func__));

	return (0);
}



#define Q8_CONFIG_IND_TBL_SIZE	32 /* < Q8_RSS_IND_TBL_SIZE and power of 2 */
/*
 * Programs the RSS indirection table, distributing table entries across the
 * SDS rings round-robin, in chunks of Q8_CONFIG_IND_TBL_SIZE.
 */
static int
qla_config_rss_ind_table(qla_host_t *ha)
{
	uint32_t i, count;
	uint8_t rss_ind_tbl[Q8_CONFIG_IND_TBL_SIZE];


	for (i = 0; i < Q8_CONFIG_IND_TBL_SIZE; i++) {
		rss_ind_tbl[i] = i % ha->hw.num_sds_rings;
	}

	for (i = 0; i <= Q8_RSS_IND_TBL_MAX_IDX ;
		i = i + Q8_CONFIG_IND_TBL_SIZE) {

		/* Last chunk may be short. */
		if ((i + Q8_CONFIG_IND_TBL_SIZE) > Q8_RSS_IND_TBL_MAX_IDX) {
			count = Q8_RSS_IND_TBL_MAX_IDX - i + 1;
		} else {
			count = Q8_CONFIG_IND_TBL_SIZE;
		}

		if (qla_set_rss_ind_table(ha, i, count, ha->hw.rcv_cntxt_id,
			rss_ind_tbl))
			return (-1);
	}

	return (0);
}

/*
 * Initializes one software LRO control block per SDS ring.
 */
static int
qla_config_soft_lro(qla_host_t *ha)
{
        int i;
        qla_hw_t *hw = &ha->hw;
        struct lro_ctrl *lro;

        for (i = 0; i < hw->num_sds_rings; i++) {
                lro = &hw->sds[i].lro;

		bzero(lro, sizeof(struct lro_ctrl));

#if (__FreeBSD_version >= 1100101)
                if (tcp_lro_init_args(lro, ha->ifp, 0, NUM_RX_DESCRIPTORS)) {
                        device_printf(ha->pci_dev,
				"%s: tcp_lro_init_args [%d] failed\n",
                                __func__, i);
                        return (-1);
                }
#else
                if (tcp_lro_init(lro)) {
                        device_printf(ha->pci_dev,
				"%s: tcp_lro_init [%d] failed\n",
                                __func__, i);
                        return (-1);
                }
#endif /* #if (__FreeBSD_version >= 1100101) */

                lro->ifp = ha->ifp;
        }

        QL_DPRINT2(ha, (ha->pci_dev, "%s: LRO initialized\n", __func__));
        return (0);
}

/*
 * Flushes any packets still held in the per-ring soft LRO state.
 */
static void
qla_drain_soft_lro(qla_host_t *ha)
{
        int i;
        qla_hw_t *hw = &ha->hw;
        struct lro_ctrl *lro;

       	for (i = 0; i < hw->num_sds_rings; i++) {
               	lro = &hw->sds[i].lro;

#if (__FreeBSD_version >= 1100101)
		tcp_lro_flush_all(lro);
#else
                struct lro_entry *queued;

		while ((!SLIST_EMPTY(&lro->lro_active))) {
			queued = SLIST_FIRST(&lro->lro_active);
			SLIST_REMOVE_HEAD(&lro->lro_active, next);
			tcp_lro_flush(lro, queued);
		}
#endif /* #if (__FreeBSD_version >= 1100101) */
	}

	return;
}

/*
 * Releases soft LRO resources for every SDS ring.
 */
static void
qla_free_soft_lro(qla_host_t *ha)
{
        int i;
        qla_hw_t *hw = &ha->hw;
        struct lro_ctrl *lro;

        for (i = 0; i < hw->num_sds_rings; i++) {
                lro = &hw->sds[i].lro;
		tcp_lro_free(lro);
	}

	return;
}


/*
 * Name: ql_del_hw_if
 * Function: Destroys the hardware specific entities corresponding to an
 * Ethernet Interface
 */
void
ql_del_hw_if(qla_host_t *ha)
{
	uint32_t i;
	uint32_t num_msix;

	(void)qla_stop_nic_func(ha);

	qla_del_rcv_cntxt(ha);

	if(qla_del_xmt_cntxt(ha))
		goto ql_del_hw_if_exit;

	/* Tear down interrupt contexts in Q8_MAX_INTR_VECTORS chunks. */
	if (ha->hw.flags.init_intr_cnxt) {
		for (i = 0; i < ha->hw.num_sds_rings; ) {

			if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
				num_msix = Q8_MAX_INTR_VECTORS;
			else
				num_msix = ha->hw.num_sds_rings - i;

			if (qla_config_intr_cntxt(ha, i, num_msix, 0))
				break;

			i += num_msix;
		}

		ha->hw.flags.init_intr_cnxt = 0;
	}

ql_del_hw_if_exit:
	if (ha->hw.enable_soft_lro) {
		qla_drain_soft_lro(ha);
		qla_free_soft_lro(ha);
	}

	return;
}

/*
 * Queries the NIC partition for 9KB (jumbo cluster) support and clears
 * ha->hw.enable_9kb if the firmware does not support it.
 */
void
qla_confirm_9kb_enable(qla_host_t *ha)
{
	uint32_t supports_9kb = 0;

	ha->hw.mbx_intr_mask_offset = READ_REG32(ha, Q8_MBOX_INT_MASK_MSIX);

	/* Use MSI-X vector 0; Enable Firmware Mailbox Interrupt */
	WRITE_REG32(ha, Q8_MBOX_INT_ENABLE, BIT_2);
	WRITE_REG32(ha, ha->hw.mbx_intr_mask_offset, 0x0);

	qla_get_nic_partition(ha, &supports_9kb, NULL);

	if (!supports_9kb)
		ha->hw.enable_9kb = 0;

	return;
}

/*
 * Name: ql_init_hw_if
 * Function: Creates the hardware specific entities corresponding to an
 * Ethernet Interface - Transmit and Receive Contexts. Sets the MAC Address
 * corresponding to the interface. Enables LRO if allowed.
 */
int
ql_init_hw_if(qla_host_t *ha)
{
	device_t	dev;
	uint32_t	i;
	uint8_t		bcast_mac[6];
	qla_rdesc_t	*rdesc;
	uint32_t	num_msix;

	dev = ha->pci_dev;

	/* Clear the status descriptor rings before handing them to fw. */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		bzero(ha->hw.dma_buf.sds_ring[i].dma_b,
			ha->hw.dma_buf.sds_ring[i].size);
	}

	/* Create interrupt contexts in Q8_MAX_INTR_VECTORS chunks. */
	for (i = 0; i < ha->hw.num_sds_rings; ) {

		if ((i + Q8_MAX_INTR_VECTORS) < ha->hw.num_sds_rings)
			num_msix = Q8_MAX_INTR_VECTORS;
		else
			num_msix = ha->hw.num_sds_rings - i;

		if (qla_config_intr_cntxt(ha, i, num_msix, 1)) {

			/* Unwind any chunks already created. */
			if (i > 0) {

				num_msix = i;

				for (i = 0; i < num_msix; ) {
					qla_config_intr_cntxt(ha, i,
						Q8_MAX_INTR_VECTORS, 0);
					i += Q8_MAX_INTR_VECTORS;
				}
			}
			return (-1);
		}

		i = i + num_msix;
	}

	ha->hw.flags.init_intr_cnxt = 1;

	/*
	 * Create Receive Context
	 */
	if (qla_init_rcv_cntxt(ha)) {
		return (-1);
	}

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		rdesc = &ha->hw.rds[i];
		rdesc->rx_next = NUM_RX_DESCRIPTORS - 2;
		rdesc->rx_in = 0;
		/* Update the RDS Producer Indices */
		QL_UPDATE_RDS_PRODUCER_INDEX(ha, rdesc->prod_std,\
			rdesc->rx_next);
	}

	/*
	 * Create Transmit Context
	 */
	if (qla_init_xmt_cntxt(ha)) {
		qla_del_rcv_cntxt(ha);
		return (-1);
	}
	ha->hw.max_tx_segs = 0;

	/* Program the unicast and broadcast MAC addresses. */
	if (qla_config_mac_addr(ha, ha->hw.mac_addr, 1, 1))
		return(-1);

	ha->hw.flags.unicast_mac = 1;

	bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
	bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

	if (qla_config_mac_addr(ha, bcast_mac, 1, 1))
		return (-1);

	ha->hw.flags.bcast_mac = 1;

	/*
	 * program any cached multicast addresses
	 */
	if (qla_hw_add_all_mcast(ha))
		return (-1);

	if (ql_set_max_mtu(ha, ha->max_frame_size, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_rss(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	if (qla_config_rss_ind_table(ha))
		return (-1);

	if (qla_config_intr_coalesce(ha, ha->hw.rcv_cntxt_id, 0, 1))
		return (-1);

	if (qla_link_event_req(ha, ha->hw.rcv_cntxt_id))
		return (-1);

	/* Prefer hardware LRO when enabled; otherwise fall back to soft. */
	if (ha->ifp->if_capenable & IFCAP_LRO) {
		if (ha->hw.enable_hw_lro) {
			ha->hw.enable_soft_lro = 0;

			if (qla_config_fw_lro(ha, ha->hw.rcv_cntxt_id))
				return (-1);
		} else {
			ha->hw.enable_soft_lro = 1;

			if (qla_config_soft_lro(ha))
				return (-1);
		}
	}

	if (qla_init_nic_func(ha))
		return (-1);

	if (qla_query_fw_dcbx_caps(ha))
		return (-1);

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		QL_ENABLE_INTERRUPTS(ha, i);

	return (0);
}

/*
 * Maps SDS rings [start_idx, start_idx + num_idx) 1:1 onto the RDS rings
 * with the same indices via the Q8_MBX_MAP_SDS_TO_RDS mailbox command.
 */
static int
qla_map_sds_to_rds(qla_host_t *ha, uint32_t start_idx, uint32_t num_idx)
{
        device_t                dev = ha->pci_dev;
        q80_rq_map_sds_to_rds_t *map_rings;
	q80_rsp_map_sds_to_rds_t *map_rings_rsp;
        uint32_t                i, err;
        qla_hw_t                *hw = &ha->hw;

        map_rings = (q80_rq_map_sds_to_rds_t *)ha->hw.mbox;
        bzero(map_rings, sizeof(q80_rq_map_sds_to_rds_t));

        map_rings->opcode = Q8_MBX_MAP_SDS_TO_RDS;
        map_rings->count_version = (sizeof (q80_rq_map_sds_to_rds_t) >> 2);
        map_rings->count_version |= Q8_MBX_CMD_VERSION;

        map_rings->cntxt_id = hw->rcv_cntxt_id;
        map_rings->num_rings = num_idx;

	for (i = 0; i < num_idx; i++) {
		map_rings->sds_rds[i].sds_ring = i + start_idx;
		map_rings->sds_rds[i].rds_ring = i + start_idx;
	}

	/*
	 * NOTE(review): the response length below uses
	 * sizeof(q80_rsp_add_rcv_rings_t) rather than
	 * sizeof(q80_rsp_map_sds_to_rds_t); looks like copy/paste from
	 * qla_add_rcv_rings() - confirm the intended response size.
	 */
        if (qla_mbx_cmd(ha, (uint32_t *)map_rings,
                (sizeof (q80_rq_map_sds_to_rds_t) >> 2),
                ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
                device_printf(dev, "%s: failed0\n", __func__);
                return (-1);
        }

        map_rings_rsp = (q80_rsp_map_sds_to_rds_t *)ha->hw.mbox;

        err = Q8_MBX_RSP_STATUS(map_rings_rsp->regcnt_status);

        if (err) {
                device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
                return (-1);
        }

        return (0);
}

/*
 * Name: qla_init_rcv_cntxt
 * Function: Creates the Receive Context.
 */
static int
qla_init_rcv_cntxt(qla_host_t *ha)
{
	q80_rq_rcv_cntxt_t	*rcntxt;
	q80_rsp_rcv_cntxt_t	*rcntxt_rsp;
	q80_stat_desc_t		*sdesc;
	int			i, j;
        qla_hw_t		*hw = &ha->hw;
	device_t		dev;
	uint32_t		err;
	uint32_t		rcntxt_sds_rings;
	uint32_t		rcntxt_rds_rings;
	uint32_t		max_idx;

	dev = ha->pci_dev;

	/*
	 * Create Receive Context
	 */

	/*
	 * Pre-mark every status descriptor as "owned" (non-zero).
	 * NOTE(review): sdesc is never advanced inside the inner loop, so
	 * only descriptor 0 of each ring is written NUM_STATUS_DESCRIPTORS
	 * times - confirm whether sds_ring_base[j] was intended.
	 */
	for (i = 0; i < hw->num_sds_rings; i++) {
		sdesc = (q80_stat_desc_t *)&hw->sds[i].sds_ring_base[0];

		for (j = 0; j < NUM_STATUS_DESCRIPTORS; j++) {
			sdesc->data[0] = 1ULL;
			sdesc->data[1] = 1ULL;
		}
	}

	/* The create command carries at most MAX_RCNTXT_SDS_RINGS rings;
	 * extra rings are added afterwards via qla_add_rcv_rings(). */
	rcntxt_sds_rings = hw->num_sds_rings;
	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS)
		rcntxt_sds_rings = MAX_RCNTXT_SDS_RINGS;

	rcntxt_rds_rings = hw->num_rds_rings;

	if (hw->num_rds_rings > MAX_RDS_RING_SETS)
		rcntxt_rds_rings = MAX_RDS_RING_SETS;

	rcntxt = (q80_rq_rcv_cntxt_t *)ha->hw.mbox;
	bzero(rcntxt, (sizeof (q80_rq_rcv_cntxt_t)));

	rcntxt->opcode = Q8_MBX_CREATE_RX_CNTXT;
	rcntxt->count_version = (sizeof (q80_rq_rcv_cntxt_t) >> 2);
	rcntxt->count_version |= Q8_MBX_CMD_VERSION;

	rcntxt->cap0 = Q8_RCV_CNTXT_CAP0_BASEFW |
			Q8_RCV_CNTXT_CAP0_LRO |
			Q8_RCV_CNTXT_CAP0_HW_LRO |
			Q8_RCV_CNTXT_CAP0_RSS |
			Q8_RCV_CNTXT_CAP0_SGL_LRO;

	if (ha->hw.enable_9kb)
		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SINGLE_JUMBO;
	else
		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_SGL_JUMBO;

	if (ha->hw.num_rds_rings > 1) {
		rcntxt->nrds_sets_rings = rcntxt_rds_rings | (1 << 5);
		rcntxt->cap0 |= Q8_RCV_CNTXT_CAP0_MULTI_RDS;
	} else
		rcntxt->nrds_sets_rings = 0x1 | (1 << 5);

	rcntxt->nsds_rings = rcntxt_sds_rings;

	rcntxt->rds_producer_mode = Q8_RCV_CNTXT_RDS_PROD_MODE_UNIQUE;

	rcntxt->rcv_vpid = 0;

	for (i = 0; i <  rcntxt_sds_rings; i++) {
		rcntxt->sds[i].paddr =
			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
		rcntxt->sds[i].size =
			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
		rcntxt->sds[i].intr_id = qla_host_to_le16(hw->intr_id[i]);
		rcntxt->sds[i].intr_src_bit = qla_host_to_le16(0);
	}

	for (i = 0; i <  rcntxt_rds_rings; i++) {
		rcntxt->rds[i].paddr_std =
			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);

		if (ha->hw.enable_9kb)
			rcntxt->rds[i].std_bsize =
				qla_host_to_le64(MJUM9BYTES);
		else
			rcntxt->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);

		rcntxt->rds[i].std_nentries =
			qla_host_to_le32(NUM_RX_DESCRIPTORS);
	}

	if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
		(sizeof (q80_rq_rcv_cntxt_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rsp_rcv_cntxt_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	rcntxt_rsp = (q80_rsp_rcv_cntxt_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	/* Record the consumer/producer register offsets handed back by fw. */
	for (i = 0; i <  rcntxt_sds_rings; i++) {
		hw->sds[i].sds_consumer = rcntxt_rsp->sds_cons[i];
	}

	for (i = 0; i <  rcntxt_rds_rings; i++) {
		hw->rds[i].prod_std = rcntxt_rsp->rds[i].prod_std;
	}

	hw->rcv_cntxt_id = rcntxt_rsp->cntxt_id;

	ha->hw.flags.init_rx_cnxt = 1;

	/* Add the rings that did not fit in the create command. */
	if (hw->num_sds_rings > MAX_RCNTXT_SDS_RINGS) {

		for (i = MAX_RCNTXT_SDS_RINGS; i < hw->num_sds_rings;) {

			if ((i + MAX_RCNTXT_SDS_RINGS) < hw->num_sds_rings)
				max_idx = MAX_RCNTXT_SDS_RINGS;
			else
				max_idx = hw->num_sds_rings - i;

			err = qla_add_rcv_rings(ha, i, max_idx);
			if (err)
				return -1;

			i += max_idx;
		}
	}

	/* With multiple RDS rings, map each SDS ring to its RDS ring. */
	if (hw->num_rds_rings > 1) {

		for (i = 0; i < hw->num_rds_rings; ) {

			if ((i + MAX_SDS_TO_RDS_MAP) < hw->num_rds_rings)
				max_idx = MAX_SDS_TO_RDS_MAP;
			else
				max_idx = hw->num_rds_rings - i;

			err = qla_map_sds_to_rds(ha, i, max_idx);
			if (err)
				return -1;

			i += max_idx;
		}
	}

	return (0);
}

/*
 * Adds 'nsds' additional SDS/RDS ring pairs (starting at sds_idx) to the
 * already-created receive context via Q8_MBX_ADD_RX_RINGS.
 */
static int
qla_add_rcv_rings(qla_host_t *ha, uint32_t sds_idx, uint32_t nsds)
{
	device_t		dev = ha->pci_dev;
	q80_rq_add_rcv_rings_t	*add_rcv;
	q80_rsp_add_rcv_rings_t	*add_rcv_rsp;
	uint32_t		i,j, err;
        qla_hw_t		*hw = &ha->hw;

	add_rcv = (q80_rq_add_rcv_rings_t *)ha->hw.mbox;
	bzero(add_rcv, sizeof (q80_rq_add_rcv_rings_t));

	add_rcv->opcode = Q8_MBX_ADD_RX_RINGS;
	add_rcv->count_version = (sizeof (q80_rq_add_rcv_rings_t) >> 2);
	add_rcv->count_version |= Q8_MBX_CMD_VERSION;

	add_rcv->nrds_sets_rings = nsds | (1 << 5);
	add_rcv->nsds_rings = nsds;
	add_rcv->cntxt_id = hw->rcv_cntxt_id;

	for (i = 0; i <  nsds; i++) {

		j = i + sds_idx;

		add_rcv->sds[i].paddr =
			qla_host_to_le64(hw->dma_buf.sds_ring[j].dma_addr);

		add_rcv->sds[i].size =
			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);

		add_rcv->sds[i].intr_id = qla_host_to_le16(hw->intr_id[j]);
		add_rcv->sds[i].intr_src_bit = qla_host_to_le16(0);

	}

	for (i = 0; (i <  nsds); i++) {
		j = i + sds_idx;

		add_rcv->rds[i].paddr_std =
			qla_host_to_le64(hw->dma_buf.rds_ring[j].dma_addr);

		if (ha->hw.enable_9kb)
			add_rcv->rds[i].std_bsize =
				qla_host_to_le64(MJUM9BYTES);
		else
			add_rcv->rds[i].std_bsize = qla_host_to_le64(MCLBYTES);

		add_rcv->rds[i].std_nentries =
			qla_host_to_le32(NUM_RX_DESCRIPTORS);
	}


	if (qla_mbx_cmd(ha, (uint32_t *)add_rcv,
		(sizeof (q80_rq_add_rcv_rings_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rsp_add_rcv_rings_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}

	add_rcv_rsp = (q80_rsp_add_rcv_rings_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(add_rcv_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return (-1);
	}

	/* Record the fw-assigned consumer/producer register offsets. */
	for (i = 0; i < nsds; i++) {
		hw->sds[(i + sds_idx)].sds_consumer = add_rcv_rsp->sds_cons[i];
	}

	for (i = 0; i < nsds; i++) {
		hw->rds[(i + sds_idx)].prod_std = add_rcv_rsp->rds[i].prod_std;
	}

	return (0);
}

/*
 * Name: qla_del_rcv_cntxt
 * Function: Destroys the Receive Context.
 */
static void
qla_del_rcv_cntxt(qla_host_t *ha)
{
	device_t			dev = ha->pci_dev;
	q80_rcv_cntxt_destroy_t		*rcntxt;
	q80_rcv_cntxt_destroy_rsp_t	*rcntxt_rsp;
	uint32_t			err;
	uint8_t				bcast_mac[6];

	if (!ha->hw.flags.init_rx_cnxt)
		return;

	/* Remove all MAC filters before destroying the context. */
	if (qla_hw_del_all_mcast(ha))
		return;

	if (ha->hw.flags.bcast_mac) {

		bcast_mac[0] = 0xFF; bcast_mac[1] = 0xFF; bcast_mac[2] = 0xFF;
		bcast_mac[3] = 0xFF; bcast_mac[4] = 0xFF; bcast_mac[5] = 0xFF;

		if (qla_config_mac_addr(ha, bcast_mac, 0, 1))
			return;
		ha->hw.flags.bcast_mac = 0;

	}

	if (ha->hw.flags.unicast_mac) {
		if (qla_config_mac_addr(ha, ha->hw.mac_addr, 0, 1))
			return;
		ha->hw.flags.unicast_mac = 0;
	}

	rcntxt = (q80_rcv_cntxt_destroy_t *)ha->hw.mbox;
	bzero(rcntxt, (sizeof (q80_rcv_cntxt_destroy_t)));

	rcntxt->opcode = Q8_MBX_DESTROY_RX_CNTXT;
	rcntxt->count_version = (sizeof (q80_rcv_cntxt_destroy_t) >> 2);
	rcntxt->count_version |= Q8_MBX_CMD_VERSION;

	rcntxt->cntxt_id = ha->hw.rcv_cntxt_id;

	if (qla_mbx_cmd(ha, (uint32_t *)rcntxt,
		(sizeof (q80_rcv_cntxt_destroy_t) >> 2),
		ha->hw.mbox, (sizeof(q80_rcv_cntxt_destroy_rsp_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return;
	}
	rcntxt_rsp = (q80_rcv_cntxt_destroy_rsp_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(rcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
	}

	/* Context is considered gone even if the destroy reported an error. */
	ha->hw.flags.init_rx_cnxt = 0;
	return;
}

/*
 * Name: qla_init_xmt_cntxt
 * Function: Creates the Transmit Context.
 */
static int
qla_init_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx)
{
	device_t		dev;
        qla_hw_t		*hw = &ha->hw;
	q80_rq_tx_cntxt_t	*tcntxt;
	q80_rsp_tx_cntxt_t	*tcntxt_rsp;
	uint32_t		err;
	qla_hw_tx_cntxt_t       *hw_tx_cntxt;
	uint32_t		intr_idx;

	hw_tx_cntxt = &hw->tx_cntxt[txr_idx];

	dev = ha->pci_dev;

	/*
	 * Create Transmit Context
	 */
	tcntxt = (q80_rq_tx_cntxt_t *)ha->hw.mbox;
	bzero(tcntxt, (sizeof (q80_rq_tx_cntxt_t)));

	tcntxt->opcode = Q8_MBX_CREATE_TX_CNTXT;
	tcntxt->count_version = (sizeof (q80_rq_tx_cntxt_t) >> 2);
	tcntxt->count_version |= Q8_MBX_CMD_VERSION;

	intr_idx = txr_idx;

#ifdef QL_ENABLE_ISCSI_TLV

	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO |
				Q8_TX_CNTXT_CAP0_TC;

	/* Upper half of the tx rings carries iSCSI traffic class 1. */
	if (txr_idx >= (ha->hw.num_tx_rings >> 1)) {
		tcntxt->traffic_class = 1;
	}

	intr_idx = txr_idx % (ha->hw.num_tx_rings >> 1);

#else
	tcntxt->cap0 = Q8_TX_CNTXT_CAP0_BASEFW | Q8_TX_CNTXT_CAP0_LSO;

#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	tcntxt->ntx_rings = 1;

	tcntxt->tx_ring[0].paddr =
		qla_host_to_le64(hw_tx_cntxt->tx_ring_paddr);
	tcntxt->tx_ring[0].tx_consumer =
		qla_host_to_le64(hw_tx_cntxt->tx_cons_paddr);
	tcntxt->tx_ring[0].nentries = qla_host_to_le16(NUM_TX_DESCRIPTORS);

	tcntxt->tx_ring[0].intr_id = qla_host_to_le16(hw->intr_id[intr_idx]);
	tcntxt->tx_ring[0].intr_src_bit = qla_host_to_le16(0);

	/* Reset the software view of the ring before firmware starts. */
	hw_tx_cntxt->txr_free = NUM_TX_DESCRIPTORS;
	hw_tx_cntxt->txr_next = hw_tx_cntxt->txr_comp = 0;
	*hw_tx_cntxt->tx_cons = 0;

	if (qla_mbx_cmd(ha, (uint32_t *)tcntxt,
		(sizeof (q80_rq_tx_cntxt_t) >> 2),
		ha->hw.mbox,
		(sizeof(q80_rsp_tx_cntxt_t) >> 2), 0)) {
		device_printf(dev, "%s: failed0\n", __func__);
		return (-1);
	}
	tcntxt_rsp = (q80_rsp_tx_cntxt_t *)ha->hw.mbox;

	err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status);

	if (err) {
		device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err);
		return -1;
	}

	hw_tx_cntxt->tx_prod_reg = tcntxt_rsp->tx_ring[0].prod_index;
	hw_tx_cntxt->tx_cntxt_id = tcntxt_rsp->tx_ring[0].cntxt_id;

	if (qla_config_intr_coalesce(ha, hw_tx_cntxt->tx_cntxt_id, 0, 0))
		return (-1);

	return (0);
}


/*
 * Name: qla_del_xmt_cntxt
 * Function: Destroys the Transmit Context.
3493 */ 3494static int 3495qla_del_xmt_cntxt_i(qla_host_t *ha, uint32_t txr_idx) 3496{ 3497 device_t dev = ha->pci_dev; 3498 q80_tx_cntxt_destroy_t *tcntxt; 3499 q80_tx_cntxt_destroy_rsp_t *tcntxt_rsp; 3500 uint32_t err; 3501 3502 tcntxt = (q80_tx_cntxt_destroy_t *)ha->hw.mbox; 3503 bzero(tcntxt, (sizeof (q80_tx_cntxt_destroy_t))); 3504 3505 tcntxt->opcode = Q8_MBX_DESTROY_TX_CNTXT; 3506 tcntxt->count_version = (sizeof (q80_tx_cntxt_destroy_t) >> 2); 3507 tcntxt->count_version |= Q8_MBX_CMD_VERSION; 3508 3509 tcntxt->cntxt_id = ha->hw.tx_cntxt[txr_idx].tx_cntxt_id; 3510 3511 if (qla_mbx_cmd(ha, (uint32_t *)tcntxt, 3512 (sizeof (q80_tx_cntxt_destroy_t) >> 2), 3513 ha->hw.mbox, (sizeof (q80_tx_cntxt_destroy_rsp_t) >> 2), 0)) { 3514 device_printf(dev, "%s: failed0\n", __func__); 3515 return (-1); 3516 } 3517 tcntxt_rsp = (q80_tx_cntxt_destroy_rsp_t *)ha->hw.mbox; 3518 3519 err = Q8_MBX_RSP_STATUS(tcntxt_rsp->regcnt_status); 3520 3521 if (err) { 3522 device_printf(dev, "%s: failed1 [0x%08x]\n", __func__, err); 3523 return (-1); 3524 } 3525 3526 return (0); 3527} 3528static int 3529qla_del_xmt_cntxt(qla_host_t *ha) 3530{ 3531 uint32_t i; 3532 int ret = 0; 3533 3534 if (!ha->hw.flags.init_tx_cnxt) 3535 return (ret); 3536 3537 for (i = 0; i < ha->hw.num_tx_rings; i++) { 3538 if ((ret = qla_del_xmt_cntxt_i(ha, i)) != 0) 3539 break; 3540 } 3541 ha->hw.flags.init_tx_cnxt = 0; 3542 3543 return (ret); 3544} 3545 3546static int 3547qla_init_xmt_cntxt(qla_host_t *ha) 3548{ 3549 uint32_t i, j; 3550 3551 for (i = 0; i < ha->hw.num_tx_rings; i++) { 3552 if (qla_init_xmt_cntxt_i(ha, i) != 0) { 3553 for (j = 0; j < i; j++) { 3554 if (qla_del_xmt_cntxt_i(ha, j)) 3555 break; 3556 } 3557 return (-1); 3558 } 3559 } 3560 ha->hw.flags.init_tx_cnxt = 1; 3561 return (0); 3562} 3563 3564static int 3565qla_hw_all_mcast(qla_host_t *ha, uint32_t add_mcast) 3566{ 3567 int i, nmcast; 3568 uint32_t count = 0; 3569 uint8_t *mcast; 3570 3571 nmcast = ha->hw.nmcast; 3572 3573 QL_DPRINT2(ha, 
(ha->pci_dev, 3574 "%s:[0x%x] enter nmcast = %d \n", __func__, add_mcast, nmcast)); 3575 3576 mcast = ha->hw.mac_addr_arr; 3577 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 3578 3579 for (i = 0 ; ((i < Q8_MAX_NUM_MULTICAST_ADDRS) && nmcast); i++) { 3580 if ((ha->hw.mcast[i].addr[0] != 0) || 3581 (ha->hw.mcast[i].addr[1] != 0) || 3582 (ha->hw.mcast[i].addr[2] != 0) || 3583 (ha->hw.mcast[i].addr[3] != 0) || 3584 (ha->hw.mcast[i].addr[4] != 0) || 3585 (ha->hw.mcast[i].addr[5] != 0)) { 3586 3587 bcopy(ha->hw.mcast[i].addr, mcast, ETHER_ADDR_LEN); 3588 mcast = mcast + ETHER_ADDR_LEN; 3589 count++; 3590 3591 if (count == Q8_MAX_MAC_ADDRS) { 3592 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, 3593 add_mcast, count)) { 3594 device_printf(ha->pci_dev, 3595 "%s: failed\n", __func__); 3596 return (-1); 3597 } 3598 3599 count = 0; 3600 mcast = ha->hw.mac_addr_arr; 3601 memset(mcast, 0, 3602 (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 3603 } 3604 3605 nmcast--; 3606 } 3607 } 3608 3609 if (count) { 3610 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mcast, 3611 count)) { 3612 device_printf(ha->pci_dev, "%s: failed\n", __func__); 3613 return (-1); 3614 } 3615 } 3616 QL_DPRINT2(ha, (ha->pci_dev, 3617 "%s:[0x%x] exit nmcast = %d \n", __func__, add_mcast, nmcast)); 3618 3619 return 0; 3620} 3621 3622static int 3623qla_hw_add_all_mcast(qla_host_t *ha) 3624{ 3625 int ret; 3626 3627 ret = qla_hw_all_mcast(ha, 1); 3628 3629 return (ret); 3630} 3631 3632int 3633qla_hw_del_all_mcast(qla_host_t *ha) 3634{ 3635 int ret; 3636 3637 ret = qla_hw_all_mcast(ha, 0); 3638 3639 bzero(ha->hw.mcast, (sizeof (qla_mcast_t) * Q8_MAX_NUM_MULTICAST_ADDRS)); 3640 ha->hw.nmcast = 0; 3641 3642 return (ret); 3643} 3644 3645static int 3646qla_hw_mac_addr_present(qla_host_t *ha, uint8_t *mta) 3647{ 3648 int i; 3649 3650 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 3651 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) 3652 return (0); /* its been already added */ 3653 } 3654 return (-1); 3655} 
3656 3657static int 3658qla_hw_add_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) 3659{ 3660 int i; 3661 3662 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 3663 3664 if ((ha->hw.mcast[i].addr[0] == 0) && 3665 (ha->hw.mcast[i].addr[1] == 0) && 3666 (ha->hw.mcast[i].addr[2] == 0) && 3667 (ha->hw.mcast[i].addr[3] == 0) && 3668 (ha->hw.mcast[i].addr[4] == 0) && 3669 (ha->hw.mcast[i].addr[5] == 0)) { 3670 3671 bcopy(mta, ha->hw.mcast[i].addr, Q8_MAC_ADDR_LEN); 3672 ha->hw.nmcast++; 3673 3674 mta = mta + ETHER_ADDR_LEN; 3675 nmcast--; 3676 3677 if (nmcast == 0) 3678 break; 3679 } 3680 3681 } 3682 return 0; 3683} 3684 3685static int 3686qla_hw_del_mcast(qla_host_t *ha, uint8_t *mta, uint32_t nmcast) 3687{ 3688 int i; 3689 3690 for (i = 0; i < Q8_MAX_NUM_MULTICAST_ADDRS; i++) { 3691 if (QL_MAC_CMP(ha->hw.mcast[i].addr, mta) == 0) { 3692 3693 ha->hw.mcast[i].addr[0] = 0; 3694 ha->hw.mcast[i].addr[1] = 0; 3695 ha->hw.mcast[i].addr[2] = 0; 3696 ha->hw.mcast[i].addr[3] = 0; 3697 ha->hw.mcast[i].addr[4] = 0; 3698 ha->hw.mcast[i].addr[5] = 0; 3699 3700 ha->hw.nmcast--; 3701 3702 mta = mta + ETHER_ADDR_LEN; 3703 nmcast--; 3704 3705 if (nmcast == 0) 3706 break; 3707 } 3708 } 3709 return 0; 3710} 3711 3712/* 3713 * Name: ql_hw_set_multi 3714 * Function: Sets the Multicast Addresses provided by the host O.S into the 3715 * hardware (for the given interface) 3716 */ 3717int 3718ql_hw_set_multi(qla_host_t *ha, uint8_t *mcast_addr, uint32_t mcnt, 3719 uint32_t add_mac) 3720{ 3721 uint8_t *mta = mcast_addr; 3722 int i; 3723 int ret = 0; 3724 uint32_t count = 0; 3725 uint8_t *mcast; 3726 3727 mcast = ha->hw.mac_addr_arr; 3728 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 3729 3730 for (i = 0; i < mcnt; i++) { 3731 if (mta[0] || mta[1] || mta[2] || mta[3] || mta[4] || mta[5]) { 3732 if (add_mac) { 3733 if (qla_hw_mac_addr_present(ha, mta) != 0) { 3734 bcopy(mta, mcast, ETHER_ADDR_LEN); 3735 mcast = mcast + ETHER_ADDR_LEN; 3736 count++; 3737 } 3738 } else { 3739 if 
(qla_hw_mac_addr_present(ha, mta) == 0) { 3740 bcopy(mta, mcast, ETHER_ADDR_LEN); 3741 mcast = mcast + ETHER_ADDR_LEN; 3742 count++; 3743 } 3744 } 3745 } 3746 if (count == Q8_MAX_MAC_ADDRS) { 3747 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, 3748 add_mac, count)) { 3749 device_printf(ha->pci_dev, "%s: failed\n", 3750 __func__); 3751 return (-1); 3752 } 3753 3754 if (add_mac) { 3755 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, 3756 count); 3757 } else { 3758 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, 3759 count); 3760 } 3761 3762 count = 0; 3763 mcast = ha->hw.mac_addr_arr; 3764 memset(mcast, 0, (Q8_MAX_MAC_ADDRS * ETHER_ADDR_LEN)); 3765 } 3766 3767 mta += Q8_MAC_ADDR_LEN; 3768 } 3769 3770 if (count) { 3771 if (qla_config_mac_addr(ha, ha->hw.mac_addr_arr, add_mac, 3772 count)) { 3773 device_printf(ha->pci_dev, "%s: failed\n", __func__); 3774 return (-1); 3775 } 3776 if (add_mac) { 3777 qla_hw_add_mcast(ha, ha->hw.mac_addr_arr, count); 3778 } else { 3779 qla_hw_del_mcast(ha, ha->hw.mac_addr_arr, count); 3780 } 3781 } 3782 3783 return (ret); 3784} 3785 3786/* 3787 * Name: ql_hw_tx_done_locked 3788 * Function: Handle Transmit Completions 3789 */ 3790void 3791ql_hw_tx_done_locked(qla_host_t *ha, uint32_t txr_idx) 3792{ 3793 qla_tx_buf_t *txb; 3794 qla_hw_t *hw = &ha->hw; 3795 uint32_t comp_idx, comp_count = 0; 3796 qla_hw_tx_cntxt_t *hw_tx_cntxt; 3797 3798 hw_tx_cntxt = &hw->tx_cntxt[txr_idx]; 3799 3800 /* retrieve index of last entry in tx ring completed */ 3801 comp_idx = qla_le32_to_host(*(hw_tx_cntxt->tx_cons)); 3802 3803 while (comp_idx != hw_tx_cntxt->txr_comp) { 3804 3805 txb = &ha->tx_ring[txr_idx].tx_buf[hw_tx_cntxt->txr_comp]; 3806 3807 hw_tx_cntxt->txr_comp++; 3808 if (hw_tx_cntxt->txr_comp == NUM_TX_DESCRIPTORS) 3809 hw_tx_cntxt->txr_comp = 0; 3810 3811 comp_count++; 3812 3813 if (txb->m_head) { 3814 if_inc_counter(ha->ifp, IFCOUNTER_OPACKETS, 1); 3815 3816 bus_dmamap_sync(ha->tx_tag, txb->map, 3817 BUS_DMASYNC_POSTWRITE); 3818 bus_dmamap_unload(ha->tx_tag, 
txb->map); 3819 m_freem(txb->m_head); 3820 3821 txb->m_head = NULL; 3822 } 3823 } 3824 3825 hw_tx_cntxt->txr_free += comp_count; 3826 return; 3827} 3828 3829void 3830ql_update_link_state(qla_host_t *ha) 3831{ 3832 uint32_t link_state = 0; 3833 uint32_t prev_link_state; 3834 3835 prev_link_state = ha->hw.link_up; 3836 3837 if (ha->ifp->if_drv_flags & IFF_DRV_RUNNING) { 3838 link_state = READ_REG32(ha, Q8_LINK_STATE); 3839 3840 if (ha->pci_func == 0) { 3841 link_state = (((link_state & 0xF) == 1)? 1 : 0); 3842 } else { 3843 link_state = ((((link_state >> 4)& 0xF) == 1)? 1 : 0); 3844 } 3845 } 3846 3847 atomic_store_rel_8(&ha->hw.link_up, (uint8_t)link_state); 3848 3849 if (prev_link_state != ha->hw.link_up) { 3850 if (ha->hw.link_up) { 3851 if_link_state_change(ha->ifp, LINK_STATE_UP); 3852 } else { 3853 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 3854 } 3855 } 3856 return; 3857} 3858 3859int 3860ql_hw_check_health(qla_host_t *ha) 3861{ 3862 uint32_t val; 3863 3864 ha->hw.health_count++; 3865 3866 if (ha->hw.health_count < 500) 3867 return 0; 3868 3869 ha->hw.health_count = 0; 3870 3871 val = READ_REG32(ha, Q8_ASIC_TEMPERATURE); 3872 3873 if (((val & 0xFFFF) == 2) || ((val & 0xFFFF) == 3) || 3874 (QL_ERR_INJECT(ha, INJCT_TEMPERATURE_FAILURE))) { 3875 device_printf(ha->pci_dev, "%s: Temperature Alert" 3876 " at ts_usecs %ld ts_reg = 0x%08x\n", 3877 __func__, qla_get_usec_timestamp(), val); 3878 3879 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_TEMP_FAILURE) 3880 ha->hw.sp_log_stop = -1; 3881 3882 QL_INITIATE_RECOVERY(ha); 3883 return -1; 3884 } 3885 3886 val = READ_REG32(ha, Q8_FIRMWARE_HEARTBEAT); 3887 3888 if ((val != ha->hw.hbeat_value) && 3889 (!(QL_ERR_INJECT(ha, INJCT_HEARTBEAT_FAILURE)))) { 3890 ha->hw.hbeat_value = val; 3891 ha->hw.hbeat_failure = 0; 3892 return 0; 3893 } 3894 3895 ha->hw.hbeat_failure++; 3896 3897 3898 if ((ha->dbg_level & 0x8000) && (ha->hw.hbeat_failure == 1)) 3899 device_printf(ha->pci_dev, "%s: Heartbeat Failue 1[0x%08x]\n", 3900 
__func__, val); 3901 if (ha->hw.hbeat_failure < 2) /* we ignore the first failure */ 3902 return 0; 3903 else { 3904 uint32_t peg_halt_status1; 3905 uint32_t peg_halt_status2; 3906 3907 peg_halt_status1 = READ_REG32(ha, Q8_PEG_HALT_STATUS1); 3908 peg_halt_status2 = READ_REG32(ha, Q8_PEG_HALT_STATUS2); 3909 3910 device_printf(ha->pci_dev, 3911 "%s: Heartbeat Failue at ts_usecs = %ld " 3912 "fw_heart_beat = 0x%08x " 3913 "peg_halt_status1 = 0x%08x " 3914 "peg_halt_status2 = 0x%08x\n", 3915 __func__, qla_get_usec_timestamp(), val, 3916 peg_halt_status1, peg_halt_status2); 3917 3918 if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_HBEAT_FAILURE) 3919 ha->hw.sp_log_stop = -1; 3920 } 3921 QL_INITIATE_RECOVERY(ha); 3922 3923 return -1; 3924} 3925 3926static int 3927qla_init_nic_func(qla_host_t *ha) 3928{ 3929 device_t dev; 3930 q80_init_nic_func_t *init_nic; 3931 q80_init_nic_func_rsp_t *init_nic_rsp; 3932 uint32_t err; 3933 3934 dev = ha->pci_dev; 3935 3936 init_nic = (q80_init_nic_func_t *)ha->hw.mbox; 3937 bzero(init_nic, sizeof(q80_init_nic_func_t)); 3938 3939 init_nic->opcode = Q8_MBX_INIT_NIC_FUNC; 3940 init_nic->count_version = (sizeof (q80_init_nic_func_t) >> 2); 3941 init_nic->count_version |= Q8_MBX_CMD_VERSION; 3942 3943 init_nic->options = Q8_INIT_NIC_REG_DCBX_CHNG_AEN; 3944 init_nic->options |= Q8_INIT_NIC_REG_SFP_CHNG_AEN; 3945 init_nic->options |= Q8_INIT_NIC_REG_IDC_AEN; 3946 3947//qla_dump_buf8(ha, __func__, init_nic, sizeof (q80_init_nic_func_t)); 3948 if (qla_mbx_cmd(ha, (uint32_t *)init_nic, 3949 (sizeof (q80_init_nic_func_t) >> 2), 3950 ha->hw.mbox, (sizeof (q80_init_nic_func_rsp_t) >> 2), 0)) { 3951 device_printf(dev, "%s: failed\n", __func__); 3952 return -1; 3953 } 3954 3955 init_nic_rsp = (q80_init_nic_func_rsp_t *)ha->hw.mbox; 3956// qla_dump_buf8(ha, __func__, init_nic_rsp, sizeof (q80_init_nic_func_rsp_t)); 3957 3958 err = Q8_MBX_RSP_STATUS(init_nic_rsp->regcnt_status); 3959 3960 if (err) { 3961 device_printf(dev, "%s: failed [0x%08x]\n", 
__func__, err); 3962 } 3963 3964 return 0; 3965} 3966 3967static int 3968qla_stop_nic_func(qla_host_t *ha) 3969{ 3970 device_t dev; 3971 q80_stop_nic_func_t *stop_nic; 3972 q80_stop_nic_func_rsp_t *stop_nic_rsp; 3973 uint32_t err; 3974 3975 dev = ha->pci_dev; 3976 3977 stop_nic = (q80_stop_nic_func_t *)ha->hw.mbox; 3978 bzero(stop_nic, sizeof(q80_stop_nic_func_t)); 3979 3980 stop_nic->opcode = Q8_MBX_STOP_NIC_FUNC; 3981 stop_nic->count_version = (sizeof (q80_stop_nic_func_t) >> 2); 3982 stop_nic->count_version |= Q8_MBX_CMD_VERSION; 3983 3984 stop_nic->options = Q8_STOP_NIC_DEREG_DCBX_CHNG_AEN; 3985 stop_nic->options |= Q8_STOP_NIC_DEREG_SFP_CHNG_AEN; 3986 3987//qla_dump_buf8(ha, __func__, stop_nic, sizeof (q80_stop_nic_func_t)); 3988 if (qla_mbx_cmd(ha, (uint32_t *)stop_nic, 3989 (sizeof (q80_stop_nic_func_t) >> 2), 3990 ha->hw.mbox, (sizeof (q80_stop_nic_func_rsp_t) >> 2), 0)) { 3991 device_printf(dev, "%s: failed\n", __func__); 3992 return -1; 3993 } 3994 3995 stop_nic_rsp = (q80_stop_nic_func_rsp_t *)ha->hw.mbox; 3996//qla_dump_buf8(ha, __func__, stop_nic_rsp, sizeof (q80_stop_nic_func_rsp_ t)); 3997 3998 err = Q8_MBX_RSP_STATUS(stop_nic_rsp->regcnt_status); 3999 4000 if (err) { 4001 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 4002 } 4003 4004 return 0; 4005} 4006 4007static int 4008qla_query_fw_dcbx_caps(qla_host_t *ha) 4009{ 4010 device_t dev; 4011 q80_query_fw_dcbx_caps_t *fw_dcbx; 4012 q80_query_fw_dcbx_caps_rsp_t *fw_dcbx_rsp; 4013 uint32_t err; 4014 4015 dev = ha->pci_dev; 4016 4017 fw_dcbx = (q80_query_fw_dcbx_caps_t *)ha->hw.mbox; 4018 bzero(fw_dcbx, sizeof(q80_query_fw_dcbx_caps_t)); 4019 4020 fw_dcbx->opcode = Q8_MBX_GET_FW_DCBX_CAPS; 4021 fw_dcbx->count_version = (sizeof (q80_query_fw_dcbx_caps_t) >> 2); 4022 fw_dcbx->count_version |= Q8_MBX_CMD_VERSION; 4023 4024 ql_dump_buf8(ha, __func__, fw_dcbx, sizeof (q80_query_fw_dcbx_caps_t)); 4025 if (qla_mbx_cmd(ha, (uint32_t *)fw_dcbx, 4026 (sizeof (q80_query_fw_dcbx_caps_t) >> 2), 4027 
ha->hw.mbox, (sizeof (q80_query_fw_dcbx_caps_rsp_t) >> 2), 0)) { 4028 device_printf(dev, "%s: failed\n", __func__); 4029 return -1; 4030 } 4031 4032 fw_dcbx_rsp = (q80_query_fw_dcbx_caps_rsp_t *)ha->hw.mbox; 4033 ql_dump_buf8(ha, __func__, fw_dcbx_rsp, 4034 sizeof (q80_query_fw_dcbx_caps_rsp_t)); 4035 4036 err = Q8_MBX_RSP_STATUS(fw_dcbx_rsp->regcnt_status); 4037 4038 if (err) { 4039 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 4040 } 4041 4042 return 0; 4043} 4044 4045static int 4046qla_idc_ack(qla_host_t *ha, uint32_t aen_mb1, uint32_t aen_mb2, 4047 uint32_t aen_mb3, uint32_t aen_mb4) 4048{ 4049 device_t dev; 4050 q80_idc_ack_t *idc_ack; 4051 q80_idc_ack_rsp_t *idc_ack_rsp; 4052 uint32_t err; 4053 int count = 300; 4054 4055 dev = ha->pci_dev; 4056 4057 idc_ack = (q80_idc_ack_t *)ha->hw.mbox; 4058 bzero(idc_ack, sizeof(q80_idc_ack_t)); 4059 4060 idc_ack->opcode = Q8_MBX_IDC_ACK; 4061 idc_ack->count_version = (sizeof (q80_idc_ack_t) >> 2); 4062 idc_ack->count_version |= Q8_MBX_CMD_VERSION; 4063 4064 idc_ack->aen_mb1 = aen_mb1; 4065 idc_ack->aen_mb2 = aen_mb2; 4066 idc_ack->aen_mb3 = aen_mb3; 4067 idc_ack->aen_mb4 = aen_mb4; 4068 4069 ha->hw.imd_compl= 0; 4070 4071 if (qla_mbx_cmd(ha, (uint32_t *)idc_ack, 4072 (sizeof (q80_idc_ack_t) >> 2), 4073 ha->hw.mbox, (sizeof (q80_idc_ack_rsp_t) >> 2), 0)) { 4074 device_printf(dev, "%s: failed\n", __func__); 4075 return -1; 4076 } 4077 4078 idc_ack_rsp = (q80_idc_ack_rsp_t *)ha->hw.mbox; 4079 4080 err = Q8_MBX_RSP_STATUS(idc_ack_rsp->regcnt_status); 4081 4082 if (err) { 4083 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 4084 return(-1); 4085 } 4086 4087 while (count && !ha->hw.imd_compl) { 4088 qla_mdelay(__func__, 100); 4089 count--; 4090 } 4091 4092 if (!count) 4093 return -1; 4094 else 4095 device_printf(dev, "%s: count %d\n", __func__, count); 4096 4097 return (0); 4098} 4099 4100static int 4101qla_set_port_config(qla_host_t *ha, uint32_t cfg_bits) 4102{ 4103 device_t dev; 4104 
q80_set_port_cfg_t *pcfg; 4105 q80_set_port_cfg_rsp_t *pfg_rsp; 4106 uint32_t err; 4107 int count = 300; 4108 4109 dev = ha->pci_dev; 4110 4111 pcfg = (q80_set_port_cfg_t *)ha->hw.mbox; 4112 bzero(pcfg, sizeof(q80_set_port_cfg_t)); 4113 4114 pcfg->opcode = Q8_MBX_SET_PORT_CONFIG; 4115 pcfg->count_version = (sizeof (q80_set_port_cfg_t) >> 2); 4116 pcfg->count_version |= Q8_MBX_CMD_VERSION; 4117 4118 pcfg->cfg_bits = cfg_bits; 4119 4120 device_printf(dev, "%s: cfg_bits" 4121 " [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" 4122 " [0x%x, 0x%x, 0x%x]\n", __func__, 4123 ((cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), 4124 ((cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), 4125 ((cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0)); 4126 4127 ha->hw.imd_compl= 0; 4128 4129 if (qla_mbx_cmd(ha, (uint32_t *)pcfg, 4130 (sizeof (q80_set_port_cfg_t) >> 2), 4131 ha->hw.mbox, (sizeof (q80_set_port_cfg_rsp_t) >> 2), 0)) { 4132 device_printf(dev, "%s: failed\n", __func__); 4133 return -1; 4134 } 4135 4136 pfg_rsp = (q80_set_port_cfg_rsp_t *)ha->hw.mbox; 4137 4138 err = Q8_MBX_RSP_STATUS(pfg_rsp->regcnt_status); 4139 4140 if (err == Q8_MBX_RSP_IDC_INTRMD_RSP) { 4141 while (count && !ha->hw.imd_compl) { 4142 qla_mdelay(__func__, 100); 4143 count--; 4144 } 4145 if (count) { 4146 device_printf(dev, "%s: count %d\n", __func__, count); 4147 4148 err = 0; 4149 } 4150 } 4151 4152 if (err) { 4153 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 4154 return(-1); 4155 } 4156 4157 return (0); 4158} 4159 4160 4161static int 4162qla_get_minidump_tmplt_size(qla_host_t *ha, uint32_t *size) 4163{ 4164 uint32_t err; 4165 device_t dev = ha->pci_dev; 4166 q80_config_md_templ_size_t *md_size; 4167 q80_config_md_templ_size_rsp_t *md_size_rsp; 4168 4169#ifndef QL_LDFLASH_FW 4170 4171 ql_minidump_template_hdr_t *hdr; 4172 4173 hdr = (ql_minidump_template_hdr_t *)ql83xx_minidump; 4174 *size = hdr->size_of_template; 4175 return (0); 4176 4177#endif /* #ifdef QL_LDFLASH_FW */ 4178 4179 md_size = 
(q80_config_md_templ_size_t *) ha->hw.mbox; 4180 bzero(md_size, sizeof(q80_config_md_templ_size_t)); 4181 4182 md_size->opcode = Q8_MBX_GET_MINIDUMP_TMPLT_SIZE; 4183 md_size->count_version = (sizeof (q80_config_md_templ_size_t) >> 2); 4184 md_size->count_version |= Q8_MBX_CMD_VERSION; 4185 4186 if (qla_mbx_cmd(ha, (uint32_t *) md_size, 4187 (sizeof(q80_config_md_templ_size_t) >> 2), ha->hw.mbox, 4188 (sizeof(q80_config_md_templ_size_rsp_t) >> 2), 0)) { 4189 4190 device_printf(dev, "%s: failed\n", __func__); 4191 4192 return (-1); 4193 } 4194 4195 md_size_rsp = (q80_config_md_templ_size_rsp_t *) ha->hw.mbox; 4196 4197 err = Q8_MBX_RSP_STATUS(md_size_rsp->regcnt_status); 4198 4199 if (err) { 4200 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 4201 return(-1); 4202 } 4203 4204 *size = md_size_rsp->templ_size; 4205 4206 return (0); 4207} 4208 4209static int 4210qla_get_port_config(qla_host_t *ha, uint32_t *cfg_bits) 4211{ 4212 device_t dev; 4213 q80_get_port_cfg_t *pcfg; 4214 q80_get_port_cfg_rsp_t *pcfg_rsp; 4215 uint32_t err; 4216 4217 dev = ha->pci_dev; 4218 4219 pcfg = (q80_get_port_cfg_t *)ha->hw.mbox; 4220 bzero(pcfg, sizeof(q80_get_port_cfg_t)); 4221 4222 pcfg->opcode = Q8_MBX_GET_PORT_CONFIG; 4223 pcfg->count_version = (sizeof (q80_get_port_cfg_t) >> 2); 4224 pcfg->count_version |= Q8_MBX_CMD_VERSION; 4225 4226 if (qla_mbx_cmd(ha, (uint32_t *)pcfg, 4227 (sizeof (q80_get_port_cfg_t) >> 2), 4228 ha->hw.mbox, (sizeof (q80_get_port_cfg_rsp_t) >> 2), 0)) { 4229 device_printf(dev, "%s: failed\n", __func__); 4230 return -1; 4231 } 4232 4233 pcfg_rsp = (q80_get_port_cfg_rsp_t *)ha->hw.mbox; 4234 4235 err = Q8_MBX_RSP_STATUS(pcfg_rsp->regcnt_status); 4236 4237 if (err) { 4238 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 4239 return(-1); 4240 } 4241 4242 device_printf(dev, "%s: [cfg_bits, port type]" 4243 " [0x%08x, 0x%02x] [STD_PAUSE_DIR, PAUSE_TYPE, DCBX]" 4244 " [0x%x, 0x%x, 0x%x]\n", __func__, 4245 pcfg_rsp->cfg_bits, 
pcfg_rsp->phys_port_type, 4246 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_STDPAUSE_DIR_MASK)>>20), 4247 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_PAUSE_CFG_MASK) >> 5), 4248 ((pcfg_rsp->cfg_bits & Q8_PORT_CFG_BITS_DCBX_ENABLE) ? 1: 0) 4249 ); 4250 4251 *cfg_bits = pcfg_rsp->cfg_bits; 4252 4253 return (0); 4254} 4255 4256int 4257ql_iscsi_pdu(qla_host_t *ha, struct mbuf *mp) 4258{ 4259 struct ether_vlan_header *eh; 4260 uint16_t etype; 4261 struct ip *ip = NULL; 4262 struct ip6_hdr *ip6 = NULL; 4263 struct tcphdr *th = NULL; 4264 uint32_t hdrlen; 4265 uint32_t offset; 4266 uint8_t buf[sizeof(struct ip6_hdr)]; 4267 4268 eh = mtod(mp, struct ether_vlan_header *); 4269 4270 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 4271 hdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 4272 etype = ntohs(eh->evl_proto); 4273 } else { 4274 hdrlen = ETHER_HDR_LEN; 4275 etype = ntohs(eh->evl_encap_proto); 4276 } 4277 4278 if (etype == ETHERTYPE_IP) { 4279 4280 offset = (hdrlen + sizeof (struct ip)); 4281 4282 if (mp->m_len >= offset) { 4283 ip = (struct ip *)(mp->m_data + hdrlen); 4284 } else { 4285 m_copydata(mp, hdrlen, sizeof (struct ip), buf); 4286 ip = (struct ip *)buf; 4287 } 4288 4289 if (ip->ip_p == IPPROTO_TCP) { 4290 4291 hdrlen += ip->ip_hl << 2; 4292 offset = hdrlen + 4; 4293 4294 if (mp->m_len >= offset) { 4295 th = (struct tcphdr *)(mp->m_data + hdrlen);; 4296 } else { 4297 m_copydata(mp, hdrlen, 4, buf); 4298 th = (struct tcphdr *)buf; 4299 } 4300 } 4301 4302 } else if (etype == ETHERTYPE_IPV6) { 4303 4304 offset = (hdrlen + sizeof (struct ip6_hdr)); 4305 4306 if (mp->m_len >= offset) { 4307 ip6 = (struct ip6_hdr *)(mp->m_data + hdrlen); 4308 } else { 4309 m_copydata(mp, hdrlen, sizeof (struct ip6_hdr), buf); 4310 ip6 = (struct ip6_hdr *)buf; 4311 } 4312 4313 if (ip6->ip6_nxt == IPPROTO_TCP) { 4314 4315 hdrlen += sizeof(struct ip6_hdr); 4316 offset = hdrlen + 4; 4317 4318 if (mp->m_len >= offset) { 4319 th = (struct tcphdr *)(mp->m_data + hdrlen);; 4320 } else { 4321 
m_copydata(mp, hdrlen, 4, buf); 4322 th = (struct tcphdr *)buf; 4323 } 4324 } 4325 } 4326 4327 if (th != NULL) { 4328 if ((th->th_sport == htons(3260)) || 4329 (th->th_dport == htons(3260))) 4330 return 0; 4331 } 4332 return (-1); 4333} 4334 4335void 4336qla_hw_async_event(qla_host_t *ha) 4337{ 4338 switch (ha->hw.aen_mb0) { 4339 case 0x8101: 4340 (void)qla_idc_ack(ha, ha->hw.aen_mb1, ha->hw.aen_mb2, 4341 ha->hw.aen_mb3, ha->hw.aen_mb4); 4342 4343 break; 4344 4345 default: 4346 break; 4347 } 4348 4349 return; 4350} 4351 4352#ifdef QL_LDFLASH_FW 4353static int 4354ql_get_minidump_template(qla_host_t *ha) 4355{ 4356 uint32_t err; 4357 device_t dev = ha->pci_dev; 4358 q80_config_md_templ_cmd_t *md_templ; 4359 q80_config_md_templ_cmd_rsp_t *md_templ_rsp; 4360 4361 md_templ = (q80_config_md_templ_cmd_t *) ha->hw.mbox; 4362 bzero(md_templ, (sizeof (q80_config_md_templ_cmd_t))); 4363 4364 md_templ->opcode = Q8_MBX_GET_MINIDUMP_TMPLT; 4365 md_templ->count_version = ( sizeof(q80_config_md_templ_cmd_t) >> 2); 4366 md_templ->count_version |= Q8_MBX_CMD_VERSION; 4367 4368 md_templ->buf_addr = ha->hw.dma_buf.minidump.dma_addr; 4369 md_templ->buff_size = ha->hw.dma_buf.minidump.size; 4370 4371 if (qla_mbx_cmd(ha, (uint32_t *) md_templ, 4372 (sizeof(q80_config_md_templ_cmd_t) >> 2), 4373 ha->hw.mbox, 4374 (sizeof(q80_config_md_templ_cmd_rsp_t) >> 2), 0)) { 4375 4376 device_printf(dev, "%s: failed\n", __func__); 4377 4378 return (-1); 4379 } 4380 4381 md_templ_rsp = (q80_config_md_templ_cmd_rsp_t *) ha->hw.mbox; 4382 4383 err = Q8_MBX_RSP_STATUS(md_templ_rsp->regcnt_status); 4384 4385 if (err) { 4386 device_printf(dev, "%s: failed [0x%08x]\n", __func__, err); 4387 return (-1); 4388 } 4389 4390 return (0); 4391 4392} 4393#endif /* #ifdef QL_LDFLASH_FW */ 4394 4395/* 4396 * Minidump related functionality 4397 */ 4398 4399static int ql_parse_template(qla_host_t *ha); 4400 4401static uint32_t ql_rdcrb(qla_host_t *ha, 4402 ql_minidump_entry_rdcrb_t *crb_entry, 4403 uint32_t * 
data_buff); 4404 4405static uint32_t ql_pollrd(qla_host_t *ha, 4406 ql_minidump_entry_pollrd_t *entry, 4407 uint32_t * data_buff); 4408 4409static uint32_t ql_pollrd_modify_write(qla_host_t *ha, 4410 ql_minidump_entry_rd_modify_wr_with_poll_t *entry, 4411 uint32_t *data_buff); 4412 4413static uint32_t ql_L2Cache(qla_host_t *ha, 4414 ql_minidump_entry_cache_t *cacheEntry, 4415 uint32_t * data_buff); 4416 4417static uint32_t ql_L1Cache(qla_host_t *ha, 4418 ql_minidump_entry_cache_t *cacheEntry, 4419 uint32_t *data_buff); 4420 4421static uint32_t ql_rdocm(qla_host_t *ha, 4422 ql_minidump_entry_rdocm_t *ocmEntry, 4423 uint32_t *data_buff); 4424 4425static uint32_t ql_rdmem(qla_host_t *ha, 4426 ql_minidump_entry_rdmem_t *mem_entry, 4427 uint32_t *data_buff); 4428 4429static uint32_t ql_rdrom(qla_host_t *ha, 4430 ql_minidump_entry_rdrom_t *romEntry, 4431 uint32_t *data_buff); 4432 4433static uint32_t ql_rdmux(qla_host_t *ha, 4434 ql_minidump_entry_mux_t *muxEntry, 4435 uint32_t *data_buff); 4436 4437static uint32_t ql_rdmux2(qla_host_t *ha, 4438 ql_minidump_entry_mux2_t *muxEntry, 4439 uint32_t *data_buff); 4440 4441static uint32_t ql_rdqueue(qla_host_t *ha, 4442 ql_minidump_entry_queue_t *queueEntry, 4443 uint32_t *data_buff); 4444 4445static uint32_t ql_cntrl(qla_host_t *ha, 4446 ql_minidump_template_hdr_t *template_hdr, 4447 ql_minidump_entry_cntrl_t *crbEntry); 4448 4449 4450static uint32_t 4451ql_minidump_size(qla_host_t *ha) 4452{ 4453 uint32_t i, k; 4454 uint32_t size = 0; 4455 ql_minidump_template_hdr_t *hdr; 4456 4457 hdr = (ql_minidump_template_hdr_t *)ha->hw.dma_buf.minidump.dma_b; 4458 4459 i = 0x2; 4460 4461 for (k = 1; k < QL_DBG_CAP_SIZE_ARRAY_LEN; k++) { 4462 if (i & ha->hw.mdump_capture_mask) 4463 size += hdr->capture_size_array[k]; 4464 i = i << 1; 4465 } 4466 return (size); 4467} 4468 4469static void 4470ql_free_minidump_buffer(qla_host_t *ha) 4471{ 4472 if (ha->hw.mdump_buffer != NULL) { 4473 free(ha->hw.mdump_buffer, M_QLA83XXBUF); 4474 
ha->hw.mdump_buffer = NULL; 4475 ha->hw.mdump_buffer_size = 0; 4476 } 4477 return; 4478} 4479 4480static int 4481ql_alloc_minidump_buffer(qla_host_t *ha) 4482{ 4483 ha->hw.mdump_buffer_size = ql_minidump_size(ha); 4484 4485 if (!ha->hw.mdump_buffer_size) 4486 return (-1); 4487 4488 ha->hw.mdump_buffer = malloc(ha->hw.mdump_buffer_size, M_QLA83XXBUF, 4489 M_NOWAIT); 4490 4491 if (ha->hw.mdump_buffer == NULL) 4492 return (-1); 4493 4494 return (0); 4495} 4496 4497static void 4498ql_free_minidump_template_buffer(qla_host_t *ha) 4499{ 4500 if (ha->hw.mdump_template != NULL) { 4501 free(ha->hw.mdump_template, M_QLA83XXBUF); 4502 ha->hw.mdump_template = NULL; 4503 ha->hw.mdump_template_size = 0; 4504 } 4505 return; 4506} 4507 4508static int 4509ql_alloc_minidump_template_buffer(qla_host_t *ha) 4510{ 4511 ha->hw.mdump_template_size = ha->hw.dma_buf.minidump.size; 4512 4513 ha->hw.mdump_template = malloc(ha->hw.mdump_template_size, 4514 M_QLA83XXBUF, M_NOWAIT); 4515 4516 if (ha->hw.mdump_template == NULL) 4517 return (-1); 4518 4519 return (0); 4520} 4521 4522static int 4523ql_alloc_minidump_buffers(qla_host_t *ha) 4524{ 4525 int ret; 4526 4527 ret = ql_alloc_minidump_template_buffer(ha); 4528 4529 if (ret) 4530 return (ret); 4531 4532 ret = ql_alloc_minidump_buffer(ha); 4533 4534 if (ret) 4535 ql_free_minidump_template_buffer(ha); 4536 4537 return (ret); 4538} 4539 4540 4541static uint32_t 4542ql_validate_minidump_checksum(qla_host_t *ha) 4543{ 4544 uint64_t sum = 0; 4545 int count; 4546 uint32_t *template_buff; 4547 4548 count = ha->hw.dma_buf.minidump.size / sizeof (uint32_t); 4549 template_buff = ha->hw.dma_buf.minidump.dma_b; 4550 4551 while (count-- > 0) { 4552 sum += *template_buff++; 4553 } 4554 4555 while (sum >> 32) { 4556 sum = (sum & 0xFFFFFFFF) + (sum >> 32); 4557 } 4558 4559 return (~sum); 4560} 4561 4562int 4563ql_minidump_init(qla_host_t *ha) 4564{ 4565 int ret = 0; 4566 uint32_t template_size = 0; 4567 device_t dev = ha->pci_dev; 4568 4569 /* 4570 * Get 
Minidump Template Size 4571 */ 4572 ret = qla_get_minidump_tmplt_size(ha, &template_size); 4573 4574 if (ret || (template_size == 0)) { 4575 device_printf(dev, "%s: failed [%d, %d]\n", __func__, ret, 4576 template_size); 4577 return (-1); 4578 } 4579 4580 /* 4581 * Allocate Memory for Minidump Template 4582 */ 4583 4584 ha->hw.dma_buf.minidump.alignment = 8; 4585 ha->hw.dma_buf.minidump.size = template_size; 4586 4587#ifdef QL_LDFLASH_FW 4588 if (ql_alloc_dmabuf(ha, &ha->hw.dma_buf.minidump)) { 4589 4590 device_printf(dev, "%s: minidump dma alloc failed\n", __func__); 4591 4592 return (-1); 4593 } 4594 ha->hw.dma_buf.flags.minidump = 1; 4595 4596 /* 4597 * Retrieve Minidump Template 4598 */ 4599 ret = ql_get_minidump_template(ha); 4600#else 4601 ha->hw.dma_buf.minidump.dma_b = ql83xx_minidump; 4602 4603#endif /* #ifdef QL_LDFLASH_FW */ 4604 4605 if (ret == 0) { 4606 4607 ret = ql_validate_minidump_checksum(ha); 4608 4609 if (ret == 0) { 4610 4611 ret = ql_alloc_minidump_buffers(ha); 4612 4613 if (ret == 0) 4614 ha->hw.mdump_init = 1; 4615 else 4616 device_printf(dev, 4617 "%s: ql_alloc_minidump_buffers" 4618 " failed\n", __func__); 4619 } else { 4620 device_printf(dev, "%s: ql_validate_minidump_checksum" 4621 " failed\n", __func__); 4622 } 4623 } else { 4624 device_printf(dev, "%s: ql_get_minidump_template failed\n", 4625 __func__); 4626 } 4627 4628 if (ret) 4629 ql_minidump_free(ha); 4630 4631 return (ret); 4632} 4633 4634static void 4635ql_minidump_free(qla_host_t *ha) 4636{ 4637 ha->hw.mdump_init = 0; 4638 if (ha->hw.dma_buf.flags.minidump) { 4639 ha->hw.dma_buf.flags.minidump = 0; 4640 ql_free_dmabuf(ha, &ha->hw.dma_buf.minidump); 4641 } 4642 4643 ql_free_minidump_template_buffer(ha); 4644 ql_free_minidump_buffer(ha); 4645 4646 return; 4647} 4648 4649void 4650ql_minidump(qla_host_t *ha) 4651{ 4652 if (!ha->hw.mdump_init) 4653 return; 4654 4655 if (ha->hw.mdump_done) 4656 return; 4657 ha->hw.mdump_usec_ts = qla_get_usec_timestamp(); 4658 
	ha->hw.mdump_start_seq_index = ql_stop_sequence(ha);

	bzero(ha->hw.mdump_buffer, ha->hw.mdump_buffer_size);
	bzero(ha->hw.mdump_template, ha->hw.mdump_template_size);

	/* Snapshot the minidump template out of the DMA buffer. */
	bcopy(ha->hw.dma_buf.minidump.dma_b, ha->hw.mdump_template,
		ha->hw.mdump_template_size);

	ql_parse_template(ha);

	ql_start_sequence(ha, ha->hw.mdump_start_seq_index);

	ha->hw.mdump_done = 1;

	return;
}


/*
 * helper routines
 */

/*
 * ql_entry_err_chk
 *	Record the number of bytes actually captured for a template entry.
 *	If it differs from the capture size the template advertised, the
 *	advertised size is overwritten with the real size and the entry is
 *	flagged with QL_DBG_SIZE_ERR_FLAG so post-processing tools can spot
 *	the mismatch.
 */
static void
ql_entry_err_chk(ql_minidump_entry_t *entry, uint32_t esize)
{
	if (esize != entry->hdr.entry_capture_size) {
		entry->hdr.entry_capture_size = esize;
		entry->hdr.driver_flags |= QL_DBG_SIZE_ERR_FLAG;
	}
	return;
}


/*
 * ql_parse_template
 *	Walk every entry of the firmware minidump template in
 *	ha->hw.mdump_template and, for each entry whose capture mask matches
 *	the configured ha->hw.mdump_capture_mask, dispatch on the entry type
 *	and capture its data into ha->hw.mdump_buffer.  Entries that do not
 *	match the mask, would overflow the dump buffer, or fail to capture
 *	are marked QL_DBG_SKIPPED_FLAG in the template itself.
 *
 *	Always returns 0; errors are reported via the per-entry driver
 *	flags and a console message on template sanity failure.
 */
static int
ql_parse_template(qla_host_t *ha)
{
	uint32_t num_of_entries, buff_level, e_cnt, esize;
	uint32_t end_cnt, rv = 0;
	char *dump_buff, *dbuff;
	int sane_start = 0, sane_end = 0;
	ql_minidump_template_hdr_t *template_hdr;
	ql_minidump_entry_t *entry;
	uint32_t capture_mask;
	uint32_t dump_size;

	/* Setup parameters */
	template_hdr = (ql_minidump_template_hdr_t *)ha->hw.mdump_template;

	/* A valid template must begin with a TLHDR-typed header entry. */
	if (template_hdr->entry_type == TLHDR)
		sane_start = 1;

	dump_buff = (char *) ha->hw.mdump_buffer;

	num_of_entries = template_hdr->num_of_entries;

	entry = (ql_minidump_entry_t *) ((char *)template_hdr
			+ template_hdr->first_entry_offset );

	/*
	 * Seed the saved-state array with this PCI function's OCM window
	 * and function number; CNTRL entries read/write this array.
	 */
	template_hdr->saved_state_array[QL_OCM0_ADDR_INDX] =
		template_hdr->ocm_window_array[ha->pci_func];
	template_hdr->saved_state_array[QL_PCIE_FUNC_INDX] = ha->pci_func;

	capture_mask = ha->hw.mdump_capture_mask;
	dump_size = ha->hw.mdump_buffer_size;

	template_hdr->driver_capture_mask = capture_mask;

	QL_DPRINT80(ha, (ha->pci_dev,
		"%s: sane_start = %d num_of_entries = %d "
		"capture_mask = 0x%x dump_size = %d \n",
		__func__, sane_start, num_of_entries, capture_mask,
		dump_size));

	for (buff_level = 0, e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {

		/*
		 * If the capture_mask of the entry does not match capture mask
		 * skip the entry after marking the driver_flags indicator.
		 */

		if (!(entry->hdr.entry_capture_mask & capture_mask)) {

			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
			entry = (ql_minidump_entry_t *) ((char *) entry
					+ entry->hdr.entry_size);
			continue;
		}

		/*
		 * This is ONLY needed in implementations where
		 * the capture buffer allocated is too small to capture
		 * all of the required entries for a given capture mask.
		 * We need to empty the buffer contents to a file
		 * if possible, before processing the next entry
		 * If the buff_full_flag is set, no further capture will happen
		 * and all remaining non-control entries will be skipped.
		 */
		if (entry->hdr.entry_capture_size != 0) {
			if ((buff_level + entry->hdr.entry_capture_size) >
				dump_size) {
				/*  Try to recover by emptying buffer to file */
				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
				entry = (ql_minidump_entry_t *) ((char *) entry
						+ entry->hdr.entry_size);
				continue;
			}
		}

		/*
		 * Decode the entry type and process it accordingly
		 */

		switch (entry->hdr.entry_type) {
		case RDNOP:
			break;

		case RDEND:
			/*
			 * NOTE(review): end_cnt is recorded here but never
			 * read afterwards; sane_end alone drives the sanity
			 * check below.
			 */
			if (sane_end == 0) {
				end_cnt = e_cnt;
			}
			sane_end++;
			break;

		case RDCRB:
			dbuff = dump_buff + buff_level;
			esize = ql_rdcrb(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case POLLRD:
			dbuff = dump_buff + buff_level;
			esize = ql_pollrd(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case POLLRDMWR:
			dbuff = dump_buff + buff_level;
			esize = ql_pollrd_modify_write(ha, (void *)entry,
					(void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case L2ITG:
		case L2DTG:
		case L2DAT:
		case L2INS:
			dbuff = dump_buff + buff_level;
			/*
			 * ql_L2Cache returns (uint32_t)-1 on poll timeout;
			 * that entry is skipped rather than size-checked.
			 */
			esize = ql_L2Cache(ha, (void *)entry, (void *)dbuff);
			if (esize == -1) {
				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
			} else {
				ql_entry_err_chk(entry, esize);
				buff_level += esize;
			}
			break;

		case L1DAT:
		case L1INS:
			dbuff = dump_buff + buff_level;
			esize = ql_L1Cache(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case RDOCM:
			dbuff = dump_buff + buff_level;
			esize = ql_rdocm(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case RDMEM:
			dbuff = dump_buff + buff_level;
			esize = ql_rdmem(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case BOARD:
		case RDROM:
			dbuff = dump_buff + buff_level;
			esize = ql_rdrom(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case RDMUX:
			dbuff = dump_buff + buff_level;
			esize = ql_rdmux(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case RDMUX2:
			dbuff = dump_buff + buff_level;
			esize = ql_rdmux2(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case QUEUE:
			dbuff = dump_buff + buff_level;
			esize = ql_rdqueue(ha, (void *)entry, (void *)dbuff);
			ql_entry_err_chk(entry, esize);
			buff_level += esize;
			break;

		case CNTRL:
			/* CNTRL entries produce no dump data, only status. */
			if ((rv = ql_cntrl(ha, template_hdr, (void *)entry))) {
				entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
			}
			break;
		default:
			entry->hdr.driver_flags |= QL_DBG_SKIPPED_FLAG;
			break;
		}
		/* next entry in the template */
		entry = (ql_minidump_entry_t *) ((char *) entry
				+ entry->hdr.entry_size);
	}

	/* Sanity: exactly one TLHDR at the start and at most one RDEND. */
	if (!sane_start || (sane_end > 1)) {
		device_printf(ha->pci_dev,
			"\n%s: Template configuration error. Check Template\n",
			__func__);
	}

	QL_DPRINT80(ha, (ha->pci_dev, "%s: Minidump num of entries = %d\n",
		__func__, template_hdr->num_of_entries));

	return 0;
}

/*
 * Read CRB operation.
 *	For each of op_count registers (starting at addr, stepping by
 *	addr_stride) store the register address followed by its value into
 *	data_buff.  Returns the number of bytes written, or 0 on a register
 *	access failure.
 */
static uint32_t
ql_rdcrb(qla_host_t *ha, ql_minidump_entry_rdcrb_t * crb_entry,
	uint32_t * data_buff)
{
	int loop_cnt;
	int ret;
	uint32_t op_count, addr, stride, value = 0;

	addr = crb_entry->addr;
	op_count = crb_entry->op_count;
	stride = crb_entry->addr_stride;

	for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {

		ret = ql_rdwr_indreg32(ha, addr, &value, 1);

		if (ret)
			return (0);

		*data_buff++ = addr;
		*data_buff++ = value;
		addr = addr + stride;
	}

	/*
	 * for testing purpose we return amount of data written
	 */
	return (op_count * (2 * sizeof(uint32_t)));
}

/*
 * Handle L2 Cache.
 */

/*
 * ql_L2Cache
 *	Capture an L2 cache entry: for each of op_count tag values, program
 *	the tag register, optionally write the control register, optionally
 *	poll the control register until the poll_mask bits clear, then read
 *	read_addr_cnt words starting at read_addr into data_buff.
 *
 *	Returns the number of bytes written, 0 on a register access failure,
 *	or (uint32_t)-1 on a poll timeout (the caller skips the entry).
 */
static uint32_t
ql_L2Cache(qla_host_t *ha, ql_minidump_entry_cache_t *cacheEntry,
	uint32_t * data_buff)
{
	int i, k;
	int loop_cnt;
	int ret;

	uint32_t read_value;
	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr, cntl_value_w;
	uint32_t tag_value, read_cnt;
	volatile uint8_t cntl_value_r;
	long timeout;
	uint32_t data;

	loop_cnt = cacheEntry->op_count;

	read_addr = cacheEntry->read_addr;
	cntrl_addr = cacheEntry->control_addr;
	cntl_value_w = (uint32_t) cacheEntry->write_value;

	tag_reg_addr = cacheEntry->tag_reg_addr;

	tag_value = cacheEntry->init_tag_value;
	read_cnt = cacheEntry->read_addr_cnt;

	for (i = 0; i < loop_cnt; i++) {

		/* Select the cache line/tag to dump. */
		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
		if (ret)
			return (0);

		if (cacheEntry->write_value != 0) {

			ret = ql_rdwr_indreg32(ha, cntrl_addr,
					&cntl_value_w, 0);
			if (ret)
				return (0);
		}

		if (cacheEntry->poll_mask != 0) {

			timeout = cacheEntry->poll_wait;

			ret = ql_rdwr_indreg32(ha, cntrl_addr, &data, 1);
			if (ret)
				return (0);

			cntl_value_r = (uint8_t)data;

			/* Busy-wait (1 ms steps) for poll_mask bits to clear. */
			while ((cntl_value_r & cacheEntry->poll_mask) != 0) {

				if (timeout) {
					qla_mdelay(__func__, 1);
					timeout--;
				} else
					break;

				ret = ql_rdwr_indreg32(ha, cntrl_addr,
						&data, 1);
				if (ret)
					return (0);

				cntl_value_r = (uint8_t)data;
			}
			if (!timeout) {
				/* Report timeout error.
				 * core dump capture failed
				 * Skip remaining entries.
				 * Write buffer out to file
				 * Use driver specific fields in template header
				 * to report this error.
				 */
				return (-1);
			}
		}

		addr = read_addr;
		for (k = 0; k < read_cnt; k++) {

			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
			if (ret)
				return (0);

			*data_buff++ = read_value;
			addr += cacheEntry->read_addr_stride;
		}

		tag_value += cacheEntry->tag_value_stride;
	}

	return (read_cnt * loop_cnt * sizeof(uint32_t));
}

/*
 * Handle L1 Cache.
 *	Same tag/control/read sequence as ql_L2Cache but with an
 *	unconditional control write and no polling.  Returns bytes written,
 *	or 0 on a register access failure.
 */

static uint32_t
ql_L1Cache(qla_host_t *ha,
	ql_minidump_entry_cache_t *cacheEntry,
	uint32_t *data_buff)
{
	int ret;
	int i, k;
	int loop_cnt;

	uint32_t read_value;
	uint32_t addr, read_addr, cntrl_addr, tag_reg_addr;
	uint32_t tag_value, read_cnt;
	uint32_t cntl_value_w;

	loop_cnt = cacheEntry->op_count;

	read_addr = cacheEntry->read_addr;
	cntrl_addr = cacheEntry->control_addr;
	cntl_value_w = (uint32_t) cacheEntry->write_value;

	tag_reg_addr = cacheEntry->tag_reg_addr;

	tag_value = cacheEntry->init_tag_value;
	read_cnt = cacheEntry->read_addr_cnt;

	for (i = 0; i < loop_cnt; i++) {

		ret = ql_rdwr_indreg32(ha, tag_reg_addr, &tag_value, 0);
		if (ret)
			return (0);

		ret = ql_rdwr_indreg32(ha, cntrl_addr, &cntl_value_w, 0);
		if (ret)
			return (0);

		addr = read_addr;
		for (k = 0; k < read_cnt; k++) {

			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
			if (ret)
				return (0);

			*data_buff++ = read_value;
			addr += cacheEntry->read_addr_stride;
		}

		tag_value += cacheEntry->tag_value_stride;
	}

	return (read_cnt * loop_cnt * sizeof(uint32_t));
}

/*
 * Reading OCM memory
 *	Direct register reads (READ_REG32, no error return) of op_count
 *	words starting at read_addr.  Returns bytes written.
 */

static uint32_t
ql_rdocm(qla_host_t *ha,
	ql_minidump_entry_rdocm_t *ocmEntry,
	uint32_t *data_buff)
{
	int i, loop_cnt;
	volatile uint32_t addr;
	volatile uint32_t value;

	addr = ocmEntry->read_addr;
	loop_cnt = ocmEntry->op_count;

	for (i = 0; i < loop_cnt; i++) {
		value = READ_REG32(ha, addr);
		*data_buff++ = value;
		addr += ocmEntry->read_addr_stride;
	}
	return (loop_cnt * sizeof(value));
}

/*
 * Read memory
 *	Capture off-chip memory in 16-byte units via ql_rdwr_offchip_mem.
 *	read_data_size is in bytes; each iteration stores four 32-bit
 *	words.  Returns bytes written, or 0 on failure.
 */

static uint32_t
ql_rdmem(qla_host_t *ha,
	ql_minidump_entry_rdmem_t *mem_entry,
	uint32_t *data_buff)
{
	int ret;
	int i, loop_cnt;
	volatile uint32_t addr;
	q80_offchip_mem_val_t val;

	addr = mem_entry->read_addr;

	/* size in bytes / 16 */
	loop_cnt = mem_entry->read_data_size / (sizeof(uint32_t) * 4);

	for (i = 0; i < loop_cnt; i++) {

		/* Address is masked to 32 bits for the off-chip access. */
		ret = ql_rdwr_offchip_mem(ha, (addr & 0x0ffffffff), &val, 1);
		if (ret)
			return (0);

		*data_buff++ = val.data_lo;
		*data_buff++ = val.data_hi;
		*data_buff++ = val.data_ulo;
		*data_buff++ = val.data_uhi;

		addr += (sizeof(uint32_t) * 4);
	}

	return (loop_cnt * (sizeof(uint32_t) * 4));
}

/*
 * Read Rom
 *	Capture read_data_size bytes of flash, one 32-bit word per
 *	ql_rd_flash32 call.  Returns bytes written, or 0 on failure.
 */

static uint32_t
ql_rdrom(qla_host_t *ha,
	ql_minidump_entry_rdrom_t *romEntry,
	uint32_t *data_buff)
{
	int ret;
	int i, loop_cnt;
	uint32_t addr;
	uint32_t value;

	addr = romEntry->read_addr;
	loop_cnt = romEntry->read_data_size; /* This is size in bytes */
	loop_cnt /= sizeof(value);

	for (i = 0; i < loop_cnt; i++) {

		ret = ql_rd_flash32(ha, addr, &value);
		if (ret)
			return (0);

		*data_buff++ = value;
		addr += sizeof(value);
	}

	return (loop_cnt * sizeof(value));
}

/*
 * Read MUX data
 *	For each of op_count select values, write the select register and
 *	read back read_addr; store the (select, data) pair.  Returns bytes
 *	written, or 0 on failure.
 */

static uint32_t
ql_rdmux(qla_host_t *ha,
	ql_minidump_entry_mux_t *muxEntry,
	uint32_t *data_buff)
{
	int ret;
	int loop_cnt;
	uint32_t read_value, sel_value;
	uint32_t read_addr, select_addr;

	select_addr = muxEntry->select_addr;
	sel_value = muxEntry->select_value;
	read_addr = muxEntry->read_addr;

	for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {

		ret = ql_rdwr_indreg32(ha, select_addr, &sel_value, 0);
		if (ret)
			return (0);

		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
		if (ret)
			return (0);

		*data_buff++ = sel_value;
		*data_buff++ = read_value;

		sel_value += muxEntry->select_value_stride;
	}

	return (loop_cnt * (2 * sizeof(uint32_t)));
}

/*
 * ql_rdmux2
 *	Two-level MUX capture: each iteration programs select_addr_1 with
 *	select_value_1 (then _2), masks the value into select_addr_2, and
 *	reads read_addr — producing two (select, data) pairs per iteration.
 *	Returns bytes written, or 0 on failure.
 */
static uint32_t
ql_rdmux2(qla_host_t *ha,
	ql_minidump_entry_mux2_t *muxEntry,
	uint32_t *data_buff)
{
	int ret;
	int loop_cnt;

	uint32_t select_addr_1, select_addr_2;
	uint32_t select_value_1, select_value_2;
	uint32_t select_value_count, select_value_mask;
	uint32_t read_addr, read_value;

	select_addr_1 = muxEntry->select_addr_1;
	select_addr_2 = muxEntry->select_addr_2;
	select_value_1 = muxEntry->select_value_1;
	select_value_2 = muxEntry->select_value_2;
	select_value_count = muxEntry->select_value_count;
	select_value_mask = muxEntry->select_value_mask;

	read_addr = muxEntry->read_addr;

	for (loop_cnt = 0; loop_cnt < muxEntry->select_value_count;
		loop_cnt++) {

		uint32_t temp_sel_val;

		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_1, 0);
		if (ret)
			return (0);

		temp_sel_val = select_value_1 & select_value_mask;

		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
		if (ret)
			return (0);

		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
		if (ret)
			return (0);

		*data_buff++ = temp_sel_val;
		*data_buff++ = read_value;

		ret = ql_rdwr_indreg32(ha, select_addr_1, &select_value_2, 0);
		if (ret)
			return (0);

		temp_sel_val = select_value_2 & select_value_mask;

		ret = ql_rdwr_indreg32(ha, select_addr_2, &temp_sel_val, 0);
		if (ret)
			return (0);

		ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
		if (ret)
			return (0);

		*data_buff++ = temp_sel_val;
		*data_buff++ = read_value;

		select_value_1 += muxEntry->select_value_stride;
		select_value_2 += muxEntry->select_value_stride;
	}

	return (loop_cnt * (4 * sizeof(uint32_t)));
}

/*
 * Handling Queue State Reads.
 *	For each of op_count queue ids (stepping by queue_id_stride),
 *	select the queue and read read_addr_cnt words of queue state.
 *	Returns bytes written, or 0 on failure.
 */

static uint32_t
ql_rdqueue(qla_host_t *ha,
	ql_minidump_entry_queue_t *queueEntry,
	uint32_t *data_buff)
{
	int ret;
	int loop_cnt, k;
	uint32_t read_value;
	uint32_t read_addr, read_stride, select_addr;
	uint32_t queue_id, read_cnt;

	read_cnt = queueEntry->read_addr_cnt;
	read_stride = queueEntry->read_addr_stride;
	select_addr = queueEntry->select_addr;

	for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
		loop_cnt++) {

		ret = ql_rdwr_indreg32(ha, select_addr, &queue_id, 0);
		if (ret)
			return (0);

		read_addr = queueEntry->read_addr;

		for (k = 0; k < read_cnt; k++) {

			ret = ql_rdwr_indreg32(ha, read_addr, &read_value, 1);
			if (ret)
				return (0);

			*data_buff++ = read_value;
			read_addr += read_stride;
		}

		queue_id += queueEntry->queue_id_stride;
	}

	return (loop_cnt * (read_cnt * sizeof(uint32_t)));
}

/*
 * Handling control entries.
 */

/*
 * ql_cntrl
 *	Execute a control entry: for each of op_count addresses (starting
 *	at crbEntry->addr, stepping by addr_stride) apply the operations
 *	selected by the entry's opcode bitmask, in the fixed order coded
 *	below (WR, RW, AND[/OR], OR, POLL, RDSTATE, WRSTATE, MDSTATE).
 *	The *STATE opcodes read/write template_hdr->saved_state_array,
 *	which carries values between control entries.
 *
 *	Produces no dump data.  Returns 0 on success, 0 on a register
 *	access failure, or (uint32_t)-1 on a poll timeout (the caller
 *	marks the entry skipped on any non-zero return).
 */
static uint32_t
ql_cntrl(qla_host_t *ha,
	ql_minidump_template_hdr_t *template_hdr,
	ql_minidump_entry_cntrl_t *crbEntry)
{
	int ret;
	int count;
	uint32_t opcode, read_value, addr, entry_addr;
	long timeout;

	entry_addr = crbEntry->addr;

	for (count = 0; count < crbEntry->op_count; count++) {
		opcode = crbEntry->opcode;

		if (opcode & QL_DBG_OPCODE_WR) {

			ret = ql_rdwr_indreg32(ha, entry_addr,
					&crbEntry->value_1, 0);
			if (ret)
				return (0);

			opcode &= ~QL_DBG_OPCODE_WR;
		}

		if (opcode & QL_DBG_OPCODE_RW) {

			/* Read the register and write the value straight back. */
			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
			if (ret)
				return (0);

			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
			if (ret)
				return (0);

			opcode &= ~QL_DBG_OPCODE_RW;
		}

		if (opcode & QL_DBG_OPCODE_AND) {

			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
			if (ret)
				return (0);

			read_value &= crbEntry->value_2;
			opcode &= ~QL_DBG_OPCODE_AND;

			/* A combined AND+OR folds the OR into one write. */
			if (opcode & QL_DBG_OPCODE_OR) {
				read_value |= crbEntry->value_3;
				opcode &= ~QL_DBG_OPCODE_OR;
			}

			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
			if (ret)
				return (0);
		}

		if (opcode & QL_DBG_OPCODE_OR) {

			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 1);
			if (ret)
				return (0);

			read_value |= crbEntry->value_3;

			ret = ql_rdwr_indreg32(ha, entry_addr, &read_value, 0);
			if (ret)
				return (0);

			opcode &= ~QL_DBG_OPCODE_OR;
		}

		if (opcode & QL_DBG_OPCODE_POLL) {

			opcode &= ~QL_DBG_OPCODE_POLL;
			timeout = crbEntry->poll_timeout;
			addr = entry_addr;

			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
			if (ret)
				return (0);

			/*
			 * Poll (1 ms steps) until the masked register value
			 * equals value_1 or poll_timeout expires.
			 */
			while ((read_value & crbEntry->value_2)
				!= crbEntry->value_1) {

				if (timeout) {
					qla_mdelay(__func__, 1);
					timeout--;
				} else
					break;

				ret = ql_rdwr_indreg32(ha, addr,
						&read_value, 1);
				if (ret)
					return (0);
			}

			if (!timeout) {
				/*
				 * Report timeout error.
				 * core dump capture failed
				 * Skip remaining entries.
				 * Write buffer out to file
				 * Use driver specific fields in template header
				 * to report this error.
				 */
				return (-1);
			}
		}

		if (opcode & QL_DBG_OPCODE_RDSTATE) {
			/*
			 * decide which address to use.
			 */
			if (crbEntry->state_index_a) {
				addr = template_hdr->saved_state_array[
						crbEntry-> state_index_a];
			} else {
				addr = entry_addr;
			}

			ret = ql_rdwr_indreg32(ha, addr, &read_value, 1);
			if (ret)
				return (0);

			template_hdr->saved_state_array[crbEntry->state_index_v]
					= read_value;
			opcode &= ~QL_DBG_OPCODE_RDSTATE;
		}

		if (opcode & QL_DBG_OPCODE_WRSTATE) {
			/*
			 * decide which value to use.
			 */
			if (crbEntry->state_index_v) {
				read_value = template_hdr->saved_state_array[
						crbEntry->state_index_v];
			} else {
				read_value = crbEntry->value_1;
			}
			/*
			 * decide which address to use.
			 */
			if (crbEntry->state_index_a) {
				addr = template_hdr->saved_state_array[
						crbEntry-> state_index_a];
			} else {
				addr = entry_addr;
			}

			ret = ql_rdwr_indreg32(ha, addr, &read_value, 0);
			if (ret)
				return (0);

			opcode &= ~QL_DBG_OPCODE_WRSTATE;
		}

		if (opcode & QL_DBG_OPCODE_MDSTATE) {
			/* Read value from saved state using index */
			read_value = template_hdr->saved_state_array[
					crbEntry->state_index_v];

			read_value <<= crbEntry->shl; /*Shift left operation */
			read_value >>= crbEntry->shr; /*Shift right operation */

			if (crbEntry->value_2) {
				/* check if AND mask is provided */
				read_value &= crbEntry->value_2;
			}

			read_value |= crbEntry->value_3; /* OR operation */
			read_value += crbEntry->value_1; /* increment op */

			/* Write value back to state area. */

			template_hdr->saved_state_array[crbEntry->state_index_v]
					= read_value;
			opcode &= ~QL_DBG_OPCODE_MDSTATE;
		}

		entry_addr += crbEntry->addr_stride;
	}

	return (0);
}

/*
 * Handling rd poll entry.
5511 */ 5512 5513static uint32_t 5514ql_pollrd(qla_host_t *ha, ql_minidump_entry_pollrd_t *entry, 5515 uint32_t *data_buff) 5516{ 5517 int ret; 5518 int loop_cnt; 5519 uint32_t op_count, select_addr, select_value_stride, select_value; 5520 uint32_t read_addr, poll, mask, data_size, data; 5521 uint32_t wait_count = 0; 5522 5523 select_addr = entry->select_addr; 5524 read_addr = entry->read_addr; 5525 select_value = entry->select_value; 5526 select_value_stride = entry->select_value_stride; 5527 op_count = entry->op_count; 5528 poll = entry->poll; 5529 mask = entry->mask; 5530 data_size = entry->data_size; 5531 5532 for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) { 5533 5534 ret = ql_rdwr_indreg32(ha, select_addr, &select_value, 0); 5535 if (ret) 5536 return (0); 5537 5538 wait_count = 0; 5539 5540 while (wait_count < poll) { 5541 5542 uint32_t temp; 5543 5544 ret = ql_rdwr_indreg32(ha, select_addr, &temp, 1); 5545 if (ret) 5546 return (0); 5547 5548 if ( (temp & mask) != 0 ) { 5549 break; 5550 } 5551 wait_count++; 5552 } 5553 5554 if (wait_count == poll) { 5555 device_printf(ha->pci_dev, 5556 "%s: Error in processing entry\n", __func__); 5557 device_printf(ha->pci_dev, 5558 "%s: wait_count <0x%x> poll <0x%x>\n", 5559 __func__, wait_count, poll); 5560 return 0; 5561 } 5562 5563 ret = ql_rdwr_indreg32(ha, read_addr, &data, 1); 5564 if (ret) 5565 return (0); 5566 5567 *data_buff++ = select_value; 5568 *data_buff++ = data; 5569 select_value = select_value + select_value_stride; 5570 } 5571 5572 /* 5573 * for testing purpose we return amount of data written 5574 */ 5575 return (loop_cnt * (2 * sizeof(uint32_t))); 5576} 5577 5578 5579/* 5580 * Handling rd modify write poll entry. 
5581 */ 5582 5583static uint32_t 5584ql_pollrd_modify_write(qla_host_t *ha, 5585 ql_minidump_entry_rd_modify_wr_with_poll_t *entry, 5586 uint32_t *data_buff) 5587{ 5588 int ret; 5589 uint32_t addr_1, addr_2, value_1, value_2, data; 5590 uint32_t poll, mask, data_size, modify_mask; 5591 uint32_t wait_count = 0; 5592 5593 addr_1 = entry->addr_1; 5594 addr_2 = entry->addr_2; 5595 value_1 = entry->value_1; 5596 value_2 = entry->value_2; 5597 5598 poll = entry->poll; 5599 mask = entry->mask; 5600 modify_mask = entry->modify_mask; 5601 data_size = entry->data_size; 5602 5603 5604 ret = ql_rdwr_indreg32(ha, addr_1, &value_1, 0); 5605 if (ret) 5606 return (0); 5607 5608 wait_count = 0; 5609 while (wait_count < poll) { 5610 5611 uint32_t temp; 5612 5613 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1); 5614 if (ret) 5615 return (0); 5616 5617 if ( (temp & mask) != 0 ) { 5618 break; 5619 } 5620 wait_count++; 5621 } 5622 5623 if (wait_count == poll) { 5624 device_printf(ha->pci_dev, "%s Error in processing entry\n", 5625 __func__); 5626 } else { 5627 5628 ret = ql_rdwr_indreg32(ha, addr_2, &data, 1); 5629 if (ret) 5630 return (0); 5631 5632 data = (data & modify_mask); 5633 5634 ret = ql_rdwr_indreg32(ha, addr_2, &data, 0); 5635 if (ret) 5636 return (0); 5637 5638 ret = ql_rdwr_indreg32(ha, addr_1, &value_2, 0); 5639 if (ret) 5640 return (0); 5641 5642 /* Poll again */ 5643 wait_count = 0; 5644 while (wait_count < poll) { 5645 5646 uint32_t temp; 5647 5648 ret = ql_rdwr_indreg32(ha, addr_1, &temp, 1); 5649 if (ret) 5650 return (0); 5651 5652 if ( (temp & mask) != 0 ) { 5653 break; 5654 } 5655 wait_count++; 5656 } 5657 *data_buff++ = addr_2; 5658 *data_buff++ = data; 5659 } 5660 5661 /* 5662 * for testing purpose we return amount of data written 5663 */ 5664 return (2 * sizeof(uint32_t)); 5665} 5666 5667 5668