/* ql_os.c revision 322975 */
1/* 2 * Copyright (c) 2013-2016 Qlogic Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28/* 29 * File: ql_os.c 30 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. 
31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: stable/10/sys/dev/qlxgbe/ql_os.c 322975 2017-08-28 19:17:28Z davidcs $"); 35 36 37#include "ql_os.h" 38#include "ql_hw.h" 39#include "ql_def.h" 40#include "ql_inline.h" 41#include "ql_ver.h" 42#include "ql_glbl.h" 43#include "ql_dbg.h" 44#include <sys/smp.h> 45 46/* 47 * Some PCI Configuration Space Related Defines 48 */ 49 50#ifndef PCI_VENDOR_QLOGIC 51#define PCI_VENDOR_QLOGIC 0x1077 52#endif 53 54#ifndef PCI_PRODUCT_QLOGIC_ISP8030 55#define PCI_PRODUCT_QLOGIC_ISP8030 0x8030 56#endif 57 58#define PCI_QLOGIC_ISP8030 \ 59 ((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC) 60 61/* 62 * static functions 63 */ 64static int qla_alloc_parent_dma_tag(qla_host_t *ha); 65static void qla_free_parent_dma_tag(qla_host_t *ha); 66static int qla_alloc_xmt_bufs(qla_host_t *ha); 67static void qla_free_xmt_bufs(qla_host_t *ha); 68static int qla_alloc_rcv_bufs(qla_host_t *ha); 69static void qla_free_rcv_bufs(qla_host_t *ha); 70static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb); 71 72static void qla_init_ifnet(device_t dev, qla_host_t *ha); 73static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS); 74static void qla_release(qla_host_t *ha); 75static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, 76 int error); 77static void qla_stop(qla_host_t *ha); 78static void qla_get_peer(qla_host_t *ha); 79static void qla_error_recovery(void *context, int pending); 80static void qla_async_event(void *context, int pending); 81static void qla_stats(void *context, int pending); 82static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, 83 uint32_t iscsi_pdu); 84 85/* 86 * Hooks to the Operating Systems 87 */ 88static int qla_pci_probe (device_t); 89static int qla_pci_attach (device_t); 90static int qla_pci_detach (device_t); 91 92static void qla_init(void *arg); 93static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 94static int qla_media_change(struct ifnet 
*ifp); 95static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); 96 97static int qla_transmit(struct ifnet *ifp, struct mbuf *mp); 98static void qla_qflush(struct ifnet *ifp); 99static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); 100static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); 101static int qla_create_fp_taskqueues(qla_host_t *ha); 102static void qla_destroy_fp_taskqueues(qla_host_t *ha); 103static void qla_drain_fp_taskqueues(qla_host_t *ha); 104 105static device_method_t qla_pci_methods[] = { 106 /* Device interface */ 107 DEVMETHOD(device_probe, qla_pci_probe), 108 DEVMETHOD(device_attach, qla_pci_attach), 109 DEVMETHOD(device_detach, qla_pci_detach), 110 { 0, 0 } 111}; 112 113static driver_t qla_pci_driver = { 114 "ql", qla_pci_methods, sizeof (qla_host_t), 115}; 116 117static devclass_t qla83xx_devclass; 118 119DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0); 120 121MODULE_DEPEND(qla83xx, pci, 1, 1, 1); 122MODULE_DEPEND(qla83xx, ether, 1, 1, 1); 123 124MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver"); 125 126#define QL_STD_REPLENISH_THRES 0 127#define QL_JUMBO_REPLENISH_THRES 32 128 129 130static char dev_str[64]; 131static char ver_str[64]; 132 133/* 134 * Name: qla_pci_probe 135 * Function: Validate the PCI device to be a QLA80XX device 136 */ 137static int 138qla_pci_probe(device_t dev) 139{ 140 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 141 case PCI_QLOGIC_ISP8030: 142 snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d", 143 "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function", 144 QLA_VERSION_MAJOR, QLA_VERSION_MINOR, 145 QLA_VERSION_BUILD); 146 snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d", 147 QLA_VERSION_MAJOR, QLA_VERSION_MINOR, 148 QLA_VERSION_BUILD); 149 device_set_desc(dev, dev_str); 150 break; 151 default: 152 return (ENXIO); 153 } 154 155 if (bootverbose) 156 printf("%s: %s\n ", __func__, dev_str); 157 158 return (BUS_PROBE_DEFAULT); 159} 

/*
 * Name: qla_add_sysctls
 * Function: Registers the per-device sysctl nodes (driver/firmware version,
 *	link status, debug level, replenish threshold and LRO/TSO/VLAN
 *	counters) under this device's sysctl tree.
 */
static void
qla_add_sysctls(qla_host_t *ha)
{
	device_t dev = ha->pci_dev;

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "version", CTLFLAG_RD,
		ver_str, 0, "Driver Version");

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "fw_version", CTLFLAG_RD,
		ha->fw_ver_str, 0, "firmware version");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW,
		(void *)ha, 0,
		qla_sysctl_get_link_status, "I", "Link Status");

	ha->dbg_level = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "debug", CTLFLAG_RW,
		&ha->dbg_level, ha->dbg_level, "Debug Level");

	ha->std_replenish = QL_STD_REPLENISH_THRES;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "std_replenish", CTLFLAG_RW,
		&ha->std_replenish, ha->std_replenish,
		"Threshold for Replenishing Standard Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ipv4_lro",
		CTLFLAG_RD, &ha->ipv4_lro,
		"number of ipv4 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "ipv6_lro",
		CTLFLAG_RD, &ha->ipv6_lro,
		"number of ipv6 lro completions");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "tx_tso_frames",
		CTLFLAG_RD, &ha->tx_tso_frames,
		"number of Tx TSO Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_vlan_tx_frames",
		CTLFLAG_RD, &ha->hw_vlan_tx_frames,
		"number of Tx VLAN Frames");

	SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "hw_lock_failed",
		CTLFLAG_RD, &ha->hw_lock_failed,
		"number of hw_lock failures");

	return;
}

/*
 * Name: qla_watchdog
 * Function: Periodic callout body.  Checks hardware health and, on failure
 *	(or a peer-requested reset), queues the error-recovery task; otherwise
 *	periodically queues the stats and async-event tasks.  Re-arms itself
 *	unless qla_watchdog_exit has been set by qla_release().
 */
static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;

	hw = &ha->hw;
	ifp = ha->ifp;

	/* Teardown handshake: signal the waiter in qla_release() and stop. */
	if (ha->qla_watchdog_exit) {
		ha->qla_watchdog_exited = 1;
		return;
	}
	ha->qla_watchdog_exited = 0;

	if (!ha->qla_watchdog_pause) {
		if (ql_hw_check_health(ha) || ha->qla_initiate_recovery ||
			(ha->msg_from_peer == QL_PEER_MSG_RESET)) {

			/* dbg_level bit 0x8000 suppresses recovery (debug aid) */
			if (!(ha->dbg_level & 0x8000)) {
				ha->qla_watchdog_paused = 1;
				ha->qla_watchdog_pause = 1;
				ha->qla_initiate_recovery = 0;
				ha->err_inject = 0;
				device_printf(ha->pci_dev,
					"%s: taskqueue_enqueue(err_task) \n",
					__func__);
				taskqueue_enqueue(ha->err_tq, &ha->err_task);
				/* note: does not re-arm the callout here;
				 * recovery is expected to restart it */
				return;
			}

		} else if (ha->qla_interface_up) {

			ha->watchdog_ticks++;

			if (ha->watchdog_ticks > 1000)
				ha->watchdog_ticks = 0;

			/* stats roughly once per 1000 ticks while running */
			if (!ha->watchdog_ticks && QL_RUNNING(ifp)) {
				taskqueue_enqueue(ha->stats_tq, &ha->stats_task);
			}

			if (ha->async_event) {
				taskqueue_enqueue(ha->async_event_tq,
					&ha->async_event_task);
			}

#if 0
			for (i = 0; ((i < ha->hw.num_sds_rings) &&
					!ha->watchdog_ticks); i++) {
				qla_tx_fp_t *fp = &ha->tx_fp[i];

				if (fp->fp_taskqueue != NULL)
					taskqueue_enqueue(fp->fp_taskqueue,
						&fp->fp_task);
			}
#endif
			ha->qla_watchdog_paused = 0;
		} else {
			ha->qla_watchdog_paused = 0;
		}
	} else {
		ha->qla_watchdog_paused = 1;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}

/*
 * Name: qla_pci_attach
 * Function: attaches the device to the operating system
 *
 * Order of initialization: map BAR0/BAR2, init hw lock and sysctls, size the
 * SDS/RDS/TX rings from the MSI-X vector count and NIC partition info, set up
 * the mailbox interrupt plus one interrupt + buf_ring per SDS ring, allocate
 * DMA resources, create the ifnet, and finally start the watchdog callout and
 * the error/async/stats taskqueues.  All failures unwind via qla_release().
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len;
	int i;
	uint32_t num_rcvq = 0;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qla_host_t));

	if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) {
		device_printf(dev, "device is not ISP8030\n");
		return (ENXIO);
	}

	/* only the low bit of the PCI function number is significant here */
	ha->pci_func = pci_get_function(dev) & 0x1;

	ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto qla_pci_attach_err;
	}

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;

	qla_add_sysctls(ha);

	ha->hw.num_sds_rings = MAX_SDS_RINGS;
	ha->hw.num_rds_rings = MAX_RDS_RINGS;
	ha->hw.num_tx_rings = NUM_TX_RINGS;

	ha->reg_rid1 = PCIR_BAR(2);
	ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->reg_rid1, RF_ACTIVE);

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < 1 ) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	/* one vector is reserved for the mailbox interrupt */
	if (ha->msix_count < (ha->hw.num_sds_rings + 1)) {
		ha->hw.num_sds_rings = ha->msix_count - 1;
	}

	QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha,
		ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg,
		ha->pci_reg1));

	/* initialize hardware */
	if (ql_init_hw(ha)) {
		device_printf(dev, "%s: ql_init_hw failed\n", __func__);
		goto qla_pci_attach_err;
	}

	device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);
	snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
			ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
			ha->fw_ver_build);

	if (qla_get_nic_partition(ha, NULL, &num_rcvq)) {
		device_printf(dev, "%s: qla_get_nic_partition failed\n",
			__func__);
		goto qla_pci_attach_err;
	}
	device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
		__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
		ha->pci_reg, ha->pci_reg1, num_rcvq);

	/* cap the ring count when vectors/receive queues are limited */
	if ((ha->msix_count < 64) || (num_rcvq != 32)) {
		if (ha->hw.num_sds_rings > 15) {
			ha->hw.num_sds_rings = 15;
		}
	}

	ha->hw.num_rds_rings = ha->hw.num_sds_rings;
	ha->hw.num_tx_rings = ha->hw.num_sds_rings;

#ifdef QL_ENABLE_ISCSI_TLV
	/* second half of the tx rings carries iSCSI PDU traffic */
	ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2;
#endif /* #ifdef QL_ENABLE_ISCSI_TLV */

	ql_hw_add_sysctls(ha);

	ha->msix_count = ha->hw.num_sds_rings + 1;

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	/* vector/rid 1: mailbox interrupt */
	ha->mbx_irq_rid = 1;
	ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->mbx_irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));
	if (ha->mbx_irq == NULL) {
		device_printf(dev, "could not allocate mbx interrupt\n");
		goto qla_pci_attach_err;
	}
	if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE),
		NULL, ql_mbx_isr, ha, &ha->mbx_handle)) {
		device_printf(dev, "could not setup mbx interrupt\n");
		goto qla_pci_attach_err;
	}

	/* rids 2..N: one interrupt + tx fastpath per SDS ring */
	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->irq_vec[i].sds_idx = i;
		ha->irq_vec[i].ha = ha;
		ha->irq_vec[i].irq_rid = 2 + i;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
				&ha->irq_vec[i].irq_rid,
				(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}
		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, ql_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}

		ha->tx_fp[i].ha = ha;
		ha->tx_fp[i].txr_idx = i;

		if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) {
			device_printf(dev, "%s: could not allocate tx_br[%d]\n",
				__func__, i);
			goto qla_pci_attach_err;
		}
	}

	if (qla_create_fp_taskqueues(ha) != 0)
		goto qla_pci_attach_err;

	printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus,
		ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count);

	ql_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (ql_alloc_dma(ha)) {
		device_printf(dev, "%s: ql_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}
	qla_get_peer(ha);

	if (ql_minidump_init(ha) != 0) {
		device_printf(dev, "%s: ql_minidump_init failed\n", __func__);
		goto qla_pci_attach_err;
	}
	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->qla_watchdog_pause = 0;

	callout_init(&ha->tx_callout, TRUE);
	ha->flags.qla_callout_init = 1;

	/* create ioctl device interface */
	if (ql_make_cdev(ha)) {
		device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha);
	ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->err_tq);
	taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq",
		device_get_nameunit(ha->pci_dev));

	TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha);
	ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->async_event_tq);
	taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq",
		device_get_nameunit(ha->pci_dev));

	TASK_INIT(&ha->stats_task, 0, qla_stats, ha);
	ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT,
			taskqueue_thread_enqueue, &ha->stats_tq);
	taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq",
		device_get_nameunit(ha->pci_dev));

	QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__));
	return (0);

qla_pci_attach_err:

	qla_release(ha);

	/* hw_lock is destroyed here rather than in qla_release() (see the
	 * commented-out block there) so the release path can still use it */
	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__));
	return (ENXIO);
}

/*
 * Name: qla_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;


	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	/* NOTE(review): QLA_LOCK return value is ignored here; -1 timeout
	 * presumably means wait forever — confirm against QLA_LOCK impl */
	QLA_LOCK(ha, __func__, -1, 0);

	ha->qla_detach_active = 1;
	qla_stop(ha);

	qla_release(ha);

	QLA_UNLOCK(ha, __func__);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
	}

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return (0);
}

/*
 * SYSCTL Related Callbacks
 */

/*
 * Name: qla_sysctl_get_link_status
 * Function: sysctl handler for "link_status"; writing 1 forces a hardware
 *	link status refresh via ql_hw_link_status().
 */
static int
qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	/* read-only access or handler error: nothing more to do */
	if (err || !req->newptr)
		return (err);

	if (ret == 1) {
		ha = (qla_host_t *)arg1;
		ql_hw_link_status(ha);
	}
	return (err);
}

/*
 * Name: qla_release
 * Function: Releases the resources allocated for the device
 *
 * Teardown order: drain/free taskqueues, remove the cdev, stop the watchdog
 * (handshake via qla_watchdog_exit/exited) and callout, detach the ifnet,
 * free DMA, tear down mailbox and per-ring interrupts and buf_rings, release
 * MSI-X and the BAR mappings.  Safe to call on a partially-attached softc:
 * every step checks whether its resource exists.
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	if (ha->async_event_tq) {
		taskqueue_drain(ha->async_event_tq, &ha->async_event_task);
		taskqueue_free(ha->async_event_tq);
	}

	if (ha->err_tq) {
		taskqueue_drain(ha->err_tq, &ha->err_task);
		taskqueue_free(ha->err_tq);
	}

	if (ha->stats_tq) {
		taskqueue_drain(ha->stats_tq, &ha->stats_task);
		taskqueue_free(ha->stats_tq);
	}

	ql_del_cdev(ha);

	if (ha->flags.qla_watchdog_active) {
		ha->qla_watchdog_exit = 1;

		/* wait for the callout body to observe the exit flag */
		while (ha->qla_watchdog_exited == 0)
			qla_mdelay(__func__, 1);
	}

	if (ha->flags.qla_callout_init)
		callout_stop(&ha->tx_callout);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	ql_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	if (ha->mbx_handle)
		(void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle);

	if (ha->mbx_irq)
		(void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid,
				ha->mbx_irq);

	for (i = 0; i < ha->hw.num_sds_rings; i++) {

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qla_free_tx_br(ha, &ha->tx_fp[i]);
	}
	qla_destroy_fp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

/* hw_lock destruction is deliberately left to the attach/detach callers */
//	if (ha->flags.lock_init) {
//		mtx_destroy(&ha->hw_lock);
//	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

	if (ha->pci_reg1)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1,
				ha->pci_reg1);

	return;
}

/*
 * DMA Related Functions
 */

/*
 * Name: qla_dmamap_callback
 * Function: bus_dmamap_load callback; stores the single mapped segment's bus
 *	address into *arg, or 0 on error (callers test for a zero address).
 */
static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        *((bus_addr_t *)arg) = 0;

        if (error) {
                printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
                return;
	}

        *((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

/*
 * Name: ql_alloc_dmabuf
 * Function: Creates a single-segment DMA tag sized/aligned per dma_buf,
 *	allocates zeroed coherent memory for it and loads the map; on success
 *	fills in dma_buf->dma_tag/dma_map/dma_b/dma_addr.  Returns 0 on
 *	success, non-zero on failure (all partial allocations are undone).
 */
int
ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	int		ret = 0;
	device_t	dev;
	bus_addr_t	b_addr;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ret = bus_dma_tag_create(
			ha->parent_tag,/* parent */
			dma_buf->alignment,
			((bus_size_t)(1ULL << 32)),/* boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			dma_buf->size,		/* maxsize */
			1,			/* nsegments */
			dma_buf->size,		/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&dma_buf->dma_tag);

	if (ret) {
		device_printf(dev, "%s: could not create dma tag\n", __func__);
		goto ql_alloc_dmabuf_exit;
	}
	ret = bus_dmamem_alloc(dma_buf->dma_tag,
			(void **)&dma_buf->dma_b,
			(BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
			&dma_buf->dma_map);
	if (ret) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
		goto ql_alloc_dmabuf_exit;
	}

	ret = bus_dmamap_load(dma_buf->dma_tag,
			dma_buf->dma_map,
			dma_buf->dma_b,
			dma_buf->size,
			qla_dmamap_callback,
			&b_addr, BUS_DMA_NOWAIT);

	/* b_addr == 0 means the callback reported a load error */
	if (ret || !b_addr) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
			dma_buf->dma_map);
		ret = -1;
		goto ql_alloc_dmabuf_exit;
	}

	dma_buf->dma_addr = b_addr;

ql_alloc_dmabuf_exit:
	QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
		__func__, ret, (void *)dma_buf->dma_tag,
		(void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

	return ret;
}

/*
 * Name: ql_free_dmabuf
 * Function: Unloads, frees and destroys the DMA resources previously
 *	allocated by ql_alloc_dmabuf().
 */
void
ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
	bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
	bus_dma_tag_destroy(dma_buf->dma_tag);
}

/*
 * Name: qla_alloc_parent_dma_tag
 * Function: Creates the top-level DMA tag all other per-buffer tags derive
 *	from; sets ha->flags.parent_tag on success.
 */
static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int		ret;
	device_t	dev;

	dev = ha->pci_dev;

        /*
         * Allocate parent DMA Tag
         */
	ret = bus_dma_tag_create(
			bus_get_dma_tag(dev),	/* parent */
			1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&ha->parent_tag);

	if (ret) {
		device_printf(dev, "%s: could not create parent dma tag\n",
			__func__);
		return (-1);
	}

	ha->flags.parent_tag = 1;

	return (0);
}

/*
 * Name: qla_free_parent_dma_tag
 * Function: Destroys the parent DMA tag if it was created.
 */
static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
	if (ha->flags.parent_tag) {
		bus_dma_tag_destroy(ha->parent_tag);
		ha->flags.parent_tag = 0;
	}
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

#if __FreeBSD_version >= 1000000
	if_initbaudrate(ifp, IF_Gbps(10));
	ifp->if_capabilities = IFCAP_LINKSTATE;
#else
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = (1 * 1000 * 1000 *1000);

#endif /* #if __FreeBSD_version >= 1000000 */

	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;

	/* multiqueue transmit via per-ring buf_rings (no if_start path) */
	ifp->if_transmit = qla_transmit;
	ifp->if_qflush = qla_qflush;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	ifp->if_capabilities |= IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_JUMBO_MTU |
				IFCAP_VLAN_HWTAGGING |
				IFCAP_VLAN_MTU |
				IFCAP_VLAN_HWTSO |
				IFCAP_LRO;

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2(ha, (dev, "%s: exit\n", __func__));

	return;
}

/*
 * Name: qla_init_locked
 * Function: (Re)initializes the interface: stops it, allocates transmit and
 *	receive buffers, programs the MAC address and hw checksum assists,
 *	then brings the hardware interface up.  Caller must hold QLA_LOCK.
 */
static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	qla_confirm_9kb_enable(ha);

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
	ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;

	ha->stop_rcv = 0;
 	if (ql_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ha->qla_watchdog_pause = 0;
		ha->hw_vlan_tx_frames = 0;
		ha->tx_tso_frames = 0;
		ha->qla_interface_up = 1;
		ql_update_link_state(ha);
	}

	return;
}

/*
 * Name: qla_init
 * Function: if_init entry point; takes QLA_LOCK and calls qla_init_locked().
 */
static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (QLA_LOCK(ha, __func__, -1, 0) != 0)
		return;

	qla_init_locked(ha);

	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
}

/*
 * Name: qla_set_multi
 * Function: Collects up to Q8_MAX_NUM_MULTICAST_ADDRS link-layer multicast
 *	addresses from the ifnet and programs (add_multi != 0) or removes
 *	them in hardware.  Returns 0 on success, non-zero on failure.
 */
static int
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;
	int ret = 0;

	if_maddr_rlock(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
		QLA_LOCK_NO_SLEEP) != 0)
		return (-1);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		ret = ql_hw_set_multi(ha, mta, mcnt, add_multi);
	}

	QLA_UNLOCK(ha, __func__);

	return (ret);
}

/*
 * Name: qla_ioctl
 * Function: ifnet ioctl entry point.  Handles address/MTU/flags/multicast/
 *	media/capability requests; everything else is passed to ether_ioctl().
 */
static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {

			ret = QLA_LOCK(ha, __func__,
					QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);
			if (ret)
				break;

			ifp->if_flags |= IFF_UP;

			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				qla_init_locked(ha);
			}

			QLA_UNLOCK(ha, __func__);
			QL_DPRINT4(ha, (ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_MTU) {
			ret = EINVAL;
		} else {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
					QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			/* restart so ring buffers match the new frame size */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				qla_init_locked(ha);
			}

			if (ifp->if_mtu > ETHERMTU)
				ha->std_replenish = QL_JUMBO_REPLENISH_THRES;
			else
				ha->std_replenish = QL_STD_REPLENISH_THRES;


			QLA_UNLOCK(ha, __func__);
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

		if (ret)
			break;

		if (ifp->if_flags & IFF_UP) {

			ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
			qla_init_locked(ha);

			/* only push promisc/allmulti deltas to hardware */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					ret = ql_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					ret = ql_set_allmulti(ha);
				}
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
		}

		QLA_UNLOCK(ha, __func__);
		break;

	case SIOCADDMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (qla_set_multi(ha, 1))
			ret = EINVAL;
		break;

	case SIOCDELMULTI:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (qla_set_multi(ha, 0))
			ret = EINVAL;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4(ha, (ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		/* reinitialize so the hardware picks up the new capabilities */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT,
				QLA_LOCK_NO_SLEEP);

			if (ret)
				break;

			qla_init_locked(ha);

			QLA_UNLOCK(ha, __func__);

		}
		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

/*
 * Name: qla_media_change
 * Function: ifmedia change callback; only IFM_ETHER media are accepted.
 */
static int
qla_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

/*
 * Name: qla_media_status
 * Function: ifmedia status callback; refreshes the hardware link state and
 *	reports active/valid flags plus full-duplex optics when link is up.
 */
static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	ql_update_link_state(ha);
	if (ha->hw.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\
		(ha->hw.link_up ? "link_up" : "link_down")));

	return;
}


/*
 * Name: qla_send
 * Function: DMA-maps an mbuf chain (defragmenting once on EFBIG) and hands
 *	the segments to ql_hw_send() on transmit ring txr_idx.  On success the
 *	mbuf is recorded in the tx buffer slot for completion-time free; on
 *	most failures the mbuf is freed and *m_headp cleared (except ENOMEM,
 *	where the caller may retry).  Returns 0 or an errno.
 */
static int
qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
	uint32_t iscsi_pdu)
{
	bus_dma_segment_t	segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t		map;
	int			nsegs;
	int			ret = -1;
	uint32_t		tx_idx;
	struct mbuf		*m_head = *m_headp;

	QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));

	tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
	map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map;

	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
			BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		/* too many segments: collapse the chain and try once more */
		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;
		*m_headp = m_head;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
					segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}

	} else if (ret) {

		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

	if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx,
				iscsi_pdu))) {
		ha->tx_ring[txr_idx].count++;
		if (iscsi_pdu)
			ha->tx_ring[txr_idx].iscsi_pkt_count++;
		ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head;
	} else {
		if (ret == EINVAL) {
			if (m_head)
				m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

/*
 * Name: qla_alloc_tx_br
 * Function: Initializes the fastpath's tx mutex and allocates its buf_ring.
 *	NOTE(review): returns -ENOMEM (negative errno, Linux-style) on
 *	failure; callers only test for non-zero so this is harmless.
 */
static int
qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
        snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
                "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx);

        mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);

        fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF,
                                   M_NOWAIT, &fp->tx_mtx);
        if (fp->tx_br == NULL) {
            QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for "
                " fp[%d, %d]\n", ha->pci_func, fp->txr_idx));
            return (-ENOMEM);
        }
        return 0;
}

/*
 * Name: qla_free_tx_br
 * Function: Drains and frees the fastpath's buf_ring (freeing any queued
 *	mbufs) and destroys its tx mutex.  Safe to call if never initialized.
 */
static void
qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp)
{
        struct mbuf *mp;
        struct ifnet *ifp = ha->ifp;

        if (mtx_initialized(&fp->tx_mtx)) {

                if (fp->tx_br != NULL) {

                        mtx_lock(&fp->tx_mtx);

                        while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
                                m_freem(mp);
                        }

                        mtx_unlock(&fp->tx_mtx);

                        buf_ring_free(fp->tx_br, M_DEVBUF);
                        fp->tx_br = NULL;
                }
                mtx_destroy(&fp->tx_mtx);
        }
        return;
}

/*
 * Name: qla_fp_taskqueue
 * Function: Per-fastpath task; services receive completions and drains the
 *	transmit buf_ring for this ring pair.
 */
static void
qla_fp_taskqueue(void *context, int pending)
{
qla_tx_fp_t *fp; 1347 qla_host_t *ha; 1348 struct ifnet *ifp; 1349 struct mbuf *mp; 1350 int ret; 1351 uint32_t txr_idx; 1352 uint32_t iscsi_pdu = 0; 1353 uint32_t rx_pkts_left = -1; 1354 1355 fp = context; 1356 1357 if (fp == NULL) 1358 return; 1359 1360 ha = (qla_host_t *)fp->ha; 1361 1362 ifp = ha->ifp; 1363 1364 txr_idx = fp->txr_idx; 1365 1366 mtx_lock(&fp->tx_mtx); 1367 1368 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) { 1369 mtx_unlock(&fp->tx_mtx); 1370 goto qla_fp_taskqueue_exit; 1371 } 1372 1373 while (rx_pkts_left && !ha->stop_rcv) { 1374 rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64); 1375 1376#ifdef QL_ENABLE_ISCSI_TLV 1377 ql_hw_tx_done_locked(ha, fp->txr_idx); 1378 ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1))); 1379#else 1380 ql_hw_tx_done_locked(ha, fp->txr_idx); 1381#endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 1382 1383 mp = drbr_peek(ifp, fp->tx_br); 1384 1385 while (mp != NULL) { 1386 1387 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) { 1388#ifdef QL_ENABLE_ISCSI_TLV 1389 if (ql_iscsi_pdu(ha, mp) == 0) { 1390 txr_idx = txr_idx + 1391 (ha->hw.num_tx_rings >> 1); 1392 iscsi_pdu = 1; 1393 } else { 1394 iscsi_pdu = 0; 1395 txr_idx = fp->txr_idx; 1396 } 1397#endif /* #ifdef QL_ENABLE_ISCSI_TLV */ 1398 } 1399 1400 ret = qla_send(ha, &mp, txr_idx, iscsi_pdu); 1401 1402 if (ret) { 1403 if (mp != NULL) 1404 drbr_putback(ifp, fp->tx_br, mp); 1405 else { 1406 drbr_advance(ifp, fp->tx_br); 1407 } 1408 1409 mtx_unlock(&fp->tx_mtx); 1410 1411 goto qla_fp_taskqueue_exit0; 1412 } else { 1413 drbr_advance(ifp, fp->tx_br); 1414 } 1415 1416 mp = drbr_peek(ifp, fp->tx_br); 1417 } 1418 } 1419 mtx_unlock(&fp->tx_mtx); 1420 1421qla_fp_taskqueue_exit0: 1422 1423 if (rx_pkts_left || ((mp != NULL) && ret)) { 1424 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 1425 } else { 1426 if (!ha->stop_rcv) { 1427 QL_ENABLE_INTERRUPTS(ha, fp->txr_idx); 1428 } 1429 } 1430 1431qla_fp_taskqueue_exit: 1432 1433 QL_DPRINT2(ha, (ha->pci_dev, "%s: 
exit ret = %d\n", __func__, ret)); 1434 return; 1435} 1436 1437static int 1438qla_create_fp_taskqueues(qla_host_t *ha) 1439{ 1440 int i; 1441 uint8_t tq_name[32]; 1442 1443 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1444 1445 qla_tx_fp_t *fp = &ha->tx_fp[i]; 1446 1447 bzero(tq_name, sizeof (tq_name)); 1448 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 1449 1450 TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp); 1451 1452 fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT, 1453 taskqueue_thread_enqueue, 1454 &fp->fp_taskqueue); 1455 1456 if (fp->fp_taskqueue == NULL) 1457 return (-1); 1458 1459 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 1460 tq_name); 1461 1462 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__, 1463 fp->fp_taskqueue)); 1464 } 1465 1466 return (0); 1467} 1468 1469static void 1470qla_destroy_fp_taskqueues(qla_host_t *ha) 1471{ 1472 int i; 1473 1474 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1475 1476 qla_tx_fp_t *fp = &ha->tx_fp[i]; 1477 1478 if (fp->fp_taskqueue != NULL) { 1479 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 1480 taskqueue_free(fp->fp_taskqueue); 1481 fp->fp_taskqueue = NULL; 1482 } 1483 } 1484 return; 1485} 1486 1487static void 1488qla_drain_fp_taskqueues(qla_host_t *ha) 1489{ 1490 int i; 1491 1492 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1493 qla_tx_fp_t *fp = &ha->tx_fp[i]; 1494 1495 if (fp->fp_taskqueue != NULL) { 1496 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 1497 } 1498 } 1499 return; 1500} 1501 1502static int 1503qla_transmit(struct ifnet *ifp, struct mbuf *mp) 1504{ 1505 qla_host_t *ha = (qla_host_t *)ifp->if_softc; 1506 qla_tx_fp_t *fp; 1507 int rss_id = 0; 1508 int ret = 0; 1509 1510 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1511 1512#if __FreeBSD_version >= 1100000 1513 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 1514#else 1515 if (mp->m_flags & M_FLOWID) 1516#endif 1517 rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) % 1518 ha->hw.num_sds_rings; 1519 fp 
= &ha->tx_fp[rss_id]; 1520 1521 if (fp->tx_br == NULL) { 1522 ret = EINVAL; 1523 goto qla_transmit_exit; 1524 } 1525 1526 if (mp != NULL) { 1527 ret = drbr_enqueue(ifp, fp->tx_br, mp); 1528 } 1529 1530 if (fp->fp_taskqueue != NULL) 1531 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 1532 1533 ret = 0; 1534 1535qla_transmit_exit: 1536 1537 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); 1538 return ret; 1539} 1540 1541static void 1542qla_qflush(struct ifnet *ifp) 1543{ 1544 int i; 1545 qla_tx_fp_t *fp; 1546 struct mbuf *mp; 1547 qla_host_t *ha; 1548 1549 ha = (qla_host_t *)ifp->if_softc; 1550 1551 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1552 1553 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1554 1555 fp = &ha->tx_fp[i]; 1556 1557 if (fp == NULL) 1558 continue; 1559 1560 if (fp->tx_br) { 1561 mtx_lock(&fp->tx_mtx); 1562 1563 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 1564 m_freem(mp); 1565 } 1566 mtx_unlock(&fp->tx_mtx); 1567 } 1568 } 1569 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1570 1571 return; 1572} 1573 1574static void 1575qla_stop(qla_host_t *ha) 1576{ 1577 struct ifnet *ifp = ha->ifp; 1578 device_t dev; 1579 int i = 0; 1580 1581 dev = ha->pci_dev; 1582 1583 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1584 ha->qla_watchdog_pause = 1; 1585 1586 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1587 qla_tx_fp_t *fp; 1588 1589 fp = &ha->tx_fp[i]; 1590 1591 if (fp == NULL) 1592 continue; 1593 1594 if (fp->tx_br != NULL) { 1595 mtx_lock(&fp->tx_mtx); 1596 mtx_unlock(&fp->tx_mtx); 1597 } 1598 } 1599 1600 while (!ha->qla_watchdog_paused) 1601 qla_mdelay(__func__, 1); 1602 1603 ha->qla_interface_up = 0; 1604 1605 qla_drain_fp_taskqueues(ha); 1606 1607 ql_del_hw_if(ha); 1608 1609 qla_free_xmt_bufs(ha); 1610 qla_free_rcv_bufs(ha); 1611 1612 return; 1613} 1614 1615/* 1616 * Buffer Management Functions for Transmit and Receive Rings 1617 */ 1618static int 1619qla_alloc_xmt_bufs(qla_host_t *ha) 1620{ 1621 int ret = 0; 
1622 uint32_t i, j; 1623 qla_tx_buf_t *txb; 1624 1625 if (bus_dma_tag_create(NULL, /* parent */ 1626 1, 0, /* alignment, bounds */ 1627 BUS_SPACE_MAXADDR, /* lowaddr */ 1628 BUS_SPACE_MAXADDR, /* highaddr */ 1629 NULL, NULL, /* filter, filterarg */ 1630 QLA_MAX_TSO_FRAME_SIZE, /* maxsize */ 1631 QLA_MAX_SEGMENTS, /* nsegments */ 1632 PAGE_SIZE, /* maxsegsize */ 1633 BUS_DMA_ALLOCNOW, /* flags */ 1634 NULL, /* lockfunc */ 1635 NULL, /* lockfuncarg */ 1636 &ha->tx_tag)) { 1637 device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n", 1638 __func__); 1639 return (ENOMEM); 1640 } 1641 1642 for (i = 0; i < ha->hw.num_tx_rings; i++) { 1643 bzero((void *)ha->tx_ring[i].tx_buf, 1644 (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); 1645 } 1646 1647 for (j = 0; j < ha->hw.num_tx_rings; j++) { 1648 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) { 1649 1650 txb = &ha->tx_ring[j].tx_buf[i]; 1651 1652 if ((ret = bus_dmamap_create(ha->tx_tag, 1653 BUS_DMA_NOWAIT, &txb->map))) { 1654 1655 ha->err_tx_dmamap_create++; 1656 device_printf(ha->pci_dev, 1657 "%s: bus_dmamap_create failed[%d]\n", 1658 __func__, ret); 1659 1660 qla_free_xmt_bufs(ha); 1661 1662 return (ret); 1663 } 1664 } 1665 } 1666 1667 return 0; 1668} 1669 1670/* 1671 * Release mbuf after it sent on the wire 1672 */ 1673static void 1674qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) 1675{ 1676 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1677 1678 if (txb->m_head && txb->map) { 1679 1680 bus_dmamap_unload(ha->tx_tag, txb->map); 1681 1682 m_freem(txb->m_head); 1683 txb->m_head = NULL; 1684 } 1685 1686 if (txb->map) 1687 bus_dmamap_destroy(ha->tx_tag, txb->map); 1688 1689 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1690} 1691 1692static void 1693qla_free_xmt_bufs(qla_host_t *ha) 1694{ 1695 int i, j; 1696 1697 for (j = 0; j < ha->hw.num_tx_rings; j++) { 1698 for (i = 0; i < NUM_TX_DESCRIPTORS; i++) 1699 qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]); 1700 } 1701 1702 if (ha->tx_tag != NULL) { 1703 
bus_dma_tag_destroy(ha->tx_tag); 1704 ha->tx_tag = NULL; 1705 } 1706 1707 for (i = 0; i < ha->hw.num_tx_rings; i++) { 1708 bzero((void *)ha->tx_ring[i].tx_buf, 1709 (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); 1710 } 1711 return; 1712} 1713 1714 1715static int 1716qla_alloc_rcv_std(qla_host_t *ha) 1717{ 1718 int i, j, k, r, ret = 0; 1719 qla_rx_buf_t *rxb; 1720 qla_rx_ring_t *rx_ring; 1721 1722 for (r = 0; r < ha->hw.num_rds_rings; r++) { 1723 1724 rx_ring = &ha->rx_ring[r]; 1725 1726 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1727 1728 rxb = &rx_ring->rx_buf[i]; 1729 1730 ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, 1731 &rxb->map); 1732 1733 if (ret) { 1734 device_printf(ha->pci_dev, 1735 "%s: dmamap[%d, %d] failed\n", 1736 __func__, r, i); 1737 1738 for (k = 0; k < r; k++) { 1739 for (j = 0; j < NUM_RX_DESCRIPTORS; 1740 j++) { 1741 rxb = &ha->rx_ring[k].rx_buf[j]; 1742 bus_dmamap_destroy(ha->rx_tag, 1743 rxb->map); 1744 } 1745 } 1746 1747 for (j = 0; j < i; j++) { 1748 bus_dmamap_destroy(ha->rx_tag, 1749 rx_ring->rx_buf[j].map); 1750 } 1751 goto qla_alloc_rcv_std_err; 1752 } 1753 } 1754 } 1755 1756 qla_init_hw_rcv_descriptors(ha); 1757 1758 1759 for (r = 0; r < ha->hw.num_rds_rings; r++) { 1760 1761 rx_ring = &ha->rx_ring[r]; 1762 1763 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1764 rxb = &rx_ring->rx_buf[i]; 1765 rxb->handle = i; 1766 if (!(ret = ql_get_mbuf(ha, rxb, NULL))) { 1767 /* 1768 * set the physical address in the 1769 * corresponding descriptor entry in the 1770 * receive ring/queue for the hba 1771 */ 1772 qla_set_hw_rcv_desc(ha, r, i, rxb->handle, 1773 rxb->paddr, 1774 (rxb->m_head)->m_pkthdr.len); 1775 } else { 1776 device_printf(ha->pci_dev, 1777 "%s: ql_get_mbuf [%d, %d] failed\n", 1778 __func__, r, i); 1779 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1780 goto qla_alloc_rcv_std_err; 1781 } 1782 } 1783 } 1784 return 0; 1785 1786qla_alloc_rcv_std_err: 1787 return (-1); 1788} 1789 1790static void 1791qla_free_rcv_std(qla_host_t *ha) 1792{ 
1793 int i, r; 1794 qla_rx_buf_t *rxb; 1795 1796 for (r = 0; r < ha->hw.num_rds_rings; r++) { 1797 for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1798 rxb = &ha->rx_ring[r].rx_buf[i]; 1799 if (rxb->m_head != NULL) { 1800 bus_dmamap_unload(ha->rx_tag, rxb->map); 1801 bus_dmamap_destroy(ha->rx_tag, rxb->map); 1802 m_freem(rxb->m_head); 1803 rxb->m_head = NULL; 1804 } 1805 } 1806 } 1807 return; 1808} 1809 1810static int 1811qla_alloc_rcv_bufs(qla_host_t *ha) 1812{ 1813 int i, ret = 0; 1814 1815 if (bus_dma_tag_create(NULL, /* parent */ 1816 1, 0, /* alignment, bounds */ 1817 BUS_SPACE_MAXADDR, /* lowaddr */ 1818 BUS_SPACE_MAXADDR, /* highaddr */ 1819 NULL, NULL, /* filter, filterarg */ 1820 MJUM9BYTES, /* maxsize */ 1821 1, /* nsegments */ 1822 MJUM9BYTES, /* maxsegsize */ 1823 BUS_DMA_ALLOCNOW, /* flags */ 1824 NULL, /* lockfunc */ 1825 NULL, /* lockfuncarg */ 1826 &ha->rx_tag)) { 1827 1828 device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n", 1829 __func__); 1830 1831 return (ENOMEM); 1832 } 1833 1834 bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); 1835 1836 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1837 ha->hw.sds[i].sdsr_next = 0; 1838 ha->hw.sds[i].rxb_free = NULL; 1839 ha->hw.sds[i].rx_free = 0; 1840 } 1841 1842 ret = qla_alloc_rcv_std(ha); 1843 1844 return (ret); 1845} 1846 1847static void 1848qla_free_rcv_bufs(qla_host_t *ha) 1849{ 1850 int i; 1851 1852 qla_free_rcv_std(ha); 1853 1854 if (ha->rx_tag != NULL) { 1855 bus_dma_tag_destroy(ha->rx_tag); 1856 ha->rx_tag = NULL; 1857 } 1858 1859 bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); 1860 1861 for (i = 0; i < ha->hw.num_sds_rings; i++) { 1862 ha->hw.sds[i].sdsr_next = 0; 1863 ha->hw.sds[i].rxb_free = NULL; 1864 ha->hw.sds[i].rx_free = 0; 1865 } 1866 1867 return; 1868} 1869 1870int 1871ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp) 1872{ 1873 register struct mbuf *mp = nmp; 1874 struct ifnet *ifp; 1875 int ret = 0; 1876 uint32_t offset; 1877 
bus_dma_segment_t segs[1]; 1878 int nsegs, mbuf_size; 1879 1880 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1881 1882 ifp = ha->ifp; 1883 1884 if (ha->hw.enable_9kb) 1885 mbuf_size = MJUM9BYTES; 1886 else 1887 mbuf_size = MCLBYTES; 1888 1889 if (mp == NULL) { 1890 1891 if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE)) 1892 return(-1); 1893 1894 if (ha->hw.enable_9kb) 1895 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size); 1896 else 1897 mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1898 1899 if (mp == NULL) { 1900 ha->err_m_getcl++; 1901 ret = ENOBUFS; 1902 device_printf(ha->pci_dev, 1903 "%s: m_getcl failed\n", __func__); 1904 goto exit_ql_get_mbuf; 1905 } 1906 mp->m_len = mp->m_pkthdr.len = mbuf_size; 1907 } else { 1908 mp->m_len = mp->m_pkthdr.len = mbuf_size; 1909 mp->m_data = mp->m_ext.ext_buf; 1910 mp->m_next = NULL; 1911 } 1912 1913 offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); 1914 if (offset) { 1915 offset = 8 - offset; 1916 m_adj(mp, offset); 1917 } 1918 1919 /* 1920 * Using memory from the mbuf cluster pool, invoke the bus_dma 1921 * machinery to arrange the memory mapping. 
1922 */ 1923 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map, 1924 mp, segs, &nsegs, BUS_DMA_NOWAIT); 1925 rxb->paddr = segs[0].ds_addr; 1926 1927 if (ret || !rxb->paddr || (nsegs != 1)) { 1928 m_free(mp); 1929 rxb->m_head = NULL; 1930 device_printf(ha->pci_dev, 1931 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 1932 __func__, ret, (long long unsigned int)rxb->paddr, 1933 nsegs); 1934 ret = -1; 1935 goto exit_ql_get_mbuf; 1936 } 1937 rxb->m_head = mp; 1938 bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); 1939 1940exit_ql_get_mbuf: 1941 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); 1942 return (ret); 1943} 1944 1945 1946static void 1947qla_get_peer(qla_host_t *ha) 1948{ 1949 device_t *peers; 1950 int count, i, slot; 1951 int my_slot = pci_get_slot(ha->pci_dev); 1952 1953 if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count)) 1954 return; 1955 1956 for (i = 0; i < count; i++) { 1957 slot = pci_get_slot(peers[i]); 1958 1959 if ((slot >= 0) && (slot == my_slot) && 1960 (pci_get_device(peers[i]) == 1961 pci_get_device(ha->pci_dev))) { 1962 if (ha->pci_dev != peers[i]) 1963 ha->peer_dev = peers[i]; 1964 } 1965 } 1966} 1967 1968static void 1969qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer) 1970{ 1971 qla_host_t *ha_peer; 1972 1973 if (ha->peer_dev) { 1974 if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) { 1975 1976 ha_peer->msg_from_peer = msg_to_peer; 1977 } 1978 } 1979} 1980 1981static void 1982qla_error_recovery(void *context, int pending) 1983{ 1984 qla_host_t *ha = context; 1985 uint32_t msecs_100 = 100; 1986 struct ifnet *ifp = ha->ifp; 1987 int i = 0; 1988 1989device_printf(ha->pci_dev, "%s: \n", __func__); 1990 ha->hw.imd_compl = 1; 1991 1992 if (QLA_LOCK(ha, __func__, -1, 0) != 0) 1993 return; 1994 1995device_printf(ha->pci_dev, "%s: enter\n", __func__); 1996 1997 if (ha->qla_interface_up) { 1998 1999 qla_mdelay(__func__, 300); 2000 2001 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2002 
2003 for (i = 0; i < ha->hw.num_sds_rings; i++) { 2004 qla_tx_fp_t *fp; 2005 2006 fp = &ha->tx_fp[i]; 2007 2008 if (fp == NULL) 2009 continue; 2010 2011 if (fp->tx_br != NULL) { 2012 mtx_lock(&fp->tx_mtx); 2013 mtx_unlock(&fp->tx_mtx); 2014 } 2015 } 2016 } 2017 2018 2019 qla_drain_fp_taskqueues(ha); 2020 2021 if ((ha->pci_func & 0x1) == 0) { 2022 2023 if (!ha->msg_from_peer) { 2024 qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); 2025 2026 while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && 2027 msecs_100--) 2028 qla_mdelay(__func__, 100); 2029 } 2030 2031 ha->msg_from_peer = 0; 2032 2033 ql_minidump(ha); 2034 2035 (void) ql_init_hw(ha); 2036 2037 if (ha->qla_interface_up) { 2038 qla_free_xmt_bufs(ha); 2039 qla_free_rcv_bufs(ha); 2040 } 2041 2042 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); 2043 2044 } else { 2045 if (ha->msg_from_peer == QL_PEER_MSG_RESET) { 2046 2047 ha->msg_from_peer = 0; 2048 2049 qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); 2050 } else { 2051 qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); 2052 } 2053 2054 while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--) 2055 qla_mdelay(__func__, 100); 2056 ha->msg_from_peer = 0; 2057 2058 (void) ql_init_hw(ha); 2059 2060 qla_mdelay(__func__, 1000); 2061 2062 if (ha->qla_interface_up) { 2063 qla_free_xmt_bufs(ha); 2064 qla_free_rcv_bufs(ha); 2065 } 2066 } 2067 2068 if (ha->qla_interface_up) { 2069 2070 if (qla_alloc_xmt_bufs(ha) != 0) { 2071 goto qla_error_recovery_exit; 2072 } 2073 qla_confirm_9kb_enable(ha); 2074 2075 if (qla_alloc_rcv_bufs(ha) != 0) { 2076 goto qla_error_recovery_exit; 2077 } 2078 2079 ha->stop_rcv = 0; 2080 2081 if (ql_init_hw_if(ha) == 0) { 2082 ifp = ha->ifp; 2083 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2084 ha->qla_watchdog_pause = 0; 2085 } 2086 } else 2087 ha->qla_watchdog_pause = 0; 2088 2089qla_error_recovery_exit: 2090 2091device_printf(ha->pci_dev, "%s: exit\n", __func__); 2092 2093 QLA_UNLOCK(ha, __func__); 2094 2095 callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 2096 
qla_watchdog, ha); 2097 return; 2098} 2099 2100static void 2101qla_async_event(void *context, int pending) 2102{ 2103 qla_host_t *ha = context; 2104 2105 if (QLA_LOCK(ha, __func__, -1, 0) != 0) 2106 return; 2107 2108 if (ha->async_event) { 2109 ha->async_event = 0; 2110 qla_hw_async_event(ha); 2111 } 2112 2113 QLA_UNLOCK(ha, __func__); 2114 2115 return; 2116} 2117 2118static void 2119qla_stats(void *context, int pending) 2120{ 2121 qla_host_t *ha; 2122 2123 ha = context; 2124 2125 ql_get_stats(ha); 2126 return; 2127} 2128 2129