/*
 * Copyright (c) 2011-2013 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/qlxgb/qla_os.c 365330 2020-09-04 13:19:18Z markj $");

#include "qla_os.h"
#include "qla_reg.h"
#include "qla_hw.h"
#include "qla_def.h"
#include "qla_inline.h"
#include "qla_ver.h"
#include "qla_glbl.h"
#include "qla_dbg.h"

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8020
#define PCI_PRODUCT_QLOGIC_ISP8020	0x8020
#endif

#define PCI_QLOGIC_ISP8020 \
	((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC)

/*
 * static functions
 */
static int qla_alloc_parent_dma_tag(qla_host_t *ha);
static void qla_free_parent_dma_tag(qla_host_t *ha);
static int qla_alloc_xmt_bufs(qla_host_t *ha);
static void qla_free_xmt_bufs(qla_host_t *ha);
static int qla_alloc_rcv_bufs(qla_host_t *ha);
static void qla_free_rcv_bufs(qla_host_t *ha);

static void qla_init_ifnet(device_t dev, qla_host_t *ha);
static int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS);
static void qla_release(qla_host_t *ha);
static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs,
		int error);
static void qla_stop(qla_host_t *ha);
static int qla_send(qla_host_t *ha, struct mbuf **m_headp);
static void qla_tx_done(void *context, int pending);

/*
 * Hooks to the Operating Systems
 */
static int qla_pci_probe (device_t);
static int qla_pci_attach (device_t);
static int qla_pci_detach (device_t);

static void qla_init(void *arg);
static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qla_media_change(struct ifnet *ifp);
static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);

static device_method_t qla_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qla_pci_probe),
	DEVMETHOD(device_attach, qla_pci_attach),
	DEVMETHOD(device_detach, qla_pci_detach),
	{ 0, 0 }
};

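/*
 * Bus glue: register the "ql" driver with the PCI bus and declare its
 * module dependencies.
 */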
static driver_t qla_pci_driver = {
	"ql", qla_pci_methods, sizeof (qla_host_t),
};

static devclass_t qla80xx_devclass;

DRIVER_MODULE(qla80xx, pci, qla_pci_driver, qla80xx_devclass, 0, 0);

MODULE_DEPEND(qla80xx, pci, 1, 1, 1);
MODULE_DEPEND(qla80xx, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver");

uint32_t std_replenish = 8;
uint32_t jumbo_replenish = 2;
uint32_t rcv_pkt_thres = 128;
uint32_t rcv_pkt_thres_d = 32;
uint32_t snd_pkt_thres = 16;
uint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2);

static char dev_str[64];

/*
 * Name: qla_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qla_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP8020:
		snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d",
			"Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function",
			QLA_VERSION_MAJOR, QLA_VERSION_MINOR,
			QLA_VERSION_BUILD);
		device_set_desc(dev, dev_str);
		break;
	default:
		return (ENXIO);
	}

	if (bootverbose)
		printf("%s: %s\n", __func__, dev_str);

	return (BUS_PROBE_DEFAULT);
}

static void
qla_add_sysctls(qla_host_t *ha)
{
	device_t dev = ha->pci_dev;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD,
		(void *)ha, 0,
		qla_sysctl_get_stats, "I", "Statistics");

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "fw_version", CTLFLAG_RD,
		ha->fw_ver_str, 0, "firmware version");

	dbg_level = 0;
	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "debug", CTLFLAG_RW,
		&dbg_level, dbg_level, "Debug Level");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "std_replenish", CTLFLAG_RW,
		&std_replenish, std_replenish,
		"Threshold for Replenishing Standard Frames");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "jumbo_replenish", CTLFLAG_RW,
		&jumbo_replenish, jumbo_replenish,
		"Threshold for Replenishing Jumbo Frames");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "rcv_pkt_thres", CTLFLAG_RW,
		&rcv_pkt_thres, rcv_pkt_thres,
		"Threshold for # of rcv pkts to trigger indication isr");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "rcv_pkt_thres_d", CTLFLAG_RW,
		&rcv_pkt_thres_d, rcv_pkt_thres_d,
		"Threshold for # of rcv pkts to trigger indication deferred");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "snd_pkt_thres", CTLFLAG_RW,
		&snd_pkt_thres, snd_pkt_thres,
		"Threshold for # of snd packets");

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "free_pkt_thres", CTLFLAG_RW,
		&free_pkt_thres, free_pkt_thres,
		"Threshold for # of packets to free at a time");

	return;
}

static void
qla_watchdog(void *arg)
{
	qla_host_t *ha = arg;
	qla_hw_t *hw;
	struct ifnet *ifp;

	hw = &ha->hw;
	ifp = ha->ifp;

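	/* A detach in progress sets qla_watchdog_exit; stop rescheduling. */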
	if (ha->flags.qla_watchdog_exit)
		return;

	if (!ha->flags.qla_watchdog_pause) {
		if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		} else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) {
			taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
		}
	}
	ha->watchdog_ticks = (ha->watchdog_ticks + 1) % 1000;
	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);
}

/*
 * Name: qla_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qla_pci_attach(device_t dev)
{
	qla_host_t *ha = NULL;
	uint32_t rsrc_len, i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qla_host_t));

	if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) {
		device_printf(dev, "device is not ISP8020\n");
		return (ENXIO);
	}

	ha->pci_func = pci_get_function(dev);

	ha->pci_dev = dev;

	pci_enable_busmaster(dev);

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
		RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto qla_pci_attach_err;
	}

	rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
		ha->reg_rid);

	mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF);
	mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF);
	ha->flags.lock_init = 1;

	ha->msix_count = pci_msix_count(dev);

	if (ha->msix_count < qla_get_msix_count(ha)) {
		device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
			ha->msix_count);
		goto qla_pci_attach_err;
	}

	QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x"
		" msix_count 0x%x pci_reg %p\n", __func__, ha,
		ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg));

	ha->msix_count = qla_get_msix_count(ha);

	if (pci_alloc_msix(dev, &ha->msix_count)) {
		device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
			ha->msix_count);
		ha->msix_count = 0;
		goto qla_pci_attach_err;
	}

	TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha);
	ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT,
		taskqueue_thread_enqueue, &ha->tx_tq);
	taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq",
		device_get_nameunit(ha->pci_dev));

	for (i = 0; i < ha->msix_count; i++) {
		ha->irq_vec[i].irq_rid = i+1;
		ha->irq_vec[i].ha = ha;

		ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
			&ha->irq_vec[i].irq_rid,
			(RF_ACTIVE | RF_SHAREABLE));

		if (ha->irq_vec[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			goto qla_pci_attach_err;
		}

		if (bus_setup_intr(dev, ha->irq_vec[i].irq,
			(INTR_TYPE_NET | INTR_MPSAFE),
			NULL, qla_isr, &ha->irq_vec[i],
			&ha->irq_vec[i].handle)) {
			device_printf(dev, "could not setup interrupt\n");
			goto qla_pci_attach_err;
		}

		TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,
			&ha->irq_vec[i]);

		ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq",
			M_NOWAIT, taskqueue_thread_enqueue,
			&ha->irq_vec[i].rcv_tq);

		taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET,
			"%s rcvq",
			device_get_nameunit(ha->pci_dev));
	}

	qla_add_sysctls(ha);

	/* add hardware specific sysctls */
	qla_hw_add_sysctls(ha);

	/* initialize hardware */
	if (qla_init_hw(ha)) {
		device_printf(dev, "%s: qla_init_hw failed\n", __func__);
		goto qla_pci_attach_err;
	}

	device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__,
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);

	snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d",
		ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub,
		ha->fw_ver_build);

	//qla_get_hw_caps(ha);
	qla_read_mac_addr(ha);

	/* allocate parent dma tag */
	if (qla_alloc_parent_dma_tag(ha)) {
		device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n",
			__func__);
		goto qla_pci_attach_err;
	}

	/* alloc all dma buffers */
	if (qla_alloc_dma(ha)) {
		device_printf(dev, "%s: qla_alloc_dma failed\n", __func__);
		goto qla_pci_attach_err;
	}

	/* create the o.s ethernet interface */
	qla_init_ifnet(dev, ha);

	ha->flags.qla_watchdog_active = 1;
	ha->flags.qla_watchdog_pause = 1;

	callout_init(&ha->tx_callout, 1);

	/* create ioctl device interface */
	if (qla_make_cdev(ha)) {
		device_printf(dev, "%s: qla_make_cdev failed\n", __func__);
		goto qla_pci_attach_err;
	}

	callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS,
		qla_watchdog, ha);

	QL_DPRINT2((dev, "%s: exit 0\n", __func__));
	return (0);

qla_pci_attach_err:

	qla_release(ha);

	QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__));
	return (ENXIO);
}

/*
 * Name: qla_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qla_pci_detach(device_t dev)
{
	qla_host_t *ha = NULL;
	struct ifnet *ifp;
	int i;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	ifp = ha->ifp;

	QLA_LOCK(ha, __func__);
	qla_stop(ha);
	QLA_UNLOCK(ha, __func__);

	if (ha->tx_tq) {
		taskqueue_drain(ha->tx_tq, &ha->tx_task);
		taskqueue_free(ha->tx_tq);
	}

	for (i = 0; i < ha->msix_count; i++) {
		taskqueue_drain(ha->irq_vec[i].rcv_tq,
			&ha->irq_vec[i].rcv_task);
		taskqueue_free(ha->irq_vec[i].rcv_tq);
	}

	qla_release(ha);

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return (0);
}

/*
 * SYSCTL Related Callbacks
 */
static int
qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS)
{
	int err, ret = 0;
	qla_host_t *ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err)
		return (err);

	ha = (qla_host_t *)arg1;
	//qla_get_stats(ha);
	QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret));
	return (err);
}

/*
 * Name: qla_release
 * Function: Releases the resources allocated for the device
 */
static void
qla_release(qla_host_t *ha)
{
	device_t dev;
	int i;

	dev = ha->pci_dev;

	qla_del_cdev(ha);

	if (ha->flags.qla_watchdog_active)
		ha->flags.qla_watchdog_exit = 1;

	callout_stop(&ha->tx_callout);
	qla_mdelay(__func__, 100);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qla_free_dma(ha);
	qla_free_parent_dma_tag(ha);

	for (i = 0; i < ha->msix_count; i++) {
		if (ha->irq_vec[i].handle)
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		if (ha->irq_vec[i].irq)
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
	}
	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->tx_lock);
		mtx_destroy(&ha->rx_lock);
		mtx_destroy(&ha->rxj_lock);
		mtx_destroy(&ha->hw_lock);
	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
			ha->pci_reg);
}

/*
 * DMA Related Functions
 */

static void
qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	*((bus_addr_t *)arg) = 0;

	if (error) {
		printf("%s: bus_dmamap_load failed (%d)\n", __func__, error);
		return;
	}

	QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs));

	*((bus_addr_t *)arg) = segs[0].ds_addr;

	return;
}

int
qla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	int ret = 0;
	device_t dev;
	bus_addr_t b_addr;

	dev = ha->pci_dev;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ret = bus_dma_tag_create(
			ha->parent_tag,		/* parent */
			dma_buf->alignment,
			((bus_size_t)(1ULL << 32)),/* boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			dma_buf->size,		/* maxsize */
			1,			/* nsegments */
			dma_buf->size,		/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&dma_buf->dma_tag);

	if (ret) {
		device_printf(dev, "%s: could not create dma tag\n", __func__);
		goto qla_alloc_dmabuf_exit;
	}
	ret = bus_dmamem_alloc(dma_buf->dma_tag,
		(void **)&dma_buf->dma_b,
		(BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT),
		&dma_buf->dma_map);
	if (ret) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__);
		goto qla_alloc_dmabuf_exit;
	}

	ret = bus_dmamap_load(dma_buf->dma_tag,
		dma_buf->dma_map,
		dma_buf->dma_b,
		dma_buf->size,
		qla_dmamap_callback,
		&b_addr, BUS_DMA_NOWAIT);

	if (ret || !b_addr) {
		bus_dma_tag_destroy(dma_buf->dma_tag);
		bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
			dma_buf->dma_map);
		ret = -1;
		goto qla_alloc_dmabuf_exit;
	}

	dma_buf->dma_addr = b_addr;

qla_alloc_dmabuf_exit:
	QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n",
		__func__, ret, (void *)dma_buf->dma_tag,
		(void *)dma_buf->dma_map, (void *)dma_buf->dma_b,
		dma_buf->size));

	return ret;
}

void
qla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf)
{
	bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
	bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
	bus_dma_tag_destroy(dma_buf->dma_tag);
}

static int
qla_alloc_parent_dma_tag(qla_host_t *ha)
{
	int ret;
	device_t dev;

	dev = ha->pci_dev;

	/*
	 * Allocate parent DMA Tag
	 */
	ret = bus_dma_tag_create(
			bus_get_dma_tag(dev),	/* parent */
			1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
			0,			/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
			0,			/* flags */
			NULL, NULL,		/* lockfunc, lockarg */
			&ha->parent_tag);

	if (ret) {
		device_printf(dev, "%s: could not create parent dma tag\n",
			__func__);
		return (-1);
	}

	ha->flags.parent_tag = 1;

	return (0);
}

static void
qla_free_parent_dma_tag(qla_host_t *ha)
{
	if (ha->flags.parent_tag) {
		bus_dma_tag_destroy(ha->parent_tag);
		ha->flags.parent_tag = 0;
	}
}

/*
 * Name: qla_init_ifnet
 * Function: Creates the Network Device Interface and Registers it with the O.S
 */

static void
qla_init_ifnet(device_t dev, qla_host_t *ha)
{
	struct ifnet *ifp;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ifp = ha->ifp = if_alloc(IFT_ETHER);

	if (ifp == NULL)
		panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_init = qla_init;
	ifp->if_softc = ha;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = qla_ioctl;
	ifp->if_start = qla_start;

	IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha));
	ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha);
	IFQ_SET_READY(&ifp->if_snd);

	ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	ether_ifattach(ifp, qla_get_mac_addr(ha));

	ifp->if_capabilities = IFCAP_HWCSUM |
				IFCAP_TSO4 |
				IFCAP_JUMBO_MTU;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_LINKSTATE;

#if defined(__FreeBSD_version) && (__FreeBSD_version < 900002)
	ifp->if_timer = 0;
	ifp->if_watchdog = NULL;
#endif /* #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) */

	ifp->if_capenable = ifp->if_capabilities;

	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status);

	ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0,
		NULL);
	ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

	ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

	QL_DPRINT2((dev, "%s: exit\n", __func__));

	return;
}

static void
qla_init_locked(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;

	qla_stop(ha);

	if (qla_alloc_xmt_bufs(ha) != 0)
		return;

	if (qla_alloc_rcv_bufs(ha) != 0)
		return;

	if (qla_config_lro(ha))
		return;

	bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN);

	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;

	ha->flags.stop_rcv = 0;
	if (qla_init_hw_if(ha) == 0) {
		ifp = ha->ifp;
		ifp->if_drv_flags |= IFF_DRV_RUNNING;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ha->flags.qla_watchdog_pause = 0;
	}

	return;
}

static void
qla_init(void *arg)
{
	qla_host_t *ha;

	ha = (qla_host_t *)arg;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	QLA_LOCK(ha, __func__);
	qla_init_locked(ha);
	QLA_UNLOCK(ha, __func__);

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_set_multi(qla_host_t *ha, uint32_t add_multi)
{
	uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN];
	struct ifmultiaddr *ifma;
	int mcnt = 0;
	struct ifnet *ifp = ha->ifp;

	if_maddr_rlock(ifp);
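	/*
	 * Collect up to Q8_MAX_NUM_MULTICAST_ADDRS link-layer addresses from
	 * the interface's multicast list for the hardware filter.
	 */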

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS)
			break;

		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
			&mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN);

		mcnt++;
	}

	if_maddr_runlock(ifp);

	qla_hw_set_multi(ha, mta, mcnt, add_multi);

	return;
}

static int
qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int ret = 0;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	switch (cmd) {
	case SIOCSIFADDR:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n",
			__func__, cmd));

		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				QLA_LOCK(ha, __func__);
				qla_init_locked(ha);
				QLA_UNLOCK(ha, __func__);
			}
			QL_DPRINT4((ha->pci_dev,
				"%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
				__func__, cmd,
				ntohl(IA_SIN(ifa)->sin_addr.s_addr)));

			arp_ifinit(ifp, ifa);
			if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) {
				qla_config_ipv4_addr(ha,
					(IA_SIN(ifa)->sin_addr.s_addr));
			}
		} else {
			ether_ioctl(ifp, cmd, data);
		}
		break;

	case SIOCSIFMTU:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n",
			__func__, cmd));

		if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
			ret = EINVAL;
		} else {
			QLA_LOCK(ha, __func__);
			ifp->if_mtu = ifr->ifr_mtu;
			ha->max_frame_size =
				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				ret = qla_set_max_mtu(ha, ha->max_frame_size,
					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
			}
			QLA_UNLOCK(ha, __func__);

			if (ret)
				ret = EINVAL;
		}

		break;

	case SIOCSIFFLAGS:
		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n",
			__func__, cmd));

		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				if ((ifp->if_flags ^ ha->if_flags) &
					IFF_PROMISC) {
					qla_set_promisc(ha);
				} else if ((ifp->if_flags ^ ha->if_flags) &
					IFF_ALLMULTI) {
					qla_set_allmulti(ha);
				}
			} else {
				QLA_LOCK(ha, __func__);
				qla_init_locked(ha);
				ha->max_frame_size = ifp->if_mtu +
					ETHER_HDR_LEN + ETHER_CRC_LEN;
				ret = qla_set_max_mtu(ha, ha->max_frame_size,
					(ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id);
				QLA_UNLOCK(ha, __func__);
			}
		} else {
			QLA_LOCK(ha, __func__);
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				qla_stop(ha);
			ha->if_flags = ifp->if_flags;
			QLA_UNLOCK(ha, __func__);
		}
		break;

	case SIOCADDMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qla_set_multi(ha, 1);
		}
		break;

	case SIOCDELMULTI:
		QL_DPRINT4((ha->pci_dev,
			"%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd));

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			qla_set_multi(ha, 0);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		QL_DPRINT4((ha->pci_dev,
			"%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n",
			__func__, cmd));
		ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
		break;

	case SIOCSIFCAP:
	{
		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n",
			__func__, cmd));

		if (mask & IFCAP_HWCSUM)
			ifp->if_capenable ^= IFCAP_HWCSUM;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;
		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
			qla_init(ha);

		VLAN_CAPABILITIES(ifp);
		break;
	}

	default:
		QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n",
			__func__, cmd));
		ret = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (ret);
}

static int
qla_media_change(struct ifnet *ifp)
{
	qla_host_t *ha;
	struct ifmedia *ifm;
	int ret = 0;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifm = &ha->media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		ret = EINVAL;

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));

	return (ret);
}

static void
qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	qla_host_t *ha;

	ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	qla_update_link_state(ha);
	if (ha->hw.flags.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha));
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,
		(ha->hw.flags.link_up ? "link_up" : "link_down")));

	return;
}

void
qla_start(struct ifnet *ifp)
{
	struct mbuf *m_head;
	qla_host_t *ha = (qla_host_t *)ifp->if_softc;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if (!mtx_trylock(&ha->tx_lock)) {
		QL_DPRINT8((ha->pci_dev,
			"%s: mtx_trylock(&ha->tx_lock) failed\n", __func__));
		return;
	}

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) {
		QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	if (!ha->watchdog_ticks)
		qla_update_link_state(ha);

	if (!ha->hw.flags.link_up) {
		QL_DPRINT8((ha->pci_dev, "%s: link down\n", __func__));
		QLA_TX_UNLOCK(ha);
		return;
	}

	while (ifp->if_snd.ifq_head != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL) {
			QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n",
				__func__));
			break;
		}

		if (qla_send(ha, &m_head)) {
			if (m_head == NULL)
				break;
			QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__));
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
	}
	QLA_TX_UNLOCK(ha);
	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return;
}

static int
qla_send(qla_host_t *ha, struct mbuf **m_headp)
{
	bus_dma_segment_t segs[QLA_MAX_SEGMENTS];
	bus_dmamap_t map;
	int nsegs;
	int ret = -1;
	uint32_t tx_idx;
	struct mbuf *m_head = *m_headp;

	QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));

	if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) {
		ha->err_tx_dmamap_create++;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_create failed[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);
		return (ret);
	}
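
	/*
	 * Map the mbuf chain for DMA; on EFBIG (too many segments) the chain
	 * is defragmented once and the mapping is retried below.
	 */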
	ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
		BUS_DMA_NOWAIT);

	if (ret == EFBIG) {

		struct mbuf *m;

		QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
			m_head->m_pkthdr.len));

		m = m_defrag(m_head, M_NOWAIT);
		if (m == NULL) {
			ha->err_tx_defrag++;
			m_freem(m_head);
			*m_headp = NULL;
			device_printf(ha->pci_dev,
				"%s: m_defrag() = NULL [%d]\n",
				__func__, ret);
			return (ENOBUFS);
		}
		m_head = m;

		if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
			segs, &nsegs, BUS_DMA_NOWAIT))) {

			ha->err_tx_dmamap_load++;

			device_printf(ha->pci_dev,
				"%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n",
				__func__, ret, m_head->m_pkthdr.len);

			bus_dmamap_destroy(ha->tx_tag, map);
			if (ret != ENOMEM) {
				m_freem(m_head);
				*m_headp = NULL;
			}
			return (ret);
		}
	} else if (ret) {
		ha->err_tx_dmamap_load++;

		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n",
			__func__, ret, m_head->m_pkthdr.len);

		bus_dmamap_destroy(ha->tx_tag, map);

		if (ret != ENOMEM) {
			m_freem(m_head);
			*m_headp = NULL;
		}
		return (ret);
	}

	QL_ASSERT((nsegs != 0), ("qla_send: empty packet"));

	bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);

	if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) {
		ha->tx_buf[tx_idx].m_head = m_head;
		ha->tx_buf[tx_idx].map = map;
	} else {
		if (ret == EINVAL) {
			m_freem(m_head);
			*m_headp = NULL;
		}
	}

	QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__));
	return (ret);
}

static void
qla_stop(qla_host_t *ha)
{
	struct ifnet *ifp = ha->ifp;
	device_t dev;

	dev = ha->pci_dev;

	ha->flags.qla_watchdog_pause = 1;
	qla_mdelay(__func__, 100);

	ha->flags.stop_rcv = 1;
	qla_hw_stop_rcv(ha);

	qla_del_hw_if(ha);

	qla_free_lro(ha);

	qla_free_xmt_bufs(ha);
	qla_free_rcv_bufs(ha);

	ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	return;
}

/*
 * Buffer Management Functions for Transmit and Receive Rings
 */
static int
qla_alloc_xmt_bufs(qla_host_t *ha)
{
	if (bus_dma_tag_create(NULL,		/* parent */
		1, 0,				/* alignment, bounds */
		BUS_SPACE_MAXADDR,		/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filter, filterarg */
		QLA_MAX_TSO_FRAME_SIZE,		/* maxsize */
		QLA_MAX_SEGMENTS,		/* nsegments */
		PAGE_SIZE,			/* maxsegsize */
		BUS_DMA_ALLOCNOW,		/* flags */
		NULL,				/* lockfunc */
		NULL,				/* lockfuncarg */
		&ha->tx_tag)) {
		device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n",
			__func__);
		return (ENOMEM);
	}
	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));

	return 0;
}

/*
 * Release mbuf after it has been sent on the wire
 */
static void
qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb)
{
	QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__));

	if (txb->m_head) {

		bus_dmamap_unload(ha->tx_tag, txb->map);
		bus_dmamap_destroy(ha->tx_tag, txb->map);

		m_freem(txb->m_head);
		txb->m_head = NULL;
	}

	QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__));
}

static void
qla_free_xmt_bufs(qla_host_t *ha)
{
	int i;

	for (i = 0; i < NUM_TX_DESCRIPTORS; i++)
		qla_clear_tx_buf(ha, &ha->tx_buf[i]);

	if (ha->tx_tag != NULL) {
		bus_dma_tag_destroy(ha->tx_tag);
		ha->tx_tag = NULL;
	}
	bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS));

	return;
}

static int
qla_alloc_rcv_bufs(qla_host_t *ha)
{
	int i, j, ret = 0;
	qla_rx_buf_t *rxb;

	if (bus_dma_tag_create(NULL,		/* parent */
		1, 0,				/* alignment, bounds */
		BUS_SPACE_MAXADDR,		/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filter, filterarg */
		MJUM9BYTES,			/* maxsize */
		1,				/* nsegments */
		MJUM9BYTES,			/* maxsegsize */
		BUS_DMA_ALLOCNOW,		/* flags */
		NULL,				/* lockfunc */
		NULL,				/* lockfuncarg */
		&ha->rx_tag)) {

		device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n",
			__func__);

		return (ENOMEM);
	}

	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	bzero((void *)ha->rx_jbuf,
		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));

	for (i = 0; i < MAX_SDS_RINGS; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
		ha->hw.sds[i].rxjb_free = NULL;
		ha->hw.sds[i].rxj_free = 0;
	}

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {

		rxb = &ha->rx_buf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d] failed\n", __func__, i);

			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(ha->rx_tag,
					ha->rx_buf[j].map);
			}
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL);

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];
		rxb->handle = i;
		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) {
			/*
			 * set the physical address in the corresponding
			 * descriptor entry in the receive ring/queue for the
			 * hba
			 */
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i,
				rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [standard(%d)] failed\n",
				__func__, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {

		rxb = &ha->rx_jbuf[i];

		ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map);

		if (ret) {
			device_printf(ha->pci_dev,
				"%s: dmamap[%d] failed\n", __func__, i);

			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(ha->rx_tag,
					ha->rx_jbuf[j].map);
			}
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO);

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];
		rxb->handle = i;
		if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) {
			/*
			 * set the physical address in the corresponding
			 * descriptor entry in the receive ring/queue for the
			 * hba
			 */
			qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, i,
				rxb->handle, rxb->paddr,
				(rxb->m_head)->m_pkthdr.len);
		} else {
			device_printf(ha->pci_dev,
				"%s: qla_get_mbuf [jumbo(%d)] failed\n",
				__func__, i);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			goto qla_alloc_rcv_bufs_failed;
		}
	}

	return (0);

qla_alloc_rcv_bufs_failed:
	qla_free_rcv_bufs(ha);
	return (ret);
}

static void
qla_free_rcv_bufs(qla_host_t *ha)
{
	int i;
	qla_rx_buf_t *rxb;

	for (i = 0; i < NUM_RX_DESCRIPTORS; i++) {
		rxb = &ha->rx_buf[i];
		if (rxb->m_head != NULL) {
			bus_dmamap_unload(ha->rx_tag, rxb->map);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			m_freem(rxb->m_head);
			rxb->m_head = NULL;
		}
	}

	for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) {
		rxb = &ha->rx_jbuf[i];
		if (rxb->m_head != NULL) {
			bus_dmamap_unload(ha->rx_tag, rxb->map);
			bus_dmamap_destroy(ha->rx_tag, rxb->map);
			m_freem(rxb->m_head);
			rxb->m_head = NULL;
		}
	}

	if (ha->rx_tag != NULL) {
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}

	bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS));
	bzero((void *)ha->rx_jbuf,
		(sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS));

	for (i = 0; i < MAX_SDS_RINGS; i++) {
		ha->hw.sds[i].sdsr_next = 0;
		ha->hw.sds[i].rxb_free = NULL;
		ha->hw.sds[i].rx_free = 0;
		ha->hw.sds[i].rxjb_free = NULL;
		ha->hw.sds[i].rxj_free = 0;
	}

	return;
}

int
qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
	uint32_t jumbo)
{
	struct mbuf *mp = nmp;
	struct ifnet *ifp;
	int ret = 0;
	uint32_t offset;

	QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo));

	ifp = ha->ifp;

	if (mp == NULL) {

		if (!jumbo) {
			mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);

			if (mp == NULL) {
				ha->err_m_getcl++;
				ret = ENOBUFS;
				device_printf(ha->pci_dev,
					"%s: m_getcl failed\n", __func__);
				goto exit_qla_get_mbuf;
			}
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		} else {
			mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
				MJUM9BYTES);
			if (mp == NULL) {
				ha->err_m_getjcl++;
				ret = ENOBUFS;
				device_printf(ha->pci_dev,
					"%s: m_getjcl failed\n", __func__);
				goto exit_qla_get_mbuf;
			}
			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;
		}
	} else {
		if (!jumbo)
			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		else
			mp->m_len = mp->m_pkthdr.len = MJUM9BYTES;

		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL);
	if (offset) {
		offset = 8 - offset;
		m_adj(mp, offset);
	}

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	ret = bus_dmamap_load(ha->rx_tag, rxb->map,
		mtod(mp, void *), mp->m_len,
		qla_dmamap_callback, &rxb->paddr,
		BUS_DMA_NOWAIT);
	if (ret || !rxb->paddr) {
		m_free(mp);
		rxb->m_head = NULL;
		device_printf(ha->pci_dev,
			"%s: bus_dmamap_load failed\n", __func__);
		ret = -1;
		goto exit_qla_get_mbuf;
	}
	rxb->m_head = mp;
	bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD);

exit_qla_get_mbuf:
	QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret));
	return (ret);
}

static void
qla_tx_done(void *context, int pending)
{
	qla_host_t *ha = context;

	qla_hw_tx_done(ha);
	qla_start(ha->ifp);
}