/*
 * Copyright (c) 2010-2011 Qlogic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qla_os.c
 * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
31227064Sbz */ 32227064Sbz 33227064Sbz#include <sys/cdefs.h> 34227064Sbz__FBSDID("$FreeBSD$"); 35227064Sbz 36227064Sbz#include "qla_os.h" 37227064Sbz#include "qla_reg.h" 38227064Sbz#include "qla_hw.h" 39227064Sbz#include "qla_def.h" 40227064Sbz#include "qla_inline.h" 41227064Sbz#include "qla_ver.h" 42227064Sbz#include "qla_glbl.h" 43227064Sbz#include "qla_dbg.h" 44227064Sbz 45227064Sbz/* 46227064Sbz * Some PCI Configuration Space Related Defines 47227064Sbz */ 48227064Sbz 49227064Sbz#ifndef PCI_VENDOR_QLOGIC 50227064Sbz#define PCI_VENDOR_QLOGIC 0x1077 51227064Sbz#endif 52227064Sbz 53227064Sbz#ifndef PCI_PRODUCT_QLOGIC_ISP8020 54227064Sbz#define PCI_PRODUCT_QLOGIC_ISP8020 0x8020 55227064Sbz#endif 56227064Sbz 57227064Sbz#define PCI_QLOGIC_ISP8020 \ 58227064Sbz ((PCI_PRODUCT_QLOGIC_ISP8020 << 16) | PCI_VENDOR_QLOGIC) 59227064Sbz 60227064Sbz/* 61227064Sbz * static functions 62227064Sbz */ 63227064Sbzstatic int qla_alloc_parent_dma_tag(qla_host_t *ha); 64227064Sbzstatic void qla_free_parent_dma_tag(qla_host_t *ha); 65227064Sbzstatic int qla_alloc_xmt_bufs(qla_host_t *ha); 66227064Sbzstatic void qla_free_xmt_bufs(qla_host_t *ha); 67227064Sbzstatic int qla_alloc_rcv_bufs(qla_host_t *ha); 68227064Sbzstatic void qla_free_rcv_bufs(qla_host_t *ha); 69227064Sbz 70227064Sbzstatic void qla_init_ifnet(device_t dev, qla_host_t *ha); 71227064Sbzstatic int qla_sysctl_get_stats(SYSCTL_HANDLER_ARGS); 72227064Sbzstatic void qla_release(qla_host_t *ha); 73227064Sbzstatic void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, 74227064Sbz int error); 75227064Sbzstatic void qla_stop(qla_host_t *ha); 76227064Sbzstatic int qla_send(qla_host_t *ha, struct mbuf **m_headp); 77227064Sbzstatic void qla_tx_done(void *context, int pending); 78227064Sbz 79227064Sbz/* 80227064Sbz * Hooks to the Operating Systems 81227064Sbz */ 82227064Sbzstatic int qla_pci_probe (device_t); 83227064Sbzstatic int qla_pci_attach (device_t); 84227064Sbzstatic int qla_pci_detach (device_t); 85227064Sbz 
86227064Sbzstatic void qla_init(void *arg); 87227064Sbzstatic int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 88227064Sbzstatic int qla_media_change(struct ifnet *ifp); 89227064Sbzstatic void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); 90227064Sbz 91227064Sbzstatic device_method_t qla_pci_methods[] = { 92227064Sbz /* Device interface */ 93227064Sbz DEVMETHOD(device_probe, qla_pci_probe), 94227064Sbz DEVMETHOD(device_attach, qla_pci_attach), 95227064Sbz DEVMETHOD(device_detach, qla_pci_detach), 96227064Sbz { 0, 0 } 97227064Sbz}; 98227064Sbz 99227064Sbzstatic driver_t qla_pci_driver = { 100227064Sbz "ql", qla_pci_methods, sizeof (qla_host_t), 101227064Sbz}; 102227064Sbz 103227064Sbzstatic devclass_t qla80xx_devclass; 104227064Sbz 105227064SbzDRIVER_MODULE(qla80xx, pci, qla_pci_driver, qla80xx_devclass, 0, 0); 106227064Sbz 107227064SbzMODULE_DEPEND(qla80xx, pci, 1, 1, 1); 108227064SbzMODULE_DEPEND(qla80xx, ether, 1, 1, 1); 109227064Sbz 110227064SbzMALLOC_DEFINE(M_QLA8XXXBUF, "qla80xxbuf", "Buffers for qla80xx driver"); 111227064Sbz 112227064Sbzuint32_t std_replenish = 8; 113227064Sbzuint32_t jumbo_replenish = 2; 114227064Sbzuint32_t rcv_pkt_thres = 128; 115227064Sbzuint32_t rcv_pkt_thres_d = 32; 116227064Sbzuint32_t snd_pkt_thres = 16; 117227064Sbzuint32_t free_pkt_thres = (NUM_TX_DESCRIPTORS / 2); 118227064Sbz 119227064Sbzstatic char dev_str[64]; 120227064Sbz 121227064Sbz/* 122227064Sbz * Name: qla_pci_probe 123227064Sbz * Function: Validate the PCI device to be a QLA80XX device 124227064Sbz */ 125227064Sbzstatic int 126227064Sbzqla_pci_probe(device_t dev) 127227064Sbz{ 128227064Sbz switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 129227064Sbz case PCI_QLOGIC_ISP8020: 130227064Sbz snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d", 131227064Sbz "Qlogic ISP 80xx PCI CNA Adapter-Ethernet Function", 132227064Sbz QLA_VERSION_MAJOR, QLA_VERSION_MINOR, 133227064Sbz QLA_VERSION_BUILD); 134227064Sbz device_set_desc(dev, dev_str); 
135227064Sbz break; 136227064Sbz default: 137227064Sbz return (ENXIO); 138227064Sbz } 139227064Sbz 140227064Sbz if (bootverbose) 141227064Sbz printf("%s: %s\n ", __func__, dev_str); 142227064Sbz 143227064Sbz return (BUS_PROBE_DEFAULT); 144227064Sbz} 145227064Sbz 146227064Sbzstatic void 147227064Sbzqla_add_sysctls(qla_host_t *ha) 148227064Sbz{ 149227064Sbz device_t dev = ha->pci_dev; 150227064Sbz 151227064Sbz SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 152227064Sbz SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 153227064Sbz OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RD, 154227064Sbz (void *)ha, 0, 155227064Sbz qla_sysctl_get_stats, "I", "Statistics"); 156227064Sbz 157227064Sbz dbg_level = 0; 158227064Sbz SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 159227064Sbz SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 160227064Sbz OID_AUTO, "debug", CTLFLAG_RW, 161227064Sbz &dbg_level, dbg_level, "Debug Level"); 162227064Sbz 163227064Sbz SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 164227064Sbz SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 165227064Sbz OID_AUTO, "std_replenish", CTLFLAG_RW, 166227064Sbz &std_replenish, std_replenish, 167227064Sbz "Threshold for Replenishing Standard Frames"); 168227064Sbz 169227064Sbz SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 170227064Sbz SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 171227064Sbz OID_AUTO, "jumbo_replenish", CTLFLAG_RW, 172227064Sbz &jumbo_replenish, jumbo_replenish, 173227064Sbz "Threshold for Replenishing Jumbo Frames"); 174227064Sbz 175227064Sbz SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 176227064Sbz SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 177227064Sbz OID_AUTO, "rcv_pkt_thres", CTLFLAG_RW, 178227064Sbz &rcv_pkt_thres, rcv_pkt_thres, 179227064Sbz "Threshold for # of rcv pkts to trigger indication isr"); 180227064Sbz 181227064Sbz SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 182227064Sbz SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 183227064Sbz OID_AUTO, "rcv_pkt_thres_d", CTLFLAG_RW, 184227064Sbz &rcv_pkt_thres_d, 
rcv_pkt_thres_d, 185227064Sbz "Threshold for # of rcv pkts to trigger indication defered"); 186227064Sbz 187227064Sbz SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 188227064Sbz SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 189227064Sbz OID_AUTO, "snd_pkt_thres", CTLFLAG_RW, 190227064Sbz &snd_pkt_thres, snd_pkt_thres, 191227064Sbz "Threshold for # of snd packets"); 192227064Sbz 193227064Sbz SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), 194227064Sbz SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 195227064Sbz OID_AUTO, "free_pkt_thres", CTLFLAG_RW, 196227064Sbz &free_pkt_thres, free_pkt_thres, 197227064Sbz "Threshold for # of packets to free at a time"); 198227064Sbz 199227064Sbz return; 200227064Sbz} 201227064Sbz 202227064Sbzstatic void 203227064Sbzqla_watchdog(void *arg) 204227064Sbz{ 205227064Sbz qla_host_t *ha = arg; 206227064Sbz qla_hw_t *hw; 207227064Sbz struct ifnet *ifp; 208227064Sbz 209227064Sbz hw = &ha->hw; 210227064Sbz ifp = ha->ifp; 211227064Sbz 212227064Sbz if (ha->flags.qla_watchdog_exit) 213227064Sbz return; 214227064Sbz 215227064Sbz if (!ha->flags.qla_watchdog_pause) { 216227064Sbz if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) { 217227064Sbz taskqueue_enqueue(ha->tx_tq, &ha->tx_task); 218227064Sbz } else if ((ifp->if_snd.ifq_head != NULL) && QL_RUNNING(ifp)) { 219227064Sbz taskqueue_enqueue(ha->tx_tq, &ha->tx_task); 220227064Sbz } 221227064Sbz } 222227064Sbz ha->watchdog_ticks = ha->watchdog_ticks++ % 1000; 223227064Sbz callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 224227064Sbz qla_watchdog, ha); 225227064Sbz} 226227064Sbz 227227064Sbz/* 228227064Sbz * Name: qla_pci_attach 229227064Sbz * Function: attaches the device to the operating system 230227064Sbz */ 231227064Sbzstatic int 232227064Sbzqla_pci_attach(device_t dev) 233227064Sbz{ 234227064Sbz qla_host_t *ha = NULL; 235227064Sbz uint32_t rsrc_len, i; 236227064Sbz 237227064Sbz QL_DPRINT2((dev, "%s: enter\n", __func__)); 238227064Sbz 239227064Sbz if ((ha = device_get_softc(dev)) == 
NULL) { 240227064Sbz device_printf(dev, "cannot get softc\n"); 241227064Sbz return (ENOMEM); 242227064Sbz } 243227064Sbz 244227064Sbz memset(ha, 0, sizeof (qla_host_t)); 245227064Sbz 246227064Sbz if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8020) { 247227064Sbz device_printf(dev, "device is not ISP8020\n"); 248227064Sbz return (ENXIO); 249227064Sbz } 250227064Sbz 251227064Sbz ha->pci_func = pci_get_function(dev); 252227064Sbz 253227064Sbz ha->pci_dev = dev; 254227064Sbz 255227064Sbz pci_enable_busmaster(dev); 256227064Sbz 257227064Sbz ha->reg_rid = PCIR_BAR(0); 258227064Sbz ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 259227064Sbz RF_ACTIVE); 260227064Sbz 261227064Sbz if (ha->pci_reg == NULL) { 262227064Sbz device_printf(dev, "unable to map any ports\n"); 263227064Sbz goto qla_pci_attach_err; 264227064Sbz } 265227064Sbz 266227064Sbz rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 267227064Sbz ha->reg_rid); 268227064Sbz 269227064Sbz mtx_init(&ha->hw_lock, "qla80xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 270227064Sbz mtx_init(&ha->tx_lock, "qla80xx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF); 271227064Sbz mtx_init(&ha->rx_lock, "qla80xx_rx_lock", MTX_NETWORK_LOCK, MTX_DEF); 272227064Sbz mtx_init(&ha->rxj_lock, "qla80xx_rxj_lock", MTX_NETWORK_LOCK, MTX_DEF); 273227064Sbz ha->flags.lock_init = 1; 274227064Sbz 275227064Sbz ha->msix_count = pci_msix_count(dev); 276227064Sbz 277227064Sbz if (ha->msix_count < qla_get_msix_count(ha)) { 278227064Sbz device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 279227064Sbz ha->msix_count); 280227064Sbz goto qla_pci_attach_err; 281227064Sbz } 282227064Sbz 283227064Sbz QL_DPRINT2((dev, "%s: ha %p irq %p pci_func 0x%x rsrc_count 0x%08x" 284227064Sbz " msix_count 0x%x pci_reg %p\n", __func__, ha, 285227064Sbz ha->irq, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg)); 286227064Sbz 287227064Sbz ha->msix_count = qla_get_msix_count(ha); 288227064Sbz 289227064Sbz if (pci_alloc_msix(dev, 
&ha->msix_count)) { 290227064Sbz device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__, 291227064Sbz ha->msix_count); 292227064Sbz ha->msix_count = 0; 293227064Sbz goto qla_pci_attach_err; 294227064Sbz } 295227064Sbz 296227064Sbz TASK_INIT(&ha->tx_task, 0, qla_tx_done, ha); 297227064Sbz ha->tx_tq = taskqueue_create_fast("qla_txq", M_NOWAIT, 298227064Sbz taskqueue_thread_enqueue, &ha->tx_tq); 299227064Sbz taskqueue_start_threads(&ha->tx_tq, 1, PI_NET, "%s txq", 300227064Sbz device_get_nameunit(ha->pci_dev)); 301227064Sbz 302227064Sbz for (i = 0; i < ha->msix_count; i++) { 303227064Sbz ha->irq_vec[i].irq_rid = i+1; 304227064Sbz ha->irq_vec[i].ha = ha; 305227064Sbz 306227064Sbz ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 307227064Sbz &ha->irq_vec[i].irq_rid, 308227064Sbz (RF_ACTIVE | RF_SHAREABLE)); 309227064Sbz 310227064Sbz if (ha->irq_vec[i].irq == NULL) { 311227064Sbz device_printf(dev, "could not allocate interrupt\n"); 312227064Sbz goto qla_pci_attach_err; 313227064Sbz } 314227064Sbz 315227064Sbz if (bus_setup_intr(dev, ha->irq_vec[i].irq, 316227064Sbz (INTR_TYPE_NET | INTR_MPSAFE), 317227064Sbz NULL, qla_isr, &ha->irq_vec[i], 318227064Sbz &ha->irq_vec[i].handle)) { 319227064Sbz device_printf(dev, "could not setup interrupt\n"); 320227064Sbz goto qla_pci_attach_err; 321227064Sbz } 322227064Sbz 323227064Sbz TASK_INIT(&ha->irq_vec[i].rcv_task, 0, qla_rcv,\ 324227064Sbz &ha->irq_vec[i]); 325227064Sbz 326227064Sbz ha->irq_vec[i].rcv_tq = taskqueue_create_fast("qla_rcvq", 327227064Sbz M_NOWAIT, taskqueue_thread_enqueue, 328227064Sbz &ha->irq_vec[i].rcv_tq); 329227064Sbz 330227064Sbz taskqueue_start_threads(&ha->irq_vec[i].rcv_tq, 1, PI_NET, 331227064Sbz "%s rcvq", 332227064Sbz device_get_nameunit(ha->pci_dev)); 333227064Sbz } 334227064Sbz 335227064Sbz qla_add_sysctls(ha); 336227064Sbz 337227064Sbz /* add hardware specific sysctls */ 338227064Sbz qla_hw_add_sysctls(ha); 339227064Sbz 340227064Sbz /* initialize hardware */ 341227064Sbz if 
(qla_init_hw(ha)) { 342227064Sbz device_printf(dev, "%s: qla_init_hw failed\n", __func__); 343227064Sbz goto qla_pci_attach_err; 344227064Sbz } 345227064Sbz 346227064Sbz device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__, 347227064Sbz ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, 348227064Sbz ha->fw_ver_build); 349227064Sbz 350227064Sbz //qla_get_hw_caps(ha); 351227064Sbz qla_read_mac_addr(ha); 352227064Sbz 353227064Sbz /* allocate parent dma tag */ 354227064Sbz if (qla_alloc_parent_dma_tag(ha)) { 355227064Sbz device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n", 356227064Sbz __func__); 357227064Sbz goto qla_pci_attach_err; 358227064Sbz } 359227064Sbz 360227064Sbz /* alloc all dma buffers */ 361227064Sbz if (qla_alloc_dma(ha)) { 362227064Sbz device_printf(dev, "%s: qla_alloc_dma failed\n", __func__); 363227064Sbz goto qla_pci_attach_err; 364227064Sbz } 365227064Sbz 366227064Sbz /* create the o.s ethernet interface */ 367227064Sbz qla_init_ifnet(dev, ha); 368227064Sbz 369227064Sbz ha->flags.qla_watchdog_active = 1; 370227064Sbz ha->flags.qla_watchdog_pause = 1; 371227064Sbz 372227064Sbz callout_init(&ha->tx_callout, TRUE); 373227064Sbz 374227064Sbz /* create ioctl device interface */ 375227064Sbz if (qla_make_cdev(ha)) { 376227064Sbz device_printf(dev, "%s: qla_make_cdev failed\n", __func__); 377227064Sbz goto qla_pci_attach_err; 378227064Sbz } 379227064Sbz 380227064Sbz callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, 381227064Sbz qla_watchdog, ha); 382227064Sbz 383227064Sbz QL_DPRINT2((dev, "%s: exit 0\n", __func__)); 384227064Sbz return (0); 385227064Sbz 386227064Sbzqla_pci_attach_err: 387227064Sbz 388227064Sbz qla_release(ha); 389227064Sbz 390227064Sbz QL_DPRINT2((dev, "%s: exit ENXIO\n", __func__)); 391227064Sbz return (ENXIO); 392227064Sbz} 393227064Sbz 394227064Sbz/* 395227064Sbz * Name: qla_pci_detach 396227064Sbz * Function: Unhooks the device from the operating system 397227064Sbz */ 398227064Sbzstatic int 
399227064Sbzqla_pci_detach(device_t dev) 400227064Sbz{ 401227064Sbz qla_host_t *ha = NULL; 402227064Sbz struct ifnet *ifp; 403227064Sbz int i; 404227064Sbz 405227064Sbz QL_DPRINT2((dev, "%s: enter\n", __func__)); 406227064Sbz 407227064Sbz if ((ha = device_get_softc(dev)) == NULL) { 408227064Sbz device_printf(dev, "cannot get softc\n"); 409227064Sbz return (ENOMEM); 410227064Sbz } 411227064Sbz 412227064Sbz ifp = ha->ifp; 413227064Sbz 414227064Sbz QLA_LOCK(ha, __func__); 415227064Sbz qla_stop(ha); 416227064Sbz QLA_UNLOCK(ha, __func__); 417227064Sbz 418227064Sbz if (ha->tx_tq) { 419227064Sbz taskqueue_drain(ha->tx_tq, &ha->tx_task); 420227064Sbz taskqueue_free(ha->tx_tq); 421227064Sbz } 422227064Sbz 423227064Sbz for (i = 0; i < ha->msix_count; i++) { 424227064Sbz taskqueue_drain(ha->irq_vec[i].rcv_tq, 425227064Sbz &ha->irq_vec[i].rcv_task); 426227064Sbz taskqueue_free(ha->irq_vec[i].rcv_tq); 427227064Sbz } 428227064Sbz 429227064Sbz qla_release(ha); 430227064Sbz 431227064Sbz QL_DPRINT2((dev, "%s: exit\n", __func__)); 432227064Sbz 433227064Sbz return (0); 434227064Sbz} 435227064Sbz 436227064Sbz/* 437227064Sbz * SYSCTL Related Callbacks 438227064Sbz */ 439227064Sbzstatic int 440227064Sbzqla_sysctl_get_stats(SYSCTL_HANDLER_ARGS) 441227064Sbz{ 442227064Sbz int err, ret = 0; 443227064Sbz qla_host_t *ha; 444227064Sbz 445227064Sbz err = sysctl_handle_int(oidp, &ret, 0, req); 446227064Sbz 447227064Sbz if (err) 448227064Sbz return (err); 449227064Sbz 450227064Sbz ha = (qla_host_t *)arg1; 451227064Sbz //qla_get_stats(ha); 452227064Sbz QL_DPRINT2((ha->pci_dev, "%s: called ret %d\n", __func__, ret)); 453227064Sbz return (err); 454227064Sbz} 455227064Sbz 456227064Sbz 457227064Sbz/* 458227064Sbz * Name: qla_release 459227064Sbz * Function: Releases the resources allocated for the device 460227064Sbz */ 461227064Sbzstatic void 462227064Sbzqla_release(qla_host_t *ha) 463227064Sbz{ 464227064Sbz device_t dev; 465227064Sbz int i; 466227064Sbz 467227064Sbz dev = ha->pci_dev; 468227064Sbz 
469227064Sbz qla_del_cdev(ha); 470227064Sbz 471227064Sbz if (ha->flags.qla_watchdog_active) 472227064Sbz ha->flags.qla_watchdog_exit = 1; 473227064Sbz 474227064Sbz callout_stop(&ha->tx_callout); 475227064Sbz qla_mdelay(__func__, 100); 476227064Sbz 477227064Sbz if (ha->ifp != NULL) 478227064Sbz ether_ifdetach(ha->ifp); 479227064Sbz 480227064Sbz qla_free_dma(ha); 481227064Sbz qla_free_parent_dma_tag(ha); 482227064Sbz 483227064Sbz for (i = 0; i < ha->msix_count; i++) { 484227064Sbz if (ha->irq_vec[i].handle) 485227064Sbz (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 486227064Sbz ha->irq_vec[i].handle); 487227064Sbz if (ha->irq_vec[i].irq) 488227064Sbz (void) bus_release_resource(dev, SYS_RES_IRQ, 489227064Sbz ha->irq_vec[i].irq_rid, 490227064Sbz ha->irq_vec[i].irq); 491227064Sbz } 492227064Sbz if (ha->msix_count) 493227064Sbz pci_release_msi(dev); 494227064Sbz 495227064Sbz if (ha->flags.lock_init) { 496227064Sbz mtx_destroy(&ha->tx_lock); 497227064Sbz mtx_destroy(&ha->rx_lock); 498227064Sbz mtx_destroy(&ha->rxj_lock); 499227064Sbz mtx_destroy(&ha->hw_lock); 500227064Sbz } 501227064Sbz 502227064Sbz if (ha->pci_reg) 503227064Sbz (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 504227064Sbz ha->pci_reg); 505227064Sbz} 506227064Sbz 507227064Sbz/* 508227064Sbz * DMA Related Functions 509227064Sbz */ 510227064Sbz 511227064Sbzstatic void 512227064Sbzqla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 513227064Sbz{ 514227064Sbz *((bus_addr_t *)arg) = 0; 515227064Sbz 516227064Sbz if (error) { 517227064Sbz printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 518227064Sbz return; 519227064Sbz } 520227064Sbz 521227064Sbz QL_ASSERT((nsegs == 1), ("%s: %d segments returned!", __func__, nsegs)); 522227064Sbz 523227064Sbz *((bus_addr_t *)arg) = segs[0].ds_addr; 524227064Sbz 525227064Sbz return; 526227064Sbz} 527227064Sbz 528227064Sbzint 529227064Sbzqla_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 530227064Sbz{ 531227064Sbz int 
ret = 0; 532227064Sbz device_t dev; 533227064Sbz bus_addr_t b_addr; 534227064Sbz 535227064Sbz dev = ha->pci_dev; 536227064Sbz 537227064Sbz QL_DPRINT2((dev, "%s: enter\n", __func__)); 538227064Sbz 539227064Sbz ret = bus_dma_tag_create( 540227064Sbz ha->parent_tag,/* parent */ 541227064Sbz dma_buf->alignment, 542227064Sbz ((bus_size_t)(1ULL << 32)),/* boundary */ 543227064Sbz BUS_SPACE_MAXADDR, /* lowaddr */ 544227064Sbz BUS_SPACE_MAXADDR, /* highaddr */ 545227064Sbz NULL, NULL, /* filter, filterarg */ 546227064Sbz dma_buf->size, /* maxsize */ 547227064Sbz 1, /* nsegments */ 548227064Sbz dma_buf->size, /* maxsegsize */ 549227064Sbz 0, /* flags */ 550227064Sbz NULL, NULL, /* lockfunc, lockarg */ 551227064Sbz &dma_buf->dma_tag); 552227064Sbz 553227064Sbz if (ret) { 554227064Sbz device_printf(dev, "%s: could not create dma tag\n", __func__); 555227064Sbz goto qla_alloc_dmabuf_exit; 556227064Sbz } 557227064Sbz ret = bus_dmamem_alloc(dma_buf->dma_tag, 558227064Sbz (void **)&dma_buf->dma_b, 559227064Sbz (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 560227064Sbz &dma_buf->dma_map); 561227064Sbz if (ret) { 562227064Sbz bus_dma_tag_destroy(dma_buf->dma_tag); 563227064Sbz device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__); 564227064Sbz goto qla_alloc_dmabuf_exit; 565227064Sbz } 566227064Sbz 567227064Sbz ret = bus_dmamap_load(dma_buf->dma_tag, 568227064Sbz dma_buf->dma_map, 569227064Sbz dma_buf->dma_b, 570227064Sbz dma_buf->size, 571227064Sbz qla_dmamap_callback, 572227064Sbz &b_addr, BUS_DMA_NOWAIT); 573227064Sbz 574227064Sbz if (ret || !b_addr) { 575227064Sbz bus_dma_tag_destroy(dma_buf->dma_tag); 576227064Sbz bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 577227064Sbz dma_buf->dma_map); 578227064Sbz ret = -1; 579227064Sbz goto qla_alloc_dmabuf_exit; 580227064Sbz } 581227064Sbz 582227064Sbz dma_buf->dma_addr = b_addr; 583227064Sbz 584227064Sbzqla_alloc_dmabuf_exit: 585227064Sbz QL_DPRINT2((dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n", 
586227064Sbz __func__, ret, (void *)dma_buf->dma_tag, 587227064Sbz (void *)dma_buf->dma_map, (void *)dma_buf->dma_b, 588227064Sbz dma_buf->size)); 589227064Sbz 590227064Sbz return ret; 591227064Sbz} 592227064Sbz 593227064Sbzvoid 594227064Sbzqla_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) 595227064Sbz{ 596227064Sbz bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 597227064Sbz bus_dma_tag_destroy(dma_buf->dma_tag); 598227064Sbz} 599227064Sbz 600227064Sbzstatic int 601227064Sbzqla_alloc_parent_dma_tag(qla_host_t *ha) 602227064Sbz{ 603227064Sbz int ret; 604227064Sbz device_t dev; 605227064Sbz 606227064Sbz dev = ha->pci_dev; 607227064Sbz 608227064Sbz /* 609227064Sbz * Allocate parent DMA Tag 610227064Sbz */ 611227064Sbz ret = bus_dma_tag_create( 612227064Sbz bus_get_dma_tag(dev), /* parent */ 613227064Sbz 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 614227064Sbz BUS_SPACE_MAXADDR, /* lowaddr */ 615227064Sbz BUS_SPACE_MAXADDR, /* highaddr */ 616227064Sbz NULL, NULL, /* filter, filterarg */ 617227064Sbz BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 618227064Sbz 0, /* nsegments */ 619227064Sbz BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 620227064Sbz 0, /* flags */ 621227064Sbz NULL, NULL, /* lockfunc, lockarg */ 622227064Sbz &ha->parent_tag); 623227064Sbz 624227064Sbz if (ret) { 625227064Sbz device_printf(dev, "%s: could not create parent dma tag\n", 626227064Sbz __func__); 627227064Sbz return (-1); 628227064Sbz } 629227064Sbz 630227064Sbz ha->flags.parent_tag = 1; 631227064Sbz 632227064Sbz return (0); 633227064Sbz} 634227064Sbz 635227064Sbzstatic void 636227064Sbzqla_free_parent_dma_tag(qla_host_t *ha) 637227064Sbz{ 638227064Sbz if (ha->flags.parent_tag) { 639227064Sbz bus_dma_tag_destroy(ha->parent_tag); 640227064Sbz ha->flags.parent_tag = 0; 641227064Sbz } 642227064Sbz} 643227064Sbz 644227064Sbz/* 645227064Sbz * Name: qla_init_ifnet 646227064Sbz * Function: Creates the Network Device Interface and Registers it with the O.S 647227064Sbz */ 
648227064Sbz 649227064Sbzstatic void 650227064Sbzqla_init_ifnet(device_t dev, qla_host_t *ha) 651227064Sbz{ 652227064Sbz struct ifnet *ifp; 653227064Sbz 654227064Sbz QL_DPRINT2((dev, "%s: enter\n", __func__)); 655227064Sbz 656227064Sbz ifp = ha->ifp = if_alloc(IFT_ETHER); 657227064Sbz 658227064Sbz if (ifp == NULL) 659227064Sbz panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 660227064Sbz 661227064Sbz if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 662227064Sbz 663227064Sbz ifp->if_mtu = ETHERMTU; 664227064Sbz ifp->if_baudrate = (1 * 1000 * 1000 *1000); 665227064Sbz ifp->if_init = qla_init; 666227064Sbz ifp->if_softc = ha; 667227064Sbz ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 668227064Sbz ifp->if_ioctl = qla_ioctl; 669227064Sbz ifp->if_start = qla_start; 670227064Sbz 671227064Sbz IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha)); 672227064Sbz ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha); 673227064Sbz IFQ_SET_READY(&ifp->if_snd); 674227064Sbz 675227064Sbz ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 676227064Sbz 677227064Sbz ether_ifattach(ifp, qla_get_mac_addr(ha)); 678227064Sbz 679227064Sbz ifp->if_capabilities = IFCAP_HWCSUM | 680227064Sbz IFCAP_TSO4 | 681227064Sbz IFCAP_JUMBO_MTU; 682227064Sbz 683227064Sbz ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 684227064Sbz 685227064Sbz#if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) 686227064Sbz ifp->if_timer = 0; 687227064Sbz ifp->if_watchdog = NULL; 688227064Sbz#endif /* #if defined(__FreeBSD_version) && (__FreeBSD_version < 900002) */ 689227064Sbz 690227064Sbz ifp->if_capenable = ifp->if_capabilities; 691227064Sbz 692227064Sbz ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 693227064Sbz 694227064Sbz ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status); 695227064Sbz 696227064Sbz ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0, 697227064Sbz NULL); 
698227064Sbz ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 699227064Sbz 700227064Sbz ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 701227064Sbz 702227064Sbz QL_DPRINT2((dev, "%s: exit\n", __func__)); 703227064Sbz 704227064Sbz return; 705227064Sbz} 706227064Sbz 707227064Sbzstatic void 708227064Sbzqla_init_locked(qla_host_t *ha) 709227064Sbz{ 710227064Sbz struct ifnet *ifp = ha->ifp; 711227064Sbz 712227064Sbz qla_stop(ha); 713227064Sbz 714227064Sbz if (qla_alloc_xmt_bufs(ha) != 0) 715227064Sbz return; 716227064Sbz 717227064Sbz if (qla_alloc_rcv_bufs(ha) != 0) 718227064Sbz return; 719227064Sbz 720227064Sbz if (qla_config_lro(ha)) 721227064Sbz return; 722227064Sbz 723227064Sbz bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN); 724227064Sbz 725227064Sbz ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO; 726227064Sbz 727227064Sbz ha->flags.stop_rcv = 0; 728227064Sbz if (qla_init_hw_if(ha) == 0) { 729227064Sbz ifp = ha->ifp; 730227064Sbz ifp->if_drv_flags |= IFF_DRV_RUNNING; 731227064Sbz ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 732227064Sbz ha->flags.qla_watchdog_pause = 0; 733227064Sbz } 734227064Sbz 735227064Sbz return; 736227064Sbz} 737227064Sbz 738227064Sbzstatic void 739227064Sbzqla_init(void *arg) 740227064Sbz{ 741227064Sbz qla_host_t *ha; 742227064Sbz 743227064Sbz ha = (qla_host_t *)arg; 744227064Sbz 745227064Sbz QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 746227064Sbz 747227064Sbz QLA_LOCK(ha, __func__); 748227064Sbz qla_init_locked(ha); 749227064Sbz QLA_UNLOCK(ha, __func__); 750227064Sbz 751227064Sbz QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); 752227064Sbz} 753227064Sbz 754227064Sbzstatic void 755227064Sbzqla_set_multi(qla_host_t *ha, uint32_t add_multi) 756227064Sbz{ 757227064Sbz uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN]; 758227064Sbz struct ifmultiaddr *ifma; 759227064Sbz int mcnt = 0; 760227064Sbz struct ifnet *ifp = ha->ifp; 761227064Sbz 762231156Sjhb if_maddr_rlock(ifp); 763227064Sbz 764227064Sbz 
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 765227064Sbz 766227064Sbz if (ifma->ifma_addr->sa_family != AF_LINK) 767227064Sbz continue; 768227064Sbz 769227064Sbz if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS) 770227064Sbz break; 771227064Sbz 772227064Sbz bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 773227064Sbz &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN); 774227064Sbz 775227064Sbz mcnt++; 776227064Sbz } 777227064Sbz 778231156Sjhb if_maddr_runlock(ifp); 779227064Sbz 780227064Sbz qla_hw_set_multi(ha, mta, mcnt, add_multi); 781227064Sbz 782227064Sbz return; 783227064Sbz} 784227064Sbz 785227064Sbzstatic int 786227064Sbzqla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 787227064Sbz{ 788227064Sbz int ret = 0; 789227064Sbz struct ifreq *ifr = (struct ifreq *)data; 790227064Sbz struct ifaddr *ifa = (struct ifaddr *)data; 791227064Sbz qla_host_t *ha; 792227064Sbz 793227064Sbz ha = (qla_host_t *)ifp->if_softc; 794227064Sbz 795227064Sbz switch (cmd) { 796227064Sbz case SIOCSIFADDR: 797227064Sbz QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", 798227064Sbz __func__, cmd)); 799227064Sbz 800227064Sbz if (ifa->ifa_addr->sa_family == AF_INET) { 801227064Sbz ifp->if_flags |= IFF_UP; 802227064Sbz if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 803227064Sbz QLA_LOCK(ha, __func__); 804227064Sbz qla_init_locked(ha); 805227064Sbz QLA_UNLOCK(ha, __func__); 806227064Sbz } 807227064Sbz QL_DPRINT4((ha->pci_dev, 808227064Sbz "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 809227064Sbz __func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr))); 810227064Sbz 811227064Sbz arp_ifinit(ifp, ifa); 812227064Sbz if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) { 813227064Sbz qla_config_ipv4_addr(ha, 814227064Sbz (IA_SIN(ifa)->sin_addr.s_addr)); 815227064Sbz } 816227064Sbz } else { 817227064Sbz ether_ioctl(ifp, cmd, data); 818227064Sbz } 819227064Sbz break; 820227064Sbz 821227064Sbz case SIOCSIFMTU: 822227064Sbz QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", 823227064Sbz 
__func__, cmd)); 824227064Sbz 825227064Sbz if (ifr->ifr_mtu > QLA_MAX_FRAME_SIZE - ETHER_HDR_LEN) { 826227064Sbz ret = EINVAL; 827227064Sbz } else { 828227064Sbz QLA_LOCK(ha, __func__); 829227064Sbz ifp->if_mtu = ifr->ifr_mtu; 830227064Sbz ha->max_frame_size = 831227064Sbz ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 832227064Sbz if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 833227064Sbz ret = qla_set_max_mtu(ha, ha->max_frame_size, 834227064Sbz (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id); 835227064Sbz } 836227064Sbz QLA_UNLOCK(ha, __func__); 837227064Sbz 838227064Sbz if (ret) 839227064Sbz ret = EINVAL; 840227064Sbz } 841227064Sbz 842227064Sbz break; 843227064Sbz 844227064Sbz case SIOCSIFFLAGS: 845227064Sbz QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", 846227064Sbz __func__, cmd)); 847227064Sbz 848227064Sbz if (ifp->if_flags & IFF_UP) { 849227064Sbz if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 850227064Sbz if ((ifp->if_flags ^ ha->if_flags) & 851227064Sbz IFF_PROMISC) { 852227064Sbz qla_set_promisc(ha); 853227064Sbz } else if ((ifp->if_flags ^ ha->if_flags) & 854227064Sbz IFF_ALLMULTI) { 855227064Sbz qla_set_allmulti(ha); 856227064Sbz } 857227064Sbz } else { 858227064Sbz QLA_LOCK(ha, __func__); 859227064Sbz qla_init_locked(ha); 860227064Sbz ha->max_frame_size = ifp->if_mtu + 861227064Sbz ETHER_HDR_LEN + ETHER_CRC_LEN; 862227064Sbz ret = qla_set_max_mtu(ha, ha->max_frame_size, 863227064Sbz (ha->hw.rx_cntxt_rsp)->rx_rsp.cntxt_id); 864227064Sbz QLA_UNLOCK(ha, __func__); 865227064Sbz } 866227064Sbz } else { 867227064Sbz QLA_LOCK(ha, __func__); 868227064Sbz if (ifp->if_drv_flags & IFF_DRV_RUNNING) 869227064Sbz qla_stop(ha); 870227064Sbz ha->if_flags = ifp->if_flags; 871227064Sbz QLA_UNLOCK(ha, __func__); 872227064Sbz } 873227064Sbz break; 874227064Sbz 875227064Sbz case SIOCADDMULTI: 876227064Sbz QL_DPRINT4((ha->pci_dev, 877227064Sbz "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); 878227064Sbz 879227064Sbz if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 880227064Sbz 
qla_set_multi(ha, 1); 881227064Sbz } 882227064Sbz break; 883227064Sbz 884227064Sbz case SIOCDELMULTI: 885227064Sbz QL_DPRINT4((ha->pci_dev, 886227064Sbz "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); 887227064Sbz 888227064Sbz if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 889227064Sbz qla_set_multi(ha, 0); 890227064Sbz } 891227064Sbz break; 892227064Sbz 893227064Sbz case SIOCSIFMEDIA: 894227064Sbz case SIOCGIFMEDIA: 895227064Sbz QL_DPRINT4((ha->pci_dev, 896227064Sbz "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", 897227064Sbz __func__, cmd)); 898227064Sbz ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 899227064Sbz break; 900227064Sbz 901227064Sbz case SIOCSIFCAP: 902227064Sbz { 903227064Sbz int mask = ifr->ifr_reqcap ^ ifp->if_capenable; 904227064Sbz 905227064Sbz QL_DPRINT4((ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", 906227064Sbz __func__, cmd)); 907227064Sbz 908227064Sbz if (mask & IFCAP_HWCSUM) 909227064Sbz ifp->if_capenable ^= IFCAP_HWCSUM; 910227064Sbz if (mask & IFCAP_TSO4) 911227064Sbz ifp->if_capenable ^= IFCAP_TSO4; 912227064Sbz if (mask & IFCAP_TSO6) 913227064Sbz ifp->if_capenable ^= IFCAP_TSO6; 914227064Sbz if (mask & IFCAP_VLAN_HWTAGGING) 915227064Sbz ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 916227064Sbz 917227064Sbz if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 918227064Sbz qla_init(ha); 919227064Sbz 920227064Sbz VLAN_CAPABILITIES(ifp); 921227064Sbz break; 922227064Sbz } 923227064Sbz 924227064Sbz default: 925227064Sbz QL_DPRINT4((ha->pci_dev, "%s: default (0x%lx)\n", 926227064Sbz __func__, cmd)); 927227064Sbz ret = ether_ioctl(ifp, cmd, data); 928227064Sbz break; 929227064Sbz } 930227064Sbz 931227064Sbz return (ret); 932227064Sbz} 933227064Sbz 934227064Sbzstatic int 935227064Sbzqla_media_change(struct ifnet *ifp) 936227064Sbz{ 937227064Sbz qla_host_t *ha; 938227064Sbz struct ifmedia *ifm; 939227064Sbz int ret = 0; 940227064Sbz 941227064Sbz ha = (qla_host_t *)ifp->if_softc; 942227064Sbz 943227064Sbz QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 
944227064Sbz 945227064Sbz ifm = &ha->media; 946227064Sbz 947227064Sbz if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 948227064Sbz ret = EINVAL; 949227064Sbz 950227064Sbz QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); 951227064Sbz 952227064Sbz return (ret); 953227064Sbz} 954227064Sbz 955227064Sbzstatic void 956227064Sbzqla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 957227064Sbz{ 958227064Sbz qla_host_t *ha; 959227064Sbz 960227064Sbz ha = (qla_host_t *)ifp->if_softc; 961227064Sbz 962227064Sbz QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 963227064Sbz 964227064Sbz ifmr->ifm_status = IFM_AVALID; 965227064Sbz ifmr->ifm_active = IFM_ETHER; 966227064Sbz 967227064Sbz qla_update_link_state(ha); 968227064Sbz if (ha->hw.flags.link_up) { 969227064Sbz ifmr->ifm_status |= IFM_ACTIVE; 970227064Sbz ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha)); 971227064Sbz } 972227064Sbz 973227064Sbz QL_DPRINT2((ha->pci_dev, "%s: exit (%s)\n", __func__,\ 974227064Sbz (ha->hw.flags.link_up ? "link_up" : "link_down"))); 975227064Sbz 976227064Sbz return; 977227064Sbz} 978227064Sbz 979227064Sbzvoid 980227064Sbzqla_start(struct ifnet *ifp) 981227064Sbz{ 982227064Sbz struct mbuf *m_head; 983227064Sbz qla_host_t *ha = (qla_host_t *)ifp->if_softc; 984227064Sbz 985227064Sbz QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__)); 986227064Sbz 987227064Sbz if (!mtx_trylock(&ha->tx_lock)) { 988227064Sbz QL_DPRINT8((ha->pci_dev, 989227064Sbz "%s: mtx_trylock(&ha->tx_lock) failed\n", __func__)); 990227064Sbz return; 991227064Sbz } 992227064Sbz 993227064Sbz if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 994227064Sbz IFF_DRV_RUNNING) { 995227064Sbz QL_DPRINT8((ha->pci_dev, "%s: !IFF_DRV_RUNNING\n", __func__)); 996227064Sbz QLA_TX_UNLOCK(ha); 997227064Sbz return; 998227064Sbz } 999227064Sbz 1000227064Sbz if (!ha->watchdog_ticks) 1001227064Sbz qla_update_link_state(ha); 1002227064Sbz 1003227064Sbz if (!ha->hw.flags.link_up) { 1004227064Sbz QL_DPRINT8((ha->pci_dev, "%s: link 
down\n", __func__)); 1005227064Sbz QLA_TX_UNLOCK(ha); 1006227064Sbz return; 1007227064Sbz } 1008227064Sbz 1009227064Sbz while (ifp->if_snd.ifq_head != NULL) { 1010227064Sbz IF_DEQUEUE(&ifp->if_snd, m_head); 1011227064Sbz 1012227064Sbz if (m_head == NULL) { 1013227064Sbz QL_DPRINT8((ha->pci_dev, "%s: m_head == NULL\n", 1014227064Sbz __func__)); 1015227064Sbz break; 1016227064Sbz } 1017227064Sbz 1018227064Sbz if (qla_send(ha, &m_head)) { 1019227064Sbz if (m_head == NULL) 1020227064Sbz break; 1021227064Sbz QL_DPRINT8((ha->pci_dev, "%s: PREPEND\n", __func__)); 1022227064Sbz ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1023227064Sbz IF_PREPEND(&ifp->if_snd, m_head); 1024227064Sbz break; 1025227064Sbz } 1026227064Sbz /* Send a copy of the frame to the BPF listener */ 1027227064Sbz ETHER_BPF_MTAP(ifp, m_head); 1028227064Sbz } 1029227064Sbz QLA_TX_UNLOCK(ha); 1030227064Sbz QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__)); 1031227064Sbz return; 1032227064Sbz} 1033227064Sbz 1034227064Sbzstatic int 1035227064Sbzqla_send(qla_host_t *ha, struct mbuf **m_headp) 1036227064Sbz{ 1037227064Sbz bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; 1038227064Sbz bus_dmamap_t map; 1039227064Sbz int nsegs; 1040227064Sbz int ret = -1; 1041227064Sbz uint32_t tx_idx; 1042227064Sbz struct mbuf *m_head = *m_headp; 1043227064Sbz 1044227064Sbz QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__)); 1045227064Sbz 1046227064Sbz if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &map))) { 1047227064Sbz ha->err_tx_dmamap_create++; 1048227064Sbz device_printf(ha->pci_dev, 1049227064Sbz "%s: bus_dmamap_create failed[%d, %d]\n", 1050227064Sbz __func__, ret, m_head->m_pkthdr.len); 1051227064Sbz return (ret); 1052227064Sbz } 1053227064Sbz 1054227064Sbz ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 1055227064Sbz BUS_DMA_NOWAIT); 1056227064Sbz 1057261862Sdavidcs if (ret == EFBIG) { 1058227064Sbz 1059227064Sbz struct mbuf *m; 1060227064Sbz 1061227064Sbz QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", 
__func__, 1062227064Sbz m_head->m_pkthdr.len)); 1063227064Sbz 1064248078Smarius m = m_defrag(m_head, M_NOWAIT); 1065227064Sbz if (m == NULL) { 1066227064Sbz ha->err_tx_defrag++; 1067227064Sbz m_freem(m_head); 1068227064Sbz *m_headp = NULL; 1069227064Sbz device_printf(ha->pci_dev, 1070227064Sbz "%s: m_defrag() = NULL [%d]\n", 1071227064Sbz __func__, ret); 1072227064Sbz return (ENOBUFS); 1073227064Sbz } 1074227064Sbz m_head = m; 1075227064Sbz 1076227064Sbz if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 1077227064Sbz segs, &nsegs, BUS_DMA_NOWAIT))) { 1078227064Sbz 1079227064Sbz ha->err_tx_dmamap_load++; 1080227064Sbz 1081227064Sbz device_printf(ha->pci_dev, 1082227064Sbz "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", 1083227064Sbz __func__, ret, m_head->m_pkthdr.len); 1084227064Sbz 1085227064Sbz bus_dmamap_destroy(ha->tx_tag, map); 1086227064Sbz if (ret != ENOMEM) { 1087227064Sbz m_freem(m_head); 1088227064Sbz *m_headp = NULL; 1089227064Sbz } 1090227064Sbz return (ret); 1091227064Sbz } 1092227064Sbz } else if (ret) { 1093227064Sbz ha->err_tx_dmamap_load++; 1094227064Sbz 1095227064Sbz device_printf(ha->pci_dev, 1096227064Sbz "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", 1097227064Sbz __func__, ret, m_head->m_pkthdr.len); 1098227064Sbz 1099227064Sbz bus_dmamap_destroy(ha->tx_tag, map); 1100227064Sbz 1101227064Sbz if (ret != ENOMEM) { 1102227064Sbz m_freem(m_head); 1103227064Sbz *m_headp = NULL; 1104227064Sbz } 1105227064Sbz return (ret); 1106227064Sbz } 1107227064Sbz 1108227064Sbz QL_ASSERT((nsegs != 0), ("qla_send: empty packet")); 1109227064Sbz 1110227064Sbz bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 1111227064Sbz 1112227064Sbz if (!(ret = qla_hw_send(ha, segs, nsegs, &tx_idx, m_head))) { 1113227064Sbz ha->tx_buf[tx_idx].m_head = m_head; 1114227064Sbz ha->tx_buf[tx_idx].map = map; 1115227064Sbz } else { 1116227064Sbz if (ret == EINVAL) { 1117227064Sbz m_freem(m_head); 1118227064Sbz *m_headp = NULL; 1119227064Sbz } 1120227064Sbz } 
1121227064Sbz 1122227064Sbz QL_DPRINT8((ha->pci_dev, "%s: exit\n", __func__)); 1123227064Sbz return (ret); 1124227064Sbz} 1125227064Sbz 1126227064Sbzstatic void 1127227064Sbzqla_stop(qla_host_t *ha) 1128227064Sbz{ 1129227064Sbz struct ifnet *ifp = ha->ifp; 1130227064Sbz device_t dev; 1131227064Sbz 1132227064Sbz dev = ha->pci_dev; 1133227064Sbz 1134227064Sbz ha->flags.qla_watchdog_pause = 1; 1135227064Sbz qla_mdelay(__func__, 100); 1136227064Sbz 1137227064Sbz ha->flags.stop_rcv = 1; 1138227064Sbz qla_hw_stop_rcv(ha); 1139227064Sbz 1140227064Sbz qla_del_hw_if(ha); 1141227064Sbz 1142227064Sbz qla_free_lro(ha); 1143227064Sbz 1144227064Sbz qla_free_xmt_bufs(ha); 1145227064Sbz qla_free_rcv_bufs(ha); 1146227064Sbz 1147227064Sbz ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 1148227064Sbz 1149227064Sbz return; 1150227064Sbz} 1151227064Sbz 1152227064Sbz/* 1153227064Sbz * Buffer Management Functions for Transmit and Receive Rings 1154227064Sbz */ 1155227064Sbzstatic int 1156227064Sbzqla_alloc_xmt_bufs(qla_host_t *ha) 1157227064Sbz{ 1158227064Sbz if (bus_dma_tag_create(NULL, /* parent */ 1159227064Sbz 1, 0, /* alignment, bounds */ 1160227064Sbz BUS_SPACE_MAXADDR, /* lowaddr */ 1161227064Sbz BUS_SPACE_MAXADDR, /* highaddr */ 1162227064Sbz NULL, NULL, /* filter, filterarg */ 1163227064Sbz QLA_MAX_TSO_FRAME_SIZE, /* maxsize */ 1164227064Sbz QLA_MAX_SEGMENTS, /* nsegments */ 1165227064Sbz PAGE_SIZE, /* maxsegsize */ 1166227064Sbz BUS_DMA_ALLOCNOW, /* flags */ 1167227064Sbz NULL, /* lockfunc */ 1168227064Sbz NULL, /* lockfuncarg */ 1169227064Sbz &ha->tx_tag)) { 1170227064Sbz device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n", 1171227064Sbz __func__); 1172227064Sbz return (ENOMEM); 1173227064Sbz } 1174227064Sbz bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); 1175227064Sbz 1176227064Sbz return 0; 1177227064Sbz} 1178227064Sbz 1179227064Sbz/* 1180227064Sbz * Release mbuf after it sent on the wire 1181227064Sbz */ 1182227064Sbzstatic void 
1183227064Sbzqla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) 1184227064Sbz{ 1185227064Sbz QL_DPRINT2((ha->pci_dev, "%s: enter\n", __func__)); 1186227064Sbz 1187227064Sbz if (txb->m_head) { 1188227064Sbz 1189227064Sbz bus_dmamap_unload(ha->tx_tag, txb->map); 1190227064Sbz bus_dmamap_destroy(ha->tx_tag, txb->map); 1191227064Sbz 1192227064Sbz m_freem(txb->m_head); 1193227064Sbz txb->m_head = NULL; 1194227064Sbz } 1195227064Sbz 1196227064Sbz QL_DPRINT2((ha->pci_dev, "%s: exit\n", __func__)); 1197227064Sbz} 1198227064Sbz 1199227064Sbzstatic void 1200227064Sbzqla_free_xmt_bufs(qla_host_t *ha) 1201227064Sbz{ 1202227064Sbz int i; 1203227064Sbz 1204227064Sbz for (i = 0; i < NUM_TX_DESCRIPTORS; i++) 1205227064Sbz qla_clear_tx_buf(ha, &ha->tx_buf[i]); 1206227064Sbz 1207227064Sbz if (ha->tx_tag != NULL) { 1208227064Sbz bus_dma_tag_destroy(ha->tx_tag); 1209227064Sbz ha->tx_tag = NULL; 1210227064Sbz } 1211227064Sbz bzero((void *)ha->tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); 1212227064Sbz 1213227064Sbz return; 1214227064Sbz} 1215227064Sbz 1216227064Sbz 1217227064Sbzstatic int 1218227064Sbzqla_alloc_rcv_bufs(qla_host_t *ha) 1219227064Sbz{ 1220227064Sbz int i, j, ret = 0; 1221227064Sbz qla_rx_buf_t *rxb; 1222227064Sbz 1223227064Sbz if (bus_dma_tag_create(NULL, /* parent */ 1224227064Sbz 1, 0, /* alignment, bounds */ 1225227064Sbz BUS_SPACE_MAXADDR, /* lowaddr */ 1226227064Sbz BUS_SPACE_MAXADDR, /* highaddr */ 1227227064Sbz NULL, NULL, /* filter, filterarg */ 1228227064Sbz MJUM9BYTES, /* maxsize */ 1229227064Sbz 1, /* nsegments */ 1230227064Sbz MJUM9BYTES, /* maxsegsize */ 1231227064Sbz BUS_DMA_ALLOCNOW, /* flags */ 1232227064Sbz NULL, /* lockfunc */ 1233227064Sbz NULL, /* lockfuncarg */ 1234227064Sbz &ha->rx_tag)) { 1235227064Sbz 1236227064Sbz device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n", 1237227064Sbz __func__); 1238227064Sbz 1239227064Sbz return (ENOMEM); 1240227064Sbz } 1241227064Sbz 1242227064Sbz bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * 
NUM_RX_DESCRIPTORS)); 1243227064Sbz bzero((void *)ha->rx_jbuf, 1244227064Sbz (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS)); 1245227064Sbz 1246227064Sbz for (i = 0; i < MAX_SDS_RINGS; i++) { 1247227064Sbz ha->hw.sds[i].sdsr_next = 0; 1248227064Sbz ha->hw.sds[i].rxb_free = NULL; 1249227064Sbz ha->hw.sds[i].rx_free = 0; 1250227064Sbz ha->hw.sds[i].rxjb_free = NULL; 1251227064Sbz ha->hw.sds[i].rxj_free = 0; 1252227064Sbz } 1253227064Sbz 1254227064Sbz for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1255227064Sbz 1256227064Sbz rxb = &ha->rx_buf[i]; 1257227064Sbz 1258227064Sbz ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map); 1259227064Sbz 1260227064Sbz if (ret) { 1261227064Sbz device_printf(ha->pci_dev, 1262227064Sbz "%s: dmamap[%d] failed\n", __func__, i); 1263227064Sbz 1264227064Sbz for (j = 0; j < i; j++) { 1265227064Sbz bus_dmamap_destroy(ha->rx_tag, 1266227064Sbz ha->rx_buf[j].map); 1267227064Sbz } 1268227064Sbz goto qla_alloc_rcv_bufs_failed; 1269227064Sbz } 1270227064Sbz } 1271227064Sbz 1272227064Sbz qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_NORMAL); 1273227064Sbz 1274227064Sbz for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1275227064Sbz rxb = &ha->rx_buf[i]; 1276227064Sbz rxb->handle = i; 1277227064Sbz if (!(ret = qla_get_mbuf(ha, rxb, NULL, 0))) { 1278227064Sbz /* 1279227064Sbz * set the physical address in the corresponding 1280227064Sbz * descriptor entry in the receive ring/queue for the 1281227064Sbz * hba 1282227064Sbz */ 1283227064Sbz qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_NORMAL, i, 1284227064Sbz rxb->handle, rxb->paddr, 1285227064Sbz (rxb->m_head)->m_pkthdr.len); 1286227064Sbz } else { 1287227064Sbz device_printf(ha->pci_dev, 1288227064Sbz "%s: qla_get_mbuf [standard(%d)] failed\n", 1289227064Sbz __func__, i); 1290227064Sbz bus_dmamap_destroy(ha->rx_tag, rxb->map); 1291227064Sbz goto qla_alloc_rcv_bufs_failed; 1292227064Sbz } 1293227064Sbz } 1294227064Sbz 1295227064Sbz 1296227064Sbz for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) { 
1297227064Sbz 1298227064Sbz rxb = &ha->rx_jbuf[i]; 1299227064Sbz 1300227064Sbz ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map); 1301227064Sbz 1302227064Sbz if (ret) { 1303227064Sbz device_printf(ha->pci_dev, 1304227064Sbz "%s: dmamap[%d] failed\n", __func__, i); 1305227064Sbz 1306227064Sbz for (j = 0; j < i; j++) { 1307227064Sbz bus_dmamap_destroy(ha->rx_tag, 1308227064Sbz ha->rx_jbuf[j].map); 1309227064Sbz } 1310227064Sbz goto qla_alloc_rcv_bufs_failed; 1311227064Sbz } 1312227064Sbz } 1313227064Sbz 1314227064Sbz qla_init_hw_rcv_descriptors(ha, RDS_RING_INDEX_JUMBO); 1315227064Sbz 1316227064Sbz for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) { 1317227064Sbz rxb = &ha->rx_jbuf[i]; 1318227064Sbz rxb->handle = i; 1319227064Sbz if (!(ret = qla_get_mbuf(ha, rxb, NULL, 1))) { 1320227064Sbz /* 1321227064Sbz * set the physical address in the corresponding 1322227064Sbz * descriptor entry in the receive ring/queue for the 1323227064Sbz * hba 1324227064Sbz */ 1325227064Sbz qla_set_hw_rcv_desc(ha, RDS_RING_INDEX_JUMBO, i, 1326227064Sbz rxb->handle, rxb->paddr, 1327227064Sbz (rxb->m_head)->m_pkthdr.len); 1328227064Sbz } else { 1329227064Sbz device_printf(ha->pci_dev, 1330227064Sbz "%s: qla_get_mbuf [jumbo(%d)] failed\n", 1331227064Sbz __func__, i); 1332227064Sbz bus_dmamap_destroy(ha->rx_tag, rxb->map); 1333227064Sbz goto qla_alloc_rcv_bufs_failed; 1334227064Sbz } 1335227064Sbz } 1336227064Sbz 1337227064Sbz return (0); 1338227064Sbz 1339227064Sbzqla_alloc_rcv_bufs_failed: 1340227064Sbz qla_free_rcv_bufs(ha); 1341227064Sbz return (ret); 1342227064Sbz} 1343227064Sbz 1344227064Sbzstatic void 1345227064Sbzqla_free_rcv_bufs(qla_host_t *ha) 1346227064Sbz{ 1347227064Sbz int i; 1348227064Sbz qla_rx_buf_t *rxb; 1349227064Sbz 1350227064Sbz for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { 1351227064Sbz rxb = &ha->rx_buf[i]; 1352227064Sbz if (rxb->m_head != NULL) { 1353227064Sbz bus_dmamap_unload(ha->rx_tag, rxb->map); 1354227064Sbz bus_dmamap_destroy(ha->rx_tag, rxb->map); 
1355227064Sbz m_freem(rxb->m_head); 1356227064Sbz rxb->m_head = NULL; 1357227064Sbz } 1358227064Sbz } 1359227064Sbz 1360227064Sbz for (i = 0; i < NUM_RX_JUMBO_DESCRIPTORS; i++) { 1361227064Sbz rxb = &ha->rx_jbuf[i]; 1362227064Sbz if (rxb->m_head != NULL) { 1363227064Sbz bus_dmamap_unload(ha->rx_tag, rxb->map); 1364227064Sbz bus_dmamap_destroy(ha->rx_tag, rxb->map); 1365227064Sbz m_freem(rxb->m_head); 1366227064Sbz rxb->m_head = NULL; 1367227064Sbz } 1368227064Sbz } 1369227064Sbz 1370227064Sbz if (ha->rx_tag != NULL) { 1371227064Sbz bus_dma_tag_destroy(ha->rx_tag); 1372227064Sbz ha->rx_tag = NULL; 1373227064Sbz } 1374227064Sbz 1375227064Sbz bzero((void *)ha->rx_buf, (sizeof(qla_rx_buf_t) * NUM_RX_DESCRIPTORS)); 1376227064Sbz bzero((void *)ha->rx_jbuf, 1377227064Sbz (sizeof(qla_rx_buf_t) * NUM_RX_JUMBO_DESCRIPTORS)); 1378227064Sbz 1379227064Sbz for (i = 0; i < MAX_SDS_RINGS; i++) { 1380227064Sbz ha->hw.sds[i].sdsr_next = 0; 1381227064Sbz ha->hw.sds[i].rxb_free = NULL; 1382227064Sbz ha->hw.sds[i].rx_free = 0; 1383227064Sbz ha->hw.sds[i].rxjb_free = NULL; 1384227064Sbz ha->hw.sds[i].rxj_free = 0; 1385227064Sbz } 1386227064Sbz 1387227064Sbz return; 1388227064Sbz} 1389227064Sbz 1390227064Sbzint 1391227064Sbzqla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp, 1392227064Sbz uint32_t jumbo) 1393227064Sbz{ 1394227064Sbz register struct mbuf *mp = nmp; 1395227064Sbz struct ifnet *ifp; 1396227064Sbz int ret = 0; 1397227064Sbz uint32_t offset; 1398227064Sbz 1399227064Sbz QL_DPRINT2((ha->pci_dev, "%s: jumbo(0x%x) enter\n", __func__, jumbo)); 1400227064Sbz 1401227064Sbz ifp = ha->ifp; 1402227064Sbz 1403227064Sbz if (mp == NULL) { 1404227064Sbz 1405227064Sbz if (!jumbo) { 1406248078Smarius mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1407227064Sbz 1408227064Sbz if (mp == NULL) { 1409227064Sbz ha->err_m_getcl++; 1410227064Sbz ret = ENOBUFS; 1411227064Sbz device_printf(ha->pci_dev, 1412227064Sbz "%s: m_getcl failed\n", __func__); 1413227064Sbz goto exit_qla_get_mbuf; 
1414227064Sbz } 1415227064Sbz mp->m_len = mp->m_pkthdr.len = MCLBYTES; 1416227064Sbz } else { 1417248078Smarius mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, 1418227064Sbz MJUM9BYTES); 1419227064Sbz if (mp == NULL) { 1420227064Sbz ha->err_m_getjcl++; 1421227064Sbz ret = ENOBUFS; 1422227064Sbz device_printf(ha->pci_dev, 1423227064Sbz "%s: m_getjcl failed\n", __func__); 1424227064Sbz goto exit_qla_get_mbuf; 1425227064Sbz } 1426227064Sbz mp->m_len = mp->m_pkthdr.len = MJUM9BYTES; 1427227064Sbz } 1428227064Sbz } else { 1429227064Sbz if (!jumbo) 1430227064Sbz mp->m_len = mp->m_pkthdr.len = MCLBYTES; 1431227064Sbz else 1432227064Sbz mp->m_len = mp->m_pkthdr.len = MJUM9BYTES; 1433227064Sbz 1434227064Sbz mp->m_data = mp->m_ext.ext_buf; 1435227064Sbz mp->m_next = NULL; 1436227064Sbz } 1437227064Sbz 1438227064Sbz 1439227064Sbz offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); 1440227064Sbz if (offset) { 1441227064Sbz offset = 8 - offset; 1442227064Sbz m_adj(mp, offset); 1443227064Sbz } 1444227064Sbz 1445227064Sbz /* 1446227064Sbz * Using memory from the mbuf cluster pool, invoke the bus_dma 1447227064Sbz * machinery to arrange the memory mapping. 
1448227064Sbz */ 1449227064Sbz ret = bus_dmamap_load(ha->rx_tag, rxb->map, 1450227064Sbz mtod(mp, void *), mp->m_len, 1451227064Sbz qla_dmamap_callback, &rxb->paddr, 1452227064Sbz BUS_DMA_NOWAIT); 1453227064Sbz if (ret || !rxb->paddr) { 1454227064Sbz m_free(mp); 1455227064Sbz rxb->m_head = NULL; 1456227064Sbz device_printf(ha->pci_dev, 1457227064Sbz "%s: bus_dmamap_load failed\n", __func__); 1458227064Sbz ret = -1; 1459227064Sbz goto exit_qla_get_mbuf; 1460227064Sbz } 1461227064Sbz rxb->m_head = mp; 1462227064Sbz bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); 1463227064Sbz 1464227064Sbzexit_qla_get_mbuf: 1465227064Sbz QL_DPRINT2((ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); 1466227064Sbz return (ret); 1467227064Sbz} 1468227064Sbz 1469227064Sbzstatic void 1470227064Sbzqla_tx_done(void *context, int pending) 1471227064Sbz{ 1472227064Sbz qla_host_t *ha = context; 1473227064Sbz 1474227064Sbz qla_hw_tx_done(ha); 1475227064Sbz qla_start(ha->ifp); 1476227064Sbz} 1477227064Sbz 1478