1/*- 2 * Copyright(c) 2002-2011 Exar Corp. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification are permitted provided the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Exar Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
30 */ 31/*$FreeBSD$*/ 32 33#include <dev/vxge/vxge.h> 34 35static int vxge_pci_bd_no = -1; 36static u32 vxge_drv_copyright = 0; 37static u32 vxge_dev_ref_count = 0; 38static u32 vxge_dev_req_reboot = 0; 39 40static int vpath_selector[VXGE_HAL_MAX_VIRTUAL_PATHS] = \ 41{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31}; 42 43/* 44 * vxge_probe 45 * Probes for x3100 devices 46 */ 47int 48vxge_probe(device_t ndev) 49{ 50 int err = ENXIO; 51 52 u16 pci_bd_no = 0; 53 u16 pci_vendor_id = 0; 54 u16 pci_device_id = 0; 55 56 char adapter_name[64]; 57 58 pci_vendor_id = pci_get_vendor(ndev); 59 if (pci_vendor_id != VXGE_PCI_VENDOR_ID) 60 goto _exit0; 61 62 pci_device_id = pci_get_device(ndev); 63 64 if (pci_device_id == VXGE_PCI_DEVICE_ID_TITAN_1) { 65 66 pci_bd_no = (pci_get_bus(ndev) | pci_get_slot(ndev)); 67 68 snprintf(adapter_name, sizeof(adapter_name), 69 VXGE_ADAPTER_NAME, pci_get_revid(ndev)); 70 device_set_desc_copy(ndev, adapter_name); 71 72 if (!vxge_drv_copyright) { 73 device_printf(ndev, VXGE_COPYRIGHT); 74 vxge_drv_copyright = 1; 75 } 76 77 if (vxge_dev_req_reboot == 0) { 78 vxge_pci_bd_no = pci_bd_no; 79 err = BUS_PROBE_DEFAULT; 80 } else { 81 if (pci_bd_no != vxge_pci_bd_no) { 82 vxge_pci_bd_no = pci_bd_no; 83 err = BUS_PROBE_DEFAULT; 84 } 85 } 86 } 87 88_exit0: 89 return (err); 90} 91 92/* 93 * vxge_attach 94 * Connects driver to the system if probe was success @ndev handle 95 */ 96int 97vxge_attach(device_t ndev) 98{ 99 int err = 0; 100 vxge_dev_t *vdev; 101 vxge_hal_device_t *hldev = NULL; 102 vxge_hal_device_attr_t device_attr; 103 vxge_free_resources_e error_level = VXGE_FREE_NONE; 104 105 vxge_hal_status_e status = VXGE_HAL_OK; 106 107 /* Get per-ndev buffer */ 108 vdev = (vxge_dev_t *) device_get_softc(ndev); 109 if (!vdev) 110 goto _exit0; 111 112 bzero(vdev, sizeof(vxge_dev_t)); 113 114 vdev->ndev = ndev; 115 strlcpy(vdev->ndev_name, "vxge", sizeof(vdev->ndev_name)); 116 117 err = vxge_driver_config(vdev); 118 if (err != 0) 119 goto _exit0; 
120 121 /* Initialize HAL driver */ 122 status = vxge_driver_init(vdev); 123 if (status != VXGE_HAL_OK) { 124 device_printf(vdev->ndev, "Failed to initialize driver\n"); 125 goto _exit0; 126 } 127 /* Enable PCI bus-master */ 128 pci_enable_busmaster(ndev); 129 130 /* Allocate resources */ 131 err = vxge_alloc_resources(vdev); 132 if (err != 0) { 133 device_printf(vdev->ndev, "resource allocation failed\n"); 134 goto _exit0; 135 } 136 137 err = vxge_device_hw_info_get(vdev); 138 if (err != 0) { 139 error_level = VXGE_FREE_BAR2; 140 goto _exit0; 141 } 142 143 /* Get firmware default values for Device Configuration */ 144 vxge_hal_device_config_default_get(vdev->device_config); 145 146 /* Customize Device Configuration based on User request */ 147 vxge_vpath_config(vdev); 148 149 /* Allocate ISR resources */ 150 err = vxge_alloc_isr_resources(vdev); 151 if (err != 0) { 152 error_level = VXGE_FREE_ISR_RESOURCE; 153 device_printf(vdev->ndev, "isr resource allocation failed\n"); 154 goto _exit0; 155 } 156 157 /* HAL attributes */ 158 device_attr.bar0 = (u8 *) vdev->pdev->bar_info[0]; 159 device_attr.bar1 = (u8 *) vdev->pdev->bar_info[1]; 160 device_attr.bar2 = (u8 *) vdev->pdev->bar_info[2]; 161 device_attr.regh0 = (vxge_bus_res_t *) vdev->pdev->reg_map[0]; 162 device_attr.regh1 = (vxge_bus_res_t *) vdev->pdev->reg_map[1]; 163 device_attr.regh2 = (vxge_bus_res_t *) vdev->pdev->reg_map[2]; 164 device_attr.irqh = (pci_irq_h) vdev->config.isr_info[0].irq_handle; 165 device_attr.cfgh = vdev->pdev; 166 device_attr.pdev = vdev->pdev; 167 168 /* Initialize HAL Device */ 169 status = vxge_hal_device_initialize((vxge_hal_device_h *) &hldev, 170 &device_attr, vdev->device_config); 171 if (status != VXGE_HAL_OK) { 172 error_level = VXGE_FREE_ISR_RESOURCE; 173 device_printf(vdev->ndev, "hal device initialization failed\n"); 174 goto _exit0; 175 } 176 177 vdev->devh = hldev; 178 vxge_hal_device_private_set(hldev, vdev); 179 180 if (vdev->is_privilaged) { 181 err = 
vxge_firmware_verify(vdev); 182 if (err != 0) { 183 vxge_dev_req_reboot = 1; 184 error_level = VXGE_FREE_TERMINATE_DEVICE; 185 goto _exit0; 186 } 187 } 188 189 /* Allocate memory for vpath */ 190 vdev->vpaths = (vxge_vpath_t *) 191 vxge_mem_alloc(vdev->no_of_vpath * sizeof(vxge_vpath_t)); 192 193 if (vdev->vpaths == NULL) { 194 error_level = VXGE_FREE_TERMINATE_DEVICE; 195 device_printf(vdev->ndev, "vpath memory allocation failed\n"); 196 goto _exit0; 197 } 198 199 vdev->no_of_func = 1; 200 if (vdev->is_privilaged) { 201 202 vxge_hal_func_mode_count(vdev->devh, 203 vdev->config.hw_info.function_mode, &vdev->no_of_func); 204 205 vxge_bw_priority_config(vdev); 206 } 207 208 /* Initialize mutexes */ 209 vxge_mutex_init(vdev); 210 211 /* Initialize Media */ 212 vxge_media_init(vdev); 213 214 err = vxge_ifp_setup(ndev); 215 if (err != 0) { 216 error_level = VXGE_FREE_MEDIA; 217 device_printf(vdev->ndev, "setting up interface failed\n"); 218 goto _exit0; 219 } 220 221 err = vxge_isr_setup(vdev); 222 if (err != 0) { 223 error_level = VXGE_FREE_INTERFACE; 224 device_printf(vdev->ndev, 225 "failed to associate interrupt handler with device\n"); 226 goto _exit0; 227 } 228 vxge_device_hw_info_print(vdev); 229 vdev->is_active = TRUE; 230 231_exit0: 232 if (error_level) { 233 vxge_free_resources(ndev, error_level); 234 err = ENXIO; 235 } 236 237 return (err); 238} 239 240/* 241 * vxge_detach 242 * Detaches driver from the Kernel subsystem 243 */ 244int 245vxge_detach(device_t ndev) 246{ 247 vxge_dev_t *vdev; 248 249 vdev = (vxge_dev_t *) device_get_softc(ndev); 250 if (vdev->is_active) { 251 vdev->is_active = FALSE; 252 vxge_stop(vdev); 253 vxge_free_resources(ndev, VXGE_FREE_ALL); 254 } 255 256 return (0); 257} 258 259/* 260 * vxge_shutdown 261 * To shutdown device before system shutdown 262 */ 263int 264vxge_shutdown(device_t ndev) 265{ 266 vxge_dev_t *vdev = (vxge_dev_t *) device_get_softc(ndev); 267 vxge_stop(vdev); 268 return (0); 269} 270 271/* 272 * vxge_init 273 * 
/*
 * vxge_init
 * Initialize the interface (ioctl/ifnet entry point).
 * Thin locked wrapper around vxge_init_locked.
 */
void
vxge_init(void *vdev_ptr)
{
	vxge_dev_t *vdev = (vxge_dev_t *) vdev_ptr;

	VXGE_DRV_LOCK(vdev);
	vxge_init_locked(vdev);
	VXGE_DRV_UNLOCK(vdev);
}

/*
 * vxge_init_locked
 * Initialize the interface with the driver lock held.
 * Bring-up order: open vpaths -> RTH -> per-vpath MTU -> enable HAL
 * device -> MSI-X -> checksum offload flags -> mcast/bcast -> enable
 * interrupts -> enable vpaths -> mark IFF_DRV_RUNNING.
 * On failure, unwinds in reverse via _exit2/_exit1.
 */
void
vxge_init_locked(vxge_dev_t *vdev)
{
	int i, err = EINVAL;
	vxge_hal_device_t *hldev = vdev->devh;
	vxge_hal_status_e status = VXGE_HAL_OK;
	vxge_hal_vpath_h vpath_handle;

	ifnet_t ifp = vdev->ifp;

	/* If device is in running state, initializing is not required */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		goto _exit0;

	VXGE_DRV_LOCK_ASSERT(vdev);

	/* Opening vpaths */
	err = vxge_vpath_open(vdev);
	if (err != 0)
		goto _exit1;

	if (vdev->config.rth_enable) {
		/* Program receive-side hashing before traffic starts. */
		status = vxge_rth_config(vdev);
		if (status != VXGE_HAL_OK)
			goto _exit1;
	}

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath_handle = vxge_vpath_handle_get(vdev, i);
		if (!vpath_handle)
			continue;

		/* check initial mtu before enabling the device */
		status = vxge_hal_device_mtu_check(vpath_handle, ifp->if_mtu);
		if (status != VXGE_HAL_OK) {
			/*
			 * NOTE(review): if_mtu is unsigned (u_long) but is
			 * printed with %ld -- confirm, harmless for sane MTUs.
			 */
			device_printf(vdev->ndev,
			    "invalid mtu size %ld specified\n", ifp->if_mtu);
			goto _exit1;
		}

		status = vxge_hal_vpath_mtu_set(vpath_handle, ifp->if_mtu);
		if (status != VXGE_HAL_OK) {
			device_printf(vdev->ndev,
			    "setting mtu in device failed\n");
			goto _exit1;
		}
	}

	/* Enable HAL device */
	status = vxge_hal_device_enable(hldev);
	if (status != VXGE_HAL_OK) {
		device_printf(vdev->ndev, "failed to enable device\n");
		goto _exit1;
	}

	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX)
		vxge_msix_enable(vdev);

	/* Checksum capability: mirror administratively enabled caps. */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);

	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath_handle = vxge_vpath_handle_get(vdev, i);
		if (!vpath_handle)
			continue;

		/* Enabling mcast for all vpath */
		vxge_hal_vpath_mcast_enable(vpath_handle);

		/* Enabling bcast for all vpath */
		status = vxge_hal_vpath_bcast_enable(vpath_handle);
		if (status != VXGE_HAL_OK)
			device_printf(vdev->ndev,
			    "can't enable bcast on vpath (%d)\n", i);
	}

	/* Enable interrupts */
	vxge_hal_device_intr_enable(vdev->devh);

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath_handle = vxge_vpath_handle_get(vdev, i);
		if (!vpath_handle)
			continue;

		/* Fresh per-vpath statistics for this up/down cycle. */
		bzero(&(vdev->vpaths[i].driver_stats),
		    sizeof(vxge_drv_stats_t));
		status = vxge_hal_vpath_enable(vpath_handle);
		if (status != VXGE_HAL_OK)
			goto _exit2;
	}

	/* Let the hardware settle before declaring the device up. */
	vxge_os_mdelay(1000);

	/* Device is initialized */
	vdev->is_initialized = TRUE;

	/* Now inform the stack we're ready */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	ifp->if_drv_flags |= IFF_DRV_RUNNING;

	goto _exit0;

_exit2:
	vxge_hal_device_intr_disable(vdev->devh);
	vxge_hal_device_disable(hldev);

_exit1:
	vxge_vpath_close(vdev);

_exit0:
	return;
}

/*
 * vxge_driver_init
 * Initializes HAL driver.
 * The HAL core is initialized once for the first device only
 * (guarded by vxge_dev_ref_count); callbacks registered here are
 * shared by every vxge instance.
 */
vxge_hal_status_e
vxge_driver_init(vxge_dev_t *vdev)
{
	vxge_hal_uld_cbs_t uld_callbacks;
	vxge_hal_driver_config_t driver_config;
	vxge_hal_status_e status = VXGE_HAL_OK;

	/* Initialize HAL driver */
	if (!vxge_dev_ref_count) {
		bzero(&uld_callbacks, sizeof(vxge_hal_uld_cbs_t));
		bzero(&driver_config, sizeof(vxge_hal_driver_config_t));

		/* Upper-layer-driver callbacks the HAL invokes on events. */
		uld_callbacks.link_up = vxge_link_up;
		uld_callbacks.link_down = vxge_link_down;
		uld_callbacks.crit_err = vxge_crit_error;
		uld_callbacks.sched_timer = NULL;
		uld_callbacks.xpak_alarm_log = NULL;

		status = vxge_hal_driver_initialize(&driver_config,
		    &uld_callbacks);
		if (status != VXGE_HAL_OK) {
			device_printf(vdev->ndev,
			    "failed to initialize driver\n");
			goto _exit0;
		}
	}
	vxge_hal_driver_debug_set(VXGE_TRACE);
	vxge_dev_ref_count++;

_exit0:
	return (status);
}
vxge_hal_driver_initialize(&driver_config, 430 &uld_callbacks); 431 if (status != VXGE_HAL_OK) { 432 device_printf(vdev->ndev, 433 "failed to initialize driver\n"); 434 goto _exit0; 435 } 436 } 437 vxge_hal_driver_debug_set(VXGE_TRACE); 438 vxge_dev_ref_count++; 439 440_exit0: 441 return (status); 442} 443 444/* 445 * vxge_driver_config 446 */ 447int 448vxge_driver_config(vxge_dev_t *vdev) 449{ 450 int i, err = 0; 451 char temp_buffer[30]; 452 453 vxge_bw_info_t bw_info; 454 455 VXGE_GET_PARAM("hint.vxge.0.no_of_vpath", vdev->config, 456 no_of_vpath, VXGE_DEFAULT_USER_HARDCODED); 457 458 if (vdev->config.no_of_vpath == VXGE_DEFAULT_USER_HARDCODED) 459 vdev->config.no_of_vpath = mp_ncpus; 460 461 if (vdev->config.no_of_vpath <= 0) { 462 err = EINVAL; 463 device_printf(vdev->ndev, 464 "Failed to load driver, \ 465 invalid config : \'no_of_vpath\'\n"); 466 goto _exit0; 467 } 468 469 VXGE_GET_PARAM("hint.vxge.0.intr_coalesce", vdev->config, 470 intr_coalesce, VXGE_DEFAULT_CONFIG_DISABLE); 471 472 VXGE_GET_PARAM("hint.vxge.0.rth_enable", vdev->config, 473 rth_enable, VXGE_DEFAULT_CONFIG_ENABLE); 474 475 VXGE_GET_PARAM("hint.vxge.0.rth_bkt_sz", vdev->config, 476 rth_bkt_sz, VXGE_DEFAULT_RTH_BUCKET_SIZE); 477 478 VXGE_GET_PARAM("hint.vxge.0.lro_enable", vdev->config, 479 lro_enable, VXGE_DEFAULT_CONFIG_ENABLE); 480 481 VXGE_GET_PARAM("hint.vxge.0.tso_enable", vdev->config, 482 tso_enable, VXGE_DEFAULT_CONFIG_ENABLE); 483 484 VXGE_GET_PARAM("hint.vxge.0.tx_steering", vdev->config, 485 tx_steering, VXGE_DEFAULT_CONFIG_DISABLE); 486 487 VXGE_GET_PARAM("hint.vxge.0.msix_enable", vdev->config, 488 intr_mode, VXGE_HAL_INTR_MODE_MSIX); 489 490 VXGE_GET_PARAM("hint.vxge.0.ifqmaxlen", vdev->config, 491 ifq_maxlen, VXGE_DEFAULT_CONFIG_IFQ_MAXLEN); 492 493 VXGE_GET_PARAM("hint.vxge.0.port_mode", vdev->config, 494 port_mode, VXGE_DEFAULT_CONFIG_VALUE); 495 496 if (vdev->config.port_mode == VXGE_DEFAULT_USER_HARDCODED) 497 vdev->config.port_mode = VXGE_DEFAULT_CONFIG_VALUE; 498 499 
VXGE_GET_PARAM("hint.vxge.0.l2_switch", vdev->config, 500 l2_switch, VXGE_DEFAULT_CONFIG_VALUE); 501 502 if (vdev->config.l2_switch == VXGE_DEFAULT_USER_HARDCODED) 503 vdev->config.l2_switch = VXGE_DEFAULT_CONFIG_VALUE; 504 505 VXGE_GET_PARAM("hint.vxge.0.fw_upgrade", vdev->config, 506 fw_option, VXGE_FW_UPGRADE_ALL); 507 508 VXGE_GET_PARAM("hint.vxge.0.low_latency", vdev->config, 509 low_latency, VXGE_DEFAULT_CONFIG_DISABLE); 510 511 VXGE_GET_PARAM("hint.vxge.0.func_mode", vdev->config, 512 function_mode, VXGE_DEFAULT_CONFIG_VALUE); 513 514 if (vdev->config.function_mode == VXGE_DEFAULT_USER_HARDCODED) 515 vdev->config.function_mode = VXGE_DEFAULT_CONFIG_VALUE; 516 517 if (!(is_multi_func(vdev->config.function_mode) || 518 is_single_func(vdev->config.function_mode))) 519 vdev->config.function_mode = VXGE_DEFAULT_CONFIG_VALUE; 520 521 for (i = 0; i < VXGE_HAL_MAX_FUNCTIONS; i++) { 522 523 bw_info.func_id = i; 524 525 sprintf(temp_buffer, "hint.vxge.0.bandwidth_%d", i); 526 VXGE_GET_PARAM(temp_buffer, bw_info, 527 bandwidth, VXGE_DEFAULT_USER_HARDCODED); 528 529 if (bw_info.bandwidth == VXGE_DEFAULT_USER_HARDCODED) 530 bw_info.bandwidth = VXGE_HAL_VPATH_BW_LIMIT_DEFAULT; 531 532 sprintf(temp_buffer, "hint.vxge.0.priority_%d", i); 533 VXGE_GET_PARAM(temp_buffer, bw_info, 534 priority, VXGE_DEFAULT_USER_HARDCODED); 535 536 if (bw_info.priority == VXGE_DEFAULT_USER_HARDCODED) 537 bw_info.priority = VXGE_HAL_VPATH_PRIORITY_DEFAULT; 538 539 vxge_os_memcpy(&vdev->config.bw_info[i], &bw_info, 540 sizeof(vxge_bw_info_t)); 541 } 542 543_exit0: 544 return (err); 545} 546 547/* 548 * vxge_stop 549 */ 550void 551vxge_stop(vxge_dev_t *vdev) 552{ 553 VXGE_DRV_LOCK(vdev); 554 vxge_stop_locked(vdev); 555 VXGE_DRV_UNLOCK(vdev); 556} 557 558/* 559 * vxge_stop_locked 560 * Common code for both stop and part of reset. 
561 * disables device, interrupts and closes vpaths handle 562 */ 563void 564vxge_stop_locked(vxge_dev_t *vdev) 565{ 566 u64 adapter_status = 0; 567 vxge_hal_status_e status; 568 vxge_hal_device_t *hldev = vdev->devh; 569 ifnet_t ifp = vdev->ifp; 570 571 VXGE_DRV_LOCK_ASSERT(vdev); 572 573 /* If device is not in "Running" state, return */ 574 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 575 return; 576 577 /* Set appropriate flags */ 578 vdev->is_initialized = FALSE; 579 hldev->link_state = VXGE_HAL_LINK_NONE; 580 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 581 if_link_state_change(ifp, LINK_STATE_DOWN); 582 583 /* Disable interrupts */ 584 vxge_hal_device_intr_disable(hldev); 585 586 /* Disable HAL device */ 587 status = vxge_hal_device_disable(hldev); 588 if (status != VXGE_HAL_OK) { 589 vxge_hal_device_status(hldev, &adapter_status); 590 device_printf(vdev->ndev, 591 "adapter status: 0x%llx\n", adapter_status); 592 } 593 594 /* reset vpaths */ 595 vxge_vpath_reset(vdev); 596 597 vxge_os_mdelay(1000); 598 599 /* Close Vpaths */ 600 vxge_vpath_close(vdev); 601} 602 603void 604vxge_send(ifnet_t ifp) 605{ 606 vxge_vpath_t *vpath; 607 vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc; 608 609 vpath = &(vdev->vpaths[0]); 610 611 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 612 if (VXGE_TX_TRYLOCK(vpath)) { 613 vxge_send_locked(ifp, vpath); 614 VXGE_TX_UNLOCK(vpath); 615 } 616 } 617} 618 619static inline void 620vxge_send_locked(ifnet_t ifp, vxge_vpath_t *vpath) 621{ 622 mbuf_t m_head = NULL; 623 vxge_dev_t *vdev = vpath->vdev; 624 625 VXGE_TX_LOCK_ASSERT(vpath); 626 627 if ((!vdev->is_initialized) || 628 ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 629 IFF_DRV_RUNNING)) 630 return; 631 632 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 633 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 634 if (m_head == NULL) 635 break; 636 637 if (vxge_xmit(ifp, vpath, &m_head)) { 638 if (m_head == NULL) 639 break; 640 641 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 642 
IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 643 VXGE_DRV_STATS(vpath, tx_again); 644 break; 645 } 646 /* Send a copy of the frame to the BPF listener */ 647 ETHER_BPF_MTAP(ifp, m_head); 648 } 649} 650 651#if __FreeBSD_version >= 800000 652 653int 654vxge_mq_send(ifnet_t ifp, mbuf_t m_head) 655{ 656 int i = 0, err = 0; 657 658 vxge_vpath_t *vpath; 659 vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc; 660 661 if (vdev->config.tx_steering) { 662 i = vxge_vpath_get(vdev, m_head); 663 } else if ((m_head->m_flags & M_FLOWID) != 0) { 664 i = m_head->m_pkthdr.flowid % vdev->no_of_vpath; 665 } 666 667 vpath = &(vdev->vpaths[i]); 668 if (VXGE_TX_TRYLOCK(vpath)) { 669 err = vxge_mq_send_locked(ifp, vpath, m_head); 670 VXGE_TX_UNLOCK(vpath); 671 } else 672 err = drbr_enqueue(ifp, vpath->br, m_head); 673 674 return (err); 675} 676 677static inline int 678vxge_mq_send_locked(ifnet_t ifp, vxge_vpath_t *vpath, mbuf_t m_head) 679{ 680 int err = 0; 681 mbuf_t next = NULL; 682 vxge_dev_t *vdev = vpath->vdev; 683 684 VXGE_TX_LOCK_ASSERT(vpath); 685 686 if ((!vdev->is_initialized) || 687 ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 688 IFF_DRV_RUNNING)) { 689 err = drbr_enqueue(ifp, vpath->br, m_head); 690 goto _exit0; 691 } 692 if (m_head == NULL) { 693 next = drbr_dequeue(ifp, vpath->br); 694 } else if (drbr_needs_enqueue(ifp, vpath->br)) { 695 if ((err = drbr_enqueue(ifp, vpath->br, m_head)) != 0) 696 goto _exit0; 697 next = drbr_dequeue(ifp, vpath->br); 698 } else 699 next = m_head; 700 701 /* Process the queue */ 702 while (next != NULL) { 703 if ((err = vxge_xmit(ifp, vpath, &next)) != 0) { 704 if (next == NULL) 705 break; 706 707 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 708 err = drbr_enqueue(ifp, vpath->br, next); 709 VXGE_DRV_STATS(vpath, tx_again); 710 break; 711 } 712 ifp->if_obytes += next->m_pkthdr.len; 713 if (next->m_flags & M_MCAST) 714 ifp->if_omcasts++; 715 716 /* Send a copy of the frame to the BPF listener */ 717 ETHER_BPF_MTAP(ifp, next); 718 if 
((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 719 break; 720 721 next = drbr_dequeue(ifp, vpath->br); 722 } 723 724_exit0: 725 return (err); 726} 727 728void 729vxge_mq_qflush(ifnet_t ifp) 730{ 731 int i; 732 mbuf_t m_head; 733 vxge_vpath_t *vpath; 734 735 vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc; 736 737 for (i = 0; i < vdev->no_of_vpath; i++) { 738 vpath = &(vdev->vpaths[i]); 739 if (!vpath->handle) 740 continue; 741 742 VXGE_TX_LOCK(vpath); 743 while ((m_head = buf_ring_dequeue_sc(vpath->br)) != NULL) 744 vxge_free_packet(m_head); 745 746 VXGE_TX_UNLOCK(vpath); 747 } 748 if_qflush(ifp); 749} 750#endif 751 752static inline int 753vxge_xmit(ifnet_t ifp, vxge_vpath_t *vpath, mbuf_t *m_headp) 754{ 755 int err, num_segs = 0; 756 u32 txdl_avail, dma_index, tagged = 0; 757 758 dma_addr_t dma_addr; 759 bus_size_t dma_sizes; 760 761 void *dtr_priv; 762 vxge_txdl_priv_t *txdl_priv; 763 vxge_hal_txdl_h txdlh; 764 vxge_hal_status_e status; 765 vxge_dev_t *vdev = vpath->vdev; 766 767 VXGE_DRV_STATS(vpath, tx_xmit); 768 769 txdl_avail = vxge_hal_fifo_free_txdl_count_get(vpath->handle); 770 if (txdl_avail < VXGE_TX_LOW_THRESHOLD) { 771 772 VXGE_DRV_STATS(vpath, tx_low_dtr_cnt); 773 err = ENOBUFS; 774 goto _exit0; 775 } 776 777 /* Reserve descriptors */ 778 status = vxge_hal_fifo_txdl_reserve(vpath->handle, &txdlh, &dtr_priv); 779 if (status != VXGE_HAL_OK) { 780 VXGE_DRV_STATS(vpath, tx_reserve_failed); 781 err = ENOBUFS; 782 goto _exit0; 783 } 784 785 /* Update Tx private structure for this descriptor */ 786 txdl_priv = (vxge_txdl_priv_t *) dtr_priv; 787 788 /* 789 * Map the packet for DMA. 790 * Returns number of segments through num_segs. 
/*
 * vxge_tx_replenish
 * Allocate buffers and set them into descriptors for later use.
 * HAL callback invoked once per TX descriptor at fifo open: creates
 * the DMA map that vxge_xmit will load for that descriptor.
 */
/* ARGSUSED */
vxge_hal_status_e
vxge_tx_replenish(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh,
    void *dtr_priv, u32 dtr_index, void *userdata, vxge_hal_reopen_e reopen)
{
	int err = 0;

	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
	vxge_txdl_priv_t *txdl_priv = (vxge_txdl_priv_t *) dtr_priv;

	err = bus_dmamap_create(vpath->dma_tag_tx, BUS_DMA_NOWAIT,
	    &txdl_priv->dma_map);

	return ((err == 0) ? VXGE_HAL_OK : VXGE_HAL_FAIL);
}

/*
 * vxge_tx_compl
 * If the interrupt is due to Tx completion, free the sent buffer.
 * Processes the descriptor passed in first, then iterates every
 * further completed descriptor; finally clears OACTIVE so the stack
 * resumes transmitting.
 */
vxge_hal_status_e
vxge_tx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh,
    void *dtr_priv, vxge_hal_fifo_tcode_e t_code, void *userdata)
{
	vxge_hal_status_e status = VXGE_HAL_OK;

	vxge_txdl_priv_t *txdl_priv;
	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
	vxge_dev_t *vdev = vpath->vdev;

	ifnet_t ifp = vdev->ifp;

	VXGE_TX_LOCK(vpath);

	/*
	 * For each completed descriptor
	 * Get private structure, free buffer, do unmapping, and free descriptor
	 */

	do {
		VXGE_DRV_STATS(vpath, tx_compl);
		if (t_code != VXGE_HAL_FIFO_T_CODE_OK) {
			device_printf(vdev->ndev, "tx transfer code %d\n",
			    t_code);

			ifp->if_oerrors++;
			VXGE_DRV_STATS(vpath, tx_tcode);
			vxge_hal_fifo_handle_tcode(vpath_handle, txdlh, t_code);
		}
		/*
		 * NOTE(review): if_opackets is bumped even on tcode errors
		 * (error path falls through) -- confirm intended.
		 */
		ifp->if_opackets++;
		txdl_priv = (vxge_txdl_priv_t *) dtr_priv;

		bus_dmamap_unload(vpath->dma_tag_tx, txdl_priv->dma_map);

		vxge_free_packet(txdl_priv->mbuf_pkt);
		vxge_hal_fifo_txdl_free(vpath->handle, txdlh);

	} while (vxge_hal_fifo_txdl_next_completed(vpath_handle, &txdlh,
	    &dtr_priv, &t_code) == VXGE_HAL_OK);

	/* Descriptors were reclaimed: allow the stack to transmit again. */
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	VXGE_TX_UNLOCK(vpath);

	return (status);
}
VXGE_TX_UNLOCK(vpath); 925 926 return (status); 927} 928 929/* ARGSUSED */ 930void 931vxge_tx_term(vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh, 932 void *dtr_priv, vxge_hal_txdl_state_e state, 933 void *userdata, vxge_hal_reopen_e reopen) 934{ 935 vxge_vpath_t *vpath = (vxge_vpath_t *) userdata; 936 vxge_txdl_priv_t *txdl_priv = (vxge_txdl_priv_t *) dtr_priv; 937 938 if (state != VXGE_HAL_TXDL_STATE_POSTED) 939 return; 940 941 if (txdl_priv != NULL) { 942 bus_dmamap_sync(vpath->dma_tag_tx, txdl_priv->dma_map, 943 BUS_DMASYNC_POSTWRITE); 944 945 bus_dmamap_unload(vpath->dma_tag_tx, txdl_priv->dma_map); 946 bus_dmamap_destroy(vpath->dma_tag_tx, txdl_priv->dma_map); 947 vxge_free_packet(txdl_priv->mbuf_pkt); 948 } 949 950 /* Free the descriptor */ 951 vxge_hal_fifo_txdl_free(vpath->handle, txdlh); 952} 953 954/* 955 * vxge_rx_replenish 956 * Allocate buffers and set them into descriptors for later use 957 */ 958/* ARGSUSED */ 959vxge_hal_status_e 960vxge_rx_replenish(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh, 961 void *dtr_priv, u32 dtr_index, void *userdata, vxge_hal_reopen_e reopen) 962{ 963 int err = 0; 964 vxge_hal_status_e status = VXGE_HAL_OK; 965 966 vxge_vpath_t *vpath = (vxge_vpath_t *) userdata; 967 vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv; 968 969 /* Create DMA map for these descriptors */ 970 err = bus_dmamap_create(vpath->dma_tag_rx, BUS_DMA_NOWAIT, 971 &rxd_priv->dma_map); 972 if (err == 0) { 973 if (vxge_rx_rxd_1b_set(vpath, rxdh, dtr_priv)) { 974 bus_dmamap_destroy(vpath->dma_tag_rx, 975 rxd_priv->dma_map); 976 status = VXGE_HAL_FAIL; 977 } 978 } 979 980 return (status); 981} 982 983/* 984 * vxge_rx_compl 985 */ 986vxge_hal_status_e 987vxge_rx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh, 988 void *dtr_priv, u8 t_code, void *userdata) 989{ 990 mbuf_t mbuf_up; 991 992 vxge_rxd_priv_t *rxd_priv; 993 vxge_hal_ring_rxd_info_t ext_info; 994 vxge_hal_status_e status = VXGE_HAL_OK; 995 996 vxge_vpath_t *vpath = 
(vxge_vpath_t *) userdata; 997 vxge_dev_t *vdev = vpath->vdev; 998 999 struct lro_entry *queued = NULL; 1000 struct lro_ctrl *lro = &vpath->lro; 1001 1002 /* get the interface pointer */ 1003 ifnet_t ifp = vdev->ifp; 1004 1005 do { 1006 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1007 vxge_hal_ring_rxd_post(vpath_handle, rxdh); 1008 status = VXGE_HAL_FAIL; 1009 break; 1010 } 1011 1012 VXGE_DRV_STATS(vpath, rx_compl); 1013 rxd_priv = (vxge_rxd_priv_t *) dtr_priv; 1014 1015 /* Gets details of mbuf i.e., packet length */ 1016 vxge_rx_rxd_1b_get(vpath, rxdh, dtr_priv); 1017 1018 /* 1019 * Prepare one buffer to send it to upper layer Since upper 1020 * layer frees the buffer do not use rxd_priv->mbuf_pkt. 1021 * Meanwhile prepare a new buffer, do mapping, use with the 1022 * current descriptor and post descriptor back to ring vpath 1023 */ 1024 mbuf_up = rxd_priv->mbuf_pkt; 1025 if (t_code != VXGE_HAL_RING_RXD_T_CODE_OK) { 1026 1027 ifp->if_ierrors++; 1028 VXGE_DRV_STATS(vpath, rx_tcode); 1029 status = vxge_hal_ring_handle_tcode(vpath_handle, 1030 rxdh, t_code); 1031 1032 /* 1033 * If transfer code is not for unknown protocols and 1034 * vxge_hal_device_handle_tcode is NOT returned 1035 * VXGE_HAL_OK 1036 * drop this packet and increment rx_tcode stats 1037 */ 1038 if ((status != VXGE_HAL_OK) && 1039 (t_code != VXGE_HAL_RING_T_CODE_L3_PKT_ERR)) { 1040 1041 vxge_free_packet(mbuf_up); 1042 vxge_hal_ring_rxd_post(vpath_handle, rxdh); 1043 continue; 1044 } 1045 } 1046 1047 if (vxge_rx_rxd_1b_set(vpath, rxdh, dtr_priv)) { 1048 /* 1049 * If unable to allocate buffer, post descriptor back 1050 * to vpath for future processing of same packet. 
1051 */ 1052 vxge_hal_ring_rxd_post(vpath_handle, rxdh); 1053 continue; 1054 } 1055 1056 /* Get the extended information */ 1057 vxge_hal_ring_rxd_1b_info_get(vpath_handle, rxdh, &ext_info); 1058 1059 /* post descriptor with newly allocated mbuf back to vpath */ 1060 vxge_hal_ring_rxd_post(vpath_handle, rxdh); 1061 vpath->rxd_posted++; 1062 1063 if (vpath->rxd_posted % VXGE_RXD_REPLENISH_COUNT == 0) 1064 vxge_hal_ring_rxd_post_post_db(vpath_handle); 1065 1066 /* 1067 * Set successfully computed checksums in the mbuf. 1068 * Leave the rest to the stack to be reverified. 1069 */ 1070 vxge_rx_checksum(ext_info, mbuf_up); 1071 1072#if __FreeBSD_version >= 800000 1073 mbuf_up->m_flags |= M_FLOWID; 1074 mbuf_up->m_pkthdr.flowid = vpath->vp_index; 1075#endif 1076 /* Post-Read sync for buffers */ 1077 bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map, 1078 BUS_DMASYNC_POSTREAD); 1079 1080 vxge_rx_input(ifp, mbuf_up, vpath); 1081 1082 } while (vxge_hal_ring_rxd_next_completed(vpath_handle, &rxdh, 1083 &dtr_priv, &t_code) == VXGE_HAL_OK); 1084 1085 /* Flush any outstanding LRO work */ 1086 if (vpath->lro_enable && vpath->lro.lro_cnt) { 1087 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { 1088 SLIST_REMOVE_HEAD(&lro->lro_active, next); 1089 tcp_lro_flush(lro, queued); 1090 } 1091 } 1092 1093 return (status); 1094} 1095 1096static inline void 1097vxge_rx_input(ifnet_t ifp, mbuf_t mbuf_up, vxge_vpath_t *vpath) 1098{ 1099 if (vpath->lro_enable && vpath->lro.lro_cnt) { 1100 if (tcp_lro_rx(&vpath->lro, mbuf_up, 0) == 0) 1101 return; 1102 } 1103 (*ifp->if_input) (ifp, mbuf_up); 1104} 1105 1106static inline void 1107vxge_rx_checksum(vxge_hal_ring_rxd_info_t ext_info, mbuf_t mbuf_up) 1108{ 1109 1110 if (!(ext_info.proto & VXGE_HAL_FRAME_PROTO_IP_FRAG) && 1111 (ext_info.proto & VXGE_HAL_FRAME_PROTO_TCP_OR_UDP) && 1112 ext_info.l3_cksum_valid && ext_info.l4_cksum_valid) { 1113 1114 mbuf_up->m_pkthdr.csum_data = htons(0xffff); 1115 1116 mbuf_up->m_pkthdr.csum_flags = 
/*
 * vxge_rx_term During unload terminate and free all descriptors
 * @vpath_handle Rx vpath Handle @rxdh Rx Descriptor Handle @state Descriptor
 * State @userdata Per-adapter Data @reopen vpath open/reopen option
 */
/* ARGSUSED */
void
vxge_rx_term(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
    void *dtr_priv, vxge_hal_rxd_state_e state, void *userdata,
    vxge_hal_reopen_e reopen)
{
	vxge_vpath_t *vpath = (vxge_vpath_t *) userdata;
	vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;

	/* Only descriptors still owned by hardware need tearing down. */
	if (state != VXGE_HAL_RXD_STATE_POSTED)
		return;

	if (rxd_priv != NULL) {
		bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(vpath->dma_tag_rx, rxd_priv->dma_map);
		bus_dmamap_destroy(vpath->dma_tag_rx, rxd_priv->dma_map);

		vxge_free_packet(rxd_priv->mbuf_pkt);
	}
	/* Free the descriptor */
	vxge_hal_ring_rxd_free(vpath_handle, rxdh);
}

/*
 * vxge_rx_rxd_1b_get
 * Get descriptors of packet to send up.
 * Reads the completed length from the descriptor and sizes the mbuf
 * that will be handed to the stack accordingly.
 */
void
vxge_rx_rxd_1b_get(vxge_vpath_t *vpath, vxge_hal_rxd_h rxdh, void *dtr_priv)
{
	vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;
	mbuf_t mbuf_up = rxd_priv->mbuf_pkt;

	/* Retrieve data from completed descriptor */
	vxge_hal_ring_rxd_1b_get(vpath->handle, rxdh, &rxd_priv->dma_addr[0],
	    (u32 *) &rxd_priv->dma_sizes[0]);

	/* Update newly created buffer to be sent up with packet length */
	mbuf_up->m_len = rxd_priv->dma_sizes[0];
	mbuf_up->m_pkthdr.len = rxd_priv->dma_sizes[0];
	mbuf_up->m_next = NULL;
}

/*
 * vxge_rx_rxd_1b_set
 * Allocates new mbufs to be placed into descriptors.
 * Maps the fresh mbuf with the vpath's spare ("extra") DMA map, then
 * swaps that map with the descriptor's current one so the old map can
 * serve as the spare for the next replenish.  Returns 0 on success.
 */
int
vxge_rx_rxd_1b_set(vxge_vpath_t *vpath, vxge_hal_rxd_h rxdh, void *dtr_priv)
{
	int num_segs, err = 0;

	mbuf_t mbuf_pkt;
	bus_dmamap_t dma_map;
	bus_dma_segment_t dma_buffers[1];
	vxge_rxd_priv_t *rxd_priv = (vxge_rxd_priv_t *) dtr_priv;

	vxge_dev_t *vdev = vpath->vdev;

	mbuf_pkt = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, vdev->rx_mbuf_sz);
	if (!mbuf_pkt) {
		err = ENOBUFS;
		VXGE_DRV_STATS(vpath, rx_no_buf);
		device_printf(vdev->ndev, "out of memory to allocate mbuf\n");
		goto _exit0;
	}

	/* Update mbuf's length, packet length and receive interface */
	mbuf_pkt->m_len = vdev->rx_mbuf_sz;
	mbuf_pkt->m_pkthdr.len = vdev->rx_mbuf_sz;
	mbuf_pkt->m_pkthdr.rcvif = vdev->ifp;

	/* Load DMA map */
	err = vxge_dma_mbuf_coalesce(vpath->dma_tag_rx, vpath->extra_dma_map,
	    &mbuf_pkt, dma_buffers, &num_segs);
	if (err != 0) {
		VXGE_DRV_STATS(vpath, rx_map_fail);
		vxge_free_packet(mbuf_pkt);
		goto _exit0;
	}

	/* Unload DMA map of mbuf in current descriptor */
	bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(vpath->dma_tag_rx, rxd_priv->dma_map);

	/* Update descriptor private data: swap current and spare maps. */
	dma_map = rxd_priv->dma_map;
	rxd_priv->mbuf_pkt = mbuf_pkt;
	rxd_priv->dma_addr[0] = htole64(dma_buffers->ds_addr);
	rxd_priv->dma_map = vpath->extra_dma_map;
	vpath->extra_dma_map = dma_map;

	/* Pre-Read/Write sync */
	bus_dmamap_sync(vpath->dma_tag_rx, rxd_priv->dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Set descriptor buffer */
	vxge_hal_ring_rxd_1b_set(rxdh, rxd_priv->dma_addr[0], vdev->rx_mbuf_sz);

_exit0:
	return (err);
}
/* ARGSUSED */
void
vxge_link_up(vxge_hal_device_h devh, void *userdata)
{
	int i;
	vxge_vpath_t *vpath;
	vxge_hal_device_hw_info_t *hw_info;

	vxge_dev_t *vdev = (vxge_dev_t *) userdata;
	hw_info = &vdev->config.hw_info;

	ifnet_t ifp = vdev->ifp;

	/* Re-arm per-vpath interrupt moderation when running MSI-X */
	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &(vdev->vpaths[i]);
			vxge_hal_vpath_tti_ci_set(vpath->handle);
			vxge_hal_vpath_rti_ci_set(vpath->handle);
		}
	}

	if (vdev->is_privilaged && (hw_info->ports > 1)) {
		vxge_active_port_update(vdev);
		/*
		 * NOTE(review): active_port is printed with %lld — confirm
		 * it is a 64-bit signed type (cast if it is u64).
		 */
		device_printf(vdev->ndev,
		    "Active Port : %lld\n", vdev->active_port);
	}

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);
}

/*
 * vxge_link_down
 * Callback for Link-down indication from HAL
 */
/* ARGSUSED */
void
vxge_link_down(vxge_hal_device_h devh, void *userdata)
{
	int i;
	vxge_vpath_t *vpath;
	vxge_dev_t *vdev = (vxge_dev_t *) userdata;

	ifnet_t ifp = vdev->ifp;

	/* Undo the continuous-interrupt setting applied on link-up */
	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &(vdev->vpaths[i]);
			vxge_hal_vpath_tti_ci_reset(vpath->handle);
			vxge_hal_vpath_rti_ci_reset(vpath->handle);
		}
	}

	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_DOWN);
}

/*
 * vxge_reset
 * Stop and restart the adapter under the driver lock (no-op if the
 * device was never initialized).
 */
void
vxge_reset(vxge_dev_t *vdev)
{
	if (!vdev->is_initialized)
		return;

	VXGE_DRV_LOCK(vdev);
	vxge_stop_locked(vdev);
	vxge_init_locked(vdev);
	VXGE_DRV_UNLOCK(vdev);
}

/*
 * vxge_crit_error
 * Callback for Critical error indication from HAL
 */
/* ARGSUSED */
void
vxge_crit_error(vxge_hal_device_h devh, void *userdata,
    vxge_hal_event_e type, u64 serr_data)
{
	vxge_dev_t *vdev = (vxge_dev_t *) userdata;
	ifnet_t ifp = vdev->ifp;

	switch (type) {
	case VXGE_HAL_EVENT_SERR:
	case VXGE_HAL_EVENT_KDFCCTL:
	case VXGE_HAL_EVENT_CRITICAL:
		/* Fatal hardware events: quiesce interrupts, mark link down */
		vxge_hal_device_intr_disable(vdev->devh);
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		if_link_state_change(ifp, LINK_STATE_DOWN);
		break;
	default:
		break;
	}
}

/*
 * vxge_ifp_setup
 * Maps configured vpaths to hardware vpath ids, allocates and fills in
 * the ifnet, and attaches the Ethernet interface.
 */
int
vxge_ifp_setup(device_t ndev)
{
	ifnet_t ifp;
	int i, j, err = 0;

	vxge_dev_t *vdev = (vxge_dev_t *) device_get_softc(ndev);

	/* i walks hardware vpath ids, j indexes the driver's vpath array */
	for (i = 0, j = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
		if (!bVAL1(vdev->config.hw_info.vpath_mask, i))
			continue;

		if (j >= vdev->no_of_vpath)
			break;

		vdev->vpaths[j].vp_id = i;
		vdev->vpaths[j].vp_index = j;
		vdev->vpaths[j].vdev = vdev;
		vdev->vpaths[j].is_configured = TRUE;

		vxge_os_memcpy((u8 *) vdev->vpaths[j].mac_addr,
		    (u8 *) (vdev->config.hw_info.mac_addrs[i]),
		    (size_t) ETHER_ADDR_LEN);
		j++;
	}

	/* Get interface ifnet structure for this Ether device */
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(vdev->ndev,
		    "memory allocation for ifnet failed\n");
		err = ENXIO;
		goto _exit0;
	}
	vdev->ifp = ifp;

	/* Initialize interface ifnet structure */
	if_initname(ifp, device_get_name(ndev), device_get_unit(ndev));

	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = VXGE_BAUDRATE;
	ifp->if_init = vxge_init;
	ifp->if_softc = vdev;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vxge_ioctl;
	ifp->if_start = vxge_send;

#if __FreeBSD_version >= 800000
	ifp->if_transmit = vxge_mq_send;
	ifp->if_qflush = vxge_mq_qflush;
#endif
	ifp->if_snd.ifq_drv_maxlen = max(vdev->config.ifq_maxlen, ifqmaxlen);
	IFQ_SET_MAXLEN(&ifp->if_snd,
ifp->if_snd.ifq_drv_maxlen); 1397 /* IFQ_SET_READY(&ifp->if_snd); */ 1398 1399 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1400 1401 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; 1402 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 1403 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 1404 1405 if (vdev->config.tso_enable) 1406 vxge_tso_config(vdev); 1407 1408 if (vdev->config.lro_enable) 1409 ifp->if_capabilities |= IFCAP_LRO; 1410 1411 ifp->if_capenable = ifp->if_capabilities; 1412 1413 strlcpy(vdev->ndev_name, device_get_nameunit(ndev), 1414 sizeof(vdev->ndev_name)); 1415 1416 /* Attach the interface */ 1417 ether_ifattach(ifp, vdev->vpaths[0].mac_addr); 1418 1419_exit0: 1420 return (err); 1421} 1422 1423/* 1424 * vxge_isr_setup 1425 * Register isr functions 1426 */ 1427int 1428vxge_isr_setup(vxge_dev_t *vdev) 1429{ 1430 int i, irq_rid, err = 0; 1431 vxge_vpath_t *vpath; 1432 1433 void *isr_func_arg; 1434 void (*isr_func_ptr) (void *); 1435 1436 switch (vdev->config.intr_mode) { 1437 case VXGE_HAL_INTR_MODE_IRQLINE: 1438 err = bus_setup_intr(vdev->ndev, 1439 vdev->config.isr_info[0].irq_res, 1440 (INTR_TYPE_NET | INTR_MPSAFE), 1441 vxge_isr_filter, vxge_isr_line, vdev, 1442 &vdev->config.isr_info[0].irq_handle); 1443 break; 1444 1445 case VXGE_HAL_INTR_MODE_MSIX: 1446 for (i = 0; i < vdev->intr_count; i++) { 1447 1448 irq_rid = vdev->config.isr_info[i].irq_rid; 1449 vpath = &vdev->vpaths[irq_rid / 4]; 1450 1451 if ((irq_rid % 4) == 2) { 1452 isr_func_ptr = vxge_isr_msix; 1453 isr_func_arg = (void *) vpath; 1454 } else if ((irq_rid % 4) == 3) { 1455 isr_func_ptr = vxge_isr_msix_alarm; 1456 isr_func_arg = (void *) vpath; 1457 } else 1458 break; 1459 1460 err = bus_setup_intr(vdev->ndev, 1461 vdev->config.isr_info[i].irq_res, 1462 (INTR_TYPE_NET | INTR_MPSAFE), NULL, 1463 (void *) isr_func_ptr, (void *) isr_func_arg, 1464 &vdev->config.isr_info[i].irq_handle); 1465 if (err != 0) 1466 break; 1467 } 1468 1469 if (err != 0) { 1470 /* 
Teardown interrupt handler */ 1471 while (--i > 0) 1472 bus_teardown_intr(vdev->ndev, 1473 vdev->config.isr_info[i].irq_res, 1474 vdev->config.isr_info[i].irq_handle); 1475 } 1476 break; 1477 } 1478 1479 return (err); 1480} 1481 1482/* 1483 * vxge_isr_filter 1484 * ISR filter function - filter interrupts from other shared devices 1485 */ 1486int 1487vxge_isr_filter(void *handle) 1488{ 1489 u64 val64 = 0; 1490 vxge_dev_t *vdev = (vxge_dev_t *) handle; 1491 __hal_device_t *hldev = (__hal_device_t *) vdev->devh; 1492 1493 vxge_hal_common_reg_t *common_reg = 1494 (vxge_hal_common_reg_t *) (hldev->common_reg); 1495 1496 val64 = vxge_os_pio_mem_read64(vdev->pdev, (vdev->devh)->regh0, 1497 &common_reg->titan_general_int_status); 1498 1499 return ((val64) ? FILTER_SCHEDULE_THREAD : FILTER_STRAY); 1500} 1501 1502/* 1503 * vxge_isr_line 1504 * Interrupt service routine for Line interrupts 1505 */ 1506void 1507vxge_isr_line(void *vdev_ptr) 1508{ 1509 vxge_dev_t *vdev = (vxge_dev_t *) vdev_ptr; 1510 1511 vxge_hal_device_handle_irq(vdev->devh, 0); 1512} 1513 1514void 1515vxge_isr_msix(void *vpath_ptr) 1516{ 1517 u32 got_rx = 0; 1518 u32 got_tx = 0; 1519 1520 __hal_virtualpath_t *hal_vpath; 1521 vxge_vpath_t *vpath = (vxge_vpath_t *) vpath_ptr; 1522 vxge_dev_t *vdev = vpath->vdev; 1523 hal_vpath = ((__hal_vpath_handle_t *) vpath->handle)->vpath; 1524 1525 VXGE_DRV_STATS(vpath, isr_msix); 1526 VXGE_HAL_DEVICE_STATS_SW_INFO_TRAFFIC_INTR(vdev->devh); 1527 1528 vxge_hal_vpath_mf_msix_mask(vpath->handle, vpath->msix_vec); 1529 1530 /* processing rx */ 1531 vxge_hal_vpath_poll_rx(vpath->handle, &got_rx); 1532 1533 /* processing tx */ 1534 if (hal_vpath->vp_config->fifo.enable) { 1535 vxge_intr_coalesce_tx(vpath); 1536 vxge_hal_vpath_poll_tx(vpath->handle, &got_tx); 1537 } 1538 1539 vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec); 1540} 1541 1542void 1543vxge_isr_msix_alarm(void *vpath_ptr) 1544{ 1545 int i; 1546 vxge_hal_status_e status = VXGE_HAL_OK; 1547 1548 
vxge_vpath_t *vpath = (vxge_vpath_t *) vpath_ptr; 1549 vxge_dev_t *vdev = vpath->vdev; 1550 1551 VXGE_HAL_DEVICE_STATS_SW_INFO_NOT_TRAFFIC_INTR(vdev->devh); 1552 1553 /* Process alarms in each vpath */ 1554 for (i = 0; i < vdev->no_of_vpath; i++) { 1555 1556 vpath = &(vdev->vpaths[i]); 1557 vxge_hal_vpath_mf_msix_mask(vpath->handle, 1558 vpath->msix_vec_alarm); 1559 status = vxge_hal_vpath_alarm_process(vpath->handle, 0); 1560 if ((status == VXGE_HAL_ERR_EVENT_SLOT_FREEZE) || 1561 (status == VXGE_HAL_ERR_EVENT_SERR)) { 1562 device_printf(vdev->ndev, 1563 "processing alarms urecoverable error %x\n", 1564 status); 1565 1566 /* Stop the driver */ 1567 vdev->is_initialized = FALSE; 1568 break; 1569 } 1570 vxge_hal_vpath_mf_msix_unmask(vpath->handle, 1571 vpath->msix_vec_alarm); 1572 } 1573} 1574 1575/* 1576 * vxge_msix_enable 1577 */ 1578vxge_hal_status_e 1579vxge_msix_enable(vxge_dev_t *vdev) 1580{ 1581 int i, first_vp_id, msix_id; 1582 1583 vxge_vpath_t *vpath; 1584 vxge_hal_status_e status = VXGE_HAL_OK; 1585 1586 /* 1587 * Unmasking and Setting MSIX vectors before enabling interrupts 1588 * tim[] : 0 - Tx ## 1 - Rx ## 2 - UMQ-DMQ ## 0 - BITMAP 1589 */ 1590 int tim[4] = {0, 1, 0, 0}; 1591 1592 for (i = 0; i < vdev->no_of_vpath; i++) { 1593 1594 vpath = vdev->vpaths + i; 1595 first_vp_id = vdev->vpaths[0].vp_id; 1596 1597 msix_id = vpath->vp_id * VXGE_HAL_VPATH_MSIX_ACTIVE; 1598 tim[1] = vpath->msix_vec = msix_id + 1; 1599 1600 vpath->msix_vec_alarm = first_vp_id * 1601 VXGE_HAL_VPATH_MSIX_ACTIVE + VXGE_HAL_VPATH_MSIX_ALARM_ID; 1602 1603 status = vxge_hal_vpath_mf_msix_set(vpath->handle, 1604 tim, VXGE_HAL_VPATH_MSIX_ALARM_ID); 1605 1606 if (status != VXGE_HAL_OK) { 1607 device_printf(vdev->ndev, 1608 "failed to set msix vectors to vpath\n"); 1609 break; 1610 } 1611 1612 vxge_hal_vpath_mf_msix_unmask(vpath->handle, vpath->msix_vec); 1613 vxge_hal_vpath_mf_msix_unmask(vpath->handle, 1614 vpath->msix_vec_alarm); 1615 } 1616 1617 return (status); 1618} 1619 1620/* 1621 
 * vxge_media_init
 * Initializes, adds and sets media
 */
void
vxge_media_init(vxge_dev_t *vdev)
{
	ifmedia_init(&vdev->media,
	    IFM_IMASK, vxge_media_change, vxge_media_status);

	/* Add supported media */
	ifmedia_add(&vdev->media,
	    IFM_ETHER | vdev->ifm_optics | IFM_FDX,
	    0, NULL);

	/* Set media */
	ifmedia_add(&vdev->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&vdev->media, IFM_ETHER | IFM_AUTO);
}

/*
 * vxge_media_status
 * Callback for interface media settings
 */
void
vxge_media_status(ifnet_t ifp, struct ifmediareq *ifmr)
{
	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
	vxge_hal_device_t *hldev = vdev->devh;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* set link state */
	if (vxge_hal_device_link_state_get(hldev) == VXGE_HAL_LINK_UP) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= vdev->ifm_optics | IFM_FDX;
		if_link_state_change(ifp, LINK_STATE_UP);
	}
}

/*
 * vxge_media_change
 * Media change driver callback
 */
int
vxge_media_change(ifnet_t ifp)
{
	vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc;
	struct ifmedia *ifmediap = &vdev->media;

	/* Only Ethernet media are supported; anything else is rejected */
	return (IFM_TYPE(ifmediap->ifm_media) != IFM_ETHER ?
	    EINVAL : 0);
}

/*
 * Allocate PCI resources
 *
 * On any failure the already-acquired resources are released via
 * vxge_free_resources() using the cascading error_level.
 */
int
vxge_alloc_resources(vxge_dev_t *vdev)
{
	int err = 0;
	vxge_pci_info_t *pci_info = NULL;
	vxge_free_resources_e error_level = VXGE_FREE_NONE;

	device_t ndev = vdev->ndev;

	/* Allocate Buffer for HAL Device Configuration */
	vdev->device_config = (vxge_hal_device_config_t *)
	    vxge_mem_alloc(sizeof(vxge_hal_device_config_t));

	if (!vdev->device_config) {
		err = ENOMEM;
		error_level = VXGE_DISABLE_PCI_BUSMASTER;
		device_printf(vdev->ndev,
		    "failed to allocate memory for device config\n");
		goto _exit0;
	}

	pci_info = (vxge_pci_info_t *) vxge_mem_alloc(sizeof(vxge_pci_info_t));
	if (!pci_info) {
		error_level = VXGE_FREE_DEVICE_CONFIG;
		err = ENOMEM;
		device_printf(vdev->ndev,
		    "failed to allocate memory for pci info\n");
		goto _exit0;
	}
	pci_info->ndev = ndev;
	vdev->pdev = pci_info;

	err = vxge_alloc_bar_resources(vdev, 0);
	if (err != 0) {
		error_level = VXGE_FREE_BAR0;
		goto _exit0;
	}

	err = vxge_alloc_bar_resources(vdev, 1);
	if (err != 0) {
		error_level = VXGE_FREE_BAR1;
		goto _exit0;
	}

	err = vxge_alloc_bar_resources(vdev, 2);
	if (err != 0)
		error_level = VXGE_FREE_BAR2;

_exit0:
	if (error_level)
		vxge_free_resources(ndev, error_level);

	return (err);
}

/*
 * vxge_alloc_bar_resources
 * Allocates BAR resources
 */
int
vxge_alloc_bar_resources(vxge_dev_t *vdev, int i)
{
	int err = 0;
	int res_id = 0;
	vxge_pci_info_t *pci_info = vdev->pdev;

	/* BARs are 64-bit: logical BAR i lives at register BAR(2*i) */
	res_id = PCIR_BAR((i == 0) ?
	    0 : (i * 2));

	pci_info->bar_info[i] =
	    bus_alloc_resource_any(vdev->ndev,
	    SYS_RES_MEMORY, &res_id, RF_ACTIVE);

	if (pci_info->bar_info[i] == NULL) {
		device_printf(vdev->ndev,
		    "failed to allocate memory for bus resources\n");
		err = ENOMEM;
		goto _exit0;
	}

	pci_info->reg_map[i] =
	    (vxge_bus_res_t *) vxge_mem_alloc(sizeof(vxge_bus_res_t));

	if (pci_info->reg_map[i] == NULL) {
		device_printf(vdev->ndev,
		    "failed to allocate memory bar resources\n");
		err = ENOMEM;
		goto _exit0;
	}

	/* Cache tag/handle/base/size so the HAL can do PIO on this BAR */
	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_space_tag =
	    rman_get_bustag(pci_info->bar_info[i]);

	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_space_handle =
	    rman_get_bushandle(pci_info->bar_info[i]);

	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bar_start_addr =
	    pci_info->bar_info[i];

	((vxge_bus_res_t *) (pci_info->reg_map[i]))->bus_res_len =
	    rman_get_size(pci_info->bar_info[i]);

_exit0:
	return (err);
}

/*
 * vxge_alloc_isr_resources
 * Chooses MSI-X when enough vectors (4 per vpath) are available,
 * otherwise falls back to INTA, then allocates the IRQ resources.
 */
int
vxge_alloc_isr_resources(vxge_dev_t *vdev)
{
	int i, err = 0, irq_rid;
	int msix_vec_reqd, intr_count, msix_count;

	int intr_mode = VXGE_HAL_INTR_MODE_IRQLINE;

	if (vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) {
		/* MSI-X messages supported by device */
		intr_count = pci_msix_count(vdev->ndev);
		if (intr_count) {

			msix_vec_reqd = 4 * vdev->no_of_vpath;
			if (intr_count >= msix_vec_reqd) {
				intr_count = msix_vec_reqd;

				err = pci_alloc_msix(vdev->ndev, &intr_count);
				if (err == 0)
					intr_mode = VXGE_HAL_INTR_MODE_MSIX;
			}

			if ((err != 0) || (intr_count < msix_vec_reqd)) {
				device_printf(vdev->ndev, "Unable to allocate "
				    "msi/x vectors switching to INTA mode\n");
			}
		}
	}

	err = 0;
	vdev->intr_count = 0;
	vdev->config.intr_mode = intr_mode;

	switch (vdev->config.intr_mode) {
	case VXGE_HAL_INTR_MODE_IRQLINE:
		vdev->config.isr_info[0].irq_rid = 0;
		vdev->config.isr_info[0].irq_res =
		    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
		    &vdev->config.isr_info[0].irq_rid,
		    (RF_SHAREABLE | RF_ACTIVE));

		if (vdev->config.isr_info[0].irq_res == NULL) {
			device_printf(vdev->ndev,
			    "failed to allocate line interrupt resource\n");
			err = ENOMEM;
			goto _exit0;
		}
		vdev->intr_count++;
		break;

	case VXGE_HAL_INTR_MODE_MSIX:
		msix_count = 0;
		/* One traffic vector (rid 4*i+2) per vpath */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			irq_rid = i * 4;

			vdev->config.isr_info[msix_count].irq_rid = irq_rid + 2;
			vdev->config.isr_info[msix_count].irq_res =
			    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
			    &vdev->config.isr_info[msix_count].irq_rid,
			    (RF_SHAREABLE | RF_ACTIVE));

			if (vdev->config.isr_info[msix_count].irq_res == NULL) {
				device_printf(vdev->ndev,
				    "allocating bus resource (rid %d) failed\n",
				    vdev->config.isr_info[msix_count].irq_rid);
				err = ENOMEM;
				goto _exit0;
			}

			vdev->intr_count++;
			err = bus_bind_intr(vdev->ndev,
			    vdev->config.isr_info[msix_count].irq_res,
			    (i % mp_ncpus));
			if (err != 0)
				break;

			msix_count++;
		}

		/*
		 * Alarm vector (rid 3).
		 * NOTE(review): if bus_bind_intr failed above, msix_count was
		 * not advanced and this overwrites isr_info[msix_count],
		 * dropping (leaking) the irq_res stored there — verify and
		 * consider bailing out on bind failure instead.
		 */
		vdev->config.isr_info[msix_count].irq_rid = 3;
		vdev->config.isr_info[msix_count].irq_res =
		    bus_alloc_resource_any(vdev->ndev, SYS_RES_IRQ,
		    &vdev->config.isr_info[msix_count].irq_rid,
		    (RF_SHAREABLE | RF_ACTIVE));

		if (vdev->config.isr_info[msix_count].irq_res == NULL) {
			device_printf(vdev->ndev,
			    "allocating bus resource (rid %d) failed\n",
			    vdev->config.isr_info[msix_count].irq_rid);
			err = ENOMEM;
			goto _exit0;
		}

		vdev->intr_count++;
		err = bus_bind_intr(vdev->ndev,
		    vdev->config.isr_info[msix_count].irq_res, (i % mp_ncpus));

		break;
	}

	vdev->device_config->intr_mode = vdev->config.intr_mode;

_exit0:
	return (err);
}

/*
 * vxge_free_resources
 * Undo what-all we did during load/attach
 *
 * The switch cascades: entering at a given level frees that level's
 * resource and then falls through to every lower level.
 */
void
vxge_free_resources(device_t ndev, vxge_free_resources_e vxge_free_resource)
{
	int i;
	vxge_dev_t *vdev;

	vdev = (vxge_dev_t *) device_get_softc(ndev);

	switch (vxge_free_resource) {
	case VXGE_FREE_ALL:
		for (i = 0; i < vdev->intr_count; i++) {
			bus_teardown_intr(ndev,
			    vdev->config.isr_info[i].irq_res,
			    vdev->config.isr_info[i].irq_handle);
		}
		/* FALLTHROUGH */

	case VXGE_FREE_INTERFACE:
		ether_ifdetach(vdev->ifp);
		bus_generic_detach(ndev);
		if_free(vdev->ifp);
		/* FALLTHROUGH */

	case VXGE_FREE_MEDIA:
		ifmedia_removeall(&vdev->media);
		/* FALLTHROUGH */

	case VXGE_FREE_MUTEX:
		vxge_mutex_destroy(vdev);
		/* FALLTHROUGH */

	case VXGE_FREE_VPATH:
		vxge_mem_free(vdev->vpaths,
		    vdev->no_of_vpath * sizeof(vxge_vpath_t));
		/* FALLTHROUGH */

	case VXGE_FREE_TERMINATE_DEVICE:
		if (vdev->devh != NULL) {
			vxge_hal_device_private_set(vdev->devh, 0);
			vxge_hal_device_terminate(vdev->devh);
		}
		/* FALLTHROUGH */

	case VXGE_FREE_ISR_RESOURCE:
		vxge_free_isr_resources(vdev);
		/* FALLTHROUGH */

	case VXGE_FREE_BAR2:
		vxge_free_bar_resources(vdev, 2);
		/* FALLTHROUGH */

	case VXGE_FREE_BAR1:
		vxge_free_bar_resources(vdev, 1);
		/* FALLTHROUGH */

	case VXGE_FREE_BAR0:
		vxge_free_bar_resources(vdev, 0);
		/* FALLTHROUGH */

	case VXGE_FREE_PCI_INFO:
		vxge_mem_free(vdev->pdev, sizeof(vxge_pci_info_t));
		/* FALLTHROUGH */

	case VXGE_FREE_DEVICE_CONFIG:
		vxge_mem_free(vdev->device_config,
		    sizeof(vxge_hal_device_config_t));
		/* FALLTHROUGH */

	case VXGE_DISABLE_PCI_BUSMASTER:
		pci_disable_busmaster(ndev);
		/* FALLTHROUGH */

	case VXGE_FREE_TERMINATE_DRIVER:
		/* Terminate the HAL only when the last device goes away */
		if (vxge_dev_ref_count) {
			--vxge_dev_ref_count;
			if (0 == vxge_dev_ref_count)
				vxge_hal_driver_terminate();
		}
		/* FALLTHROUGH */

	default:
	case VXGE_FREE_NONE:
		break;
		/* NOTREACHED */
	}
}

/*
 * vxge_free_isr_resources
 * Release the IRQ resources (and MSI-X allocation) acquired in
 * vxge_alloc_isr_resources().
 */
void
vxge_free_isr_resources(vxge_dev_t *vdev)
{
	int i;

	switch (vdev->config.intr_mode) {
	case VXGE_HAL_INTR_MODE_IRQLINE:
		if (vdev->config.isr_info[0].irq_res) {
			bus_release_resource(vdev->ndev, SYS_RES_IRQ,
			    vdev->config.isr_info[0].irq_rid,
			    vdev->config.isr_info[0].irq_res);

			vdev->config.isr_info[0].irq_res = NULL;
		}
		break;

	case VXGE_HAL_INTR_MODE_MSIX:
		for (i = 0; i < vdev->intr_count; i++) {
			if (vdev->config.isr_info[i].irq_res) {
				bus_release_resource(vdev->ndev, SYS_RES_IRQ,
				    vdev->config.isr_info[i].irq_rid,
				    vdev->config.isr_info[i].irq_res);

				vdev->config.isr_info[i].irq_res = NULL;
			}
		}

		if (vdev->intr_count)
			pci_release_msi(vdev->ndev);

		break;
	}
}

/*
 * vxge_free_bar_resources
 * Release BAR i's bus resource and its cached register-map bookkeeping.
 */
void
vxge_free_bar_resources(vxge_dev_t *vdev, int i)
{
	int res_id = 0;
	vxge_pci_info_t *pci_info = vdev->pdev;

	/* Same rid mapping as in vxge_alloc_bar_resources() */
	res_id = PCIR_BAR((i == 0) ?
	    0 : (i * 2));

	if (pci_info->bar_info[i])
		bus_release_resource(vdev->ndev, SYS_RES_MEMORY,
		    res_id, pci_info->bar_info[i]);

	vxge_mem_free(pci_info->reg_map[i], sizeof(vxge_bus_res_t));
}

/*
 * vxge_init_mutex
 * Initializes mutexes used in driver
 */
void
vxge_mutex_init(vxge_dev_t *vdev)
{
	int i;

	snprintf(vdev->mtx_drv_name, sizeof(vdev->mtx_drv_name),
	    "%s_drv", vdev->ndev_name);

	mtx_init(&vdev->mtx_drv, vdev->mtx_drv_name,
	    MTX_NETWORK_LOCK, MTX_DEF);

	/* One tx mutex per vpath so transmit paths don't serialize */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		snprintf(vdev->vpaths[i].mtx_tx_name,
		    sizeof(vdev->vpaths[i].mtx_tx_name), "%s_tx_%d",
		    vdev->ndev_name, i);

		mtx_init(&vdev->vpaths[i].mtx_tx,
		    vdev->vpaths[i].mtx_tx_name, NULL, MTX_DEF);
	}
}

/*
 * vxge_mutex_destroy
 * Destroys mutexes used in driver
 */
void
vxge_mutex_destroy(vxge_dev_t *vdev)
{
	int i;

	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_TX_LOCK_DESTROY(&(vdev->vpaths[i]));

	VXGE_DRV_LOCK_DESTROY(vdev);
}

/*
 * vxge_rth_config
 * Configure Receive Traffic Hashing: build the bucket-to-vpath
 * indirection table and enable Jenkins hashing on every vpath.
 */
vxge_hal_status_e
vxge_rth_config(vxge_dev_t *vdev)
{
	int i;
	vxge_hal_vpath_h vpath_handle;
	vxge_hal_rth_hash_types_t hash_types;
	vxge_hal_status_e status = VXGE_HAL_OK;
	u8 mtable[256] = {0};

	/* Filling matable with bucket-to-vpath mapping */
	vdev->config.rth_bkt_sz = VXGE_DEFAULT_RTH_BUCKET_SIZE;

	for (i = 0; i < (1 << vdev->config.rth_bkt_sz); i++)
		mtable[i] = i % vdev->no_of_vpath;

	/* Fill RTH hash types */
	hash_types.hash_type_tcpipv4_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV4;
	hash_types.hash_type_tcpipv6_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV6;
	hash_types.hash_type_tcpipv6ex_en = VXGE_HAL_RING_HASH_TYPE_TCP_IPV6_EX;
	hash_types.hash_type_ipv4_en = VXGE_HAL_RING_HASH_TYPE_IPV4;
	hash_types.hash_type_ipv6_en = VXGE_HAL_RING_HASH_TYPE_IPV6;
	hash_types.hash_type_ipv6ex_en = VXGE_HAL_RING_HASH_TYPE_IPV6_EX;

	/* set indirection table, bucket-to-vpath mapping */
	status = vxge_hal_vpath_rts_rth_itable_set(vdev->vpath_handles,
	    vdev->no_of_vpath, mtable,
	    ((u32) (1 << vdev->config.rth_bkt_sz)));

	if (status != VXGE_HAL_OK) {
		device_printf(vdev->ndev, "rth configuration failed\n");
		goto _exit0;
	}
	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath_handle = vxge_vpath_handle_get(vdev, i);
		if (!vpath_handle)
			continue;

		status = vxge_hal_vpath_rts_rth_set(vpath_handle,
		    RTH_ALG_JENKINS,
		    &hash_types, vdev->config.rth_bkt_sz, TRUE);
		if (status != VXGE_HAL_OK) {
			device_printf(vdev->ndev,
			    "rth configuration failed for vpath (%d)\n",
			    vdev->vpaths[i].vp_id);
			break;
		}
	}

_exit0:
	return (status);
}

/*
 * vxge_vpath_config
 * Sets HAL parameter values from kenv
 */
void
vxge_vpath_config(vxge_dev_t *vdev)
{
	int i;
	u32 no_of_vpath = 0;
	vxge_hal_vp_config_t *vp_config;
	vxge_hal_device_config_t *device_config = vdev->device_config;

	device_config->debug_level = VXGE_TRACE;
	device_config->debug_mask = VXGE_COMPONENT_ALL;
	device_config->device_poll_millis = VXGE_DEFAULT_DEVICE_POLL_MILLIS;

	vdev->config.no_of_vpath =
	    min(vdev->config.no_of_vpath, vdev->max_supported_vpath);

	/* Start with every vpath disabled ... */
	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
		vp_config = &(device_config->vp_config[i]);
		vp_config->fifo.enable = VXGE_HAL_FIFO_DISABLE;
		vp_config->ring.enable = VXGE_HAL_RING_DISABLE;
	}

	/* ... then enable/configure only the vpaths present in the mask */
	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
		if (no_of_vpath >= vdev->config.no_of_vpath)
			break;

		if (!bVAL1(vdev->config.hw_info.vpath_mask, i))
			continue;

		no_of_vpath++;
		vp_config = &(device_config->vp_config[i]);
		vp_config->mtu
		    = VXGE_HAL_DEFAULT_MTU;
		vp_config->ring.enable = VXGE_HAL_RING_ENABLE;
		vp_config->ring.post_mode = VXGE_HAL_RING_POST_MODE_DOORBELL;
		vp_config->ring.buffer_mode = VXGE_HAL_RING_RXD_BUFFER_MODE_1;
		vp_config->ring.ring_length =
		    vxge_ring_length_get(VXGE_HAL_RING_RXD_BUFFER_MODE_1);
		vp_config->ring.scatter_mode = VXGE_HAL_RING_SCATTER_MODE_A;
		vp_config->rpa_all_vid_en = VXGE_DEFAULT_ALL_VID_ENABLE;
		vp_config->rpa_strip_vlan_tag = VXGE_DEFAULT_STRIP_VLAN_TAG;
		vp_config->rpa_ucast_all_addr_en =
		    VXGE_HAL_VPATH_RPA_UCAST_ALL_ADDR_DISABLE;

		/* Rx interrupt moderation (RTI) parameters */
		vp_config->rti.intr_enable = VXGE_HAL_TIM_INTR_ENABLE;
		vp_config->rti.txfrm_cnt_en = VXGE_HAL_TXFRM_CNT_EN_ENABLE;
		vp_config->rti.util_sel =
		    VXGE_HAL_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;

		vp_config->rti.uec_a = VXGE_DEFAULT_RTI_RX_UFC_A;
		vp_config->rti.uec_b = VXGE_DEFAULT_RTI_RX_UFC_B;
		vp_config->rti.uec_c = VXGE_DEFAULT_RTI_RX_UFC_C;
		vp_config->rti.uec_d = VXGE_DEFAULT_RTI_RX_UFC_D;

		vp_config->rti.urange_a = VXGE_DEFAULT_RTI_RX_URANGE_A;
		vp_config->rti.urange_b = VXGE_DEFAULT_RTI_RX_URANGE_B;
		vp_config->rti.urange_c = VXGE_DEFAULT_RTI_RX_URANGE_C;

		vp_config->rti.timer_ac_en = VXGE_HAL_TIM_TIMER_AC_ENABLE;
		vp_config->rti.timer_ci_en = VXGE_HAL_TIM_TIMER_CI_ENABLE;

		/* Timer values are converted from usec to 272-ns device units */
		vp_config->rti.btimer_val =
		    (VXGE_DEFAULT_RTI_BTIMER_VAL * 1000) / 272;
		vp_config->rti.rtimer_val =
		    (VXGE_DEFAULT_RTI_RTIMER_VAL * 1000) / 272;
		vp_config->rti.ltimer_val =
		    (VXGE_DEFAULT_RTI_LTIMER_VAL * 1000) / 272;

		/* Without multi-queue tx, only the first vpath gets a fifo */
		if ((no_of_vpath > 1) && (VXGE_DEFAULT_CONFIG_MQ_ENABLE == 0))
			continue;

		vp_config->fifo.enable = VXGE_HAL_FIFO_ENABLE;
		vp_config->fifo.max_aligned_frags =
		    VXGE_DEFAULT_FIFO_ALIGNED_FRAGS;

		/* Tx interrupt moderation (TTI) parameters */
		vp_config->tti.intr_enable = VXGE_HAL_TIM_INTR_ENABLE;
		vp_config->tti.txfrm_cnt_en = VXGE_HAL_TXFRM_CNT_EN_ENABLE;
		vp_config->tti.util_sel =
		    VXGE_HAL_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;

		vp_config->tti.uec_a = VXGE_DEFAULT_TTI_TX_UFC_A;
		vp_config->tti.uec_b = VXGE_DEFAULT_TTI_TX_UFC_B;
		vp_config->tti.uec_c = VXGE_DEFAULT_TTI_TX_UFC_C;
		vp_config->tti.uec_d = VXGE_DEFAULT_TTI_TX_UFC_D;

		vp_config->tti.urange_a = VXGE_DEFAULT_TTI_TX_URANGE_A;
		vp_config->tti.urange_b = VXGE_DEFAULT_TTI_TX_URANGE_B;
		vp_config->tti.urange_c = VXGE_DEFAULT_TTI_TX_URANGE_C;

		vp_config->tti.timer_ac_en = VXGE_HAL_TIM_TIMER_AC_ENABLE;
		vp_config->tti.timer_ci_en = VXGE_HAL_TIM_TIMER_CI_ENABLE;

		vp_config->tti.btimer_val =
		    (VXGE_DEFAULT_TTI_BTIMER_VAL * 1000) / 272;
		vp_config->tti.rtimer_val =
		    (VXGE_DEFAULT_TTI_RTIMER_VAL * 1000) / 272;
		vp_config->tti.ltimer_val =
		    (VXGE_DEFAULT_TTI_LTIMER_VAL * 1000) / 272;
	}

	vdev->no_of_vpath = no_of_vpath;

	if (vdev->no_of_vpath == 1)
		vdev->config.tx_steering = 0;

	if (vdev->config.rth_enable && (vdev->no_of_vpath > 1)) {
		device_config->rth_en = VXGE_HAL_RTH_ENABLE;
		device_config->rth_it_type = VXGE_HAL_RTH_IT_TYPE_MULTI_IT;
	}

	vdev->config.rth_enable = device_config->rth_en;
}

/*
 * vxge_vpath_cb_fn
 * Virtual path Callback function (no-op; required by the HAL open API)
 */
/* ARGSUSED */
static vxge_hal_status_e
vxge_vpath_cb_fn(vxge_hal_client_h client_handle, vxge_hal_up_msg_h msgh,
    vxge_hal_message_type_e msg_type, vxge_hal_obj_id_t obj_id,
    vxge_hal_result_e result, vxge_hal_opaque_handle_t *opaque_handle)
{
	return (VXGE_HAL_OK);
}

/*
 * vxge_vpath_open
 * Creates DMA tags, (optionally) buf_rings and LRO state, and opens
 * every configured vpath through the HAL.  Returns 0 or an errno.
 */
int
vxge_vpath_open(vxge_dev_t *vdev)
{
	int i, err = EINVAL;
	u64 func_id;

	vxge_vpath_t *vpath;
	vxge_hal_vpath_attr_t vpath_attr;
	vxge_hal_status_e status = VXGE_HAL_OK;
	struct lro_ctrl *lro = NULL;

	bzero(&vpath_attr, sizeof(vxge_hal_vpath_attr_t));

	for (i = 0; i < vdev->no_of_vpath; i++)
{ 2272 2273 vpath = &(vdev->vpaths[i]); 2274 lro = &vpath->lro; 2275 2276 /* Vpath vpath_attr: FIFO */ 2277 vpath_attr.vp_id = vpath->vp_id; 2278 vpath_attr.fifo_attr.callback = vxge_tx_compl; 2279 vpath_attr.fifo_attr.txdl_init = vxge_tx_replenish; 2280 vpath_attr.fifo_attr.txdl_term = vxge_tx_term; 2281 vpath_attr.fifo_attr.userdata = vpath; 2282 vpath_attr.fifo_attr.per_txdl_space = sizeof(vxge_txdl_priv_t); 2283 2284 /* Vpath vpath_attr: Ring */ 2285 vpath_attr.ring_attr.callback = vxge_rx_compl; 2286 vpath_attr.ring_attr.rxd_init = vxge_rx_replenish; 2287 vpath_attr.ring_attr.rxd_term = vxge_rx_term; 2288 vpath_attr.ring_attr.userdata = vpath; 2289 vpath_attr.ring_attr.per_rxd_space = sizeof(vxge_rxd_priv_t); 2290 2291 err = vxge_dma_tags_create(vpath); 2292 if (err != 0) { 2293 device_printf(vdev->ndev, 2294 "failed to create dma tags\n"); 2295 break; 2296 } 2297#if __FreeBSD_version >= 800000 2298 vpath->br = buf_ring_alloc(VXGE_DEFAULT_BR_SIZE, M_DEVBUF, 2299 M_WAITOK, &vpath->mtx_tx); 2300 if (vpath->br == NULL) { 2301 err = ENOMEM; 2302 break; 2303 } 2304#endif 2305 status = vxge_hal_vpath_open(vdev->devh, &vpath_attr, 2306 (vxge_hal_vpath_callback_f) vxge_vpath_cb_fn, 2307 NULL, &vpath->handle); 2308 if (status != VXGE_HAL_OK) { 2309 device_printf(vdev->ndev, 2310 "failed to open vpath (%d)\n", vpath->vp_id); 2311 err = EPERM; 2312 break; 2313 } 2314 vpath->is_open = TRUE; 2315 vdev->vpath_handles[i] = vpath->handle; 2316 2317 vpath->tx_ticks = ticks; 2318 vpath->rx_ticks = ticks; 2319 2320 vpath->tti_rtimer_val = VXGE_DEFAULT_TTI_RTIMER_VAL; 2321 vpath->tti_rtimer_val = VXGE_DEFAULT_TTI_RTIMER_VAL; 2322 2323 vpath->tx_intr_coalesce = vdev->config.intr_coalesce; 2324 vpath->rx_intr_coalesce = vdev->config.intr_coalesce; 2325 2326 func_id = vdev->config.hw_info.func_id; 2327 2328 if (vdev->config.low_latency && 2329 (vdev->config.bw_info[func_id].priority == 2330 VXGE_DEFAULT_VPATH_PRIORITY_HIGH)) { 2331 vpath->tx_intr_coalesce = 0; 2332 } 2333 2334 if 
(vdev->ifp->if_capenable & IFCAP_LRO) {
			err = tcp_lro_init(lro);
			if (err != 0) {
				device_printf(vdev->ndev,
				    "LRO Initialization failed!\n");
				break;
			}
			vpath->lro_enable = TRUE;
			lro->ifp = vdev->ifp;
		}
	}

	return (err);
}

/*
 * vxge_tso_config
 * Enables TSO4 by default, then disables it again when the function's
 * bandwidth priority (from vxge_bw_priority_get) is not HIGH.
 * On FreeBSD 8+ mirrors TSO4 into IFCAP_VLAN_HWTSO.
 */
void
vxge_tso_config(vxge_dev_t *vdev)
{
	u32 func_id, priority;
	vxge_hal_status_e status = VXGE_HAL_OK;

	vdev->ifp->if_capabilities |= IFCAP_TSO4;

	status = vxge_bw_priority_get(vdev, NULL);
	if (status == VXGE_HAL_OK) {

		func_id = vdev->config.hw_info.func_id;
		priority = vdev->config.bw_info[func_id].priority;

		if (priority != VXGE_DEFAULT_VPATH_PRIORITY_HIGH)
			vdev->ifp->if_capabilities &= ~IFCAP_TSO4;
	}

#if __FreeBSD_version >= 800000
	if (vdev->ifp->if_capabilities & IFCAP_TSO4)
		vdev->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif

}

/*
 * vxge_bw_priority_get
 * Queries bandwidth/priority for a function from the HAL.
 * With bw_info == NULL the result is stored into
 * vdev->config.bw_info[func_id]; otherwise it is returned through
 * bw_info (using bw_info->func_id as the query target).
 * Firmware >= 1.8.0 uses the direct VF query; older firmware walks the
 * function's vpath list and queries the first vpath.
 */
vxge_hal_status_e
vxge_bw_priority_get(vxge_dev_t *vdev, vxge_bw_info_t *bw_info)
{
	u32 priority, bandwidth;
	u32 vpath_count;

	u64 func_id, func_mode, vpath_list[VXGE_HAL_MAX_VIRTUAL_PATHS];
	vxge_hal_status_e status = VXGE_HAL_OK;

	func_id = vdev->config.hw_info.func_id;
	if (bw_info) {
		func_id = bw_info->func_id;
		func_mode = vdev->config.hw_info.function_mode;
		/* Single-function adapters only have function 0. */
		if ((is_single_func(func_mode)) && (func_id > 0))
			return (VXGE_HAL_FAIL);
	}

	if (vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 0)) {

		status = vxge_hal_vf_rx_bw_get(vdev->devh,
		    func_id, &bandwidth, &priority);

	} else {

		status = vxge_hal_get_vpath_list(vdev->devh,
		    func_id, vpath_list, &vpath_count);

		if (status == VXGE_HAL_OK) {
			status = vxge_hal_bw_priority_get(vdev->devh,
			    vpath_list[0], &bandwidth, &priority);
		}
	}

	if (status == VXGE_HAL_OK) {
		if (bw_info) {
			bw_info->priority = priority;
			bw_info->bandwidth =
bandwidth;
		} else {
			vdev->config.bw_info[func_id].priority = priority;
			vdev->config.bw_info[func_id].bandwidth = bandwidth;
		}
	}

	return (status);
}

/*
 * close vpaths
 * Tears down everything vxge_vpath_open set up, in reverse order:
 * HAL handle, buf_ring, LRO state, Rx DMA map/tag, Tx DMA tag.
 * Safe to call on partially opened vpaths (every step is guarded).
 */
void
vxge_vpath_close(vxge_dev_t *vdev)
{
	int i;
	vxge_vpath_t *vpath;

	for (i = 0; i < vdev->no_of_vpath; i++) {

		vpath = &(vdev->vpaths[i]);
		if (vpath->handle)
			vxge_hal_vpath_close(vpath->handle);

#if __FreeBSD_version >= 800000
		if (vpath->br != NULL)
			buf_ring_free(vpath->br, M_DEVBUF);
#endif
		/* Free LRO memory */
		if (vpath->lro_enable)
			tcp_lro_free(&vpath->lro);

		if (vpath->dma_tag_rx) {
			bus_dmamap_destroy(vpath->dma_tag_rx,
			    vpath->extra_dma_map);
			bus_dma_tag_destroy(vpath->dma_tag_rx);
		}

		if (vpath->dma_tag_tx)
			bus_dma_tag_destroy(vpath->dma_tag_tx);

		vpath->handle = NULL;
		vpath->is_open = FALSE;
	}
}

/*
 * reset vpaths
 * Issues a HAL reset on every open vpath; failures are logged but do
 * not stop the loop.
 */
void
vxge_vpath_reset(vxge_dev_t *vdev)
{
	int i;
	vxge_hal_vpath_h vpath_handle;
	vxge_hal_status_e status = VXGE_HAL_OK;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath_handle = vxge_vpath_handle_get(vdev, i);
		if (!vpath_handle)
			continue;

		status = vxge_hal_vpath_reset(vpath_handle);
		if (status != VXGE_HAL_OK)
			device_printf(vdev->ndev,
			    "failed to reset vpath :%d\n", i);
	}
}

/*
 * vxge_vpath_get
 * Selects a Tx vpath for an outgoing mbuf by hashing the L4 port
 * numbers (TCP/UDP over IPv4/IPv6) through the vpath_selector mask
 * table.  Non-IP or non-TCP/UDP traffic hashes with ports == 0.
 */
static inline int
vxge_vpath_get(vxge_dev_t *vdev, mbuf_t mhead)
{
	struct tcphdr *th = NULL;
	struct udphdr *uh = NULL;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6 = NULL;
	struct ether_vlan_header *eth = NULL;
	void *ulp = NULL;

	int ehdrlen, iphlen = 0;
	u8 ipproto = 0;
	u16 etype, src_port, dst_port;
	u16 queue_len, counter = 0;

	src_port = dst_port = 0;
	queue_len = vdev->no_of_vpath;

	eth = mtod(mhead, struct
ether_vlan_header *); 2498 if (eth->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2499 etype = ntohs(eth->evl_proto); 2500 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2501 } else { 2502 etype = ntohs(eth->evl_encap_proto); 2503 ehdrlen = ETHER_HDR_LEN; 2504 } 2505 2506 switch (etype) { 2507 case ETHERTYPE_IP: 2508 ip = (struct ip *) (mhead->m_data + ehdrlen); 2509 iphlen = ip->ip_hl << 2; 2510 ipproto = ip->ip_p; 2511 th = (struct tcphdr *) ((caddr_t)ip + iphlen); 2512 uh = (struct udphdr *) ((caddr_t)ip + iphlen); 2513 break; 2514 2515 case ETHERTYPE_IPV6: 2516 ip6 = (struct ip6_hdr *) (mhead->m_data + ehdrlen); 2517 iphlen = sizeof(struct ip6_hdr); 2518 ipproto = ip6->ip6_nxt; 2519 2520 ulp = mtod(mhead, char *) + iphlen; 2521 th = ((struct tcphdr *) (ulp)); 2522 uh = ((struct udphdr *) (ulp)); 2523 break; 2524 2525 default: 2526 break; 2527 } 2528 2529 switch (ipproto) { 2530 case IPPROTO_TCP: 2531 src_port = th->th_sport; 2532 dst_port = th->th_dport; 2533 break; 2534 2535 case IPPROTO_UDP: 2536 src_port = uh->uh_sport; 2537 dst_port = uh->uh_dport; 2538 break; 2539 2540 default: 2541 break; 2542 } 2543 2544 counter = (ntohs(src_port) + ntohs(dst_port)) & 2545 vpath_selector[queue_len - 1]; 2546 2547 if (counter >= queue_len) 2548 counter = queue_len - 1; 2549 2550 return (counter); 2551} 2552 2553static inline vxge_hal_vpath_h 2554vxge_vpath_handle_get(vxge_dev_t *vdev, int i) 2555{ 2556 return (vdev->vpaths[i].is_open ? 
vdev->vpaths[i].handle : NULL);
}

/*
 * vxge_firmware_verify
 * Reconciles persistent adapter configuration (firmware image,
 * function mode, L2 switch, dual-port mode) with the driver's desired
 * configuration.  Any change that took effect returns ENXIO, which the
 * caller treats as "power cycle required" (message printed below).
 */
int
vxge_firmware_verify(vxge_dev_t *vdev)
{
	int err = 0;
	u64 active_config;
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	/* A successful upgrade still needs a reboot to take effect. */
	if (vdev->fw_upgrade) {
		status = vxge_firmware_upgrade(vdev);
		if (status == VXGE_HAL_OK) {
			err = ENXIO;
			goto _exit0;
		}
	}

	/* Apply a requested function-mode change if it differs from the
	 * mode currently active in hardware. */
	if ((vdev->config.function_mode != VXGE_DEFAULT_CONFIG_VALUE) &&
	    (vdev->config.hw_info.function_mode !=
	    (u64) vdev->config.function_mode)) {

		status = vxge_func_mode_set(vdev);
		if (status == VXGE_HAL_OK)
			err = ENXIO;
	}

	/* l2_switch configuration */
	active_config = VXGE_DEFAULT_CONFIG_VALUE;
	status = vxge_hal_get_active_config(vdev->devh,
	    VXGE_HAL_XMAC_NWIF_ActConfig_L2SwitchEnabled,
	    &active_config);

	if (status == VXGE_HAL_OK) {
		vdev->l2_switch = active_config;
		if (vdev->config.l2_switch != VXGE_DEFAULT_CONFIG_VALUE) {
			if (vdev->config.l2_switch != active_config) {
				status = vxge_l2switch_mode_set(vdev);
				if (status == VXGE_HAL_OK)
					err = ENXIO;
			}
		}
	}

	if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) {
		if (vxge_port_mode_update(vdev) == ENXIO)
			err = ENXIO;
	}

_exit0:
	if (err == ENXIO)
		device_printf(vdev->ndev, "PLEASE POWER CYCLE THE SYSTEM\n");

	return (err);
}

/*
 * vxge_firmware_upgrade
 * Flashes the firmware image embedded in the driver
 * (VXGE_FW_ARRAY_NAME) via the HAL mrpcim upgrade API.
 * Returns the HAL status; the new image only takes effect after a
 * power cycle (see vxge_firmware_verify).
 */
vxge_hal_status_e
vxge_firmware_upgrade(vxge_dev_t *vdev)
{
	u8 *fw_buffer;
	u32 fw_size;
	vxge_hal_device_hw_info_t *hw_info;
	vxge_hal_status_e status = VXGE_HAL_OK;

	hw_info = &vdev->config.hw_info;

	fw_size = sizeof(VXGE_FW_ARRAY_NAME);
	fw_buffer = (u8 *) VXGE_FW_ARRAY_NAME;

	device_printf(vdev->ndev, "Current firmware version : %s (%s)\n",
	    hw_info->fw_version.version, hw_info->fw_date.date);

	device_printf(vdev->ndev, "Upgrading firmware to %d.%d.%d\n",
	    VXGE_MIN_FW_MAJOR_VERSION,
VXGE_MIN_FW_MINOR_VERSION,
	    VXGE_MIN_FW_BUILD_NUMBER);

	/* Call HAL API to upgrade firmware */
	status = vxge_hal_mrpcim_fw_upgrade(vdev->pdev,
	    (pci_reg_h) vdev->pdev->reg_map[0],
	    (u8 *) vdev->pdev->bar_info[0],
	    fw_buffer, fw_size);

	device_printf(vdev->ndev, "firmware upgrade %s\n",
	    (status == VXGE_HAL_OK) ? "successful" : "failed");

	return (status);
}

/*
 * vxge_func_mode_set
 * Programs the requested PCIe function mode and commits it through the
 * firmware API.  If the adapter goes from multi-function to
 * single-function while dual-port networking is active, the port mode
 * is forced back to single-port as well.
 */
vxge_hal_status_e
vxge_func_mode_set(vxge_dev_t *vdev)
{
	u64 active_config;
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	status = vxge_hal_mrpcim_pcie_func_mode_set(vdev->devh,
	    vdev->config.function_mode);
	device_printf(vdev->ndev,
	    "function mode change %s\n",
	    (status == VXGE_HAL_OK) ? "successful" : "failed");

	if (status == VXGE_HAL_OK) {
		vxge_hal_set_fw_api(vdev->devh, 0ULL,
		    VXGE_HAL_API_FUNC_MODE_COMMIT,
		    0, 0ULL, 0ULL);

		vxge_hal_get_active_config(vdev->devh,
		    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
		    &active_config);

		/*
		 * If in MF + DP mode
		 * if user changes to SF, change port_mode to single port mode
		 */
		if (((is_multi_func(vdev->config.hw_info.function_mode)) &&
		    is_single_func(vdev->config.function_mode)) &&
		    (active_config == VXGE_HAL_DP_NP_MODE_DUAL_PORT)) {
			vdev->config.port_mode =
			    VXGE_HAL_DP_NP_MODE_SINGLE_PORT;

			status = vxge_port_mode_set(vdev);
		}
	}
	return (status);
}

/*
 * vxge_port_mode_set
 * Programs vdev->config.port_mode into the adapter, commits it via the
 * firmware API, and (for dual-port/active-active mode) also installs
 * the dual-port vpath mapping.
 */
vxge_hal_status_e
vxge_port_mode_set(vxge_dev_t *vdev)
{
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	status = vxge_hal_set_port_mode(vdev->devh, vdev->config.port_mode);
	device_printf(vdev->ndev,
	    "port mode change %s\n",
	    (status == VXGE_HAL_OK) ?
"successful" : "failed");

	if (status == VXGE_HAL_OK) {
		vxge_hal_set_fw_api(vdev->devh, 0ULL,
		    VXGE_HAL_API_FUNC_MODE_COMMIT,
		    0, 0ULL, 0ULL);

		/* Configure vpath_mapping for active-active mode only */
		if (vdev->config.port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT) {

			status = vxge_hal_config_vpath_map(vdev->devh,
			    VXGE_DUAL_PORT_MAP);

			device_printf(vdev->ndev, "dual port map change %s\n",
			    (status == VXGE_HAL_OK) ? "successful" : "failed");
		}
	}
	return (status);
}

/*
 * vxge_port_mode_update
 * Brings the adapter's persistent port mode and behavior-on-failure
 * settings in line with the driver configuration.
 * Returns 0 when nothing changed, EPERM/EINVAL on error, or ENXIO when
 * a change was committed and a power cycle is required.
 */
int
vxge_port_mode_update(vxge_dev_t *vdev)
{
	int err = 0;
	u64 active_config;
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	/* Dual-port mode requires a multi-function adapter. */
	if ((vdev->config.port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT) &&
	    is_single_func(vdev->config.hw_info.function_mode)) {

		device_printf(vdev->ndev,
		    "Adapter in SF mode, dual port mode is not allowed\n");
		err = EPERM;
		goto _exit0;
	}

	active_config = VXGE_DEFAULT_CONFIG_VALUE;
	status = vxge_hal_get_active_config(vdev->devh,
	    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
	    &active_config);
	if (status != VXGE_HAL_OK) {
		err = EINVAL;
		goto _exit0;
	}

	vdev->port_mode = active_config;
	if (vdev->config.port_mode != VXGE_DEFAULT_CONFIG_VALUE) {
		if (vdev->config.port_mode != vdev->port_mode) {
			status = vxge_port_mode_set(vdev);
			if (status != VXGE_HAL_OK) {
				err = EINVAL;
				goto _exit0;
			}
			/* Change committed — reboot needed. */
			err = ENXIO;
			vdev->port_mode = vdev->config.port_mode;
		}
	}

	active_config = VXGE_DEFAULT_CONFIG_VALUE;
	status = vxge_hal_get_active_config(vdev->devh,
	    VXGE_HAL_XMAC_NWIF_ActConfig_BehaviourOnFail,
	    &active_config);
	if (status != VXGE_HAL_OK) {
		err = EINVAL;
		goto _exit0;
	}

	vdev->port_failure = active_config;

	/*
	 * active/active mode : set to NoMove
	 * active/passive mode: set to Failover-Failback
	 */
	if (vdev->port_mode == VXGE_HAL_DP_NP_MODE_DUAL_PORT)
		vdev->config.port_failure =
		    VXGE_HAL_XMAC_NWIF_OnFailure_NoMove;

	else if (vdev->port_mode == VXGE_HAL_DP_NP_MODE_ACTIVE_PASSIVE)
		vdev->config.port_failure =
		    VXGE_HAL_XMAC_NWIF_OnFailure_OtherPortBackOnRestore;

	if ((vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT) &&
	    (vdev->config.port_failure != vdev->port_failure)) {
		status = vxge_port_behavior_on_failure_set(vdev);
		if (status == VXGE_HAL_OK)
			err = ENXIO;
	}

_exit0:
	return (err);
}

/*
 * vxge_port_mode_get
 * Reads the active port mode and behavior-on-failure into *port_info.
 * NOTE(review): declared to return vxge_hal_status_e but actually
 * returns errno-style values (0 / ENXIO) — callers must not compare
 * the result against VXGE_HAL_* codes; consider changing the return
 * type to int.
 */
vxge_hal_status_e
vxge_port_mode_get(vxge_dev_t *vdev, vxge_port_info_t *port_info)
{
	int err = 0;
	u64 active_config;
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	active_config = VXGE_DEFAULT_CONFIG_VALUE;
	status = vxge_hal_get_active_config(vdev->devh,
	    VXGE_HAL_XMAC_NWIF_ActConfig_NWPortMode,
	    &active_config);

	if (status != VXGE_HAL_OK) {
		err = ENXIO;
		goto _exit0;
	}

	port_info->port_mode = active_config;

	active_config = VXGE_DEFAULT_CONFIG_VALUE;
	status = vxge_hal_get_active_config(vdev->devh,
	    VXGE_HAL_XMAC_NWIF_ActConfig_BehaviourOnFail,
	    &active_config);
	if (status != VXGE_HAL_OK) {
		err = ENXIO;
		goto _exit0;
	}

	port_info->port_failure = active_config;

_exit0:
	return (err);
}

/*
 * vxge_port_behavior_on_failure_set
 * Programs vdev->config.port_failure into the adapter and commits it
 * via the firmware API.
 */
vxge_hal_status_e
vxge_port_behavior_on_failure_set(vxge_dev_t *vdev)
{
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	status = vxge_hal_set_behavior_on_failure(vdev->devh,
	    vdev->config.port_failure);

	device_printf(vdev->ndev,
	    "port behaviour on failure change %s\n",
	    (status == VXGE_HAL_OK) ?
"successful" : "failed");

	if (status == VXGE_HAL_OK)
		vxge_hal_set_fw_api(vdev->devh, 0ULL,
		    VXGE_HAL_API_FUNC_MODE_COMMIT,
		    0, 0ULL, 0ULL);

	return (status);
}

/*
 * vxge_active_port_update
 * Caches the currently active port (HAL ActivePort config) in
 * vdev->active_port; leaves it untouched on HAL failure.
 */
void
vxge_active_port_update(vxge_dev_t *vdev)
{
	u64 active_config;
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	active_config = VXGE_DEFAULT_CONFIG_VALUE;
	status = vxge_hal_get_active_config(vdev->devh,
	    VXGE_HAL_XMAC_NWIF_ActConfig_ActivePort,
	    &active_config);

	if (status == VXGE_HAL_OK)
		vdev->active_port = active_config;
}

/*
 * vxge_l2switch_mode_set
 * Programs the configured L2-switch enable/disable state and commits
 * it via the firmware API.
 */
vxge_hal_status_e
vxge_l2switch_mode_set(vxge_dev_t *vdev)
{
	vxge_hal_status_e status = VXGE_HAL_FAIL;

	status = vxge_hal_set_l2switch_mode(vdev->devh,
	    vdev->config.l2_switch);

	device_printf(vdev->ndev, "L2 switch %s\n",
	    (status == VXGE_HAL_OK) ?
	    (vdev->config.l2_switch) ? "enable" : "disable" :
	    "change failed");

	if (status == VXGE_HAL_OK)
		vxge_hal_set_fw_api(vdev->devh, 0ULL,
		    VXGE_HAL_API_FUNC_MODE_COMMIT,
		    0, 0ULL, 0ULL);

	return (status);
}

/*
 * vxge_promisc_set
 * Enable Promiscuous Mode
 * Propagates the interface's IFF_PROMISC flag to every open vpath.
 * No-op until the device is initialized.
 */
void
vxge_promisc_set(vxge_dev_t *vdev)
{
	int i;
	ifnet_t ifp;
	vxge_hal_vpath_h vpath_handle;

	if (!vdev->is_initialized)
		return;

	ifp = vdev->ifp;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		vpath_handle = vxge_vpath_handle_get(vdev, i);
		if (!vpath_handle)
			continue;

		if (ifp->if_flags & IFF_PROMISC)
			vxge_hal_vpath_promisc_enable(vpath_handle);
		else
			vxge_hal_vpath_promisc_disable(vpath_handle);
	}
}

/*
 * vxge_change_mtu
 * Change interface MTU to a requested valid size
 * Validates against VXGE_HAL_MIN_MTU/VXGE_HAL_MAX_MTU and, when the
 * interface is up, bounces it (if_down / vxge_reset / if_up) so the
 * new MTU takes effect.  Returns 0 or EINVAL.
 */
int
vxge_change_mtu(vxge_dev_t *vdev, unsigned long new_mtu)
{
	int err = EINVAL;

	if ((new_mtu < VXGE_HAL_MIN_MTU) || (new_mtu > VXGE_HAL_MAX_MTU))
		goto
_exit0; 2910 2911 (vdev->ifp)->if_mtu = new_mtu; 2912 device_printf(vdev->ndev, "MTU changed to %ld\n", (vdev->ifp)->if_mtu); 2913 2914 if (vdev->is_initialized) { 2915 if_down(vdev->ifp); 2916 vxge_reset(vdev); 2917 if_up(vdev->ifp); 2918 } 2919 err = 0; 2920 2921_exit0: 2922 return (err); 2923} 2924 2925/* 2926 * Creates DMA tags for both Tx and Rx 2927 */ 2928int 2929vxge_dma_tags_create(vxge_vpath_t *vpath) 2930{ 2931 int err = 0; 2932 bus_size_t max_size, boundary; 2933 vxge_dev_t *vdev = vpath->vdev; 2934 ifnet_t ifp = vdev->ifp; 2935 2936 max_size = ifp->if_mtu + 2937 VXGE_HAL_MAC_HEADER_MAX_SIZE + 2938 VXGE_HAL_HEADER_ETHERNET_II_802_3_ALIGN; 2939 2940 VXGE_BUFFER_ALIGN(max_size, 128) 2941 if (max_size <= MCLBYTES) 2942 vdev->rx_mbuf_sz = MCLBYTES; 2943 else 2944 vdev->rx_mbuf_sz = 2945 (max_size > MJUMPAGESIZE) ? MJUM9BYTES : MJUMPAGESIZE; 2946 2947 boundary = (max_size > PAGE_SIZE) ? 0 : PAGE_SIZE; 2948 2949 /* DMA tag for Tx */ 2950 err = bus_dma_tag_create( 2951 bus_get_dma_tag(vdev->ndev), 2952 1, 2953 PAGE_SIZE, 2954 BUS_SPACE_MAXADDR, 2955 BUS_SPACE_MAXADDR, 2956 NULL, 2957 NULL, 2958 VXGE_TSO_SIZE, 2959 VXGE_MAX_SEGS, 2960 PAGE_SIZE, 2961 BUS_DMA_ALLOCNOW, 2962 NULL, 2963 NULL, 2964 &(vpath->dma_tag_tx)); 2965 if (err != 0) 2966 goto _exit0; 2967 2968 /* DMA tag for Rx */ 2969 err = bus_dma_tag_create( 2970 bus_get_dma_tag(vdev->ndev), 2971 1, 2972 boundary, 2973 BUS_SPACE_MAXADDR, 2974 BUS_SPACE_MAXADDR, 2975 NULL, 2976 NULL, 2977 vdev->rx_mbuf_sz, 2978 1, 2979 vdev->rx_mbuf_sz, 2980 BUS_DMA_ALLOCNOW, 2981 NULL, 2982 NULL, 2983 &(vpath->dma_tag_rx)); 2984 if (err != 0) 2985 goto _exit1; 2986 2987 /* Create DMA map for this descriptor */ 2988 err = bus_dmamap_create(vpath->dma_tag_rx, BUS_DMA_NOWAIT, 2989 &vpath->extra_dma_map); 2990 if (err == 0) 2991 goto _exit0; 2992 2993 bus_dma_tag_destroy(vpath->dma_tag_rx); 2994 2995_exit1: 2996 bus_dma_tag_destroy(vpath->dma_tag_tx); 2997 2998_exit0: 2999 return (err); 3000} 3001 3002static inline int 
vxge_dma_mbuf_coalesce(bus_dma_tag_t dma_tag_tx, bus_dmamap_t dma_map,
    mbuf_t * m_headp, bus_dma_segment_t * dma_buffers,
    int *num_segs)
{
	int err = 0;
	mbuf_t mbuf_pkt = NULL;

	/*
	 * Try to DMA-load the chain; on EFBIG collapse it with m_defrag()
	 * and retry.  On defrag failure the caller keeps ownership of
	 * *m_headp (it is not freed here) and ENOBUFS is returned.
	 * NOTE(review): if a defragmented chain still exceeds the tag's
	 * segment limit this will loop defrag/load repeatedly — verify
	 * that VXGE_MAX_SEGS always accommodates a defragmented chain.
	 */
retry:
	err = bus_dmamap_load_mbuf_sg(dma_tag_tx, dma_map, *m_headp,
	    dma_buffers, num_segs, BUS_DMA_NOWAIT);
	if (err == EFBIG) {
		/* try to defrag, too many segments */
		mbuf_pkt = m_defrag(*m_headp, M_NOWAIT);
		if (mbuf_pkt == NULL) {
			err = ENOBUFS;
			goto _exit0;
		}
		*m_headp = mbuf_pkt;
		goto retry;
	}

_exit0:
	return (err);
}

/*
 * vxge_device_hw_info_get
 * Reads adapter hardware info (vpath mask, firmware version, host
 * type, function id) through the HAL, counts available vpaths, caches
 * privilege status, and decides whether a firmware upgrade is needed
 * or even possible.  Returns 0 on success, ENXIO on failure.
 */
int
vxge_device_hw_info_get(vxge_dev_t *vdev)
{
	int i, err = ENXIO;
	u64 vpath_mask = 0;
	u32 max_supported_vpath = 0;
	u32 fw_ver_maj_min;
	vxge_firmware_upgrade_e fw_option;

	vxge_hal_status_e status = VXGE_HAL_OK;
	vxge_hal_device_hw_info_t *hw_info;

	status = vxge_hal_device_hw_info_get(vdev->pdev,
	    (pci_reg_h) vdev->pdev->reg_map[0],
	    (u8 *) vdev->pdev->bar_info[0],
	    &vdev->config.hw_info);

	if (status != VXGE_HAL_OK)
		goto _exit0;

	hw_info = &vdev->config.hw_info;

	vpath_mask = hw_info->vpath_mask;
	if (vpath_mask == 0) {
		device_printf(vdev->ndev, "No vpaths available in device\n");
		goto _exit0;
	}

	fw_option = vdev->config.fw_option;

	/* Check how many vpaths are available */
	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & mBIT(i)))
			continue;
		max_supported_vpath++;
	}

	vdev->max_supported_vpath = max_supported_vpath;
	status = vxge_hal_device_is_privileged(hw_info->host_type,
	    hw_info->func_id);
	vdev->is_privilaged = (status == VXGE_HAL_OK) ?
TRUE : FALSE; 3069 3070 vdev->hw_fw_version = VXGE_FW_VERSION( 3071 hw_info->fw_version.major, 3072 hw_info->fw_version.minor, 3073 hw_info->fw_version.build); 3074 3075 fw_ver_maj_min = 3076 VXGE_FW_MAJ_MIN_VERSION(hw_info->fw_version.major, 3077 hw_info->fw_version.minor); 3078 3079 if ((fw_option >= VXGE_FW_UPGRADE_FORCE) || 3080 (vdev->hw_fw_version != VXGE_DRV_FW_VERSION)) { 3081 3082 /* For fw_ver 1.8.1 and above ignore build number. */ 3083 if ((fw_option == VXGE_FW_UPGRADE_ALL) && 3084 ((vdev->hw_fw_version >= VXGE_FW_VERSION(1, 8, 1)) && 3085 (fw_ver_maj_min == VXGE_DRV_FW_MAJ_MIN_VERSION))) { 3086 goto _exit1; 3087 } 3088 3089 if (vdev->hw_fw_version < VXGE_BASE_FW_VERSION) { 3090 device_printf(vdev->ndev, 3091 "Upgrade driver through vxge_update, " 3092 "Unable to load the driver.\n"); 3093 goto _exit0; 3094 } 3095 vdev->fw_upgrade = TRUE; 3096 } 3097 3098_exit1: 3099 err = 0; 3100 3101_exit0: 3102 return (err); 3103} 3104 3105/* 3106 * vxge_device_hw_info_print 3107 * Print device and driver information 3108 */ 3109void 3110vxge_device_hw_info_print(vxge_dev_t *vdev) 3111{ 3112 u32 i; 3113 device_t ndev; 3114 struct sysctl_ctx_list *ctx; 3115 struct sysctl_oid_list *children; 3116 char pmd_type[2][VXGE_PMD_INFO_LEN]; 3117 3118 vxge_hal_device_t *hldev; 3119 vxge_hal_device_hw_info_t *hw_info; 3120 vxge_hal_device_pmd_info_t *pmd_port; 3121 3122 hldev = vdev->devh; 3123 ndev = vdev->ndev; 3124 3125 ctx = device_get_sysctl_ctx(ndev); 3126 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ndev)); 3127 3128 hw_info = &(vdev->config.hw_info); 3129 3130 snprintf(vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION], 3131 sizeof(vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION]), 3132 "%d.%d.%d.%d", XGELL_VERSION_MAJOR, XGELL_VERSION_MINOR, 3133 XGELL_VERSION_FIX, XGELL_VERSION_BUILD); 3134 3135 /* Print PCI-e bus type/speed/width info */ 3136 snprintf(vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO], 3137 sizeof(vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO]), 3138 "x%d", 
hldev->link_width); 3139 3140 if (hldev->link_width <= VXGE_HAL_PCI_E_LINK_WIDTH_X4) 3141 device_printf(ndev, "For optimal performance a x8 " 3142 "PCI-Express slot is required.\n"); 3143 3144 vxge_null_terminate((char *) hw_info->serial_number, 3145 sizeof(hw_info->serial_number)); 3146 3147 vxge_null_terminate((char *) hw_info->part_number, 3148 sizeof(hw_info->part_number)); 3149 3150 snprintf(vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO], 3151 sizeof(vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO]), 3152 "%s", hw_info->serial_number); 3153 3154 snprintf(vdev->config.nic_attr[VXGE_PRINT_PART_NO], 3155 sizeof(vdev->config.nic_attr[VXGE_PRINT_PART_NO]), 3156 "%s", hw_info->part_number); 3157 3158 snprintf(vdev->config.nic_attr[VXGE_PRINT_FW_VERSION], 3159 sizeof(vdev->config.nic_attr[VXGE_PRINT_FW_VERSION]), 3160 "%s", hw_info->fw_version.version); 3161 3162 snprintf(vdev->config.nic_attr[VXGE_PRINT_FW_DATE], 3163 sizeof(vdev->config.nic_attr[VXGE_PRINT_FW_DATE]), 3164 "%s", hw_info->fw_date.date); 3165 3166 pmd_port = &(hw_info->pmd_port0); 3167 for (i = 0; i < hw_info->ports; i++) { 3168 3169 vxge_pmd_port_type_get(vdev, pmd_port->type, 3170 pmd_type[i], sizeof(pmd_type[i])); 3171 3172 strncpy(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i], 3173 "vendor=??, sn=??, pn=??, type=??", 3174 sizeof(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i])); 3175 3176 vxge_null_terminate(pmd_port->vendor, sizeof(pmd_port->vendor)); 3177 if (strlen(pmd_port->vendor) == 0) { 3178 pmd_port = &(hw_info->pmd_port1); 3179 continue; 3180 } 3181 3182 vxge_null_terminate(pmd_port->ser_num, 3183 sizeof(pmd_port->ser_num)); 3184 3185 vxge_null_terminate(pmd_port->part_num, 3186 sizeof(pmd_port->part_num)); 3187 3188 snprintf(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i], 3189 sizeof(vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0 + i]), 3190 "vendor=%s, sn=%s, pn=%s, type=%s", 3191 pmd_port->vendor, pmd_port->ser_num, 3192 pmd_port->part_num, pmd_type[i]); 3193 3194 pmd_port = 
&(hw_info->pmd_port1); 3195 } 3196 3197 switch (hw_info->function_mode) { 3198 case VXGE_HAL_PCIE_FUNC_MODE_SF1_VP17: 3199 snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], 3200 sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]), 3201 "%s %d %s", "Single Function - 1 function(s)", 3202 vdev->max_supported_vpath, "VPath(s)/function"); 3203 break; 3204 3205 case VXGE_HAL_PCIE_FUNC_MODE_MF2_VP8: 3206 snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], 3207 sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]), 3208 "%s %d %s", "Multi Function - 2 function(s)", 3209 vdev->max_supported_vpath, "VPath(s)/function"); 3210 break; 3211 3212 case VXGE_HAL_PCIE_FUNC_MODE_MF4_VP4: 3213 snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], 3214 sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]), 3215 "%s %d %s", "Multi Function - 4 function(s)", 3216 vdev->max_supported_vpath, "VPath(s)/function"); 3217 break; 3218 3219 case VXGE_HAL_PCIE_FUNC_MODE_MF8_VP2: 3220 snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], 3221 sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]), 3222 "%s %d %s", "Multi Function - 8 function(s)", 3223 vdev->max_supported_vpath, "VPath(s)/function"); 3224 break; 3225 3226 case VXGE_HAL_PCIE_FUNC_MODE_MF8P_VP2: 3227 snprintf(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], 3228 sizeof(vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]), 3229 "%s %d %s", "Multi Function (DirectIO) - 8 function(s)", 3230 vdev->max_supported_vpath, "VPath(s)/function"); 3231 break; 3232 } 3233 3234 snprintf(vdev->config.nic_attr[VXGE_PRINT_INTR_MODE], 3235 sizeof(vdev->config.nic_attr[VXGE_PRINT_INTR_MODE]), 3236 "%s", ((vdev->config.intr_mode == VXGE_HAL_INTR_MODE_MSIX) ? 
3237 "MSI-X" : "INTA")); 3238 3239 snprintf(vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT], 3240 sizeof(vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT]), 3241 "%d", vdev->no_of_vpath); 3242 3243 snprintf(vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE], 3244 sizeof(vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE]), 3245 "%lu", vdev->ifp->if_mtu); 3246 3247 snprintf(vdev->config.nic_attr[VXGE_PRINT_LRO_MODE], 3248 sizeof(vdev->config.nic_attr[VXGE_PRINT_LRO_MODE]), 3249 "%s", ((vdev->config.lro_enable) ? "Enabled" : "Disabled")); 3250 3251 snprintf(vdev->config.nic_attr[VXGE_PRINT_RTH_MODE], 3252 sizeof(vdev->config.nic_attr[VXGE_PRINT_RTH_MODE]), 3253 "%s", ((vdev->config.rth_enable) ? "Enabled" : "Disabled")); 3254 3255 snprintf(vdev->config.nic_attr[VXGE_PRINT_TSO_MODE], 3256 sizeof(vdev->config.nic_attr[VXGE_PRINT_TSO_MODE]), 3257 "%s", ((vdev->ifp->if_capenable & IFCAP_TSO4) ? 3258 "Enabled" : "Disabled")); 3259 3260 snprintf(vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE], 3261 sizeof(vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE]), 3262 "%s", ((hw_info->ports == 1) ? 
"Single Port" : "Dual Port")); 3263 3264 if (vdev->is_privilaged) { 3265 3266 if (hw_info->ports > 1) { 3267 3268 snprintf(vdev->config.nic_attr[VXGE_PRINT_PORT_MODE], 3269 sizeof(vdev->config.nic_attr[VXGE_PRINT_PORT_MODE]), 3270 "%s", vxge_port_mode[vdev->port_mode]); 3271 3272 if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT) 3273 snprintf(vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE], 3274 sizeof(vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE]), 3275 "%s", vxge_port_failure[vdev->port_failure]); 3276 3277 vxge_active_port_update(vdev); 3278 snprintf(vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT], 3279 sizeof(vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT]), 3280 "%lld", vdev->active_port); 3281 } 3282 3283 if (!is_single_func(hw_info->function_mode)) { 3284 snprintf(vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE], 3285 sizeof(vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE]), 3286 "%s", ((vdev->l2_switch) ? "Enabled" : "Disabled")); 3287 } 3288 } 3289 3290 device_printf(ndev, "Driver version\t: %s\n", 3291 vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION]); 3292 3293 device_printf(ndev, "Serial number\t: %s\n", 3294 vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO]); 3295 3296 device_printf(ndev, "Part number\t: %s\n", 3297 vdev->config.nic_attr[VXGE_PRINT_PART_NO]); 3298 3299 device_printf(ndev, "Firmware version\t: %s\n", 3300 vdev->config.nic_attr[VXGE_PRINT_FW_VERSION]); 3301 3302 device_printf(ndev, "Firmware date\t: %s\n", 3303 vdev->config.nic_attr[VXGE_PRINT_FW_DATE]); 3304 3305 device_printf(ndev, "Link width\t: %s\n", 3306 vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO]); 3307 3308 if (vdev->is_privilaged) { 3309 device_printf(ndev, "Function mode\t: %s\n", 3310 vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE]); 3311 } 3312 3313 device_printf(ndev, "Interrupt type\t: %s\n", 3314 vdev->config.nic_attr[VXGE_PRINT_INTR_MODE]); 3315 3316 device_printf(ndev, "VPath(s) opened\t: %s\n", 3317 vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT]); 3318 3319 device_printf(ndev, 
"Adapter Type\t: %s\n", 3320 vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE]); 3321 3322 device_printf(ndev, "PMD Port 0\t: %s\n", 3323 vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0]); 3324 3325 if (hw_info->ports > 1) { 3326 device_printf(ndev, "PMD Port 1\t: %s\n", 3327 vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_1]); 3328 3329 if (vdev->is_privilaged) { 3330 device_printf(ndev, "Port Mode\t: %s\n", 3331 vdev->config.nic_attr[VXGE_PRINT_PORT_MODE]); 3332 3333 if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT) 3334 device_printf(ndev, "Port Failure\t: %s\n", 3335 vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE]); 3336 3337 device_printf(vdev->ndev, "Active Port\t: %s\n", 3338 vdev->config.nic_attr[VXGE_PRINT_ACTIVE_PORT]); 3339 } 3340 } 3341 3342 if (vdev->is_privilaged && !is_single_func(hw_info->function_mode)) { 3343 device_printf(vdev->ndev, "L2 Switch\t: %s\n", 3344 vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE]); 3345 } 3346 3347 device_printf(ndev, "MTU is %s\n", 3348 vdev->config.nic_attr[VXGE_PRINT_MTU_SIZE]); 3349 3350 device_printf(ndev, "LRO %s\n", 3351 vdev->config.nic_attr[VXGE_PRINT_LRO_MODE]); 3352 3353 device_printf(ndev, "RTH %s\n", 3354 vdev->config.nic_attr[VXGE_PRINT_RTH_MODE]); 3355 3356 device_printf(ndev, "TSO %s\n", 3357 vdev->config.nic_attr[VXGE_PRINT_TSO_MODE]); 3358 3359 SYSCTL_ADD_STRING(ctx, children, 3360 OID_AUTO, "Driver version", CTLFLAG_RD, 3361 &vdev->config.nic_attr[VXGE_PRINT_DRV_VERSION], 3362 0, "Driver version"); 3363 3364 SYSCTL_ADD_STRING(ctx, children, 3365 OID_AUTO, "Serial number", CTLFLAG_RD, 3366 &vdev->config.nic_attr[VXGE_PRINT_SERIAL_NO], 3367 0, "Serial number"); 3368 3369 SYSCTL_ADD_STRING(ctx, children, 3370 OID_AUTO, "Part number", CTLFLAG_RD, 3371 &vdev->config.nic_attr[VXGE_PRINT_PART_NO], 3372 0, "Part number"); 3373 3374 SYSCTL_ADD_STRING(ctx, children, 3375 OID_AUTO, "Firmware version", CTLFLAG_RD, 3376 &vdev->config.nic_attr[VXGE_PRINT_FW_VERSION], 3377 0, "Firmware version"); 3378 3379 
SYSCTL_ADD_STRING(ctx, children, 3380 OID_AUTO, "Firmware date", CTLFLAG_RD, 3381 &vdev->config.nic_attr[VXGE_PRINT_FW_DATE], 3382 0, "Firmware date"); 3383 3384 SYSCTL_ADD_STRING(ctx, children, 3385 OID_AUTO, "Link width", CTLFLAG_RD, 3386 &vdev->config.nic_attr[VXGE_PRINT_PCIE_INFO], 3387 0, "Link width"); 3388 3389 if (vdev->is_privilaged) { 3390 SYSCTL_ADD_STRING(ctx, children, 3391 OID_AUTO, "Function mode", CTLFLAG_RD, 3392 &vdev->config.nic_attr[VXGE_PRINT_FUNC_MODE], 3393 0, "Function mode"); 3394 } 3395 3396 SYSCTL_ADD_STRING(ctx, children, 3397 OID_AUTO, "Interrupt type", CTLFLAG_RD, 3398 &vdev->config.nic_attr[VXGE_PRINT_INTR_MODE], 3399 0, "Interrupt type"); 3400 3401 SYSCTL_ADD_STRING(ctx, children, 3402 OID_AUTO, "VPath(s) opened", CTLFLAG_RD, 3403 &vdev->config.nic_attr[VXGE_PRINT_VPATH_COUNT], 3404 0, "VPath(s) opened"); 3405 3406 SYSCTL_ADD_STRING(ctx, children, 3407 OID_AUTO, "Adapter Type", CTLFLAG_RD, 3408 &vdev->config.nic_attr[VXGE_PRINT_ADAPTER_TYPE], 3409 0, "Adapter Type"); 3410 3411 SYSCTL_ADD_STRING(ctx, children, 3412 OID_AUTO, "pmd port 0", CTLFLAG_RD, 3413 &vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_0], 3414 0, "pmd port"); 3415 3416 if (hw_info->ports > 1) { 3417 3418 SYSCTL_ADD_STRING(ctx, children, 3419 OID_AUTO, "pmd port 1", CTLFLAG_RD, 3420 &vdev->config.nic_attr[VXGE_PRINT_PMD_PORTS_1], 3421 0, "pmd port"); 3422 3423 if (vdev->is_privilaged) { 3424 SYSCTL_ADD_STRING(ctx, children, 3425 OID_AUTO, "Port Mode", CTLFLAG_RD, 3426 &vdev->config.nic_attr[VXGE_PRINT_PORT_MODE], 3427 0, "Port Mode"); 3428 3429 if (vdev->port_mode != VXGE_HAL_DP_NP_MODE_SINGLE_PORT) 3430 SYSCTL_ADD_STRING(ctx, children, 3431 OID_AUTO, "Port Failure", CTLFLAG_RD, 3432 &vdev->config.nic_attr[VXGE_PRINT_PORT_FAILURE], 3433 0, "Port Failure"); 3434 3435 SYSCTL_ADD_STRING(ctx, children, 3436 OID_AUTO, "L2 Switch", CTLFLAG_RD, 3437 &vdev->config.nic_attr[VXGE_PRINT_L2SWITCH_MODE], 3438 0, "L2 Switch"); 3439 } 3440 } 3441 3442 SYSCTL_ADD_STRING(ctx, children, 
3443 OID_AUTO, "LRO mode", CTLFLAG_RD, 3444 &vdev->config.nic_attr[VXGE_PRINT_LRO_MODE], 3445 0, "LRO mode"); 3446 3447 SYSCTL_ADD_STRING(ctx, children, 3448 OID_AUTO, "RTH mode", CTLFLAG_RD, 3449 &vdev->config.nic_attr[VXGE_PRINT_RTH_MODE], 3450 0, "RTH mode"); 3451 3452 SYSCTL_ADD_STRING(ctx, children, 3453 OID_AUTO, "TSO mode", CTLFLAG_RD, 3454 &vdev->config.nic_attr[VXGE_PRINT_TSO_MODE], 3455 0, "TSO mode"); 3456} 3457 3458void 3459vxge_pmd_port_type_get(vxge_dev_t *vdev, u32 port_type, 3460 char *ifm_name, u8 ifm_len) 3461{ 3462 3463 vdev->ifm_optics = IFM_UNKNOWN; 3464 3465 switch (port_type) { 3466 case VXGE_HAL_DEVICE_PMD_TYPE_10G_SR: 3467 vdev->ifm_optics = IFM_10G_SR; 3468 strlcpy(ifm_name, "10GbE SR", ifm_len); 3469 break; 3470 3471 case VXGE_HAL_DEVICE_PMD_TYPE_10G_LR: 3472 vdev->ifm_optics = IFM_10G_LR; 3473 strlcpy(ifm_name, "10GbE LR", ifm_len); 3474 break; 3475 3476 case VXGE_HAL_DEVICE_PMD_TYPE_10G_LRM: 3477 vdev->ifm_optics = IFM_10G_LRM; 3478 strlcpy(ifm_name, "10GbE LRM", ifm_len); 3479 break; 3480 3481 case VXGE_HAL_DEVICE_PMD_TYPE_10G_DIRECT: 3482 vdev->ifm_optics = IFM_10G_TWINAX; 3483 strlcpy(ifm_name, "10GbE DA (Direct Attached)", ifm_len); 3484 break; 3485 3486 case VXGE_HAL_DEVICE_PMD_TYPE_10G_CX4: 3487 vdev->ifm_optics = IFM_10G_CX4; 3488 strlcpy(ifm_name, "10GbE CX4", ifm_len); 3489 break; 3490 3491 case VXGE_HAL_DEVICE_PMD_TYPE_10G_BASE_T: 3492#if __FreeBSD_version >= 800000 3493 vdev->ifm_optics = IFM_10G_T; 3494#endif 3495 strlcpy(ifm_name, "10GbE baseT", ifm_len); 3496 break; 3497 3498 case VXGE_HAL_DEVICE_PMD_TYPE_10G_OTHER: 3499 strlcpy(ifm_name, "10GbE Other", ifm_len); 3500 break; 3501 3502 case VXGE_HAL_DEVICE_PMD_TYPE_1G_SX: 3503 vdev->ifm_optics = IFM_1000_SX; 3504 strlcpy(ifm_name, "1GbE SX", ifm_len); 3505 break; 3506 3507 case VXGE_HAL_DEVICE_PMD_TYPE_1G_LX: 3508 vdev->ifm_optics = IFM_1000_LX; 3509 strlcpy(ifm_name, "1GbE LX", ifm_len); 3510 break; 3511 3512 case VXGE_HAL_DEVICE_PMD_TYPE_1G_CX: 3513 vdev->ifm_optics = 
IFM_1000_CX; 3514 strlcpy(ifm_name, "1GbE CX", ifm_len); 3515 break; 3516 3517 case VXGE_HAL_DEVICE_PMD_TYPE_1G_BASE_T: 3518 vdev->ifm_optics = IFM_1000_T; 3519 strlcpy(ifm_name, "1GbE baseT", ifm_len); 3520 break; 3521 3522 case VXGE_HAL_DEVICE_PMD_TYPE_1G_DIRECT: 3523 strlcpy(ifm_name, "1GbE DA (Direct Attached)", 3524 ifm_len); 3525 break; 3526 3527 case VXGE_HAL_DEVICE_PMD_TYPE_1G_CX4: 3528 strlcpy(ifm_name, "1GbE CX4", ifm_len); 3529 break; 3530 3531 case VXGE_HAL_DEVICE_PMD_TYPE_1G_OTHER: 3532 strlcpy(ifm_name, "1GbE Other", ifm_len); 3533 break; 3534 3535 default: 3536 case VXGE_HAL_DEVICE_PMD_TYPE_UNKNOWN: 3537 strlcpy(ifm_name, "UNSUP", ifm_len); 3538 break; 3539 } 3540} 3541 3542u32 3543vxge_ring_length_get(u32 buffer_mode) 3544{ 3545 return (VXGE_DEFAULT_RING_BLOCK * 3546 vxge_hal_ring_rxds_per_block_get(buffer_mode)); 3547} 3548 3549/* 3550 * Removes trailing spaces padded 3551 * and NULL terminates strings 3552 */ 3553static inline void 3554vxge_null_terminate(char *str, size_t len) 3555{ 3556 len--; 3557 while (*str && (*str != ' ') && (len != 0)) 3558 ++str; 3559 3560 --len; 3561 if (*str) 3562 *str = '\0'; 3563} 3564 3565/* 3566 * vxge_ioctl 3567 * Callback to control the device 3568 */ 3569int 3570vxge_ioctl(ifnet_t ifp, u_long command, caddr_t data) 3571{ 3572 int mask, err = 0; 3573 vxge_dev_t *vdev = (vxge_dev_t *) ifp->if_softc; 3574 struct ifreq *ifr = (struct ifreq *) data; 3575 3576 if (!vdev->is_active) 3577 return (EBUSY); 3578 3579 switch (command) { 3580 /* Set/Get ifnet address */ 3581 case SIOCSIFADDR: 3582 case SIOCGIFADDR: 3583 ether_ioctl(ifp, command, data); 3584 break; 3585 3586 /* Set Interface MTU */ 3587 case SIOCSIFMTU: 3588 err = vxge_change_mtu(vdev, (unsigned long)ifr->ifr_mtu); 3589 break; 3590 3591 /* Set Interface Flags */ 3592 case SIOCSIFFLAGS: 3593 VXGE_DRV_LOCK(vdev); 3594 if (ifp->if_flags & IFF_UP) { 3595 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 3596 if ((ifp->if_flags ^ vdev->if_flags) & 3597 (IFF_PROMISC | 
IFF_ALLMULTI)) 3598 vxge_promisc_set(vdev); 3599 } else { 3600 vxge_init_locked(vdev); 3601 } 3602 } else { 3603 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 3604 vxge_stop_locked(vdev); 3605 } 3606 vdev->if_flags = ifp->if_flags; 3607 VXGE_DRV_UNLOCK(vdev); 3608 break; 3609 3610 /* Add/delete multicast address */ 3611 case SIOCADDMULTI: 3612 case SIOCDELMULTI: 3613 break; 3614 3615 /* Get/Set Interface Media */ 3616 case SIOCSIFMEDIA: 3617 case SIOCGIFMEDIA: 3618 err = ifmedia_ioctl(ifp, ifr, &vdev->media, command); 3619 break; 3620 3621 /* Set Capabilities */ 3622 case SIOCSIFCAP: 3623 VXGE_DRV_LOCK(vdev); 3624 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3625 3626 if (mask & IFCAP_TXCSUM) { 3627 ifp->if_capenable ^= IFCAP_TXCSUM; 3628 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 3629 3630 if ((ifp->if_capenable & IFCAP_TSO) && 3631 !(ifp->if_capenable & IFCAP_TXCSUM)) { 3632 3633 ifp->if_capenable &= ~IFCAP_TSO; 3634 ifp->if_hwassist &= ~CSUM_TSO; 3635 if_printf(ifp, "TSO Disabled\n"); 3636 } 3637 } 3638 if (mask & IFCAP_RXCSUM) 3639 ifp->if_capenable ^= IFCAP_RXCSUM; 3640 3641 if (mask & IFCAP_TSO4) { 3642 ifp->if_capenable ^= IFCAP_TSO4; 3643 3644 if (ifp->if_capenable & IFCAP_TSO) { 3645 if (ifp->if_capenable & IFCAP_TXCSUM) { 3646 ifp->if_hwassist |= CSUM_TSO; 3647 if_printf(ifp, "TSO Enabled\n"); 3648 } else { 3649 ifp->if_capenable &= ~IFCAP_TSO; 3650 ifp->if_hwassist &= ~CSUM_TSO; 3651 if_printf(ifp, 3652 "Enable tx checksum offload \ 3653 first.\n"); 3654 err = EAGAIN; 3655 } 3656 } else { 3657 ifp->if_hwassist &= ~CSUM_TSO; 3658 if_printf(ifp, "TSO Disabled\n"); 3659 } 3660 } 3661 if (mask & IFCAP_LRO) 3662 ifp->if_capenable ^= IFCAP_LRO; 3663 3664 if (mask & IFCAP_VLAN_HWTAGGING) 3665 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3666 3667 if (mask & IFCAP_VLAN_MTU) 3668 ifp->if_capenable ^= IFCAP_VLAN_MTU; 3669 3670 if (mask & IFCAP_VLAN_HWCSUM) 3671 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 3672 3673#if __FreeBSD_version >= 800000 3674 if (mask & 
IFCAP_VLAN_HWTSO) 3675 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 3676#endif 3677 3678#if defined(VLAN_CAPABILITIES) 3679 VLAN_CAPABILITIES(ifp); 3680#endif 3681 3682 VXGE_DRV_UNLOCK(vdev); 3683 break; 3684 3685 case SIOCGPRIVATE_0: 3686 VXGE_DRV_LOCK(vdev); 3687 err = vxge_ioctl_stats(vdev, ifr); 3688 VXGE_DRV_UNLOCK(vdev); 3689 break; 3690 3691 case SIOCGPRIVATE_1: 3692 VXGE_DRV_LOCK(vdev); 3693 err = vxge_ioctl_regs(vdev, ifr); 3694 VXGE_DRV_UNLOCK(vdev); 3695 break; 3696 3697 default: 3698 err = ether_ioctl(ifp, command, data); 3699 break; 3700 } 3701 3702 return (err); 3703} 3704 3705/* 3706 * vxge_ioctl_regs 3707 * IOCTL to get registers 3708 */ 3709int 3710vxge_ioctl_regs(vxge_dev_t *vdev, struct ifreq *ifr) 3711{ 3712 u64 value = 0x0; 3713 u32 vp_id = 0; 3714 u32 offset, reqd_size = 0; 3715 int i, err = EINVAL; 3716 3717 char *command = (char *) ifr->ifr_data; 3718 void *reg_info = (void *) ifr->ifr_data; 3719 3720 vxge_vpath_t *vpath; 3721 vxge_hal_status_e status = VXGE_HAL_OK; 3722 vxge_hal_mgmt_reg_type_e regs_type; 3723 3724 switch (*command) { 3725 case vxge_hal_mgmt_reg_type_pcicfgmgmt: 3726 if (vdev->is_privilaged) { 3727 reqd_size = sizeof(vxge_hal_pcicfgmgmt_reg_t); 3728 regs_type = vxge_hal_mgmt_reg_type_pcicfgmgmt; 3729 } 3730 break; 3731 3732 case vxge_hal_mgmt_reg_type_mrpcim: 3733 if (vdev->is_privilaged) { 3734 reqd_size = sizeof(vxge_hal_mrpcim_reg_t); 3735 regs_type = vxge_hal_mgmt_reg_type_mrpcim; 3736 } 3737 break; 3738 3739 case vxge_hal_mgmt_reg_type_srpcim: 3740 if (vdev->is_privilaged) { 3741 reqd_size = sizeof(vxge_hal_srpcim_reg_t); 3742 regs_type = vxge_hal_mgmt_reg_type_srpcim; 3743 } 3744 break; 3745 3746 case vxge_hal_mgmt_reg_type_memrepair: 3747 if (vdev->is_privilaged) { 3748 /* reqd_size = sizeof(vxge_hal_memrepair_reg_t); */ 3749 regs_type = vxge_hal_mgmt_reg_type_memrepair; 3750 } 3751 break; 3752 3753 case vxge_hal_mgmt_reg_type_legacy: 3754 reqd_size = sizeof(vxge_hal_legacy_reg_t); 3755 regs_type = 
vxge_hal_mgmt_reg_type_legacy; 3756 break; 3757 3758 case vxge_hal_mgmt_reg_type_toc: 3759 reqd_size = sizeof(vxge_hal_toc_reg_t); 3760 regs_type = vxge_hal_mgmt_reg_type_toc; 3761 break; 3762 3763 case vxge_hal_mgmt_reg_type_common: 3764 reqd_size = sizeof(vxge_hal_common_reg_t); 3765 regs_type = vxge_hal_mgmt_reg_type_common; 3766 break; 3767 3768 case vxge_hal_mgmt_reg_type_vpmgmt: 3769 reqd_size = sizeof(vxge_hal_vpmgmt_reg_t); 3770 regs_type = vxge_hal_mgmt_reg_type_vpmgmt; 3771 vpath = &(vdev->vpaths[*((u32 *) reg_info + 1)]); 3772 vp_id = vpath->vp_id; 3773 break; 3774 3775 case vxge_hal_mgmt_reg_type_vpath: 3776 reqd_size = sizeof(vxge_hal_vpath_reg_t); 3777 regs_type = vxge_hal_mgmt_reg_type_vpath; 3778 vpath = &(vdev->vpaths[*((u32 *) reg_info + 1)]); 3779 vp_id = vpath->vp_id; 3780 break; 3781 3782 case VXGE_GET_VPATH_COUNT: 3783 *((u32 *) reg_info) = vdev->no_of_vpath; 3784 err = 0; 3785 break; 3786 3787 default: 3788 reqd_size = 0; 3789 break; 3790 } 3791 3792 if (reqd_size) { 3793 for (i = 0, offset = 0; offset < reqd_size; 3794 i++, offset += 0x0008) { 3795 value = 0x0; 3796 status = vxge_hal_mgmt_reg_read(vdev->devh, regs_type, 3797 vp_id, offset, &value); 3798 3799 err = (status != VXGE_HAL_OK) ? 
EINVAL : 0; 3800 if (err == EINVAL) 3801 break; 3802 3803 *((u64 *) ((u64 *) reg_info + i)) = value; 3804 } 3805 } 3806 return (err); 3807} 3808 3809/* 3810 * vxge_ioctl_stats 3811 * IOCTL to get statistics 3812 */ 3813int 3814vxge_ioctl_stats(vxge_dev_t *vdev, struct ifreq *ifr) 3815{ 3816 int i, retsize, err = EINVAL; 3817 u32 bufsize; 3818 3819 vxge_vpath_t *vpath; 3820 vxge_bw_info_t *bw_info; 3821 vxge_port_info_t *port_info; 3822 vxge_drv_stats_t *drv_stat; 3823 3824 char *buffer = NULL; 3825 char *command = (char *) ifr->ifr_data; 3826 vxge_hal_status_e status = VXGE_HAL_OK; 3827 3828 switch (*command) { 3829 case VXGE_GET_PCI_CONF: 3830 bufsize = VXGE_STATS_BUFFER_SIZE; 3831 buffer = (char *) vxge_mem_alloc(bufsize); 3832 if (buffer != NULL) { 3833 status = vxge_hal_aux_pci_config_read(vdev->devh, 3834 bufsize, buffer, &retsize); 3835 if (status == VXGE_HAL_OK) 3836 err = copyout(buffer, ifr->ifr_data, retsize); 3837 else 3838 device_printf(vdev->ndev, 3839 "failed pciconfig statistics query\n"); 3840 3841 vxge_mem_free(buffer, bufsize); 3842 } 3843 break; 3844 3845 case VXGE_GET_MRPCIM_STATS: 3846 if (!vdev->is_privilaged) 3847 break; 3848 3849 bufsize = VXGE_STATS_BUFFER_SIZE; 3850 buffer = (char *) vxge_mem_alloc(bufsize); 3851 if (buffer != NULL) { 3852 status = vxge_hal_aux_stats_mrpcim_read(vdev->devh, 3853 bufsize, buffer, &retsize); 3854 if (status == VXGE_HAL_OK) 3855 err = copyout(buffer, ifr->ifr_data, retsize); 3856 else 3857 device_printf(vdev->ndev, 3858 "failed mrpcim statistics query\n"); 3859 3860 vxge_mem_free(buffer, bufsize); 3861 } 3862 break; 3863 3864 case VXGE_GET_DEVICE_STATS: 3865 bufsize = VXGE_STATS_BUFFER_SIZE; 3866 buffer = (char *) vxge_mem_alloc(bufsize); 3867 if (buffer != NULL) { 3868 status = vxge_hal_aux_stats_device_read(vdev->devh, 3869 bufsize, buffer, &retsize); 3870 if (status == VXGE_HAL_OK) 3871 err = copyout(buffer, ifr->ifr_data, retsize); 3872 else 3873 device_printf(vdev->ndev, 3874 "failed device statistics 
query\n"); 3875 3876 vxge_mem_free(buffer, bufsize); 3877 } 3878 break; 3879 3880 case VXGE_GET_DEVICE_HWINFO: 3881 bufsize = sizeof(vxge_device_hw_info_t); 3882 buffer = (char *) vxge_mem_alloc(bufsize); 3883 if (buffer != NULL) { 3884 vxge_os_memcpy( 3885 &(((vxge_device_hw_info_t *) buffer)->hw_info), 3886 &vdev->config.hw_info, 3887 sizeof(vxge_hal_device_hw_info_t)); 3888 3889 ((vxge_device_hw_info_t *) buffer)->port_mode = 3890 vdev->port_mode; 3891 3892 ((vxge_device_hw_info_t *) buffer)->port_failure = 3893 vdev->port_failure; 3894 3895 err = copyout(buffer, ifr->ifr_data, bufsize); 3896 if (err != 0) 3897 device_printf(vdev->ndev, 3898 "failed device hardware info query\n"); 3899 3900 vxge_mem_free(buffer, bufsize); 3901 } 3902 break; 3903 3904 case VXGE_GET_DRIVER_STATS: 3905 bufsize = sizeof(vxge_drv_stats_t) * vdev->no_of_vpath; 3906 drv_stat = (vxge_drv_stats_t *) vxge_mem_alloc(bufsize); 3907 if (drv_stat != NULL) { 3908 for (i = 0; i < vdev->no_of_vpath; i++) { 3909 vpath = &(vdev->vpaths[i]); 3910 3911 vpath->driver_stats.rx_lro_queued += 3912 vpath->lro.lro_queued; 3913 3914 vpath->driver_stats.rx_lro_flushed += 3915 vpath->lro.lro_flushed; 3916 3917 vxge_os_memcpy(&drv_stat[i], 3918 &(vpath->driver_stats), 3919 sizeof(vxge_drv_stats_t)); 3920 } 3921 3922 err = copyout(drv_stat, ifr->ifr_data, bufsize); 3923 if (err != 0) 3924 device_printf(vdev->ndev, 3925 "failed driver statistics query\n"); 3926 3927 vxge_mem_free(drv_stat, bufsize); 3928 } 3929 break; 3930 3931 case VXGE_GET_BANDWIDTH: 3932 bw_info = (vxge_bw_info_t *) ifr->ifr_data; 3933 3934 if ((vdev->config.hw_info.func_id != 0) && 3935 (vdev->hw_fw_version < VXGE_FW_VERSION(1, 8, 0))) 3936 break; 3937 3938 if (vdev->config.hw_info.func_id != 0) 3939 bw_info->func_id = vdev->config.hw_info.func_id; 3940 3941 status = vxge_bw_priority_get(vdev, bw_info); 3942 if (status != VXGE_HAL_OK) 3943 break; 3944 3945 err = copyout(bw_info, ifr->ifr_data, sizeof(vxge_bw_info_t)); 3946 break; 3947 3948 
case VXGE_SET_BANDWIDTH: 3949 if (vdev->is_privilaged) 3950 err = vxge_bw_priority_set(vdev, ifr); 3951 break; 3952 3953 case VXGE_SET_PORT_MODE: 3954 if (vdev->is_privilaged) { 3955 if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) { 3956 port_info = (vxge_port_info_t *) ifr->ifr_data; 3957 vdev->config.port_mode = port_info->port_mode; 3958 err = vxge_port_mode_update(vdev); 3959 if (err != ENXIO) 3960 err = VXGE_HAL_FAIL; 3961 else { 3962 err = VXGE_HAL_OK; 3963 device_printf(vdev->ndev, 3964 "PLEASE POWER CYCLE THE SYSTEM\n"); 3965 } 3966 } 3967 } 3968 break; 3969 3970 case VXGE_GET_PORT_MODE: 3971 if (vdev->is_privilaged) { 3972 if (vdev->config.hw_info.ports == VXGE_DUAL_PORT_MODE) { 3973 port_info = (vxge_port_info_t *) ifr->ifr_data; 3974 err = vxge_port_mode_get(vdev, port_info); 3975 if (err == VXGE_HAL_OK) { 3976 err = copyout(port_info, ifr->ifr_data, 3977 sizeof(vxge_port_info_t)); 3978 } 3979 } 3980 } 3981 break; 3982 3983 default: 3984 break; 3985 } 3986 3987 return (err); 3988} 3989 3990int 3991vxge_bw_priority_config(vxge_dev_t *vdev) 3992{ 3993 u32 i; 3994 int err = EINVAL; 3995 3996 for (i = 0; i < vdev->no_of_func; i++) { 3997 err = vxge_bw_priority_update(vdev, i, TRUE); 3998 if (err != 0) 3999 break; 4000 } 4001 4002 return (err); 4003} 4004 4005int 4006vxge_bw_priority_set(vxge_dev_t *vdev, struct ifreq *ifr) 4007{ 4008 int err; 4009 u32 func_id; 4010 vxge_bw_info_t *bw_info; 4011 4012 bw_info = (vxge_bw_info_t *) ifr->ifr_data; 4013 func_id = bw_info->func_id; 4014 4015 vdev->config.bw_info[func_id].priority = bw_info->priority; 4016 vdev->config.bw_info[func_id].bandwidth = bw_info->bandwidth; 4017 4018 err = vxge_bw_priority_update(vdev, func_id, FALSE); 4019 4020 return (err); 4021} 4022 4023int 4024vxge_bw_priority_update(vxge_dev_t *vdev, u32 func_id, bool binit) 4025{ 4026 u32 i, set = 0; 4027 u32 bandwidth, priority, vpath_count; 4028 u64 vpath_list[VXGE_HAL_MAX_VIRTUAL_PATHS]; 4029 4030 vxge_hal_device_t *hldev; 4031 
vxge_hal_vp_config_t *vp_config; 4032 vxge_hal_status_e status = VXGE_HAL_OK; 4033 4034 hldev = vdev->devh; 4035 4036 status = vxge_hal_get_vpath_list(vdev->devh, func_id, 4037 vpath_list, &vpath_count); 4038 4039 if (status != VXGE_HAL_OK) 4040 return (status); 4041 4042 for (i = 0; i < vpath_count; i++) { 4043 vp_config = &(hldev->config.vp_config[vpath_list[i]]); 4044 4045 /* Configure Bandwidth */ 4046 if (vdev->config.bw_info[func_id].bandwidth != 4047 VXGE_HAL_VPATH_BW_LIMIT_DEFAULT) { 4048 4049 set = 1; 4050 bandwidth = vdev->config.bw_info[func_id].bandwidth; 4051 if (bandwidth < VXGE_HAL_VPATH_BW_LIMIT_MIN || 4052 bandwidth > VXGE_HAL_VPATH_BW_LIMIT_MAX) { 4053 4054 bandwidth = VXGE_HAL_VPATH_BW_LIMIT_DEFAULT; 4055 } 4056 vp_config->bandwidth = bandwidth; 4057 } 4058 4059 /* 4060 * If b/w limiting is enabled on any of the 4061 * VFs, then for remaining VFs set the priority to 3 4062 * and b/w limiting to max i.e 10 Gb) 4063 */ 4064 if (vp_config->bandwidth == VXGE_HAL_VPATH_BW_LIMIT_DEFAULT) 4065 vp_config->bandwidth = VXGE_HAL_VPATH_BW_LIMIT_MAX; 4066 4067 if (binit && vdev->config.low_latency) { 4068 if (func_id == 0) 4069 vdev->config.bw_info[func_id].priority = 4070 VXGE_DEFAULT_VPATH_PRIORITY_HIGH; 4071 } 4072 4073 /* Configure Priority */ 4074 if (vdev->config.bw_info[func_id].priority != 4075 VXGE_HAL_VPATH_PRIORITY_DEFAULT) { 4076 4077 set = 1; 4078 priority = vdev->config.bw_info[func_id].priority; 4079 if (priority < VXGE_HAL_VPATH_PRIORITY_MIN || 4080 priority > VXGE_HAL_VPATH_PRIORITY_MAX) { 4081 4082 priority = VXGE_HAL_VPATH_PRIORITY_DEFAULT; 4083 } 4084 vp_config->priority = priority; 4085 4086 } else if (vdev->config.low_latency) { 4087 set = 1; 4088 vp_config->priority = VXGE_DEFAULT_VPATH_PRIORITY_LOW; 4089 } 4090 4091 if (set == 1) { 4092 status = vxge_hal_rx_bw_priority_set(vdev->devh, 4093 vpath_list[i]); 4094 if (status != VXGE_HAL_OK) 4095 break; 4096 4097 if (vpath_list[i] < VXGE_HAL_TX_BW_VPATH_LIMIT) { 4098 status = 
vxge_hal_tx_bw_priority_set( 4099 vdev->devh, vpath_list[i]); 4100 if (status != VXGE_HAL_OK) 4101 break; 4102 } 4103 } 4104 } 4105 4106 return ((status == VXGE_HAL_OK) ? 0 : EINVAL); 4107} 4108 4109/* 4110 * vxge_intr_coalesce_tx 4111 * Changes interrupt coalescing if the interrupts are not within a range 4112 * Return Value: Nothing 4113 */ 4114void 4115vxge_intr_coalesce_tx(vxge_vpath_t *vpath) 4116{ 4117 u32 timer; 4118 4119 if (!vpath->tx_intr_coalesce) 4120 return; 4121 4122 vpath->tx_interrupts++; 4123 if (ticks > vpath->tx_ticks + hz/100) { 4124 4125 vpath->tx_ticks = ticks; 4126 timer = vpath->tti_rtimer_val; 4127 if (vpath->tx_interrupts > VXGE_MAX_TX_INTERRUPT_COUNT) { 4128 if (timer != VXGE_TTI_RTIMER_ADAPT_VAL) { 4129 vpath->tti_rtimer_val = 4130 VXGE_TTI_RTIMER_ADAPT_VAL; 4131 4132 vxge_hal_vpath_dynamic_tti_rtimer_set( 4133 vpath->handle, vpath->tti_rtimer_val); 4134 } 4135 } else { 4136 if (timer != 0) { 4137 vpath->tti_rtimer_val = 0; 4138 vxge_hal_vpath_dynamic_tti_rtimer_set( 4139 vpath->handle, vpath->tti_rtimer_val); 4140 } 4141 } 4142 vpath->tx_interrupts = 0; 4143 } 4144} 4145 4146/* 4147 * vxge_intr_coalesce_rx 4148 * Changes interrupt coalescing if the interrupts are not within a range 4149 * Return Value: Nothing 4150 */ 4151void 4152vxge_intr_coalesce_rx(vxge_vpath_t *vpath) 4153{ 4154 u32 timer; 4155 4156 if (!vpath->rx_intr_coalesce) 4157 return; 4158 4159 vpath->rx_interrupts++; 4160 if (ticks > vpath->rx_ticks + hz/100) { 4161 4162 vpath->rx_ticks = ticks; 4163 timer = vpath->rti_rtimer_val; 4164 4165 if (vpath->rx_interrupts > VXGE_MAX_RX_INTERRUPT_COUNT) { 4166 if (timer != VXGE_RTI_RTIMER_ADAPT_VAL) { 4167 vpath->rti_rtimer_val = 4168 VXGE_RTI_RTIMER_ADAPT_VAL; 4169 4170 vxge_hal_vpath_dynamic_rti_rtimer_set( 4171 vpath->handle, vpath->rti_rtimer_val); 4172 } 4173 } else { 4174 if (timer != 0) { 4175 vpath->rti_rtimer_val = 0; 4176 vxge_hal_vpath_dynamic_rti_rtimer_set( 4177 vpath->handle, vpath->rti_rtimer_val); 4178 } 4179 } 4180 
vpath->rx_interrupts = 0; 4181 } 4182} 4183 4184/* 4185 * vxge_methods FreeBSD device interface entry points 4186 */ 4187static device_method_t vxge_methods[] = { 4188 DEVMETHOD(device_probe, vxge_probe), 4189 DEVMETHOD(device_attach, vxge_attach), 4190 DEVMETHOD(device_detach, vxge_detach), 4191 DEVMETHOD(device_shutdown, vxge_shutdown), 4192 {0, 0} 4193}; 4194 4195static driver_t vxge_driver = { 4196 "vxge", vxge_methods, sizeof(vxge_dev_t), 4197}; 4198 4199static devclass_t vxge_devclass; 4200 4201DRIVER_MODULE(vxge, pci, vxge_driver, vxge_devclass, 0, 0); 4202