1/****************************************************************************** 2* This software may be used and distributed according to the terms of 3* the GNU General Public License (GPL), incorporated herein by reference. 4* Drivers based on or derived from this code fall under the GPL and must 5* retain the authorship, copyright and license notice. This file is not 6* a complete program and may only be used when the entire operating 7* system is licensed under the GPL. 8* See the file COPYING in this distribution for more information. 9* 10* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O 11* Virtualized Server Adapter. 12* Copyright(c) 2002-2010 Exar Corp. 13* 14* The module loadable parameters that are supported by the driver and a brief 15* explanation of all the variables: 16* vlan_tag_strip: 17* Strip VLAN Tag enable/disable. Instructs the device to remove 18* the VLAN tag from all received tagged frames that are not 19* replicated at the internal L2 switch. 20* 0 - Do not strip the VLAN tag. 21* 1 - Strip the VLAN tag. 22* 23* addr_learn_en: 24* Enable learning the mac address of the guest OS interface in 25* a virtualization environment. 26* 0 - DISABLE 27* 1 - ENABLE 28* 29* max_config_port: 30* Maximum number of port to be supported. 31* MIN -1 and MAX - 2 32* 33* max_config_vpath: 34* This configures the maximum no of VPATH configures for each 35* device function. 36* MIN - 1 and MAX - 17 37* 38* max_config_dev: 39* This configures maximum no of Device function to be enabled. 40* MIN - 1 and MAX - 17 41* 42******************************************************************************/ 43 44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 45 46#include <linux/if_vlan.h> 47#include <linux/pci.h> 48#include <linux/slab.h> 49#include <linux/tcp.h> 50#include <net/ip.h> 51#include <linux/netdevice.h> 52#include <linux/etherdevice.h> 53#include "vxge-main.h" 54#include "vxge-reg.h" 55 56MODULE_LICENSE("Dual BSD/GPL"); 57MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O" 58 "Virtualized Server Adapter"); 59 60static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = { 61 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID, 62 PCI_ANY_ID}, 63 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID, 64 PCI_ANY_ID}, 65 {0} 66}; 67 68MODULE_DEVICE_TABLE(pci, vxge_id_table); 69 70VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE); 71VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT); 72VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT); 73VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT); 74VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT); 75VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV); 76 77static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] = 78 {0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31}; 79static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] = 80 {[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF}; 81module_param_array(bw_percentage, uint, NULL, 0); 82 83static struct vxge_drv_config *driver_config; 84 85static inline int is_vxge_card_up(struct vxgedev *vdev) 86{ 87 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state); 88} 89 90static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) 91{ 92 struct sk_buff **skb_ptr = NULL; 93 struct sk_buff **temp; 94#define NR_SKB_COMPLETED 128 95 struct sk_buff *completed[NR_SKB_COMPLETED]; 96 int more; 97 98 do { 99 more = 0; 100 skb_ptr = completed; 101 102 if (__netif_tx_trylock(fifo->txq)) { 103 vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr, 104 NR_SKB_COMPLETED, &more); 105 __netif_tx_unlock(fifo->txq); 106 } 107 108 /* free SKBs */ 109 for (temp = completed; temp != skb_ptr; temp++) 110 dev_kfree_skb_irq(*temp); 111 } while (more); 112} 113 114static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev) 115{ 116 int i; 117 118 /* Complete all transmits */ 119 for (i = 0; i < vdev->no_of_vpath; i++) 120 VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo); 121} 122 123static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev) 124{ 125 int i; 126 struct vxge_ring *ring; 127 128 /* Complete all receives*/ 129 for (i = 0; i < vdev->no_of_vpath; i++) { 130 ring = &vdev->vpaths[i].ring; 131 vxge_hw_vpath_poll_rx(ring->handle); 132 } 133} 134 135/* 136 * vxge_callback_link_up 137 * 138 * This function is called during interrupt context to notify link up state 139 * change. 140 */ 141void 142vxge_callback_link_up(struct __vxge_hw_device *hldev) 143{ 144 struct net_device *dev = hldev->ndev; 145 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 146 147 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 148 vdev->ndev->name, __func__, __LINE__); 149 netdev_notice(vdev->ndev, "Link Up\n"); 150 vdev->stats.link_up++; 151 152 netif_carrier_on(vdev->ndev); 153 netif_tx_wake_all_queues(vdev->ndev); 154 155 vxge_debug_entryexit(VXGE_TRACE, 156 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__); 157} 158 159/* 160 * vxge_callback_link_down 161 * 162 * This function is called during interrupt context to notify link down state 163 * change. 164 */ 165void 166vxge_callback_link_down(struct __vxge_hw_device *hldev) 167{ 168 struct net_device *dev = hldev->ndev; 169 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 170 171 vxge_debug_entryexit(VXGE_TRACE, 172 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); 173 netdev_notice(vdev->ndev, "Link Down\n"); 174 175 vdev->stats.link_down++; 176 netif_carrier_off(vdev->ndev); 177 netif_tx_stop_all_queues(vdev->ndev); 178 179 vxge_debug_entryexit(VXGE_TRACE, 180 "%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__); 181} 182 183/* 184 * vxge_rx_alloc 185 * 186 * Allocate SKB. 187 */ 188static struct sk_buff* 189vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) 190{ 191 struct net_device *dev; 192 struct sk_buff *skb; 193 struct vxge_rx_priv *rx_priv; 194 195 dev = ring->ndev; 196 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 197 ring->ndev->name, __func__, __LINE__); 198 199 rx_priv = vxge_hw_ring_rxd_private_get(dtrh); 200 201 /* try to allocate skb first. this one may fail */ 202 skb = netdev_alloc_skb(dev, skb_size + 203 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); 204 if (skb == NULL) { 205 vxge_debug_mem(VXGE_ERR, 206 "%s: out of memory to allocate SKB", dev->name); 207 ring->stats.skb_alloc_fail++; 208 return NULL; 209 } 210 211 vxge_debug_mem(VXGE_TRACE, 212 "%s: %s:%d Skb : 0x%p", ring->ndev->name, 213 __func__, __LINE__, skb); 214 215 skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); 216 217 rx_priv->skb = skb; 218 rx_priv->skb_data = NULL; 219 rx_priv->data_size = skb_size; 220 vxge_debug_entryexit(VXGE_TRACE, 221 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); 222 223 return skb; 224} 225 226/* 227 * vxge_rx_map 228 */ 229static int vxge_rx_map(void *dtrh, struct vxge_ring *ring) 230{ 231 struct vxge_rx_priv *rx_priv; 232 dma_addr_t dma_addr; 233 234 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 235 ring->ndev->name, __func__, __LINE__); 236 rx_priv = vxge_hw_ring_rxd_private_get(dtrh); 237 238 rx_priv->skb_data = rx_priv->skb->data; 239 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, 240 rx_priv->data_size, PCI_DMA_FROMDEVICE); 241 242 if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) { 243 ring->stats.pci_map_fail++; 244 return -EIO; 245 } 246 vxge_debug_mem(VXGE_TRACE, 247 "%s: %s:%d 1 buffer mode dma_addr = 0x%llx", 248 ring->ndev->name, __func__, __LINE__, 249 (unsigned long long)dma_addr); 250 vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size); 251 252 rx_priv->data_dma = dma_addr; 253 vxge_debug_entryexit(VXGE_TRACE, 254 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); 255 256 return 0; 257} 258 259/* 260 * vxge_rx_initial_replenish 261 * Allocation of RxD as an initial replenish procedure. 262 */ 263static enum vxge_hw_status 264vxge_rx_initial_replenish(void *dtrh, void *userdata) 265{ 266 struct vxge_ring *ring = (struct vxge_ring *)userdata; 267 struct vxge_rx_priv *rx_priv; 268 269 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 270 ring->ndev->name, __func__, __LINE__); 271 if (vxge_rx_alloc(dtrh, ring, 272 VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL) 273 return VXGE_HW_FAIL; 274 275 if (vxge_rx_map(dtrh, ring)) { 276 rx_priv = vxge_hw_ring_rxd_private_get(dtrh); 277 dev_kfree_skb(rx_priv->skb); 278 279 return VXGE_HW_FAIL; 280 } 281 vxge_debug_entryexit(VXGE_TRACE, 282 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); 283 284 return VXGE_HW_OK; 285} 286 287static inline void 288vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan, 289 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info) 290{ 291 292 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 293 ring->ndev->name, __func__, __LINE__); 294 skb_record_rx_queue(skb, ring->driver_id); 295 skb->protocol = eth_type_trans(skb, ring->ndev); 296 297 ring->stats.rx_frms++; 298 ring->stats.rx_bytes += pkt_length; 299 300 if (skb->pkt_type == PACKET_MULTICAST) 301 ring->stats.rx_mcast++; 302 303 vxge_debug_rx(VXGE_TRACE, 304 "%s: %s:%d skb protocol = %d", 305 ring->ndev->name, __func__, __LINE__, skb->protocol); 306 307 if (ring->gro_enable) { 308 if (ring->vlgrp && ext_info->vlan && 309 (ring->vlan_tag_strip == 310 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)) 311 vlan_gro_receive(ring->napi_p, ring->vlgrp, 312 ext_info->vlan, skb); 313 else 314 napi_gro_receive(ring->napi_p, skb); 315 } else { 316 if (ring->vlgrp && vlan && 317 (ring->vlan_tag_strip == 318 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE)) 319 vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan); 320 else 321 netif_receive_skb(skb); 322 } 323 vxge_debug_entryexit(VXGE_TRACE, 324 "%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__); 325} 326 327static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring, 328 struct vxge_rx_priv *rx_priv) 329{ 330 pci_dma_sync_single_for_device(ring->pdev, 331 rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE); 332 333 vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size); 334 vxge_hw_ring_rxd_pre_post(ring->handle, dtr); 335} 336 337static inline void vxge_post(int *dtr_cnt, void **first_dtr, 338 void *post_dtr, struct __vxge_hw_ring *ringh) 339{ 340 int dtr_count = *dtr_cnt; 341 if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) { 342 if (*first_dtr) 343 vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr); 344 *first_dtr = post_dtr; 345 } else 346 vxge_hw_ring_rxd_post_post(ringh, post_dtr); 347 dtr_count++; 348 *dtr_cnt = dtr_count; 349} 350 351/* 352 * vxge_rx_1b_compl 353 * 354 * If the interrupt is because of a received frame or if the receive ring 355 * contains fresh as yet un-processed frames, this function is called. 356 */ 357enum vxge_hw_status 358vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, 359 u8 t_code, void *userdata) 360{ 361 struct vxge_ring *ring = (struct vxge_ring *)userdata; 362 struct net_device *dev = ring->ndev; 363 unsigned int dma_sizes; 364 void *first_dtr = NULL; 365 int dtr_cnt = 0; 366 int data_size; 367 dma_addr_t data_dma; 368 int pkt_length; 369 struct sk_buff *skb; 370 struct vxge_rx_priv *rx_priv; 371 struct vxge_hw_ring_rxd_info ext_info; 372 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 373 ring->ndev->name, __func__, __LINE__); 374 ring->pkts_processed = 0; 375 376 vxge_hw_ring_replenish(ringh); 377 378 do { 379 prefetch((char *)dtr + L1_CACHE_BYTES); 380 rx_priv = vxge_hw_ring_rxd_private_get(dtr); 381 skb = rx_priv->skb; 382 data_size = rx_priv->data_size; 383 data_dma = rx_priv->data_dma; 384 prefetch(rx_priv->skb_data); 385 386 vxge_debug_rx(VXGE_TRACE, 387 "%s: %s:%d skb = 0x%p", 388 ring->ndev->name, __func__, __LINE__, skb); 389 390 vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes); 391 pkt_length = dma_sizes; 392 393 pkt_length -= ETH_FCS_LEN; 394 395 vxge_debug_rx(VXGE_TRACE, 396 "%s: %s:%d Packet Length = %d", 397 ring->ndev->name, __func__, __LINE__, pkt_length); 398 399 vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info); 400 401 /* check skb validity */ 402 vxge_assert(skb); 403 404 prefetch((char *)skb + L1_CACHE_BYTES); 405 if (unlikely(t_code)) { 406 407 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) != 408 VXGE_HW_OK) { 409 410 ring->stats.rx_errors++; 411 vxge_debug_rx(VXGE_TRACE, 412 "%s: %s :%d Rx T_code is %d", 413 ring->ndev->name, __func__, 414 __LINE__, t_code); 415 416 /* If the t_code is not supported and if the 417 * t_code is other than 0x5 (unparseable packet 418 * such as unknown UPV6 header), Drop it !!! 419 */ 420 vxge_re_pre_post(dtr, ring, rx_priv); 421 422 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh); 423 ring->stats.rx_dropped++; 424 continue; 425 } 426 } 427 428 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) { 429 430 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { 431 432 if (!vxge_rx_map(dtr, ring)) { 433 skb_put(skb, pkt_length); 434 435 pci_unmap_single(ring->pdev, data_dma, 436 data_size, PCI_DMA_FROMDEVICE); 437 438 vxge_hw_ring_rxd_pre_post(ringh, dtr); 439 vxge_post(&dtr_cnt, &first_dtr, dtr, 440 ringh); 441 } else { 442 dev_kfree_skb(rx_priv->skb); 443 rx_priv->skb = skb; 444 rx_priv->data_size = data_size; 445 vxge_re_pre_post(dtr, ring, rx_priv); 446 447 vxge_post(&dtr_cnt, &first_dtr, dtr, 448 ringh); 449 ring->stats.rx_dropped++; 450 break; 451 } 452 } else { 453 vxge_re_pre_post(dtr, ring, rx_priv); 454 455 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh); 456 ring->stats.rx_dropped++; 457 break; 458 } 459 } else { 460 struct sk_buff *skb_up; 461 462 skb_up = netdev_alloc_skb(dev, pkt_length + 463 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); 464 if (skb_up != NULL) { 465 skb_reserve(skb_up, 466 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); 467 468 pci_dma_sync_single_for_cpu(ring->pdev, 469 data_dma, data_size, 470 PCI_DMA_FROMDEVICE); 471 472 vxge_debug_mem(VXGE_TRACE, 473 "%s: %s:%d skb_up = %p", 474 ring->ndev->name, __func__, 475 __LINE__, skb); 476 memcpy(skb_up->data, skb->data, pkt_length); 477 478 vxge_re_pre_post(dtr, ring, rx_priv); 479 480 vxge_post(&dtr_cnt, &first_dtr, dtr, 481 ringh); 482 /* will netif_rx small SKB instead */ 483 skb = skb_up; 484 skb_put(skb, pkt_length); 485 } else { 486 vxge_re_pre_post(dtr, ring, rx_priv); 487 488 vxge_post(&dtr_cnt, &first_dtr, dtr, ringh); 489 vxge_debug_rx(VXGE_ERR, 490 "%s: vxge_rx_1b_compl: out of " 491 "memory", dev->name); 492 ring->stats.skb_alloc_fail++; 493 break; 494 } 495 } 496 497 if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) && 498 !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) && 499 ring->rx_csum && /* Offload Rx side CSUM */ 500 ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK && 501 ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK) 502 skb->ip_summed = CHECKSUM_UNNECESSARY; 503 else 504 skb->ip_summed = CHECKSUM_NONE; 505 506 vxge_rx_complete(ring, skb, ext_info.vlan, 507 pkt_length, &ext_info); 508 509 ring->budget--; 510 ring->pkts_processed++; 511 if (!ring->budget) 512 break; 513 514 } while (vxge_hw_ring_rxd_next_completed(ringh, &dtr, 515 &t_code) == VXGE_HW_OK); 516 517 if (first_dtr) 518 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr); 519 520 vxge_debug_entryexit(VXGE_TRACE, 521 "%s:%d Exiting...", 522 __func__, __LINE__); 523 return VXGE_HW_OK; 524} 525 526/* 527 * vxge_xmit_compl 528 * 529 * If an interrupt was raised to indicate DMA complete of the Tx packet, 530 * this function is called. It identifies the last TxD whose buffer was 531 * freed and frees all skbs whose data have already DMA'ed into the NICs 532 * internal memory. 533 */ 534enum vxge_hw_status 535vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, 536 enum vxge_hw_fifo_tcode t_code, void *userdata, 537 struct sk_buff ***skb_ptr, int nr_skb, int *more) 538{ 539 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata; 540 struct sk_buff *skb, **done_skb = *skb_ptr; 541 int pkt_cnt = 0; 542 543 vxge_debug_entryexit(VXGE_TRACE, 544 "%s:%d Entered....", __func__, __LINE__); 545 546 do { 547 int frg_cnt; 548 skb_frag_t *frag; 549 int i = 0, j; 550 struct vxge_tx_priv *txd_priv = 551 vxge_hw_fifo_txdl_private_get(dtr); 552 553 skb = txd_priv->skb; 554 frg_cnt = skb_shinfo(skb)->nr_frags; 555 frag = &skb_shinfo(skb)->frags[0]; 556 557 vxge_debug_tx(VXGE_TRACE, 558 "%s: %s:%d fifo_hw = %p dtr = %p " 559 "tcode = 0x%x", fifo->ndev->name, __func__, 560 __LINE__, fifo_hw, dtr, t_code); 561 /* check skb validity */ 562 vxge_assert(skb); 563 vxge_debug_tx(VXGE_TRACE, 564 "%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d", 565 fifo->ndev->name, __func__, __LINE__, 566 skb, txd_priv, frg_cnt); 567 if (unlikely(t_code)) { 568 fifo->stats.tx_errors++; 569 vxge_debug_tx(VXGE_ERR, 570 "%s: tx: dtr %p completed due to " 571 "error t_code %01x", fifo->ndev->name, 572 dtr, t_code); 573 vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code); 574 } 575 576 /* for unfragmented skb */ 577 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], 578 skb_headlen(skb), PCI_DMA_TODEVICE); 579 580 for (j = 0; j < frg_cnt; j++) { 581 pci_unmap_page(fifo->pdev, 582 txd_priv->dma_buffers[i++], 583 frag->size, PCI_DMA_TODEVICE); 584 frag += 1; 585 } 586 587 vxge_hw_fifo_txdl_free(fifo_hw, dtr); 588 589 /* Updating the statistics block */ 590 fifo->stats.tx_frms++; 591 fifo->stats.tx_bytes += skb->len; 592 593 *done_skb++ = skb; 594 595 if (--nr_skb <= 0) { 596 *more = 1; 597 break; 598 } 599 600 pkt_cnt++; 601 if (pkt_cnt > fifo->indicate_max_pkts) 602 break; 603 604 } while (vxge_hw_fifo_txdl_next_completed(fifo_hw, 605 &dtr, &t_code) == VXGE_HW_OK); 606 607 *skb_ptr = done_skb; 608 if (netif_tx_queue_stopped(fifo->txq)) 609 netif_tx_wake_queue(fifo->txq); 610 611 vxge_debug_entryexit(VXGE_TRACE, 612 "%s: %s:%d Exiting...", 613 fifo->ndev->name, __func__, __LINE__); 614 return VXGE_HW_OK; 615} 616 617/* select a vpath to transmit the packet */ 618static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb) 619{ 620 u16 queue_len, counter = 0; 621 if (skb->protocol == htons(ETH_P_IP)) { 622 struct iphdr *ip; 623 struct tcphdr *th; 624 625 ip = ip_hdr(skb); 626 627 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) { 628 th = (struct tcphdr *)(((unsigned char *)ip) + 629 ip->ihl*4); 630 631 queue_len = vdev->no_of_vpath; 632 counter = (ntohs(th->source) + 633 ntohs(th->dest)) & 634 vdev->vpath_selector[queue_len - 1]; 635 if (counter >= queue_len) 636 counter = queue_len - 1; 637 } 638 } 639 return counter; 640} 641 642static enum vxge_hw_status vxge_search_mac_addr_in_list( 643 struct vxge_vpath *vpath, u64 del_mac) 644{ 645 struct list_head *entry, *next; 646 list_for_each_safe(entry, next, &vpath->mac_addr_list) { 647 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) 648 return TRUE; 649 } 650 return FALSE; 651} 652 653static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) 654{ 655 struct macInfo mac_info; 656 u8 *mac_address = NULL; 657 u64 mac_addr = 0, vpath_vector = 0; 658 int vpath_idx = 0; 659 enum vxge_hw_status status = VXGE_HW_OK; 660 struct vxge_vpath *vpath = NULL; 661 struct __vxge_hw_device *hldev; 662 663 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 664 665 mac_address = (u8 *)&mac_addr; 666 memcpy(mac_address, mac_header, ETH_ALEN); 667 668 /* Is this mac address already in the list? */ 669 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { 670 vpath = &vdev->vpaths[vpath_idx]; 671 if (vxge_search_mac_addr_in_list(vpath, mac_addr)) 672 return vpath_idx; 673 } 674 675 memset(&mac_info, 0, sizeof(struct macInfo)); 676 memcpy(mac_info.macaddr, mac_header, ETH_ALEN); 677 678 /* Any vpath has room to add mac address to its da table? */ 679 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { 680 vpath = &vdev->vpaths[vpath_idx]; 681 if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) { 682 /* Add this mac address to this vpath */ 683 mac_info.vpath_no = vpath_idx; 684 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE; 685 status = vxge_add_mac_addr(vdev, &mac_info); 686 if (status != VXGE_HW_OK) 687 return -EPERM; 688 return vpath_idx; 689 } 690 } 691 692 mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST; 693 vpath_idx = 0; 694 mac_info.vpath_no = vpath_idx; 695 /* Is the first vpath already selected as catch-basin ? */ 696 vpath = &vdev->vpaths[vpath_idx]; 697 if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) { 698 /* Add this mac address to this vpath */ 699 if (FALSE == vxge_mac_list_add(vpath, &mac_info)) 700 return -EPERM; 701 return vpath_idx; 702 } 703 704 /* Select first vpath as catch-basin */ 705 vpath_vector = vxge_mBIT(vpath->device_id); 706 status = vxge_hw_mgmt_reg_write(vpath->vdev->devh, 707 vxge_hw_mgmt_reg_type_mrpcim, 708 0, 709 (ulong)offsetof( 710 struct vxge_hw_mrpcim_reg, 711 rts_mgr_cbasin_cfg), 712 vpath_vector); 713 if (status != VXGE_HW_OK) { 714 vxge_debug_tx(VXGE_ERR, 715 "%s: Unable to set the vpath-%d in catch-basin mode", 716 VXGE_DRIVER_NAME, vpath->device_id); 717 return -EPERM; 718 } 719 720 if (FALSE == vxge_mac_list_add(vpath, &mac_info)) 721 return -EPERM; 722 723 return vpath_idx; 724} 725 726/** 727 * vxge_xmit 728 * @skb : the socket buffer containing the Tx data. 729 * @dev : device pointer. 730 * 731 * This function is the Tx entry point of the driver. Neterion NIC supports 732 * certain protocol assist features on Tx side, namely CSO, S/G, LSO. 733*/ 734static netdev_tx_t 735vxge_xmit(struct sk_buff *skb, struct net_device *dev) 736{ 737 struct vxge_fifo *fifo = NULL; 738 void *dtr_priv; 739 void *dtr = NULL; 740 struct vxgedev *vdev = NULL; 741 enum vxge_hw_status status; 742 int frg_cnt, first_frg_len; 743 skb_frag_t *frag; 744 int i = 0, j = 0, avail; 745 u64 dma_pointer; 746 struct vxge_tx_priv *txdl_priv = NULL; 747 struct __vxge_hw_fifo *fifo_hw; 748 int offload_type; 749 int vpath_no = 0; 750 751 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 752 dev->name, __func__, __LINE__); 753 754 /* A buffer with no data will be dropped */ 755 if (unlikely(skb->len <= 0)) { 756 vxge_debug_tx(VXGE_ERR, 757 "%s: Buffer has no data..", dev->name); 758 dev_kfree_skb(skb); 759 return NETDEV_TX_OK; 760 } 761 762 vdev = (struct vxgedev *)netdev_priv(dev); 763 764 if (unlikely(!is_vxge_card_up(vdev))) { 765 vxge_debug_tx(VXGE_ERR, 766 "%s: vdev not initialized", dev->name); 767 dev_kfree_skb(skb); 768 return NETDEV_TX_OK; 769 } 770 771 if (vdev->config.addr_learn_en) { 772 vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN); 773 if (vpath_no == -EPERM) { 774 vxge_debug_tx(VXGE_ERR, 775 "%s: Failed to store the mac address", 776 dev->name); 777 dev_kfree_skb(skb); 778 return NETDEV_TX_OK; 779 } 780 } 781 782 if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING) 783 vpath_no = skb_get_queue_mapping(skb); 784 else if (vdev->config.tx_steering_type == TX_PORT_STEERING) 785 vpath_no = vxge_get_vpath_no(vdev, skb); 786 787 vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no); 788 789 if (vpath_no >= vdev->no_of_vpath) 790 vpath_no = 0; 791 792 fifo = &vdev->vpaths[vpath_no].fifo; 793 fifo_hw = fifo->handle; 794 795 if (netif_tx_queue_stopped(fifo->txq)) 796 return NETDEV_TX_BUSY; 797 798 avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw); 799 if (avail == 0) { 800 vxge_debug_tx(VXGE_ERR, 801 "%s: No free TXDs available", dev->name); 802 fifo->stats.txd_not_free++; 803 goto _exit0; 804 } 805 806 /* Last TXD? Stop tx queue to avoid dropping packets. TX 807 * completion will resume the queue. 808 */ 809 if (avail == 1) 810 netif_tx_stop_queue(fifo->txq); 811 812 status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv); 813 if (unlikely(status != VXGE_HW_OK)) { 814 vxge_debug_tx(VXGE_ERR, 815 "%s: Out of descriptors .", dev->name); 816 fifo->stats.txd_out_of_desc++; 817 goto _exit0; 818 } 819 820 vxge_debug_tx(VXGE_TRACE, 821 "%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p", 822 dev->name, __func__, __LINE__, 823 fifo_hw, dtr, dtr_priv); 824 825 if (vdev->vlgrp && vlan_tx_tag_present(skb)) { 826 u16 vlan_tag = vlan_tx_tag_get(skb); 827 vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag); 828 } 829 830 first_frg_len = skb_headlen(skb); 831 832 dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len, 833 PCI_DMA_TODEVICE); 834 835 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) { 836 vxge_hw_fifo_txdl_free(fifo_hw, dtr); 837 fifo->stats.pci_map_fail++; 838 goto _exit0; 839 } 840 841 txdl_priv = vxge_hw_fifo_txdl_private_get(dtr); 842 txdl_priv->skb = skb; 843 txdl_priv->dma_buffers[j] = dma_pointer; 844 845 frg_cnt = skb_shinfo(skb)->nr_frags; 846 vxge_debug_tx(VXGE_TRACE, 847 "%s: %s:%d skb = %p txdl_priv = %p " 848 "frag_cnt = %d dma_pointer = 0x%llx", dev->name, 849 __func__, __LINE__, skb, txdl_priv, 850 frg_cnt, (unsigned long long)dma_pointer); 851 852 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer, 853 first_frg_len); 854 855 frag = &skb_shinfo(skb)->frags[0]; 856 for (i = 0; i < frg_cnt; i++) { 857 /* ignore 0 length fragment */ 858 if (!frag->size) 859 continue; 860 861 dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page, 862 frag->page_offset, frag->size, 863 PCI_DMA_TODEVICE); 864 865 if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) 866 goto _exit2; 867 vxge_debug_tx(VXGE_TRACE, 868 "%s: %s:%d frag = %d dma_pointer = 0x%llx", 869 dev->name, __func__, __LINE__, i, 870 (unsigned long long)dma_pointer); 871 872 txdl_priv->dma_buffers[j] = dma_pointer; 873 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer, 874 frag->size); 875 frag += 1; 876 } 877 878 offload_type = vxge_offload_type(skb); 879 880 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 881 int mss = vxge_tcp_mss(skb); 882 if (mss) { 883 vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d", 884 dev->name, __func__, __LINE__, mss); 885 vxge_hw_fifo_txdl_mss_set(dtr, mss); 886 } else { 887 vxge_assert(skb->len <= 888 dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE); 889 vxge_assert(0); 890 goto _exit1; 891 } 892 } 893 894 if (skb->ip_summed == CHECKSUM_PARTIAL) 895 vxge_hw_fifo_txdl_cksum_set_bits(dtr, 896 VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN | 897 VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN | 898 VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN); 899 900 vxge_hw_fifo_txdl_post(fifo_hw, dtr); 901 902 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", 903 dev->name, __func__, __LINE__); 904 return NETDEV_TX_OK; 905 906_exit2: 907 vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name); 908_exit1: 909 j = 0; 910 frag = &skb_shinfo(skb)->frags[0]; 911 912 pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++], 913 skb_headlen(skb), PCI_DMA_TODEVICE); 914 915 for (; j < i; j++) { 916 pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j], 917 frag->size, PCI_DMA_TODEVICE); 918 frag += 1; 919 } 920 921 vxge_hw_fifo_txdl_free(fifo_hw, dtr); 922_exit0: 923 netif_tx_stop_queue(fifo->txq); 924 dev_kfree_skb(skb); 925 926 return NETDEV_TX_OK; 927} 928 929/* 930 * vxge_rx_term 931 * 932 * Function will be called by hw function to abort all outstanding receive 933 * descriptors. 934 */ 935static void 936vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata) 937{ 938 struct vxge_ring *ring = (struct vxge_ring *)userdata; 939 struct vxge_rx_priv *rx_priv = 940 vxge_hw_ring_rxd_private_get(dtrh); 941 942 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 943 ring->ndev->name, __func__, __LINE__); 944 if (state != VXGE_HW_RXD_STATE_POSTED) 945 return; 946 947 pci_unmap_single(ring->pdev, rx_priv->data_dma, 948 rx_priv->data_size, PCI_DMA_FROMDEVICE); 949 950 dev_kfree_skb(rx_priv->skb); 951 rx_priv->skb_data = NULL; 952 953 vxge_debug_entryexit(VXGE_TRACE, 954 "%s: %s:%d Exiting...", 955 ring->ndev->name, __func__, __LINE__); 956} 957 958/* 959 * vxge_tx_term 960 * 961 * Function will be called to abort all outstanding tx descriptors 962 */ 963static void 964vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata) 965{ 966 struct vxge_fifo *fifo = (struct vxge_fifo *)userdata; 967 skb_frag_t *frag; 968 int i = 0, j, frg_cnt; 969 struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh); 970 struct sk_buff *skb = txd_priv->skb; 971 972 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 973 974 if (state != VXGE_HW_TXDL_STATE_POSTED) 975 return; 976 977 /* check skb validity */ 978 vxge_assert(skb); 979 frg_cnt = skb_shinfo(skb)->nr_frags; 980 frag = &skb_shinfo(skb)->frags[0]; 981 982 /* for unfragmented skb */ 983 pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], 984 skb_headlen(skb), PCI_DMA_TODEVICE); 985 986 for (j = 0; j < frg_cnt; j++) { 987 pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++], 988 frag->size, PCI_DMA_TODEVICE); 989 frag += 1; 990 } 991 992 dev_kfree_skb(skb); 993 994 vxge_debug_entryexit(VXGE_TRACE, 995 "%s:%d Exiting...", __func__, __LINE__); 996} 997 998/** 999 * vxge_set_multicast 1000 * @dev: pointer to the device structure 1001 * 1002 * Entry point for multicast address enable/disable 1003 * This function is a driver entry point which gets called by the kernel 1004 * whenever multicast addresses must be enabled/disabled. This also gets 1005 * called to set/reset promiscuous mode. Depending on the deivce flag, we 1006 * determine, if multicast address must be enabled or if promiscuous mode 1007 * is to be disabled etc. 1008 */ 1009static void vxge_set_multicast(struct net_device *dev) 1010{ 1011 struct netdev_hw_addr *ha; 1012 struct vxgedev *vdev; 1013 int i, mcast_cnt = 0; 1014 struct __vxge_hw_device *hldev; 1015 struct vxge_vpath *vpath; 1016 enum vxge_hw_status status = VXGE_HW_OK; 1017 struct macInfo mac_info; 1018 int vpath_idx = 0; 1019 struct vxge_mac_addrs *mac_entry; 1020 struct list_head *list_head; 1021 struct list_head *entry, *next; 1022 u8 *mac_address = NULL; 1023 1024 vxge_debug_entryexit(VXGE_TRACE, 1025 "%s:%d", __func__, __LINE__); 1026 1027 vdev = (struct vxgedev *)netdev_priv(dev); 1028 hldev = (struct __vxge_hw_device *)vdev->devh; 1029 1030 if (unlikely(!is_vxge_card_up(vdev))) 1031 return; 1032 1033 if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) { 1034 for (i = 0; i < vdev->no_of_vpath; i++) { 1035 vpath = &vdev->vpaths[i]; 1036 vxge_assert(vpath->is_open); 1037 status = vxge_hw_vpath_mcast_enable(vpath->handle); 1038 if (status != VXGE_HW_OK) 1039 vxge_debug_init(VXGE_ERR, "failed to enable " 1040 "multicast, status %d", status); 1041 vdev->all_multi_flg = 1; 1042 } 1043 } else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) { 1044 for (i = 0; i < vdev->no_of_vpath; i++) { 1045 vpath = &vdev->vpaths[i]; 1046 vxge_assert(vpath->is_open); 1047 status = vxge_hw_vpath_mcast_disable(vpath->handle); 1048 if (status != VXGE_HW_OK) 1049 vxge_debug_init(VXGE_ERR, "failed to disable " 1050 "multicast, status %d", status); 1051 vdev->all_multi_flg = 0; 1052 } 1053 } 1054 1055 1056 if (!vdev->config.addr_learn_en) { 1057 for (i = 0; i < vdev->no_of_vpath; i++) { 1058 vpath = &vdev->vpaths[i]; 1059 vxge_assert(vpath->is_open); 1060 1061 if (dev->flags & IFF_PROMISC) 1062 status = vxge_hw_vpath_promisc_enable( 1063 vpath->handle); 1064 else 1065 status = vxge_hw_vpath_promisc_disable( 1066 vpath->handle); 1067 if (status != VXGE_HW_OK) 1068 vxge_debug_init(VXGE_ERR, "failed to %s promisc" 1069 ", status %d", dev->flags&IFF_PROMISC ? 1070 "enable" : "disable", status); 1071 } 1072 } 1073 1074 memset(&mac_info, 0, sizeof(struct macInfo)); 1075 /* Update individual M_CAST address list */ 1076 if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) { 1077 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; 1078 list_head = &vdev->vpaths[0].mac_addr_list; 1079 if ((netdev_mc_count(dev) + 1080 (vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) > 1081 vdev->vpaths[0].max_mac_addr_cnt) 1082 goto _set_all_mcast; 1083 1084 /* Delete previous MC's */ 1085 for (i = 0; i < mcast_cnt; i++) { 1086 list_for_each_safe(entry, next, list_head) { 1087 mac_entry = (struct vxge_mac_addrs *) entry; 1088 /* Copy the mac address to delete */ 1089 mac_address = (u8 *)&mac_entry->macaddr; 1090 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1091 1092 /* Is this a multicast address */ 1093 if (0x01 & mac_info.macaddr[0]) { 1094 for (vpath_idx = 0; vpath_idx < 1095 vdev->no_of_vpath; 1096 vpath_idx++) { 1097 mac_info.vpath_no = vpath_idx; 1098 status = vxge_del_mac_addr( 1099 vdev, 1100 &mac_info); 1101 } 1102 } 1103 } 1104 } 1105 1106 /* Add new ones */ 1107 netdev_for_each_mc_addr(ha, dev) { 1108 memcpy(mac_info.macaddr, ha->addr, ETH_ALEN); 1109 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; 1110 vpath_idx++) { 1111 mac_info.vpath_no = vpath_idx; 1112 mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE; 1113 status = vxge_add_mac_addr(vdev, &mac_info); 1114 if (status != VXGE_HW_OK) { 1115 vxge_debug_init(VXGE_ERR, 1116 "%s:%d Setting individual" 1117 "multicast address failed", 1118 __func__, __LINE__); 1119 goto _set_all_mcast; 1120 } 1121 } 1122 } 1123 1124 return; 1125_set_all_mcast: 1126 mcast_cnt = vdev->vpaths[0].mcast_addr_cnt; 1127 /* Delete previous MC's */ 1128 for (i = 0; i < mcast_cnt; i++) { 1129 list_for_each_safe(entry, next, list_head) { 1130 mac_entry = (struct vxge_mac_addrs *) entry; 1131 /* Copy the mac address to delete */ 1132 mac_address = (u8 *)&mac_entry->macaddr; 1133 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1134 1135 /* Is this a multicast address */ 1136 if (0x01 & mac_info.macaddr[0]) 1137 break; 1138 } 1139 1140 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; 1141 vpath_idx++) { 1142 mac_info.vpath_no = vpath_idx; 1143 status = vxge_del_mac_addr(vdev, &mac_info); 1144 } 1145 } 1146 1147 /* Enable all multicast */ 1148 for (i = 0; i < vdev->no_of_vpath; i++) { 1149 vpath = &vdev->vpaths[i]; 1150 vxge_assert(vpath->is_open); 1151 1152 status = vxge_hw_vpath_mcast_enable(vpath->handle); 1153 if (status != VXGE_HW_OK) { 1154 vxge_debug_init(VXGE_ERR, 1155 "%s:%d Enabling all multicasts failed", 1156 __func__, __LINE__); 1157 } 1158 vdev->all_multi_flg = 1; 1159 } 1160 dev->flags |= IFF_ALLMULTI; 1161 } 1162 1163 vxge_debug_entryexit(VXGE_TRACE, 1164 "%s:%d Exiting...", __func__, __LINE__); 1165} 1166 1167/** 1168 * vxge_set_mac_addr 1169 * @dev: pointer to the device structure 1170 * 1171 * Update entry "0" (default MAC addr) 1172 */ 1173static int vxge_set_mac_addr(struct net_device *dev, void *p) 1174{ 1175 struct sockaddr *addr = p; 1176 struct vxgedev *vdev; 1177 struct __vxge_hw_device *hldev; 1178 enum vxge_hw_status status = VXGE_HW_OK; 1179 struct macInfo mac_info_new, mac_info_old; 1180 int vpath_idx = 0; 1181 1182 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1183 1184 vdev = (struct vxgedev *)netdev_priv(dev); 1185 hldev = vdev->devh; 1186 1187 if (!is_valid_ether_addr(addr->sa_data)) 1188 return -EINVAL; 1189 1190 memset(&mac_info_new, 0, sizeof(struct macInfo)); 1191 memset(&mac_info_old, 0, sizeof(struct macInfo)); 1192 1193 vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", 1194 __func__, __LINE__); 1195 1196 /* Get the old address */ 1197 memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len); 1198 1199 /* Copy the new address */ 1200 memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len); 1201 1202 /* First delete the old mac address from all the vpaths 1203 as we can't specify the index while adding new mac address */ 1204 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { 1205 struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx]; 1206 if (!vpath->is_open) { 1207 /* This can happen when this interface is added/removed 1208 to the bonding interface. Delete this station address 1209 from the linked list */ 1210 vxge_mac_list_del(vpath, &mac_info_old); 1211 1212 /* Add this new address to the linked list 1213 for later restoring */ 1214 vxge_mac_list_add(vpath, &mac_info_new); 1215 1216 continue; 1217 } 1218 /* Delete the station address */ 1219 mac_info_old.vpath_no = vpath_idx; 1220 status = vxge_del_mac_addr(vdev, &mac_info_old); 1221 } 1222 1223 if (unlikely(!is_vxge_card_up(vdev))) { 1224 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1225 return VXGE_HW_OK; 1226 } 1227 1228 /* Set this mac address to all the vpaths */ 1229 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { 1230 mac_info_new.vpath_no = vpath_idx; 1231 mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE; 1232 status = vxge_add_mac_addr(vdev, &mac_info_new); 1233 if (status != VXGE_HW_OK) 1234 return -EINVAL; 1235 } 1236 1237 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1238 1239 return status; 1240} 1241 1242/* 1243 * vxge_vpath_intr_enable 1244 * @vdev: pointer to vdev 1245 * @vp_id: vpath for which to enable the interrupts 1246 * 1247 * Enables the interrupts for the vpath 1248*/ 1249void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) 1250{ 1251 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1252 int msix_id = 0; 1253 int tim_msix_id[4] = {0, 1, 0, 0}; 1254 int alarm_msix_id = VXGE_ALARM_MSIX_ID; 1255 1256 vxge_hw_vpath_intr_enable(vpath->handle); 1257 1258 if (vdev->config.intr_type == INTA) 1259 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle); 1260 else { 1261 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, 1262 alarm_msix_id); 1263 1264 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE; 1265 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id); 1266 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1); 1267 1268 /* enable the alarm vector */ 1269 msix_id = (vpath->handle->vpath->hldev->first_vp_id * 1270 VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id; 1271 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id); 1272 } 1273} 1274 1275/* 1276 * vxge_vpath_intr_disable 1277 * @vdev: pointer to vdev 1278 * @vp_id: vpath for which to disable the interrupts 1279 * 1280 * Disables the interrupts for the vpath 1281*/ 1282void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) 1283{ 1284 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1285 int msix_id; 1286 1287 vxge_hw_vpath_intr_disable(vpath->handle); 1288 1289 if (vdev->config.intr_type == INTA) 1290 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle); 1291 else { 1292 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE; 1293 vxge_hw_vpath_msix_mask(vpath->handle, msix_id); 1294 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1); 1295 1296 /* disable the alarm vector */ 1297 msix_id = (vpath->handle->vpath->hldev->first_vp_id * 1298 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; 1299 vxge_hw_vpath_msix_mask(vpath->handle, msix_id); 1300 } 1301} 1302 1303/* 1304 * vxge_reset_vpath 1305 * @vdev: pointer to vdev 1306 * @vp_id: vpath to reset 1307 * 1308 * Resets the vpath 1309*/ 1310static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) 1311{ 1312 enum vxge_hw_status status = VXGE_HW_OK; 1313 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1314 int ret = 0; 1315 1316 /* check if device is down already */ 1317 if (unlikely(!is_vxge_card_up(vdev))) 1318 return 0; 1319 1320 /* is device reset already scheduled */ 1321 if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) 1322 return 0; 1323 1324 if (vpath->handle) { 1325 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) { 1326 if (is_vxge_card_up(vdev) && 1327 vxge_hw_vpath_recover_from_reset(vpath->handle) 1328 != VXGE_HW_OK) { 1329 vxge_debug_init(VXGE_ERR, 1330 "vxge_hw_vpath_recover_from_reset" 1331 "failed for vpath:%d", vp_id); 1332 return status; 1333 } 1334 } else { 1335 vxge_debug_init(VXGE_ERR, 1336 "vxge_hw_vpath_reset failed for" 1337 "vpath:%d", vp_id); 1338 return status; 1339 } 1340 } else 1341 return VXGE_HW_FAIL; 1342 1343 vxge_restore_vpath_mac_addr(vpath); 1344 vxge_restore_vpath_vid_table(vpath); 1345 1346 /* Enable all broadcast */ 1347 vxge_hw_vpath_bcast_enable(vpath->handle); 1348 1349 /* Enable all multicast */ 1350 if (vdev->all_multi_flg) { 1351 status = vxge_hw_vpath_mcast_enable(vpath->handle); 1352 if (status != VXGE_HW_OK) 1353 vxge_debug_init(VXGE_ERR, 1354 "%s:%d Enabling multicast failed", 1355 __func__, __LINE__); 1356 } 1357 1358 /* Enable the interrupts */ 1359 vxge_vpath_intr_enable(vdev, vp_id); 1360 1361 smp_wmb(); 1362 1363 /* Enable the flow of traffic through the vpath */ 1364 vxge_hw_vpath_enable(vpath->handle); 1365 1366 smp_wmb(); 1367 vxge_hw_vpath_rx_doorbell_init(vpath->handle); 1368 vpath->ring.last_status = VXGE_HW_OK; 1369 1370 /* Vpath reset done */ 1371 clear_bit(vp_id, &vdev->vp_reset); 1372 1373 /* Start the vpath queue */ 1374 if (netif_tx_queue_stopped(vpath->fifo.txq)) 1375 netif_tx_wake_queue(vpath->fifo.txq); 1376 1377 return ret; 1378} 1379 1380static int do_vxge_reset(struct vxgedev *vdev, int event) 1381{ 1382 enum vxge_hw_status status; 1383 int ret = 0, vp_id, i; 1384 1385 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1386 1387 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) { 1388 /* check if device is down already */ 1389 if (unlikely(!is_vxge_card_up(vdev))) 1390 return 0; 1391 1392 /* is reset already scheduled */ 1393 if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) 1394 return 0; 1395 } 1396 1397 if (event == VXGE_LL_FULL_RESET) { 1398 /* wait for all the vpath reset to complete */ 1399 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 1400 while (test_bit(vp_id, &vdev->vp_reset)) 1401 msleep(50); 1402 } 1403 1404 /* if execution mode is set to debug, don't reset the adapter */ 1405 if (unlikely(vdev->exec_mode)) { 1406 vxge_debug_init(VXGE_ERR, 1407 "%s: execution mode is debug, returning..", 1408 vdev->ndev->name); 1409 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); 1410 netif_tx_stop_all_queues(vdev->ndev); 1411 return 0; 1412 } 1413 } 1414 1415 if (event == VXGE_LL_FULL_RESET) { 1416 vxge_hw_device_intr_disable(vdev->devh); 1417 1418 switch (vdev->cric_err_event) { 1419 case VXGE_HW_EVENT_UNKNOWN: 1420 netif_tx_stop_all_queues(vdev->ndev); 1421 vxge_debug_init(VXGE_ERR, 1422 "fatal: %s: Disabling device due to" 1423 "unknown error", 1424 vdev->ndev->name); 1425 ret = -EPERM; 1426 goto out; 1427 case VXGE_HW_EVENT_RESET_START: 1428 break; 1429 case VXGE_HW_EVENT_RESET_COMPLETE: 1430 case VXGE_HW_EVENT_LINK_DOWN: 1431 case VXGE_HW_EVENT_LINK_UP: 1432 case VXGE_HW_EVENT_ALARM_CLEARED: 1433 case VXGE_HW_EVENT_ECCERR: 1434 case VXGE_HW_EVENT_MRPCIM_ECCERR: 1435 ret = -EPERM; 1436 goto out; 1437 case VXGE_HW_EVENT_FIFO_ERR: 1438 case VXGE_HW_EVENT_VPATH_ERR: 1439 break; 1440 case VXGE_HW_EVENT_CRITICAL_ERR: 1441 netif_tx_stop_all_queues(vdev->ndev); 1442 vxge_debug_init(VXGE_ERR, 1443 "fatal: %s: Disabling device due to" 1444 "serious error", 1445 vdev->ndev->name); 1446 /* SOP or device reset required */ 1447 /* This event is not currently used */ 1448 ret = -EPERM; 1449 goto out; 1450 case VXGE_HW_EVENT_SERR: 1451 netif_tx_stop_all_queues(vdev->ndev); 1452 vxge_debug_init(VXGE_ERR, 1453 "fatal: %s: Disabling device due to" 1454 "serious error", 1455 vdev->ndev->name); 1456 ret = -EPERM; 1457 goto out; 1458 case VXGE_HW_EVENT_SRPCIM_SERR: 1459 case VXGE_HW_EVENT_MRPCIM_SERR: 1460 ret = -EPERM; 1461 goto out; 1462 case VXGE_HW_EVENT_SLOT_FREEZE: 1463 netif_tx_stop_all_queues(vdev->ndev); 1464 vxge_debug_init(VXGE_ERR, 1465 "fatal: %s: Disabling device due to" 1466 "slot freeze", 1467 vdev->ndev->name); 1468 ret = -EPERM; 1469 goto out; 1470 default: 1471 break; 1472 1473 } 1474 } 1475 1476 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) 1477 netif_tx_stop_all_queues(vdev->ndev); 1478 1479 if (event == VXGE_LL_FULL_RESET) { 1480 status = vxge_reset_all_vpaths(vdev); 1481 if (status != VXGE_HW_OK) { 1482 vxge_debug_init(VXGE_ERR, 1483 "fatal: %s: can not reset vpaths", 1484 vdev->ndev->name); 1485 ret = -EPERM; 1486 goto out; 1487 } 1488 } 1489 1490 if (event == VXGE_LL_COMPL_RESET) { 1491 for (i = 0; i < vdev->no_of_vpath; i++) 1492 if (vdev->vpaths[i].handle) { 1493 if (vxge_hw_vpath_recover_from_reset( 1494 vdev->vpaths[i].handle) 1495 != VXGE_HW_OK) { 1496 vxge_debug_init(VXGE_ERR, 1497 "vxge_hw_vpath_recover_" 1498 "from_reset failed for vpath: " 1499 "%d", i); 1500 ret = -EPERM; 1501 goto out; 1502 } 1503 } else { 1504 vxge_debug_init(VXGE_ERR, 1505 "vxge_hw_vpath_reset failed for " 1506 "vpath:%d", i); 1507 ret = -EPERM; 1508 goto out; 1509 } 1510 } 1511 1512 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) { 1513 /* Reprogram the DA table with populated mac addresses */ 1514 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 1515 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]); 1516 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]); 1517 } 1518 1519 /* enable vpath interrupts */ 1520 for (i = 0; i < vdev->no_of_vpath; i++) 1521 vxge_vpath_intr_enable(vdev, i); 1522 1523 vxge_hw_device_intr_enable(vdev->devh); 1524 1525 smp_wmb(); 1526 1527 /* Indicate card up */ 1528 set_bit(__VXGE_STATE_CARD_UP, &vdev->state); 1529 1530 /* Get the traffic to flow through the vpaths */ 1531 for (i = 0; i < vdev->no_of_vpath; i++) { 1532 vxge_hw_vpath_enable(vdev->vpaths[i].handle); 1533 smp_wmb(); 1534 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle); 1535 } 1536 1537 netif_tx_wake_all_queues(vdev->ndev); 1538 } 1539 1540out: 1541 vxge_debug_entryexit(VXGE_TRACE, 1542 "%s:%d Exiting...", __func__, __LINE__); 1543 1544 /* Indicate reset done */ 1545 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) 1546 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); 1547 return ret; 1548} 1549 1550/* 1551 * vxge_reset 1552 * @vdev: pointer to ll device 1553 * 1554 * driver may reset the chip on events of serr, eccerr, etc 1555 */ 1556int vxge_reset(struct vxgedev *vdev) 1557{ 1558 return do_vxge_reset(vdev, VXGE_LL_FULL_RESET); 1559} 1560 1561/** 1562 * vxge_poll - Receive handler when Receive Polling is used. 1563 * @dev: pointer to the device structure. 1564 * @budget: Number of packets budgeted to be processed in this iteration. 1565 * 1566 * This function comes into picture only if Receive side is being handled 1567 * through polling (called NAPI in linux). It mostly does what the normal 1568 * Rx interrupt handler does in terms of descriptor and packet processing 1569 * but not in an interrupt context. Also it will process a specified number 1570 * of packets at most in one iteration. This value is passed down by the 1571 * kernel as the function argument 'budget'. 1572 */ 1573static int vxge_poll_msix(struct napi_struct *napi, int budget) 1574{ 1575 struct vxge_ring *ring = 1576 container_of(napi, struct vxge_ring, napi); 1577 int budget_org = budget; 1578 ring->budget = budget; 1579 1580 vxge_hw_vpath_poll_rx(ring->handle); 1581 1582 if (ring->pkts_processed < budget_org) { 1583 napi_complete(napi); 1584 /* Re enable the Rx interrupts for the vpath */ 1585 vxge_hw_channel_msix_unmask( 1586 (struct __vxge_hw_channel *)ring->handle, 1587 ring->rx_vector_no); 1588 } 1589 1590 return ring->pkts_processed; 1591} 1592 1593static int vxge_poll_inta(struct napi_struct *napi, int budget) 1594{ 1595 struct vxgedev *vdev = container_of(napi, struct vxgedev, napi); 1596 int pkts_processed = 0; 1597 int i; 1598 int budget_org = budget; 1599 struct vxge_ring *ring; 1600 1601 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 1602 pci_get_drvdata(vdev->pdev); 1603 1604 for (i = 0; i < vdev->no_of_vpath; i++) { 1605 ring = &vdev->vpaths[i].ring; 1606 ring->budget = budget; 1607 vxge_hw_vpath_poll_rx(ring->handle); 1608 pkts_processed += ring->pkts_processed; 1609 budget -= ring->pkts_processed; 1610 if (budget <= 0) 1611 break; 1612 } 1613 1614 VXGE_COMPLETE_ALL_TX(vdev); 1615 1616 if (pkts_processed < budget_org) { 1617 napi_complete(napi); 1618 /* Re enable the Rx interrupts for the ring */ 1619 vxge_hw_device_unmask_all(hldev); 1620 vxge_hw_device_flush_io(hldev); 1621 } 1622 1623 return pkts_processed; 1624} 1625 1626#ifdef CONFIG_NET_POLL_CONTROLLER 1627/** 1628 * vxge_netpoll - netpoll event handler entry point 1629 * @dev : pointer to the device structure. 1630 * Description: 1631 * This function will be called by upper layer to check for events on the 1632 * interface in situations where interrupts are disabled. It is used for 1633 * specific in-kernel networking tasks, such as remote consoles and kernel 1634 * debugging over the network (example netdump in RedHat). 1635 */ 1636static void vxge_netpoll(struct net_device *dev) 1637{ 1638 struct __vxge_hw_device *hldev; 1639 struct vxgedev *vdev; 1640 1641 vdev = (struct vxgedev *)netdev_priv(dev); 1642 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); 1643 1644 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1645 1646 if (pci_channel_offline(vdev->pdev)) 1647 return; 1648 1649 disable_irq(dev->irq); 1650 vxge_hw_device_clear_tx_rx(hldev); 1651 1652 vxge_hw_device_clear_tx_rx(hldev); 1653 VXGE_COMPLETE_ALL_RX(vdev); 1654 VXGE_COMPLETE_ALL_TX(vdev); 1655 1656 enable_irq(dev->irq); 1657 1658 vxge_debug_entryexit(VXGE_TRACE, 1659 "%s:%d Exiting...", __func__, __LINE__); 1660} 1661#endif 1662 1663/* RTH configuration */ 1664static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev) 1665{ 1666 enum vxge_hw_status status = VXGE_HW_OK; 1667 struct vxge_hw_rth_hash_types hash_types; 1668 u8 itable[256] = {0}; /* indirection table */ 1669 u8 mtable[256] = {0}; /* CPU to vpath mapping */ 1670 int index; 1671 1672 /* 1673 * Filling 1674 * - itable with bucket numbers 1675 * - mtable with bucket-to-vpath mapping 1676 */ 1677 for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) { 1678 itable[index] = index; 1679 mtable[index] = index % vdev->no_of_vpath; 1680 } 1681 1682 /* Fill RTH hash types */ 1683 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4; 1684 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4; 1685 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6; 1686 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6; 1687 hash_types.hash_type_tcpipv6ex_en = 1688 vdev->config.rth_hash_type_tcpipv6ex; 1689 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex; 1690 1691 /* set indirection table, bucket-to-vpath mapping */ 1692 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, 1693 vdev->no_of_vpath, 1694 mtable, itable, 1695 vdev->config.rth_bkt_sz); 1696 if (status != VXGE_HW_OK) { 1697 vxge_debug_init(VXGE_ERR, 1698 "RTH indirection table configuration failed " 1699 "for vpath:%d", vdev->vpaths[0].device_id); 1700 return status; 1701 } 1702 1703 /* 1704 * Because the itable_set() method uses the active_table field 1705 * for the target virtual path the RTH config should be updated 1706 * for all VPATHs. The h/w only uses the lowest numbered VPATH 1707 * when steering frames. 1708 */ 1709 for (index = 0; index < vdev->no_of_vpath; index++) { 1710 status = vxge_hw_vpath_rts_rth_set( 1711 vdev->vpaths[index].handle, 1712 vdev->config.rth_algorithm, 1713 &hash_types, 1714 vdev->config.rth_bkt_sz); 1715 1716 if (status != VXGE_HW_OK) { 1717 vxge_debug_init(VXGE_ERR, 1718 "RTH configuration failed for vpath:%d", 1719 vdev->vpaths[index].device_id); 1720 return status; 1721 } 1722 } 1723 1724 return status; 1725} 1726 1727int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac) 1728{ 1729 struct vxge_mac_addrs *new_mac_entry; 1730 u8 *mac_address = NULL; 1731 1732 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT) 1733 return TRUE; 1734 1735 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC); 1736 if (!new_mac_entry) { 1737 vxge_debug_mem(VXGE_ERR, 1738 "%s: memory allocation failed", 1739 VXGE_DRIVER_NAME); 1740 return FALSE; 1741 } 1742 1743 list_add(&new_mac_entry->item, &vpath->mac_addr_list); 1744 1745 /* Copy the new mac address to the list */ 1746 mac_address = (u8 *)&new_mac_entry->macaddr; 1747 memcpy(mac_address, mac->macaddr, ETH_ALEN); 1748 1749 new_mac_entry->state = mac->state; 1750 vpath->mac_addr_cnt++; 1751 1752 /* Is this a multicast address */ 1753 if (0x01 & mac->macaddr[0]) 1754 vpath->mcast_addr_cnt++; 1755 1756 return TRUE; 1757} 1758 1759/* Add a mac address to DA table */ 1760enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac) 1761{ 1762 enum vxge_hw_status status = VXGE_HW_OK; 1763 struct vxge_vpath *vpath; 1764 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode; 1765 1766 if (0x01 & mac->macaddr[0]) /* multicast address */ 1767 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE; 1768 else 1769 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE; 1770 1771 vpath = &vdev->vpaths[mac->vpath_no]; 1772 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr, 1773 mac->macmask, duplicate_mode); 1774 if (status != VXGE_HW_OK) { 1775 vxge_debug_init(VXGE_ERR, 1776 "DA config add entry failed for vpath:%d", 1777 vpath->device_id); 1778 } else 1779 if (FALSE == vxge_mac_list_add(vpath, mac)) 1780 status = -EPERM; 1781 1782 return status; 1783} 1784 1785int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) 1786{ 1787 struct list_head *entry, *next; 1788 u64 del_mac = 0; 1789 u8 *mac_address = (u8 *) (&del_mac); 1790 1791 /* Copy the mac address to delete from the list */ 1792 memcpy(mac_address, mac->macaddr, ETH_ALEN); 1793 1794 list_for_each_safe(entry, next, &vpath->mac_addr_list) { 1795 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) { 1796 list_del(entry); 1797 kfree((struct vxge_mac_addrs *)entry); 1798 vpath->mac_addr_cnt--; 1799 1800 /* Is this a multicast address */ 1801 if (0x01 & mac->macaddr[0]) 1802 vpath->mcast_addr_cnt--; 1803 return TRUE; 1804 } 1805 } 1806 1807 return FALSE; 1808} 1809/* delete a mac address from DA table */ 1810enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac) 1811{ 1812 enum vxge_hw_status status = VXGE_HW_OK; 1813 struct vxge_vpath *vpath; 1814 1815 vpath = &vdev->vpaths[mac->vpath_no]; 1816 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr, 1817 mac->macmask); 1818 if (status != VXGE_HW_OK) { 1819 vxge_debug_init(VXGE_ERR, 1820 "DA config delete entry failed for vpath:%d", 1821 vpath->device_id); 1822 } else 1823 vxge_mac_list_del(vpath, mac); 1824 return status; 1825} 1826 1827/* list all mac addresses from DA table */ 1828enum vxge_hw_status 1829static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, 1830 struct macInfo *mac) 1831{ 1832 enum vxge_hw_status status = VXGE_HW_OK; 1833 unsigned char macmask[ETH_ALEN]; 1834 unsigned char macaddr[ETH_ALEN]; 1835 1836 status = vxge_hw_vpath_mac_addr_get(vpath->handle, 1837 macaddr, macmask); 1838 if (status != VXGE_HW_OK) { 1839 vxge_debug_init(VXGE_ERR, 1840 "DA config list entry failed for vpath:%d", 1841 vpath->device_id); 1842 return status; 1843 } 1844 1845 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) { 1846 1847 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle, 1848 macaddr, macmask); 1849 if (status != VXGE_HW_OK) 1850 break; 1851 } 1852 1853 return status; 1854} 1855 1856/* Store all vlan ids from the list to the vid table */ 1857enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath) 1858{ 1859 enum vxge_hw_status status = VXGE_HW_OK; 1860 struct vxgedev *vdev = vpath->vdev; 1861 u16 vid; 1862 1863 if (vdev->vlgrp && vpath->is_open) { 1864 1865 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 1866 if (!vlan_group_get_device(vdev->vlgrp, vid)) 1867 continue; 1868 /* Add these vlan to the vid table */ 1869 status = vxge_hw_vpath_vid_add(vpath->handle, vid); 1870 } 1871 } 1872 1873 return status; 1874} 1875 1876/* Store all mac addresses from the list to the DA table */ 1877enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath) 1878{ 1879 enum vxge_hw_status status = VXGE_HW_OK; 1880 struct macInfo mac_info; 1881 u8 *mac_address = NULL; 1882 struct list_head *entry, *next; 1883 1884 memset(&mac_info, 0, sizeof(struct macInfo)); 1885 1886 if (vpath->is_open) { 1887 1888 list_for_each_safe(entry, next, &vpath->mac_addr_list) { 1889 mac_address = 1890 (u8 *)& 1891 ((struct vxge_mac_addrs *)entry)->macaddr; 1892 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1893 ((struct vxge_mac_addrs *)entry)->state = 1894 VXGE_LL_MAC_ADDR_IN_DA_TABLE; 1895 /* does this mac address already exist in da table? */ 1896 status = vxge_search_mac_addr_in_da_table(vpath, 1897 &mac_info); 1898 if (status != VXGE_HW_OK) { 1899 /* Add this mac address to the DA table */ 1900 status = vxge_hw_vpath_mac_addr_add( 1901 vpath->handle, mac_info.macaddr, 1902 mac_info.macmask, 1903 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE); 1904 if (status != VXGE_HW_OK) { 1905 vxge_debug_init(VXGE_ERR, 1906 "DA add entry failed for vpath:%d", 1907 vpath->device_id); 1908 ((struct vxge_mac_addrs *)entry)->state 1909 = VXGE_LL_MAC_ADDR_IN_LIST; 1910 } 1911 } 1912 } 1913 } 1914 1915 return status; 1916} 1917 1918/* reset vpaths */ 1919enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) 1920{ 1921 enum vxge_hw_status status = VXGE_HW_OK; 1922 struct vxge_vpath *vpath; 1923 int i; 1924 1925 for (i = 0; i < vdev->no_of_vpath; i++) { 1926 vpath = &vdev->vpaths[i]; 1927 if (vpath->handle) { 1928 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) { 1929 if (is_vxge_card_up(vdev) && 1930 vxge_hw_vpath_recover_from_reset( 1931 vpath->handle) != VXGE_HW_OK) { 1932 vxge_debug_init(VXGE_ERR, 1933 "vxge_hw_vpath_recover_" 1934 "from_reset failed for vpath: " 1935 "%d", i); 1936 return status; 1937 } 1938 } else { 1939 vxge_debug_init(VXGE_ERR, 1940 "vxge_hw_vpath_reset failed for " 1941 "vpath:%d", i); 1942 return status; 1943 } 1944 } 1945 } 1946 1947 return status; 1948} 1949 1950/* close vpaths */ 1951void vxge_close_vpaths(struct vxgedev *vdev, int index) 1952{ 1953 struct vxge_vpath *vpath; 1954 int i; 1955 1956 for (i = index; i < vdev->no_of_vpath; i++) { 1957 vpath = &vdev->vpaths[i]; 1958 1959 if (vpath->handle && vpath->is_open) { 1960 vxge_hw_vpath_close(vpath->handle); 1961 vdev->stats.vpaths_open--; 1962 } 1963 vpath->is_open = 0; 1964 vpath->handle = NULL; 1965 } 1966} 1967 1968/* open vpaths */ 1969int vxge_open_vpaths(struct vxgedev *vdev) 1970{ 1971 struct vxge_hw_vpath_attr attr; 1972 enum vxge_hw_status status; 1973 struct vxge_vpath *vpath; 1974 u32 vp_id = 0; 1975 int i; 1976 1977 for (i = 0; i < vdev->no_of_vpath; i++) { 1978 vpath = &vdev->vpaths[i]; 1979 1980 vxge_assert(vpath->is_configured); 1981 attr.vp_id = vpath->device_id; 1982 attr.fifo_attr.callback = vxge_xmit_compl; 1983 attr.fifo_attr.txdl_term = vxge_tx_term; 1984 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv); 1985 attr.fifo_attr.userdata = &vpath->fifo; 1986 1987 attr.ring_attr.callback = vxge_rx_1b_compl; 1988 attr.ring_attr.rxd_init = vxge_rx_initial_replenish; 1989 attr.ring_attr.rxd_term = vxge_rx_term; 1990 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv); 1991 attr.ring_attr.userdata = &vpath->ring; 1992 1993 vpath->ring.ndev = vdev->ndev; 1994 vpath->ring.pdev = vdev->pdev; 1995 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); 1996 if (status == VXGE_HW_OK) { 1997 vpath->fifo.handle = 1998 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata; 1999 vpath->ring.handle = 2000 (struct __vxge_hw_ring *)attr.ring_attr.userdata; 2001 vpath->fifo.tx_steering_type = 2002 vdev->config.tx_steering_type; 2003 vpath->fifo.ndev = vdev->ndev; 2004 vpath->fifo.pdev = vdev->pdev; 2005 if (vdev->config.tx_steering_type) 2006 vpath->fifo.txq = 2007 netdev_get_tx_queue(vdev->ndev, i); 2008 else 2009 vpath->fifo.txq = 2010 netdev_get_tx_queue(vdev->ndev, 0); 2011 vpath->fifo.indicate_max_pkts = 2012 vdev->config.fifo_indicate_max_pkts; 2013 vpath->ring.rx_vector_no = 0; 2014 vpath->ring.rx_csum = vdev->rx_csum; 2015 vpath->is_open = 1; 2016 vdev->vp_handles[i] = vpath->handle; 2017 vpath->ring.gro_enable = vdev->config.gro_enable; 2018 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip; 2019 vdev->stats.vpaths_open++; 2020 } else { 2021 vdev->stats.vpath_open_fail++; 2022 vxge_debug_init(VXGE_ERR, 2023 "%s: vpath: %d failed to open " 2024 "with status: %d", 2025 vdev->ndev->name, vpath->device_id, 2026 status); 2027 vxge_close_vpaths(vdev, 0); 2028 return -EPERM; 2029 } 2030 2031 vp_id = vpath->handle->vpath->vp_id; 2032 vdev->vpaths_deployed |= vxge_mBIT(vp_id); 2033 } 2034 return VXGE_HW_OK; 2035} 2036 2037/* 2038 * vxge_isr_napi 2039 * @irq: the irq of the device. 2040 * @dev_id: a void pointer to the hldev structure of the Titan device 2041 * @ptregs: pointer to the registers pushed on the stack. 2042 * 2043 * This function is the ISR handler of the device when napi is enabled. It 2044 * identifies the reason for the interrupt and calls the relevant service 2045 * routines. 2046 */ 2047static irqreturn_t vxge_isr_napi(int irq, void *dev_id) 2048{ 2049 struct net_device *dev; 2050 struct __vxge_hw_device *hldev; 2051 u64 reason; 2052 enum vxge_hw_status status; 2053 struct vxgedev *vdev = (struct vxgedev *) dev_id;; 2054 2055 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); 2056 2057 dev = vdev->ndev; 2058 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); 2059 2060 if (pci_channel_offline(vdev->pdev)) 2061 return IRQ_NONE; 2062 2063 if (unlikely(!is_vxge_card_up(vdev))) 2064 return IRQ_NONE; 2065 2066 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, 2067 &reason); 2068 if (status == VXGE_HW_OK) { 2069 vxge_hw_device_mask_all(hldev); 2070 2071 if (reason & 2072 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT( 2073 vdev->vpaths_deployed >> 2074 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) { 2075 2076 vxge_hw_device_clear_tx_rx(hldev); 2077 napi_schedule(&vdev->napi); 2078 vxge_debug_intr(VXGE_TRACE, 2079 "%s:%d Exiting...", __func__, __LINE__); 2080 return IRQ_HANDLED; 2081 } else 2082 vxge_hw_device_unmask_all(hldev); 2083 } else if (unlikely((status == VXGE_HW_ERR_VPATH) || 2084 (status == VXGE_HW_ERR_CRITICAL) || 2085 (status == VXGE_HW_ERR_FIFO))) { 2086 vxge_hw_device_mask_all(hldev); 2087 vxge_hw_device_flush_io(hldev); 2088 return IRQ_HANDLED; 2089 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE)) 2090 return IRQ_HANDLED; 2091 2092 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); 2093 return IRQ_NONE; 2094} 2095 2096#ifdef CONFIG_PCI_MSI 2097 2098static irqreturn_t 2099vxge_tx_msix_handle(int irq, void *dev_id) 2100{ 2101 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; 2102 2103 VXGE_COMPLETE_VPATH_TX(fifo); 2104 2105 return IRQ_HANDLED; 2106} 2107 2108static irqreturn_t 2109vxge_rx_msix_napi_handle(int irq, void *dev_id) 2110{ 2111 struct vxge_ring *ring = (struct vxge_ring *)dev_id; 2112 2113 /* MSIX_IDX for Rx is 1 */ 2114 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, 2115 ring->rx_vector_no); 2116 2117 napi_schedule(&ring->napi); 2118 return IRQ_HANDLED; 2119} 2120 2121static irqreturn_t 2122vxge_alarm_msix_handle(int irq, void *dev_id) 2123{ 2124 int i; 2125 enum vxge_hw_status status; 2126 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id; 2127 struct vxgedev *vdev = vpath->vdev; 2128 int msix_id = (vpath->handle->vpath->vp_id * 2129 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; 2130 2131 for (i = 0; i < vdev->no_of_vpath; i++) { 2132 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); 2133 2134 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, 2135 vdev->exec_mode); 2136 if (status == VXGE_HW_OK) { 2137 2138 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, 2139 msix_id); 2140 continue; 2141 } 2142 vxge_debug_intr(VXGE_ERR, 2143 "%s: vxge_hw_vpath_alarm_process failed %x ", 2144 VXGE_DRIVER_NAME, status); 2145 } 2146 return IRQ_HANDLED; 2147} 2148 2149static int vxge_alloc_msix(struct vxgedev *vdev) 2150{ 2151 int j, i, ret = 0; 2152 int msix_intr_vect = 0, temp; 2153 vdev->intr_cnt = 0; 2154 2155start: 2156 /* Tx/Rx MSIX Vectors count */ 2157 vdev->intr_cnt = vdev->no_of_vpath * 2; 2158 2159 /* Alarm MSIX Vectors count */ 2160 vdev->intr_cnt++; 2161 2162 vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry), 2163 GFP_KERNEL); 2164 if (!vdev->entries) { 2165 vxge_debug_init(VXGE_ERR, 2166 "%s: memory allocation failed", 2167 VXGE_DRIVER_NAME); 2168 ret = -ENOMEM; 2169 goto alloc_entries_failed; 2170 } 2171 2172 vdev->vxge_entries = 2173 kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry), 2174 GFP_KERNEL); 2175 if (!vdev->vxge_entries) { 2176 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", 2177 VXGE_DRIVER_NAME); 2178 ret = -ENOMEM; 2179 goto alloc_vxge_entries_failed; 2180 } 2181 2182 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) { 2183 2184 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; 2185 2186 /* Initialize the fifo vector */ 2187 vdev->entries[j].entry = msix_intr_vect; 2188 vdev->vxge_entries[j].entry = msix_intr_vect; 2189 vdev->vxge_entries[j].in_use = 0; 2190 j++; 2191 2192 /* Initialize the ring vector */ 2193 vdev->entries[j].entry = msix_intr_vect + 1; 2194 vdev->vxge_entries[j].entry = msix_intr_vect + 1; 2195 vdev->vxge_entries[j].in_use = 0; 2196 j++; 2197 } 2198 2199 /* Initialize the alarm vector */ 2200 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID; 2201 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID; 2202 vdev->vxge_entries[j].in_use = 0; 2203 2204 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt); 2205 if (ret > 0) { 2206 vxge_debug_init(VXGE_ERR, 2207 "%s: MSI-X enable failed for %d vectors, ret: %d", 2208 VXGE_DRIVER_NAME, vdev->intr_cnt, ret); 2209 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) { 2210 ret = -ENODEV; 2211 goto enable_msix_failed; 2212 } 2213 2214 kfree(vdev->entries); 2215 kfree(vdev->vxge_entries); 2216 vdev->entries = NULL; 2217 vdev->vxge_entries = NULL; 2218 /* Try with less no of vector by reducing no of vpaths count */ 2219 temp = (ret - 1)/2; 2220 vxge_close_vpaths(vdev, temp); 2221 vdev->no_of_vpath = temp; 2222 goto start; 2223 } else if (ret < 0) { 2224 ret = -ENODEV; 2225 goto enable_msix_failed; 2226 } 2227 return 0; 2228 2229enable_msix_failed: 2230 kfree(vdev->vxge_entries); 2231alloc_vxge_entries_failed: 2232 kfree(vdev->entries); 2233alloc_entries_failed: 2234 return ret; 2235} 2236 2237static int vxge_enable_msix(struct vxgedev *vdev) 2238{ 2239 2240 int i, ret = 0; 2241 /* 0 - Tx, 1 - Rx */ 2242 int tim_msix_id[4] = {0, 1, 0, 0}; 2243 2244 vdev->intr_cnt = 0; 2245 2246 /* allocate msix vectors */ 2247 ret = vxge_alloc_msix(vdev); 2248 if (!ret) { 2249 for (i = 0; i < vdev->no_of_vpath; i++) { 2250 struct vxge_vpath *vpath = &vdev->vpaths[i]; 2251 2252 /* If fifo or ring are not enabled, the MSIX vector for 2253 * it should be set to 0. 2254 */ 2255 vpath->ring.rx_vector_no = (vpath->device_id * 2256 VXGE_HW_VPATH_MSIX_ACTIVE) + 1; 2257 2258 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, 2259 VXGE_ALARM_MSIX_ID); 2260 } 2261 } 2262 2263 return ret; 2264} 2265 2266static void vxge_rem_msix_isr(struct vxgedev *vdev) 2267{ 2268 int intr_cnt; 2269 2270 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1); 2271 intr_cnt++) { 2272 if (vdev->vxge_entries[intr_cnt].in_use) { 2273 synchronize_irq(vdev->entries[intr_cnt].vector); 2274 free_irq(vdev->entries[intr_cnt].vector, 2275 vdev->vxge_entries[intr_cnt].arg); 2276 vdev->vxge_entries[intr_cnt].in_use = 0; 2277 } 2278 } 2279 2280 kfree(vdev->entries); 2281 kfree(vdev->vxge_entries); 2282 vdev->entries = NULL; 2283 vdev->vxge_entries = NULL; 2284 2285 if (vdev->config.intr_type == MSI_X) 2286 pci_disable_msix(vdev->pdev); 2287} 2288#endif 2289 2290static void vxge_rem_isr(struct vxgedev *vdev) 2291{ 2292 struct __vxge_hw_device *hldev; 2293 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2294 2295#ifdef CONFIG_PCI_MSI 2296 if (vdev->config.intr_type == MSI_X) { 2297 vxge_rem_msix_isr(vdev); 2298 } else 2299#endif 2300 if (vdev->config.intr_type == INTA) { 2301 synchronize_irq(vdev->pdev->irq); 2302 free_irq(vdev->pdev->irq, vdev); 2303 } 2304} 2305 2306static int vxge_add_isr(struct vxgedev *vdev) 2307{ 2308 int ret = 0; 2309#ifdef CONFIG_PCI_MSI 2310 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; 2311 int pci_fun = PCI_FUNC(vdev->pdev->devfn); 2312 2313 if (vdev->config.intr_type == MSI_X) 2314 ret = vxge_enable_msix(vdev); 2315 2316 if (ret) { 2317 vxge_debug_init(VXGE_ERR, 2318 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME); 2319 vxge_debug_init(VXGE_ERR, 2320 "%s: Defaulting to INTA", VXGE_DRIVER_NAME); 2321 vdev->config.intr_type = INTA; 2322 } 2323 2324 if (vdev->config.intr_type == MSI_X) { 2325 for (intr_idx = 0; 2326 intr_idx < (vdev->no_of_vpath * 2327 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) { 2328 2329 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE; 2330 irq_req = 0; 2331 2332 switch (msix_idx) { 2333 case 0: 2334 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2335 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d", 2336 vdev->ndev->name, 2337 vdev->entries[intr_cnt].entry, 2338 pci_fun, vp_idx); 2339 ret = request_irq( 2340 vdev->entries[intr_cnt].vector, 2341 vxge_tx_msix_handle, 0, 2342 vdev->desc[intr_cnt], 2343 &vdev->vpaths[vp_idx].fifo); 2344 vdev->vxge_entries[intr_cnt].arg = 2345 &vdev->vpaths[vp_idx].fifo; 2346 irq_req = 1; 2347 break; 2348 case 1: 2349 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2350 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d", 2351 vdev->ndev->name, 2352 vdev->entries[intr_cnt].entry, 2353 pci_fun, vp_idx); 2354 ret = request_irq( 2355 vdev->entries[intr_cnt].vector, 2356 vxge_rx_msix_napi_handle, 2357 0, 2358 vdev->desc[intr_cnt], 2359 &vdev->vpaths[vp_idx].ring); 2360 vdev->vxge_entries[intr_cnt].arg = 2361 &vdev->vpaths[vp_idx].ring; 2362 irq_req = 1; 2363 break; 2364 } 2365 2366 if (ret) { 2367 vxge_debug_init(VXGE_ERR, 2368 "%s: MSIX - %d Registration failed", 2369 vdev->ndev->name, intr_cnt); 2370 vxge_rem_msix_isr(vdev); 2371 vdev->config.intr_type = INTA; 2372 vxge_debug_init(VXGE_ERR, 2373 "%s: Defaulting to INTA" 2374 , vdev->ndev->name); 2375 goto INTA_MODE; 2376 } 2377 2378 if (irq_req) { 2379 /* We requested for this msix interrupt */ 2380 vdev->vxge_entries[intr_cnt].in_use = 1; 2381 msix_idx += vdev->vpaths[vp_idx].device_id * 2382 VXGE_HW_VPATH_MSIX_ACTIVE; 2383 vxge_hw_vpath_msix_unmask( 2384 vdev->vpaths[vp_idx].handle, 2385 msix_idx); 2386 intr_cnt++; 2387 } 2388 2389 /* Point to next vpath handler */ 2390 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) && 2391 (vp_idx < (vdev->no_of_vpath - 1))) 2392 vp_idx++; 2393 } 2394 2395 intr_cnt = vdev->no_of_vpath * 2; 2396 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2397 "%s:vxge:MSI-X %d - Alarm - fn:%d", 2398 vdev->ndev->name, 2399 vdev->entries[intr_cnt].entry, 2400 pci_fun); 2401 /* For Alarm interrupts */ 2402 ret = request_irq(vdev->entries[intr_cnt].vector, 2403 vxge_alarm_msix_handle, 0, 2404 vdev->desc[intr_cnt], 2405 &vdev->vpaths[0]); 2406 if (ret) { 2407 vxge_debug_init(VXGE_ERR, 2408 "%s: MSIX - %d Registration failed", 2409 vdev->ndev->name, intr_cnt); 2410 vxge_rem_msix_isr(vdev); 2411 vdev->config.intr_type = INTA; 2412 vxge_debug_init(VXGE_ERR, 2413 "%s: Defaulting to INTA", 2414 vdev->ndev->name); 2415 goto INTA_MODE; 2416 } 2417 2418 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id * 2419 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; 2420 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle, 2421 msix_idx); 2422 vdev->vxge_entries[intr_cnt].in_use = 1; 2423 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0]; 2424 } 2425INTA_MODE: 2426#endif 2427 2428 if (vdev->config.intr_type == INTA) { 2429 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, 2430 "%s:vxge:INTA", vdev->ndev->name); 2431 vxge_hw_device_set_intr_type(vdev->devh, 2432 VXGE_HW_INTR_MODE_IRQLINE); 2433 vxge_hw_vpath_tti_ci_set(vdev->devh, 2434 vdev->vpaths[0].device_id); 2435 ret = request_irq((int) vdev->pdev->irq, 2436 vxge_isr_napi, 2437 IRQF_SHARED, vdev->desc[0], vdev); 2438 if (ret) { 2439 vxge_debug_init(VXGE_ERR, 2440 "%s %s-%d: ISR registration failed", 2441 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq); 2442 return -ENODEV; 2443 } 2444 vxge_debug_init(VXGE_TRACE, 2445 "new %s-%d line allocated", 2446 "IRQ", vdev->pdev->irq); 2447 } 2448 2449 return VXGE_HW_OK; 2450} 2451 2452static void vxge_poll_vp_reset(unsigned long data) 2453{ 2454 struct vxgedev *vdev = (struct vxgedev *)data; 2455 int i, j = 0; 2456 2457 for (i = 0; i < vdev->no_of_vpath; i++) { 2458 if (test_bit(i, &vdev->vp_reset)) { 2459 vxge_reset_vpath(vdev, i); 2460 j++; 2461 } 2462 } 2463 if (j && (vdev->config.intr_type != MSI_X)) { 2464 vxge_hw_device_unmask_all(vdev->devh); 2465 vxge_hw_device_flush_io(vdev->devh); 2466 } 2467 2468 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2); 2469} 2470 2471static void vxge_poll_vp_lockup(unsigned long data) 2472{ 2473 struct vxgedev *vdev = (struct vxgedev *)data; 2474 enum vxge_hw_status status = VXGE_HW_OK; 2475 struct vxge_vpath *vpath; 2476 struct vxge_ring *ring; 2477 int i; 2478 2479 for (i = 0; i < vdev->no_of_vpath; i++) { 2480 ring = &vdev->vpaths[i].ring; 2481 /* Did this vpath received any packets */ 2482 if (ring->stats.prev_rx_frms == ring->stats.rx_frms) { 2483 status = vxge_hw_vpath_check_leak(ring->handle); 2484 2485 /* Did it received any packets last time */ 2486 if ((VXGE_HW_FAIL == status) && 2487 (VXGE_HW_FAIL == ring->last_status)) { 2488 2489 /* schedule vpath reset */ 2490 if (!test_and_set_bit(i, &vdev->vp_reset)) { 2491 vpath = &vdev->vpaths[i]; 2492 2493 /* disable interrupts for this vpath */ 2494 vxge_vpath_intr_disable(vdev, i); 2495 2496 /* stop the queue for this vpath */ 2497 netif_tx_stop_queue(vpath->fifo.txq); 2498 continue; 2499 } 2500 } 2501 } 2502 ring->stats.prev_rx_frms = ring->stats.rx_frms; 2503 ring->last_status = status; 2504 } 2505 2506 /* Check every 1 milli second */ 2507 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000); 2508} 2509 2510/** 2511 * vxge_open 2512 * @dev: pointer to the device structure. 2513 * 2514 * This function is the open entry point of the driver. It mainly calls a 2515 * function to allocate Rx buffers and inserts them into the buffer 2516 * descriptors and then enables the Rx part of the NIC. 2517 * Return value: '0' on success and an appropriate (-)ve integer as 2518 * defined in errno.h file on failure. 2519 */ 2520int 2521vxge_open(struct net_device *dev) 2522{ 2523 enum vxge_hw_status status; 2524 struct vxgedev *vdev; 2525 struct __vxge_hw_device *hldev; 2526 struct vxge_vpath *vpath; 2527 int ret = 0; 2528 int i; 2529 u64 val64, function_mode; 2530 vxge_debug_entryexit(VXGE_TRACE, 2531 "%s: %s:%d", dev->name, __func__, __LINE__); 2532 2533 vdev = (struct vxgedev *)netdev_priv(dev); 2534 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2535 function_mode = vdev->config.device_hw_info.function_mode; 2536 2537 /* make sure you have link off by default every time Nic is 2538 * initialized */ 2539 netif_carrier_off(dev); 2540 2541 /* Open VPATHs */ 2542 status = vxge_open_vpaths(vdev); 2543 if (status != VXGE_HW_OK) { 2544 vxge_debug_init(VXGE_ERR, 2545 "%s: fatal: Vpath open failed", vdev->ndev->name); 2546 ret = -EPERM; 2547 goto out0; 2548 } 2549 2550 vdev->mtu = dev->mtu; 2551 2552 status = vxge_add_isr(vdev); 2553 if (status != VXGE_HW_OK) { 2554 vxge_debug_init(VXGE_ERR, 2555 "%s: fatal: ISR add failed", dev->name); 2556 ret = -EPERM; 2557 goto out1; 2558 } 2559 2560 if (vdev->config.intr_type != MSI_X) { 2561 netif_napi_add(dev, &vdev->napi, vxge_poll_inta, 2562 vdev->config.napi_weight); 2563 napi_enable(&vdev->napi); 2564 for (i = 0; i < vdev->no_of_vpath; i++) { 2565 vpath = &vdev->vpaths[i]; 2566 vpath->ring.napi_p = &vdev->napi; 2567 } 2568 } else { 2569 for (i = 0; i < vdev->no_of_vpath; i++) { 2570 vpath = &vdev->vpaths[i]; 2571 netif_napi_add(dev, &vpath->ring.napi, 2572 vxge_poll_msix, vdev->config.napi_weight); 2573 napi_enable(&vpath->ring.napi); 2574 vpath->ring.napi_p = &vpath->ring.napi; 2575 } 2576 } 2577 2578 /* configure RTH */ 2579 if (vdev->config.rth_steering) { 2580 status = vxge_rth_configure(vdev); 2581 if (status != VXGE_HW_OK) { 2582 vxge_debug_init(VXGE_ERR, 2583 "%s: fatal: RTH configuration failed", 2584 dev->name); 2585 ret = -EPERM; 2586 goto out2; 2587 } 2588 } 2589 2590 for (i = 0; i < vdev->no_of_vpath; i++) { 2591 vpath = &vdev->vpaths[i]; 2592 2593 /* set initial mtu before enabling the device */ 2594 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu); 2595 if (status != VXGE_HW_OK) { 2596 vxge_debug_init(VXGE_ERR, 2597 "%s: fatal: can not set new MTU", dev->name); 2598 ret = -EPERM; 2599 goto out2; 2600 } 2601 } 2602 2603 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev); 2604 vxge_debug_init(vdev->level_trace, 2605 "%s: MTU is %d", vdev->ndev->name, vdev->mtu); 2606 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev); 2607 2608 /* Restore the DA, VID table and also multicast and promiscuous mode 2609 * states 2610 */ 2611 if (vdev->all_multi_flg) { 2612 for (i = 0; i < vdev->no_of_vpath; i++) { 2613 vpath = &vdev->vpaths[i]; 2614 vxge_restore_vpath_mac_addr(vpath); 2615 vxge_restore_vpath_vid_table(vpath); 2616 2617 status = vxge_hw_vpath_mcast_enable(vpath->handle); 2618 if (status != VXGE_HW_OK) 2619 vxge_debug_init(VXGE_ERR, 2620 "%s:%d Enabling multicast failed", 2621 __func__, __LINE__); 2622 } 2623 } 2624 2625 /* Enable vpath to sniff all unicast/multicast traffic that not 2626 * addressed to them. We allow promiscous mode for PF only 2627 */ 2628 2629 val64 = 0; 2630 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) 2631 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i); 2632 2633 vxge_hw_mgmt_reg_write(vdev->devh, 2634 vxge_hw_mgmt_reg_type_mrpcim, 2635 0, 2636 (ulong)offsetof(struct vxge_hw_mrpcim_reg, 2637 rxmac_authorize_all_addr), 2638 val64); 2639 2640 vxge_hw_mgmt_reg_write(vdev->devh, 2641 vxge_hw_mgmt_reg_type_mrpcim, 2642 0, 2643 (ulong)offsetof(struct vxge_hw_mrpcim_reg, 2644 rxmac_authorize_all_vid), 2645 val64); 2646 2647 vxge_set_multicast(dev); 2648 2649 /* Enabling Bcast and mcast for all vpath */ 2650 for (i = 0; i < vdev->no_of_vpath; i++) { 2651 vpath = &vdev->vpaths[i]; 2652 status = vxge_hw_vpath_bcast_enable(vpath->handle); 2653 if (status != VXGE_HW_OK) 2654 vxge_debug_init(VXGE_ERR, 2655 "%s : Can not enable bcast for vpath " 2656 "id %d", dev->name, i); 2657 if (vdev->config.addr_learn_en) { 2658 status = vxge_hw_vpath_mcast_enable(vpath->handle); 2659 if (status != VXGE_HW_OK) 2660 vxge_debug_init(VXGE_ERR, 2661 "%s : Can not enable mcast for vpath " 2662 "id %d", dev->name, i); 2663 } 2664 } 2665 2666 vxge_hw_device_setpause_data(vdev->devh, 0, 2667 vdev->config.tx_pause_enable, 2668 vdev->config.rx_pause_enable); 2669 2670 if (vdev->vp_reset_timer.function == NULL) 2671 vxge_os_timer(vdev->vp_reset_timer, 2672 vxge_poll_vp_reset, vdev, (HZ/2)); 2673 2674 if (vdev->vp_lockup_timer.function == NULL) 2675 vxge_os_timer(vdev->vp_lockup_timer, 2676 vxge_poll_vp_lockup, vdev, (HZ/2)); 2677 2678 set_bit(__VXGE_STATE_CARD_UP, &vdev->state); 2679 2680 smp_wmb(); 2681 2682 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) { 2683 netif_carrier_on(vdev->ndev); 2684 netdev_notice(vdev->ndev, "Link Up\n"); 2685 vdev->stats.link_up++; 2686 } 2687 2688 vxge_hw_device_intr_enable(vdev->devh); 2689 2690 smp_wmb(); 2691 2692 for (i = 0; i < vdev->no_of_vpath; i++) { 2693 vpath = &vdev->vpaths[i]; 2694 2695 vxge_hw_vpath_enable(vpath->handle); 2696 smp_wmb(); 2697 vxge_hw_vpath_rx_doorbell_init(vpath->handle); 2698 } 2699 2700 netif_tx_start_all_queues(vdev->ndev); 2701 goto out0; 2702 2703out2: 2704 vxge_rem_isr(vdev); 2705 2706 /* Disable napi */ 2707 if (vdev->config.intr_type != MSI_X) 2708 napi_disable(&vdev->napi); 2709 else { 2710 for (i = 0; i < vdev->no_of_vpath; i++) 2711 napi_disable(&vdev->vpaths[i].ring.napi); 2712 } 2713 2714out1: 2715 vxge_close_vpaths(vdev, 0); 2716out0: 2717 vxge_debug_entryexit(VXGE_TRACE, 2718 "%s: %s:%d Exiting...", 2719 dev->name, __func__, __LINE__); 2720 return ret; 2721} 2722 2723/* Loop throught the mac address list and delete all the entries */ 2724void vxge_free_mac_add_list(struct vxge_vpath *vpath) 2725{ 2726 2727 struct list_head *entry, *next; 2728 if (list_empty(&vpath->mac_addr_list)) 2729 return; 2730 2731 list_for_each_safe(entry, next, &vpath->mac_addr_list) { 2732 list_del(entry); 2733 kfree((struct vxge_mac_addrs *)entry); 2734 } 2735} 2736 2737static void vxge_napi_del_all(struct vxgedev *vdev) 2738{ 2739 int i; 2740 if (vdev->config.intr_type != MSI_X) 2741 netif_napi_del(&vdev->napi); 2742 else { 2743 for (i = 0; i < vdev->no_of_vpath; i++) 2744 netif_napi_del(&vdev->vpaths[i].ring.napi); 2745 } 2746} 2747 2748int do_vxge_close(struct net_device *dev, int do_io) 2749{ 2750 enum vxge_hw_status status; 2751 struct vxgedev *vdev; 2752 struct __vxge_hw_device *hldev; 2753 int i; 2754 u64 val64, vpath_vector; 2755 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 2756 dev->name, __func__, __LINE__); 2757 2758 vdev = (struct vxgedev *)netdev_priv(dev); 2759 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2760 2761 if (unlikely(!is_vxge_card_up(vdev))) 2762 return 0; 2763 2764 /* If vxge_handle_crit_err task is executing, 2765 * wait till it completes. */ 2766 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) 2767 msleep(50); 2768 2769 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); 2770 if (do_io) { 2771 /* Put the vpath back in normal mode */ 2772 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id); 2773 status = vxge_hw_mgmt_reg_read(vdev->devh, 2774 vxge_hw_mgmt_reg_type_mrpcim, 2775 0, 2776 (ulong)offsetof( 2777 struct vxge_hw_mrpcim_reg, 2778 rts_mgr_cbasin_cfg), 2779 &val64); 2780 2781 if (status == VXGE_HW_OK) { 2782 val64 &= ~vpath_vector; 2783 status = vxge_hw_mgmt_reg_write(vdev->devh, 2784 vxge_hw_mgmt_reg_type_mrpcim, 2785 0, 2786 (ulong)offsetof( 2787 struct vxge_hw_mrpcim_reg, 2788 rts_mgr_cbasin_cfg), 2789 val64); 2790 } 2791 2792 /* Remove the function 0 from promiscous mode */ 2793 vxge_hw_mgmt_reg_write(vdev->devh, 2794 vxge_hw_mgmt_reg_type_mrpcim, 2795 0, 2796 (ulong)offsetof(struct vxge_hw_mrpcim_reg, 2797 rxmac_authorize_all_addr), 2798 0); 2799 2800 vxge_hw_mgmt_reg_write(vdev->devh, 2801 vxge_hw_mgmt_reg_type_mrpcim, 2802 0, 2803 (ulong)offsetof(struct vxge_hw_mrpcim_reg, 2804 rxmac_authorize_all_vid), 2805 0); 2806 2807 smp_wmb(); 2808 } 2809 del_timer_sync(&vdev->vp_lockup_timer); 2810 2811 del_timer_sync(&vdev->vp_reset_timer); 2812 2813 /* Disable napi */ 2814 if (vdev->config.intr_type != MSI_X) 2815 napi_disable(&vdev->napi); 2816 else { 2817 for (i = 0; i < vdev->no_of_vpath; i++) 2818 napi_disable(&vdev->vpaths[i].ring.napi); 2819 } 2820 2821 netif_carrier_off(vdev->ndev); 2822 netdev_notice(vdev->ndev, "Link Down\n"); 2823 netif_tx_stop_all_queues(vdev->ndev); 2824 2825 /* Note that at this point xmit() is stopped by upper layer */ 2826 if (do_io) 2827 vxge_hw_device_intr_disable(vdev->devh); 2828 2829 mdelay(1000); 2830 2831 vxge_rem_isr(vdev); 2832 2833 vxge_napi_del_all(vdev); 2834 2835 if (do_io) 2836 vxge_reset_all_vpaths(vdev); 2837 2838 vxge_close_vpaths(vdev, 0); 2839 2840 vxge_debug_entryexit(VXGE_TRACE, 2841 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__); 2842 2843 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); 2844 2845 return 0; 2846} 2847 2848/** 2849 * vxge_close 2850 * @dev: device pointer. 2851 * 2852 * This is the stop entry point of the driver. It needs to undo exactly 2853 * whatever was done by the open entry point, thus it's usually referred to 2854 * as the close function.Among other things this function mainly stops the 2855 * Rx side of the NIC and frees all the Rx buffers in the Rx rings. 2856 * Return value: '0' on success and an appropriate (-)ve integer as 2857 * defined in errno.h file on failure. 2858 */ 2859int 2860vxge_close(struct net_device *dev) 2861{ 2862 do_vxge_close(dev, 1); 2863 return 0; 2864} 2865 2866/** 2867 * vxge_change_mtu 2868 * @dev: net device pointer. 2869 * @new_mtu :the new MTU size for the device. 2870 * 2871 * A driver entry point to change MTU size for the device. Before changing 2872 * the MTU the device must be stopped. 2873 */ 2874static int vxge_change_mtu(struct net_device *dev, int new_mtu) 2875{ 2876 struct vxgedev *vdev = netdev_priv(dev); 2877 2878 vxge_debug_entryexit(vdev->level_trace, 2879 "%s:%d", __func__, __LINE__); 2880 if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) { 2881 vxge_debug_init(vdev->level_err, 2882 "%s: mtu size is invalid", dev->name); 2883 return -EPERM; 2884 } 2885 2886 /* check if device is down already */ 2887 if (unlikely(!is_vxge_card_up(vdev))) { 2888 /* just store new value, will use later on open() */ 2889 dev->mtu = new_mtu; 2890 vxge_debug_init(vdev->level_err, 2891 "%s", "device is down on MTU change"); 2892 return 0; 2893 } 2894 2895 vxge_debug_init(vdev->level_trace, 2896 "trying to apply new MTU %d", new_mtu); 2897 2898 if (vxge_close(dev)) 2899 return -EIO; 2900 2901 dev->mtu = new_mtu; 2902 vdev->mtu = new_mtu; 2903 2904 if (vxge_open(dev)) 2905 return -EIO; 2906 2907 vxge_debug_init(vdev->level_trace, 2908 "%s: MTU changed to %d", vdev->ndev->name, new_mtu); 2909 2910 vxge_debug_entryexit(vdev->level_trace, 2911 "%s:%d Exiting...", __func__, __LINE__); 2912 2913 return 0; 2914} 2915 2916/** 2917 * vxge_get_stats 2918 * @dev: pointer to the device structure 2919 * 2920 * Updates the device statistics structure. This function updates the device 2921 * statistics structure in the net_device structure and returns a pointer 2922 * to the same. 2923 */ 2924static struct net_device_stats * 2925vxge_get_stats(struct net_device *dev) 2926{ 2927 struct vxgedev *vdev; 2928 struct net_device_stats *net_stats; 2929 int k; 2930 2931 vdev = netdev_priv(dev); 2932 2933 net_stats = &vdev->stats.net_stats; 2934 2935 memset(net_stats, 0, sizeof(struct net_device_stats)); 2936 2937 for (k = 0; k < vdev->no_of_vpath; k++) { 2938 net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms; 2939 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; 2940 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; 2941 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; 2942 net_stats->rx_dropped += 2943 vdev->vpaths[k].ring.stats.rx_dropped; 2944 2945 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; 2946 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; 2947 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; 2948 } 2949 2950 return net_stats; 2951} 2952 2953/** 2954 * vxge_ioctl 2955 * @dev: Device pointer. 2956 * @ifr: An IOCTL specific structure, that can contain a pointer to 2957 * a proprietary structure used to pass information to the driver. 2958 * @cmd: This is used to distinguish between the different commands that 2959 * can be passed to the IOCTL functions. 2960 * 2961 * Entry point for the Ioctl. 2962 */ 2963static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2964{ 2965 return -EOPNOTSUPP; 2966} 2967 2968/** 2969 * vxge_tx_watchdog 2970 * @dev: pointer to net device structure 2971 * 2972 * Watchdog for transmit side. 2973 * This function is triggered if the Tx Queue is stopped 2974 * for a pre-defined amount of time when the Interface is still up. 2975 */ 2976static void 2977vxge_tx_watchdog(struct net_device *dev) 2978{ 2979 struct vxgedev *vdev; 2980 2981 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 2982 2983 vdev = (struct vxgedev *)netdev_priv(dev); 2984 2985 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; 2986 2987 vxge_reset(vdev); 2988 vxge_debug_entryexit(VXGE_TRACE, 2989 "%s:%d Exiting...", __func__, __LINE__); 2990} 2991 2992/** 2993 * vxge_vlan_rx_register 2994 * @dev: net device pointer. 2995 * @grp: vlan group 2996 * 2997 * Vlan group registration 2998 */ 2999static void 3000vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 3001{ 3002 struct vxgedev *vdev; 3003 struct vxge_vpath *vpath; 3004 int vp; 3005 u64 vid; 3006 enum vxge_hw_status status; 3007 int i; 3008 3009 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3010 3011 vdev = (struct vxgedev *)netdev_priv(dev); 3012 3013 vpath = &vdev->vpaths[0]; 3014 if ((NULL == grp) && (vpath->is_open)) { 3015 /* Get the first vlan */ 3016 status = vxge_hw_vpath_vid_get(vpath->handle, &vid); 3017 3018 while (status == VXGE_HW_OK) { 3019 3020 /* Delete this vlan from the vid table */ 3021 for (vp = 0; vp < vdev->no_of_vpath; vp++) { 3022 vpath = &vdev->vpaths[vp]; 3023 if (!vpath->is_open) 3024 continue; 3025 3026 vxge_hw_vpath_vid_delete(vpath->handle, vid); 3027 } 3028 3029 /* Get the next vlan to be deleted */ 3030 vpath = &vdev->vpaths[0]; 3031 status = vxge_hw_vpath_vid_get(vpath->handle, &vid); 3032 } 3033 } 3034 3035 vdev->vlgrp = grp; 3036 3037 for (i = 0; i < vdev->no_of_vpath; i++) { 3038 if (vdev->vpaths[i].is_configured) 3039 vdev->vpaths[i].ring.vlgrp = grp; 3040 } 3041 3042 vxge_debug_entryexit(VXGE_TRACE, 3043 "%s:%d Exiting...", __func__, __LINE__); 3044} 3045 3046/** 3047 * vxge_vlan_rx_add_vid 3048 * @dev: net device pointer. 3049 * @vid: vid 3050 * 3051 * Add the vlan id to the devices vlan id table 3052 */ 3053static void 3054vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 3055{ 3056 struct vxgedev *vdev; 3057 struct vxge_vpath *vpath; 3058 int vp_id; 3059 3060 vdev = (struct vxgedev *)netdev_priv(dev); 3061 3062 /* Add these vlan to the vid table */ 3063 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 3064 vpath = &vdev->vpaths[vp_id]; 3065 if (!vpath->is_open) 3066 continue; 3067 vxge_hw_vpath_vid_add(vpath->handle, vid); 3068 } 3069} 3070 3071/** 3072 * vxge_vlan_rx_add_vid 3073 * @dev: net device pointer. 3074 * @vid: vid 3075 * 3076 * Remove the vlan id from the device's vlan id table 3077 */ 3078static void 3079vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 3080{ 3081 struct vxgedev *vdev; 3082 struct vxge_vpath *vpath; 3083 int vp_id; 3084 3085 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3086 3087 vdev = (struct vxgedev *)netdev_priv(dev); 3088 3089 vlan_group_set_device(vdev->vlgrp, vid, NULL); 3090 3091 /* Delete this vlan from the vid table */ 3092 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 3093 vpath = &vdev->vpaths[vp_id]; 3094 if (!vpath->is_open) 3095 continue; 3096 vxge_hw_vpath_vid_delete(vpath->handle, vid); 3097 } 3098 vxge_debug_entryexit(VXGE_TRACE, 3099 "%s:%d Exiting...", __func__, __LINE__); 3100} 3101 3102static const struct net_device_ops vxge_netdev_ops = { 3103 .ndo_open = vxge_open, 3104 .ndo_stop = vxge_close, 3105 .ndo_get_stats = vxge_get_stats, 3106 .ndo_start_xmit = vxge_xmit, 3107 .ndo_validate_addr = eth_validate_addr, 3108 .ndo_set_multicast_list = vxge_set_multicast, 3109 3110 .ndo_do_ioctl = vxge_ioctl, 3111 3112 .ndo_set_mac_address = vxge_set_mac_addr, 3113 .ndo_change_mtu = vxge_change_mtu, 3114 .ndo_vlan_rx_register = vxge_vlan_rx_register, 3115 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, 3116 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, 3117 3118 .ndo_tx_timeout = vxge_tx_watchdog, 3119#ifdef CONFIG_NET_POLL_CONTROLLER 3120 .ndo_poll_controller = vxge_netpoll, 3121#endif 3122}; 3123 3124int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3125 struct vxge_config *config, 3126 int high_dma, int no_of_vpath, 3127 struct vxgedev **vdev_out) 3128{ 3129 struct net_device *ndev; 3130 enum vxge_hw_status status = VXGE_HW_OK; 3131 struct vxgedev *vdev; 3132 int ret = 0, no_of_queue = 1; 3133 u64 stat; 3134 3135 *vdev_out = NULL; 3136 if (config->tx_steering_type) 3137 no_of_queue = no_of_vpath; 3138 3139 ndev = alloc_etherdev_mq(sizeof(struct vxgedev), 3140 no_of_queue); 3141 if (ndev == NULL) { 3142 vxge_debug_init( 3143 vxge_hw_device_trace_level_get(hldev), 3144 "%s : device allocation failed", __func__); 3145 ret = -ENODEV; 3146 goto _out0; 3147 } 3148 3149 vxge_debug_entryexit( 3150 vxge_hw_device_trace_level_get(hldev), 3151 "%s: %s:%d Entering...", 3152 ndev->name, __func__, __LINE__); 3153 3154 vdev = netdev_priv(ndev); 3155 memset(vdev, 0, sizeof(struct vxgedev)); 3156 3157 vdev->ndev = ndev; 3158 vdev->devh = hldev; 3159 vdev->pdev = hldev->pdev; 3160 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3161 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ 3162 3163 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3164 3165 ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 3166 NETIF_F_HW_VLAN_FILTER; 3167 /* Driver entry points */ 3168 ndev->irq = vdev->pdev->irq; 3169 ndev->base_addr = (unsigned long) hldev->bar0; 3170 3171 ndev->netdev_ops = &vxge_netdev_ops; 3172 3173 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; 3174 3175 initialize_ethtool_ops(ndev); 3176 3177 /* Allocate memory for vpath */ 3178 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * 3179 no_of_vpath, GFP_KERNEL); 3180 if (!vdev->vpaths) { 3181 vxge_debug_init(VXGE_ERR, 3182 "%s: vpath memory allocation failed", 3183 vdev->ndev->name); 3184 ret = -ENODEV; 3185 goto _out1; 3186 } 3187 3188 ndev->features |= NETIF_F_SG; 3189 3190 ndev->features |= NETIF_F_HW_CSUM; 3191 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3192 "%s : checksuming enabled", __func__); 3193 3194 if (high_dma) { 3195 ndev->features |= NETIF_F_HIGHDMA; 3196 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3197 "%s : using High DMA", __func__); 3198 } 3199 3200 ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; 3201 3202 if (vdev->config.gro_enable) 3203 ndev->features |= NETIF_F_GRO; 3204 3205 if (register_netdev(ndev)) { 3206 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3207 "%s: %s : device registration failed!", 3208 ndev->name, __func__); 3209 ret = -ENODEV; 3210 goto _out2; 3211 } 3212 3213 /* Set the factory defined MAC address initially */ 3214 ndev->addr_len = ETH_ALEN; 3215 3216 /* Make Link state as off at this point, when the Link change 3217 * interrupt comes the state will be automatically changed to 3218 * the right state. 3219 */ 3220 netif_carrier_off(ndev); 3221 3222 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3223 "%s: Ethernet device registered", 3224 ndev->name); 3225 3226 *vdev_out = vdev; 3227 3228 /* Resetting the Device stats */ 3229 status = vxge_hw_mrpcim_stats_access( 3230 hldev, 3231 VXGE_HW_STATS_OP_CLEAR_ALL_STATS, 3232 0, 3233 0, 3234 &stat); 3235 3236 if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION) 3237 vxge_debug_init( 3238 vxge_hw_device_trace_level_get(hldev), 3239 "%s: device stats clear returns" 3240 "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name); 3241 3242 vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev), 3243 "%s: %s:%d Exiting...", 3244 ndev->name, __func__, __LINE__); 3245 3246 return ret; 3247_out2: 3248 kfree(vdev->vpaths); 3249_out1: 3250 free_netdev(ndev); 3251_out0: 3252 return ret; 3253} 3254 3255/* 3256 * vxge_device_unregister 3257 * 3258 * This function will unregister and free network device 3259 */ 3260void 3261vxge_device_unregister(struct __vxge_hw_device *hldev) 3262{ 3263 struct vxgedev *vdev; 3264 struct net_device *dev; 3265 char buf[IFNAMSIZ]; 3266#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) 3267 u32 level_trace; 3268#endif 3269 3270 dev = hldev->ndev; 3271 vdev = netdev_priv(dev); 3272#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) 3273 level_trace = vdev->level_trace; 3274#endif 3275 vxge_debug_entryexit(level_trace, 3276 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); 3277 3278 memcpy(buf, vdev->ndev->name, IFNAMSIZ); 3279 3280 /* in 2.6 will call stop() if device is up */ 3281 unregister_netdev(dev); 3282 3283 flush_scheduled_work(); 3284 3285 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf); 3286 vxge_debug_entryexit(level_trace, 3287 "%s: %s:%d Exiting...", buf, __func__, __LINE__); 3288} 3289 3290/* 3291 * vxge_callback_crit_err 3292 * 3293 * This function is called by the alarm handler in interrupt context. 3294 * Driver must analyze it based on the event type. 3295 */ 3296static void 3297vxge_callback_crit_err(struct __vxge_hw_device *hldev, 3298 enum vxge_hw_event type, u64 vp_id) 3299{ 3300 struct net_device *dev = hldev->ndev; 3301 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 3302 struct vxge_vpath *vpath = NULL; 3303 int vpath_idx; 3304 3305 vxge_debug_entryexit(vdev->level_trace, 3306 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); 3307 3308 /* Note: This event type should be used for device wide 3309 * indications only - Serious errors, Slot freeze and critical errors 3310 */ 3311 vdev->cric_err_event = type; 3312 3313 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { 3314 vpath = &vdev->vpaths[vpath_idx]; 3315 if (vpath->device_id == vp_id) 3316 break; 3317 } 3318 3319 if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) { 3320 if (type == VXGE_HW_EVENT_SLOT_FREEZE) { 3321 vxge_debug_init(VXGE_ERR, 3322 "%s: Slot is frozen", vdev->ndev->name); 3323 } else if (type == VXGE_HW_EVENT_SERR) { 3324 vxge_debug_init(VXGE_ERR, 3325 "%s: Encountered Serious Error", 3326 vdev->ndev->name); 3327 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) 3328 vxge_debug_init(VXGE_ERR, 3329 "%s: Encountered Critical Error", 3330 vdev->ndev->name); 3331 } 3332 3333 if ((type == VXGE_HW_EVENT_SERR) || 3334 (type == VXGE_HW_EVENT_SLOT_FREEZE)) { 3335 if (unlikely(vdev->exec_mode)) 3336 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); 3337 } else if (type == VXGE_HW_EVENT_CRITICAL_ERR) { 3338 vxge_hw_device_mask_all(hldev); 3339 if (unlikely(vdev->exec_mode)) 3340 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); 3341 } else if ((type == VXGE_HW_EVENT_FIFO_ERR) || 3342 (type == VXGE_HW_EVENT_VPATH_ERR)) { 3343 3344 if (unlikely(vdev->exec_mode)) 3345 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state); 3346 else { 3347 /* check if this vpath is already set for reset */ 3348 if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) { 3349 3350 /* disable interrupts for this vpath */ 3351 vxge_vpath_intr_disable(vdev, vpath_idx); 3352 3353 /* stop the queue for this vpath */ 3354 netif_tx_stop_queue(vpath->fifo.txq); 3355 } 3356 } 3357 } 3358 3359 vxge_debug_entryexit(vdev->level_trace, 3360 "%s: %s:%d Exiting...", 3361 vdev->ndev->name, __func__, __LINE__); 3362} 3363 3364static void verify_bandwidth(void) 3365{ 3366 int i, band_width, total = 0, equal_priority = 0; 3367 3368 /* 1. If user enters 0 for some fifo, give equal priority to all */ 3369 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3370 if (bw_percentage[i] == 0) { 3371 equal_priority = 1; 3372 break; 3373 } 3374 } 3375 3376 if (!equal_priority) { 3377 /* 2. If sum exceeds 100, give equal priority to all */ 3378 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3379 if (bw_percentage[i] == 0xFF) 3380 break; 3381 3382 total += bw_percentage[i]; 3383 if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) { 3384 equal_priority = 1; 3385 break; 3386 } 3387 } 3388 } 3389 3390 if (!equal_priority) { 3391 /* Is all the bandwidth consumed? */ 3392 if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) { 3393 if (i < VXGE_HW_MAX_VIRTUAL_PATHS) { 3394 /* Split rest of bw equally among next VPs*/ 3395 band_width = 3396 (VXGE_HW_VPATH_BANDWIDTH_MAX - total) / 3397 (VXGE_HW_MAX_VIRTUAL_PATHS - i); 3398 if (band_width < 2) /* min of 2% */ 3399 equal_priority = 1; 3400 else { 3401 for (; i < VXGE_HW_MAX_VIRTUAL_PATHS; 3402 i++) 3403 bw_percentage[i] = 3404 band_width; 3405 } 3406 } 3407 } else if (i < VXGE_HW_MAX_VIRTUAL_PATHS) 3408 equal_priority = 1; 3409 } 3410 3411 if (equal_priority) { 3412 vxge_debug_init(VXGE_ERR, 3413 "%s: Assigning equal bandwidth to all the vpaths", 3414 VXGE_DRIVER_NAME); 3415 bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX / 3416 VXGE_HW_MAX_VIRTUAL_PATHS; 3417 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) 3418 bw_percentage[i] = bw_percentage[0]; 3419 } 3420} 3421 3422/* 3423 * Vpath configuration 3424 */ 3425static int __devinit vxge_config_vpaths( 3426 struct vxge_hw_device_config *device_config, 3427 u64 vpath_mask, struct vxge_config *config_param) 3428{ 3429 int i, no_of_vpaths = 0, default_no_vpath = 0, temp; 3430 u32 txdl_size, txdl_per_memblock; 3431 3432 temp = driver_config->vpath_per_dev; 3433 if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) && 3434 (max_config_dev == VXGE_MAX_CONFIG_DEV)) { 3435 /* No more CPU. Return vpath number as zero.*/ 3436 if (driver_config->g_no_cpus == -1) 3437 return 0; 3438 3439 if (!driver_config->g_no_cpus) 3440 driver_config->g_no_cpus = num_online_cpus(); 3441 3442 driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1; 3443 if (!driver_config->vpath_per_dev) 3444 driver_config->vpath_per_dev = 1; 3445 3446 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) 3447 if (!vxge_bVALn(vpath_mask, i, 1)) 3448 continue; 3449 else 3450 default_no_vpath++; 3451 if (default_no_vpath < driver_config->vpath_per_dev) 3452 driver_config->vpath_per_dev = default_no_vpath; 3453 3454 driver_config->g_no_cpus = driver_config->g_no_cpus - 3455 (driver_config->vpath_per_dev * 2); 3456 if (driver_config->g_no_cpus <= 0) 3457 driver_config->g_no_cpus = -1; 3458 } 3459 3460 if (driver_config->vpath_per_dev == 1) { 3461 vxge_debug_ll_config(VXGE_TRACE, 3462 "%s: Disable tx and rx steering, " 3463 "as single vpath is configured", VXGE_DRIVER_NAME); 3464 config_param->rth_steering = NO_STEERING; 3465 config_param->tx_steering_type = NO_STEERING; 3466 device_config->rth_en = 0; 3467 } 3468 3469 /* configure bandwidth */ 3470 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) 3471 device_config->vp_config[i].min_bandwidth = bw_percentage[i]; 3472 3473 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3474 device_config->vp_config[i].vp_id = i; 3475 device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU; 3476 if (no_of_vpaths < driver_config->vpath_per_dev) { 3477 if (!vxge_bVALn(vpath_mask, i, 1)) { 3478 vxge_debug_ll_config(VXGE_TRACE, 3479 "%s: vpath: %d is not available", 3480 VXGE_DRIVER_NAME, i); 3481 continue; 3482 } else { 3483 vxge_debug_ll_config(VXGE_TRACE, 3484 "%s: vpath: %d available", 3485 VXGE_DRIVER_NAME, i); 3486 no_of_vpaths++; 3487 } 3488 } else { 3489 vxge_debug_ll_config(VXGE_TRACE, 3490 "%s: vpath: %d is not configured, " 3491 "max_config_vpath exceeded", 3492 VXGE_DRIVER_NAME, i); 3493 break; 3494 } 3495 3496 /* Configure Tx fifo's */ 3497 device_config->vp_config[i].fifo.enable = 3498 VXGE_HW_FIFO_ENABLE; 3499 device_config->vp_config[i].fifo.max_frags = 3500 MAX_SKB_FRAGS + 1; 3501 device_config->vp_config[i].fifo.memblock_size = 3502 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE; 3503 3504 txdl_size = device_config->vp_config[i].fifo.max_frags * 3505 sizeof(struct vxge_hw_fifo_txd); 3506 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size; 3507 3508 device_config->vp_config[i].fifo.fifo_blocks = 3509 ((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1; 3510 3511 device_config->vp_config[i].fifo.intr = 3512 VXGE_HW_FIFO_QUEUE_INTR_DISABLE; 3513 3514 /* Configure tti properties */ 3515 device_config->vp_config[i].tti.intr_enable = 3516 VXGE_HW_TIM_INTR_ENABLE; 3517 3518 device_config->vp_config[i].tti.btimer_val = 3519 (VXGE_TTI_BTIMER_VAL * 1000) / 272; 3520 3521 device_config->vp_config[i].tti.timer_ac_en = 3522 VXGE_HW_TIM_TIMER_AC_ENABLE; 3523 3524 /* For msi-x with napi (each vector 3525 has a handler of its own) - 3526 Set CI to OFF for all vpaths */ 3527 device_config->vp_config[i].tti.timer_ci_en = 3528 VXGE_HW_TIM_TIMER_CI_DISABLE; 3529 3530 device_config->vp_config[i].tti.timer_ri_en = 3531 VXGE_HW_TIM_TIMER_RI_DISABLE; 3532 3533 device_config->vp_config[i].tti.util_sel = 3534 VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL; 3535 3536 device_config->vp_config[i].tti.ltimer_val = 3537 (VXGE_TTI_LTIMER_VAL * 1000) / 272; 3538 3539 device_config->vp_config[i].tti.rtimer_val = 3540 (VXGE_TTI_RTIMER_VAL * 1000) / 272; 3541 3542 device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A; 3543 device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B; 3544 device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C; 3545 device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A; 3546 device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B; 3547 device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C; 3548 device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D; 3549 3550 /* Configure Rx rings */ 3551 device_config->vp_config[i].ring.enable = 3552 VXGE_HW_RING_ENABLE; 3553 3554 device_config->vp_config[i].ring.ring_blocks = 3555 VXGE_HW_DEF_RING_BLOCKS; 3556 device_config->vp_config[i].ring.buffer_mode = 3557 VXGE_HW_RING_RXD_BUFFER_MODE_1; 3558 device_config->vp_config[i].ring.rxds_limit = 3559 VXGE_HW_DEF_RING_RXDS_LIMIT; 3560 device_config->vp_config[i].ring.scatter_mode = 3561 VXGE_HW_RING_SCATTER_MODE_A; 3562 3563 /* Configure rti properties */ 3564 device_config->vp_config[i].rti.intr_enable = 3565 VXGE_HW_TIM_INTR_ENABLE; 3566 3567 device_config->vp_config[i].rti.btimer_val = 3568 (VXGE_RTI_BTIMER_VAL * 1000)/272; 3569 3570 device_config->vp_config[i].rti.timer_ac_en = 3571 VXGE_HW_TIM_TIMER_AC_ENABLE; 3572 3573 device_config->vp_config[i].rti.timer_ci_en = 3574 VXGE_HW_TIM_TIMER_CI_DISABLE; 3575 3576 device_config->vp_config[i].rti.timer_ri_en = 3577 VXGE_HW_TIM_TIMER_RI_DISABLE; 3578 3579 device_config->vp_config[i].rti.util_sel = 3580 VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL; 3581 3582 device_config->vp_config[i].rti.urange_a = 3583 RTI_RX_URANGE_A; 3584 device_config->vp_config[i].rti.urange_b = 3585 RTI_RX_URANGE_B; 3586 device_config->vp_config[i].rti.urange_c = 3587 RTI_RX_URANGE_C; 3588 device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A; 3589 device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B; 3590 device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C; 3591 device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D; 3592 3593 device_config->vp_config[i].rti.rtimer_val = 3594 (VXGE_RTI_RTIMER_VAL * 1000) / 272; 3595 3596 device_config->vp_config[i].rti.ltimer_val = 3597 (VXGE_RTI_LTIMER_VAL * 1000) / 272; 3598 3599 device_config->vp_config[i].rpa_strip_vlan_tag = 3600 vlan_tag_strip; 3601 } 3602 3603 driver_config->vpath_per_dev = temp; 3604 return no_of_vpaths; 3605} 3606 3607/* initialize device configuratrions */ 3608static void __devinit vxge_device_config_init( 3609 struct vxge_hw_device_config *device_config, 3610 int *intr_type) 3611{ 3612 /* Used for CQRQ/SRQ. */ 3613 device_config->dma_blockpool_initial = 3614 VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE; 3615 3616 device_config->dma_blockpool_max = 3617 VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE; 3618 3619 if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT) 3620 max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT; 3621 3622#ifndef CONFIG_PCI_MSI 3623 vxge_debug_init(VXGE_ERR, 3624 "%s: This Kernel does not support " 3625 "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); 3626 *intr_type = INTA; 3627#endif 3628 3629 /* Configure whether MSI-X or IRQL. */ 3630 switch (*intr_type) { 3631 case INTA: 3632 device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE; 3633 break; 3634 3635 case MSI_X: 3636 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; 3637 break; 3638 } 3639 /* Timer period between device poll */ 3640 device_config->device_poll_millis = VXGE_TIMER_DELAY; 3641 3642 /* Configure mac based steering. */ 3643 device_config->rts_mac_en = addr_learn_en; 3644 3645 /* Configure Vpaths */ 3646 device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT; 3647 3648 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", 3649 __func__); 3650 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d", 3651 device_config->dma_blockpool_initial); 3652 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d", 3653 device_config->dma_blockpool_max); 3654 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", 3655 device_config->intr_mode); 3656 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", 3657 device_config->device_poll_millis); 3658 vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d", 3659 device_config->rts_mac_en); 3660 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", 3661 device_config->rth_en); 3662 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", 3663 device_config->rth_it_type); 3664} 3665 3666static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask) 3667{ 3668 int i; 3669 3670 vxge_debug_init(VXGE_TRACE, 3671 "%s: %d Vpath(s) opened", 3672 vdev->ndev->name, vdev->no_of_vpath); 3673 3674 switch (vdev->config.intr_type) { 3675 case INTA: 3676 vxge_debug_init(VXGE_TRACE, 3677 "%s: Interrupt type INTA", vdev->ndev->name); 3678 break; 3679 3680 case MSI_X: 3681 vxge_debug_init(VXGE_TRACE, 3682 "%s: Interrupt type MSI-X", vdev->ndev->name); 3683 break; 3684 } 3685 3686 if (vdev->config.rth_steering) { 3687 vxge_debug_init(VXGE_TRACE, 3688 "%s: RTH steering enabled for TCP_IPV4", 3689 vdev->ndev->name); 3690 } else { 3691 vxge_debug_init(VXGE_TRACE, 3692 "%s: RTH steering disabled", vdev->ndev->name); 3693 } 3694 3695 switch (vdev->config.tx_steering_type) { 3696 case NO_STEERING: 3697 vxge_debug_init(VXGE_TRACE, 3698 "%s: Tx steering disabled", vdev->ndev->name); 3699 break; 3700 case TX_PRIORITY_STEERING: 3701 vxge_debug_init(VXGE_TRACE, 3702 "%s: Unsupported tx steering option", 3703 vdev->ndev->name); 3704 vxge_debug_init(VXGE_TRACE, 3705 "%s: Tx steering disabled", vdev->ndev->name); 3706 vdev->config.tx_steering_type = 0; 3707 break; 3708 case TX_VLAN_STEERING: 3709 vxge_debug_init(VXGE_TRACE, 3710 "%s: Unsupported tx steering option", 3711 vdev->ndev->name); 3712 vxge_debug_init(VXGE_TRACE, 3713 "%s: Tx steering disabled", vdev->ndev->name); 3714 vdev->config.tx_steering_type = 0; 3715 break; 3716 case TX_MULTIQ_STEERING: 3717 vxge_debug_init(VXGE_TRACE, 3718 "%s: Tx multiqueue steering enabled", 3719 vdev->ndev->name); 3720 break; 3721 case TX_PORT_STEERING: 3722 vxge_debug_init(VXGE_TRACE, 3723 "%s: Tx port steering enabled", 3724 vdev->ndev->name); 3725 break; 3726 default: 3727 vxge_debug_init(VXGE_ERR, 3728 "%s: Unsupported tx steering type", 3729 vdev->ndev->name); 3730 vxge_debug_init(VXGE_TRACE, 3731 "%s: Tx steering disabled", vdev->ndev->name); 3732 vdev->config.tx_steering_type = 0; 3733 } 3734 3735 if (vdev->config.gro_enable) { 3736 vxge_debug_init(VXGE_ERR, 3737 "%s: Generic receive offload enabled", 3738 vdev->ndev->name); 3739 } else 3740 vxge_debug_init(VXGE_TRACE, 3741 "%s: Generic receive offload disabled", 3742 vdev->ndev->name); 3743 3744 if (vdev->config.addr_learn_en) 3745 vxge_debug_init(VXGE_TRACE, 3746 "%s: MAC Address learning enabled", vdev->ndev->name); 3747 3748 vxge_debug_init(VXGE_TRACE, 3749 "%s: Rx doorbell mode enabled", vdev->ndev->name); 3750 3751 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3752 if (!vxge_bVALn(vpath_mask, i, 1)) 3753 continue; 3754 vxge_debug_ll_config(VXGE_TRACE, 3755 "%s: MTU size - %d", vdev->ndev->name, 3756 ((struct __vxge_hw_device *)(vdev->devh))-> 3757 config.vp_config[i].mtu); 3758 vxge_debug_init(VXGE_TRACE, 3759 "%s: VLAN tag stripping %s", vdev->ndev->name, 3760 ((struct __vxge_hw_device *)(vdev->devh))-> 3761 config.vp_config[i].rpa_strip_vlan_tag 3762 ? "Enabled" : "Disabled"); 3763 vxge_debug_init(VXGE_TRACE, 3764 "%s: Ring blocks : %d", vdev->ndev->name, 3765 ((struct __vxge_hw_device *)(vdev->devh))-> 3766 config.vp_config[i].ring.ring_blocks); 3767 vxge_debug_init(VXGE_TRACE, 3768 "%s: Fifo blocks : %d", vdev->ndev->name, 3769 ((struct __vxge_hw_device *)(vdev->devh))-> 3770 config.vp_config[i].fifo.fifo_blocks); 3771 vxge_debug_ll_config(VXGE_TRACE, 3772 "%s: Max frags : %d", vdev->ndev->name, 3773 ((struct __vxge_hw_device *)(vdev->devh))-> 3774 config.vp_config[i].fifo.max_frags); 3775 break; 3776 } 3777} 3778 3779#ifdef CONFIG_PM 3780/** 3781 * vxge_pm_suspend - vxge power management suspend entry point 3782 * 3783 */ 3784static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state) 3785{ 3786 return -ENOSYS; 3787} 3788/** 3789 * vxge_pm_resume - vxge power management resume entry point 3790 * 3791 */ 3792static int vxge_pm_resume(struct pci_dev *pdev) 3793{ 3794 return -ENOSYS; 3795} 3796 3797#endif 3798 3799/** 3800 * vxge_io_error_detected - called when PCI error is detected 3801 * @pdev: Pointer to PCI device 3802 * @state: The current pci connection state 3803 * 3804 * This function is called after a PCI bus error affecting 3805 * this device has been detected. 3806 */ 3807static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, 3808 pci_channel_state_t state) 3809{ 3810 struct __vxge_hw_device *hldev = 3811 (struct __vxge_hw_device *) pci_get_drvdata(pdev); 3812 struct net_device *netdev = hldev->ndev; 3813 3814 netif_device_detach(netdev); 3815 3816 if (state == pci_channel_io_perm_failure) 3817 return PCI_ERS_RESULT_DISCONNECT; 3818 3819 if (netif_running(netdev)) { 3820 /* Bring down the card, while avoiding PCI I/O */ 3821 do_vxge_close(netdev, 0); 3822 } 3823 3824 pci_disable_device(pdev); 3825 3826 return PCI_ERS_RESULT_NEED_RESET; 3827} 3828 3829/** 3830 * vxge_io_slot_reset - called after the pci bus has been reset. 3831 * @pdev: Pointer to PCI device 3832 * 3833 * Restart the card from scratch, as if from a cold-boot. 3834 * At this point, the card has exprienced a hard reset, 3835 * followed by fixups by BIOS, and has its config space 3836 * set up identically to what it was at cold boot. 3837 */ 3838static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) 3839{ 3840 struct __vxge_hw_device *hldev = 3841 (struct __vxge_hw_device *) pci_get_drvdata(pdev); 3842 struct net_device *netdev = hldev->ndev; 3843 3844 struct vxgedev *vdev = netdev_priv(netdev); 3845 3846 if (pci_enable_device(pdev)) { 3847 netdev_err(netdev, "Cannot re-enable device after reset\n"); 3848 return PCI_ERS_RESULT_DISCONNECT; 3849 } 3850 3851 pci_set_master(pdev); 3852 vxge_reset(vdev); 3853 3854 return PCI_ERS_RESULT_RECOVERED; 3855} 3856 3857/** 3858 * vxge_io_resume - called when traffic can start flowing again. 3859 * @pdev: Pointer to PCI device 3860 * 3861 * This callback is called when the error recovery driver tells 3862 * us that its OK to resume normal operation. 3863 */ 3864static void vxge_io_resume(struct pci_dev *pdev) 3865{ 3866 struct __vxge_hw_device *hldev = 3867 (struct __vxge_hw_device *) pci_get_drvdata(pdev); 3868 struct net_device *netdev = hldev->ndev; 3869 3870 if (netif_running(netdev)) { 3871 if (vxge_open(netdev)) { 3872 netdev_err(netdev, 3873 "Can't bring device back up after reset\n"); 3874 return; 3875 } 3876 } 3877 3878 netif_device_attach(netdev); 3879} 3880 3881static inline u32 vxge_get_num_vfs(u64 function_mode) 3882{ 3883 u32 num_functions = 0; 3884 3885 switch (function_mode) { 3886 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION: 3887 case VXGE_HW_FUNCTION_MODE_SRIOV_8: 3888 num_functions = 8; 3889 break; 3890 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: 3891 num_functions = 1; 3892 break; 3893 case VXGE_HW_FUNCTION_MODE_SRIOV: 3894 case VXGE_HW_FUNCTION_MODE_MRIOV: 3895 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17: 3896 num_functions = 17; 3897 break; 3898 case VXGE_HW_FUNCTION_MODE_SRIOV_4: 3899 num_functions = 4; 3900 break; 3901 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2: 3902 num_functions = 2; 3903 break; 3904 case VXGE_HW_FUNCTION_MODE_MRIOV_8: 3905 num_functions = 8; /* TODO */ 3906 break; 3907 } 3908 return num_functions; 3909} 3910 3911/** 3912 * vxge_probe 3913 * @pdev : structure containing the PCI related information of the device. 3914 * @pre: List of PCI devices supported by the driver listed in vxge_id_table. 3915 * Description: 3916 * This function is called when a new PCI device gets detected and initializes 3917 * it. 3918 * Return value: 3919 * returns 0 on success and negative on failure. 3920 * 3921 */ 3922static int __devinit 3923vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) 3924{ 3925 struct __vxge_hw_device *hldev; 3926 enum vxge_hw_status status; 3927 int ret; 3928 int high_dma = 0; 3929 u64 vpath_mask = 0; 3930 struct vxgedev *vdev; 3931 struct vxge_config *ll_config = NULL; 3932 struct vxge_hw_device_config *device_config = NULL; 3933 struct vxge_hw_device_attr attr; 3934 int i, j, no_of_vpath = 0, max_vpath_supported = 0; 3935 u8 *macaddr; 3936 struct vxge_mac_addrs *entry; 3937 static int bus = -1, device = -1; 3938 u32 host_type; 3939 u8 new_device = 0; 3940 enum vxge_hw_status is_privileged; 3941 u32 function_mode; 3942 u32 num_vfs = 0; 3943 3944 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3945 attr.pdev = pdev; 3946 3947 /* In SRIOV-17 mode, functions of the same adapter 3948 * can be deployed on different buses */ 3949 if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) || 3950 (device != PCI_SLOT(pdev->devfn)))) 3951 new_device = 1; 3952 3953 bus = pdev->bus->number; 3954 device = PCI_SLOT(pdev->devfn); 3955 3956 if (new_device) { 3957 if (driver_config->config_dev_cnt && 3958 (driver_config->config_dev_cnt != 3959 driver_config->total_dev_cnt)) 3960 vxge_debug_init(VXGE_ERR, 3961 "%s: Configured %d of %d devices", 3962 VXGE_DRIVER_NAME, 3963 driver_config->config_dev_cnt, 3964 driver_config->total_dev_cnt); 3965 driver_config->config_dev_cnt = 0; 3966 driver_config->total_dev_cnt = 0; 3967 } 3968 /* Now making the CPU based no of vpath calculation 3969 * applicable for individual functions as well. 3970 */ 3971 driver_config->g_no_cpus = 0; 3972 driver_config->vpath_per_dev = max_config_vpath; 3973 3974 driver_config->total_dev_cnt++; 3975 if (++driver_config->config_dev_cnt > max_config_dev) { 3976 ret = 0; 3977 goto _exit0; 3978 } 3979 3980 device_config = kzalloc(sizeof(struct vxge_hw_device_config), 3981 GFP_KERNEL); 3982 if (!device_config) { 3983 ret = -ENOMEM; 3984 vxge_debug_init(VXGE_ERR, 3985 "device_config : malloc failed %s %d", 3986 __FILE__, __LINE__); 3987 goto _exit0; 3988 } 3989 3990 ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL); 3991 if (!ll_config) { 3992 ret = -ENOMEM; 3993 vxge_debug_init(VXGE_ERR, 3994 "ll_config : malloc failed %s %d", 3995 __FILE__, __LINE__); 3996 goto _exit0; 3997 } 3998 ll_config->tx_steering_type = TX_MULTIQ_STEERING; 3999 ll_config->intr_type = MSI_X; 4000 ll_config->napi_weight = NEW_NAPI_WEIGHT; 4001 ll_config->rth_steering = RTH_STEERING; 4002 4003 /* get the default configuration parameters */ 4004 vxge_hw_device_config_default_get(device_config); 4005 4006 /* initialize configuration parameters */ 4007 vxge_device_config_init(device_config, &ll_config->intr_type); 4008 4009 ret = pci_enable_device(pdev); 4010 if (ret) { 4011 vxge_debug_init(VXGE_ERR, 4012 "%s : can not enable PCI device", __func__); 4013 goto _exit0; 4014 } 4015 4016 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 4017 vxge_debug_ll_config(VXGE_TRACE, 4018 "%s : using 64bit DMA", __func__); 4019 4020 high_dma = 1; 4021 4022 if (pci_set_consistent_dma_mask(pdev, 4023 DMA_BIT_MASK(64))) { 4024 vxge_debug_init(VXGE_ERR, 4025 "%s : unable to obtain 64bit DMA for " 4026 "consistent allocations", __func__); 4027 ret = -ENOMEM; 4028 goto _exit1; 4029 } 4030 } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 4031 vxge_debug_ll_config(VXGE_TRACE, 4032 "%s : using 32bit DMA", __func__); 4033 } else { 4034 ret = -ENOMEM; 4035 goto _exit1; 4036 } 4037 4038 if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) { 4039 vxge_debug_init(VXGE_ERR, 4040 "%s : request regions failed", __func__); 4041 ret = -ENODEV; 4042 goto _exit1; 4043 } 4044 4045 pci_set_master(pdev); 4046 4047 attr.bar0 = pci_ioremap_bar(pdev, 0); 4048 if (!attr.bar0) { 4049 vxge_debug_init(VXGE_ERR, 4050 "%s : cannot remap io memory bar0", __func__); 4051 ret = -ENODEV; 4052 goto _exit2; 4053 } 4054 vxge_debug_ll_config(VXGE_TRACE, 4055 "pci ioremap bar0: %p:0x%llx", 4056 attr.bar0, 4057 (unsigned long long)pci_resource_start(pdev, 0)); 4058 4059 status = vxge_hw_device_hw_info_get(attr.bar0, 4060 &ll_config->device_hw_info); 4061 if (status != VXGE_HW_OK) { 4062 vxge_debug_init(VXGE_ERR, 4063 "%s: Reading of hardware info failed." 4064 "Please try upgrading the firmware.", VXGE_DRIVER_NAME); 4065 ret = -EINVAL; 4066 goto _exit3; 4067 } 4068 4069 if (ll_config->device_hw_info.fw_version.major != 4070 VXGE_DRIVER_FW_VERSION_MAJOR) { 4071 vxge_debug_init(VXGE_ERR, 4072 "%s: Incorrect firmware version." 4073 "Please upgrade the firmware to version 1.x.x", 4074 VXGE_DRIVER_NAME); 4075 ret = -EINVAL; 4076 goto _exit3; 4077 } 4078 4079 vpath_mask = ll_config->device_hw_info.vpath_mask; 4080 if (vpath_mask == 0) { 4081 vxge_debug_ll_config(VXGE_TRACE, 4082 "%s: No vpaths available in device", VXGE_DRIVER_NAME); 4083 ret = -EINVAL; 4084 goto _exit3; 4085 } 4086 4087 vxge_debug_ll_config(VXGE_TRACE, 4088 "%s:%d Vpath mask = %llx", __func__, __LINE__, 4089 (unsigned long long)vpath_mask); 4090 4091 function_mode = ll_config->device_hw_info.function_mode; 4092 host_type = ll_config->device_hw_info.host_type; 4093 is_privileged = __vxge_hw_device_is_privilaged(host_type, 4094 ll_config->device_hw_info.func_id); 4095 4096 /* Check how many vpaths are available */ 4097 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 4098 if (!((vpath_mask) & vxge_mBIT(i))) 4099 continue; 4100 max_vpath_supported++; 4101 } 4102 4103 if (new_device) 4104 num_vfs = vxge_get_num_vfs(function_mode) - 1; 4105 4106 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ 4107 if (is_sriov(function_mode) && (max_config_dev > 1) && 4108 (ll_config->intr_type != INTA) && 4109 (is_privileged == VXGE_HW_OK)) { 4110 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs) 4111 ? (max_config_dev - 1) : num_vfs); 4112 if (ret) 4113 vxge_debug_ll_config(VXGE_ERR, 4114 "Failed in enabling SRIOV mode: %d\n", ret); 4115 } 4116 4117 /* 4118 * Configure vpaths and get driver configured number of vpaths 4119 * which is less than or equal to the maximum vpaths per function. 4120 */ 4121 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config); 4122 if (!no_of_vpath) { 4123 vxge_debug_ll_config(VXGE_ERR, 4124 "%s: No more vpaths to configure", VXGE_DRIVER_NAME); 4125 ret = 0; 4126 goto _exit3; 4127 } 4128 4129 /* Setting driver callbacks */ 4130 attr.uld_callbacks.link_up = vxge_callback_link_up; 4131 attr.uld_callbacks.link_down = vxge_callback_link_down; 4132 attr.uld_callbacks.crit_err = vxge_callback_crit_err; 4133 4134 status = vxge_hw_device_initialize(&hldev, &attr, device_config); 4135 if (status != VXGE_HW_OK) { 4136 vxge_debug_init(VXGE_ERR, 4137 "Failed to initialize device (%d)", status); 4138 ret = -EINVAL; 4139 goto _exit3; 4140 } 4141 4142 /* if FCS stripping is not disabled in MAC fail driver load */ 4143 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) { 4144 vxge_debug_init(VXGE_ERR, 4145 "%s: FCS stripping is not disabled in MAC" 4146 " failing driver load", VXGE_DRIVER_NAME); 4147 ret = -EINVAL; 4148 goto _exit4; 4149 } 4150 4151 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); 4152 4153 /* set private device info */ 4154 pci_set_drvdata(pdev, hldev); 4155 4156 ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE; 4157 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; 4158 ll_config->addr_learn_en = addr_learn_en; 4159 ll_config->rth_algorithm = RTH_ALG_JENKINS; 4160 ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; 4161 ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; 4162 ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4163 ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4164 ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4165 ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4166 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; 4167 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4168 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4169 4170 if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, 4171 &vdev)) { 4172 ret = -EINVAL; 4173 goto _exit4; 4174 } 4175 4176 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); 4177 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), 4178 vxge_hw_device_trace_level_get(hldev)); 4179 4180 /* set private HW device info */ 4181 hldev->ndev = vdev->ndev; 4182 vdev->mtu = VXGE_HW_DEFAULT_MTU; 4183 vdev->bar0 = attr.bar0; 4184 vdev->max_vpath_supported = max_vpath_supported; 4185 vdev->no_of_vpath = no_of_vpath; 4186 4187 /* Virtual Path count */ 4188 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 4189 if (!vxge_bVALn(vpath_mask, i, 1)) 4190 continue; 4191 if (j >= vdev->no_of_vpath) 4192 break; 4193 4194 vdev->vpaths[j].is_configured = 1; 4195 vdev->vpaths[j].device_id = i; 4196 vdev->vpaths[j].ring.driver_id = j; 4197 vdev->vpaths[j].vdev = vdev; 4198 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath; 4199 memcpy((u8 *)vdev->vpaths[j].macaddr, 4200 ll_config->device_hw_info.mac_addrs[i], 4201 ETH_ALEN); 4202 4203 /* Initialize the mac address list header */ 4204 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list); 4205 4206 vdev->vpaths[j].mac_addr_cnt = 0; 4207 vdev->vpaths[j].mcast_addr_cnt = 0; 4208 j++; 4209 } 4210 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE; 4211 vdev->max_config_port = max_config_port; 4212 4213 vdev->vlan_tag_strip = vlan_tag_strip; 4214 4215 /* map the hashing selector table to the configured vpaths */ 4216 for (i = 0; i < vdev->no_of_vpath; i++) 4217 vdev->vpath_selector[i] = vpath_selector[i]; 4218 4219 macaddr = (u8 *)vdev->vpaths[0].macaddr; 4220 4221 ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0'; 4222 ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0'; 4223 ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0'; 4224 4225 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s", 4226 vdev->ndev->name, ll_config->device_hw_info.serial_number); 4227 4228 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s", 4229 vdev->ndev->name, ll_config->device_hw_info.part_number); 4230 4231 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter", 4232 vdev->ndev->name, ll_config->device_hw_info.product_desc); 4233 4234 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM", 4235 vdev->ndev->name, macaddr); 4236 4237 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d", 4238 vdev->ndev->name, vxge_hw_device_link_width_get(hldev)); 4239 4240 vxge_debug_init(VXGE_TRACE, 4241 "%s: Firmware version : %s Date : %s", vdev->ndev->name, 4242 ll_config->device_hw_info.fw_version.version, 4243 ll_config->device_hw_info.fw_date.date); 4244 4245 if (new_device) { 4246 switch (ll_config->device_hw_info.function_mode) { 4247 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION: 4248 vxge_debug_init(VXGE_TRACE, 4249 "%s: Single Function Mode Enabled", vdev->ndev->name); 4250 break; 4251 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION: 4252 vxge_debug_init(VXGE_TRACE, 4253 "%s: Multi Function Mode Enabled", vdev->ndev->name); 4254 break; 4255 case VXGE_HW_FUNCTION_MODE_SRIOV: 4256 vxge_debug_init(VXGE_TRACE, 4257 "%s: Single Root IOV Mode Enabled", vdev->ndev->name); 4258 break; 4259 case VXGE_HW_FUNCTION_MODE_MRIOV: 4260 vxge_debug_init(VXGE_TRACE, 4261 "%s: Multi Root IOV Mode Enabled", vdev->ndev->name); 4262 break; 4263 } 4264 } 4265 4266 vxge_print_parm(vdev, vpath_mask); 4267 4268 /* Store the fw version for ethttool option */ 4269 strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version); 4270 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN); 4271 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN); 4272 4273 /* Copy the station mac address to the list */ 4274 for (i = 0; i < vdev->no_of_vpath; i++) { 4275 entry = (struct vxge_mac_addrs *) 4276 kzalloc(sizeof(struct vxge_mac_addrs), 4277 GFP_KERNEL); 4278 if (NULL == entry) { 4279 vxge_debug_init(VXGE_ERR, 4280 "%s: mac_addr_list : memory allocation failed", 4281 vdev->ndev->name); 4282 ret = -EPERM; 4283 goto _exit5; 4284 } 4285 macaddr = (u8 *)&entry->macaddr; 4286 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); 4287 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list); 4288 vdev->vpaths[i].mac_addr_cnt = 1; 4289 } 4290 4291 kfree(device_config); 4292 4293 /* 4294 * INTA is shared in multi-function mode. This is unlike the INTA 4295 * implementation in MR mode, where each VH has its own INTA message. 4296 * - INTA is masked (disabled) as long as at least one function sets 4297 * its TITAN_MASK_ALL_INT.ALARM bit. 4298 * - INTA is unmasked (enabled) when all enabled functions have cleared 4299 * their own TITAN_MASK_ALL_INT.ALARM bit. 4300 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up. 4301 * Though this driver leaves the top level interrupts unmasked while 4302 * leaving the required module interrupt bits masked on exit, there 4303 * could be a rougue driver around that does not follow this procedure 4304 * resulting in a failure to generate interrupts. The following code is 4305 * present to prevent such a failure. 4306 */ 4307 4308 if (ll_config->device_hw_info.function_mode == 4309 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) 4310 if (vdev->config.intr_type == INTA) 4311 vxge_hw_device_unmask_all(hldev); 4312 4313 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", 4314 vdev->ndev->name, __func__, __LINE__); 4315 4316 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); 4317 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), 4318 vxge_hw_device_trace_level_get(hldev)); 4319 4320 kfree(ll_config); 4321 return 0; 4322 4323_exit5: 4324 for (i = 0; i < vdev->no_of_vpath; i++) 4325 vxge_free_mac_add_list(&vdev->vpaths[i]); 4326 4327 vxge_device_unregister(hldev); 4328_exit4: 4329 pci_disable_sriov(pdev); 4330 vxge_hw_device_terminate(hldev); 4331_exit3: 4332 iounmap(attr.bar0); 4333_exit2: 4334 pci_release_regions(pdev); 4335_exit1: 4336 pci_disable_device(pdev); 4337_exit0: 4338 kfree(ll_config); 4339 kfree(device_config); 4340 driver_config->config_dev_cnt--; 4341 pci_set_drvdata(pdev, NULL); 4342 return ret; 4343} 4344 4345/** 4346 * vxge_rem_nic - Free the PCI device 4347 * @pdev: structure containing the PCI related information of the device. 4348 * Description: This function is called by the Pci subsystem to release a 4349 * PCI device and free up all resource held up by the device. 4350 */ 4351static void __devexit 4352vxge_remove(struct pci_dev *pdev) 4353{ 4354 struct __vxge_hw_device *hldev; 4355 struct vxgedev *vdev = NULL; 4356 struct net_device *dev; 4357 int i = 0; 4358#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) 4359 u32 level_trace; 4360#endif 4361 4362 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev); 4363 4364 if (hldev == NULL) 4365 return; 4366 dev = hldev->ndev; 4367 vdev = netdev_priv(dev); 4368 4369#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)) 4370 level_trace = vdev->level_trace; 4371#endif 4372 vxge_debug_entryexit(level_trace, 4373 "%s:%d", __func__, __LINE__); 4374 4375 vxge_debug_init(level_trace, 4376 "%s : removing PCI device...", __func__); 4377 vxge_device_unregister(hldev); 4378 4379 for (i = 0; i < vdev->no_of_vpath; i++) { 4380 vxge_free_mac_add_list(&vdev->vpaths[i]); 4381 vdev->vpaths[i].mcast_addr_cnt = 0; 4382 vdev->vpaths[i].mac_addr_cnt = 0; 4383 } 4384 4385 kfree(vdev->vpaths); 4386 4387 iounmap(vdev->bar0); 4388 4389 pci_disable_sriov(pdev); 4390 4391 /* we are safe to free it now */ 4392 free_netdev(dev); 4393 4394 vxge_debug_init(level_trace, 4395 "%s:%d Device unregistered", __func__, __LINE__); 4396 4397 vxge_hw_device_terminate(hldev); 4398 4399 pci_disable_device(pdev); 4400 pci_release_regions(pdev); 4401 pci_set_drvdata(pdev, NULL); 4402 vxge_debug_entryexit(level_trace, 4403 "%s:%d Exiting...", __func__, __LINE__); 4404} 4405 4406static struct pci_error_handlers vxge_err_handler = { 4407 .error_detected = vxge_io_error_detected, 4408 .slot_reset = vxge_io_slot_reset, 4409 .resume = vxge_io_resume, 4410}; 4411 4412static struct pci_driver vxge_driver = { 4413 .name = VXGE_DRIVER_NAME, 4414 .id_table = vxge_id_table, 4415 .probe = vxge_probe, 4416 .remove = __devexit_p(vxge_remove), 4417#ifdef CONFIG_PM 4418 .suspend = vxge_pm_suspend, 4419 .resume = vxge_pm_resume, 4420#endif 4421 .err_handler = &vxge_err_handler, 4422}; 4423 4424static int __init 4425vxge_starter(void) 4426{ 4427 int ret = 0; 4428 4429 pr_info("Copyright(c) 2002-2010 Exar Corp.\n"); 4430 pr_info("Driver version: %s\n", DRV_VERSION); 4431 4432 verify_bandwidth(); 4433 4434 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL); 4435 if (!driver_config) 4436 return -ENOMEM; 4437 4438 ret = pci_register_driver(&vxge_driver); 4439 4440 if (driver_config->config_dev_cnt && 4441 (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) 4442 vxge_debug_init(VXGE_ERR, 4443 "%s: Configured %d of %d devices", 4444 VXGE_DRIVER_NAME, driver_config->config_dev_cnt, 4445 driver_config->total_dev_cnt); 4446 4447 if (ret) 4448 kfree(driver_config); 4449 4450 return ret; 4451} 4452 4453static void __exit 4454vxge_closer(void) 4455{ 4456 pci_unregister_driver(&vxge_driver); 4457 kfree(driver_config); 4458} 4459module_init(vxge_starter); 4460module_exit(vxge_closer); 4461