1/* cnic.c: Broadcom CNIC core network driver. 2 * 3 * Copyright (c) 2006-2010 Broadcom Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation. 8 * 9 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com) 10 * Modified and maintained by: Michael Chan <mchan@broadcom.com> 11 */ 12 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15#include <linux/module.h> 16 17#include <linux/kernel.h> 18#include <linux/errno.h> 19#include <linux/list.h> 20#include <linux/slab.h> 21#include <linux/pci.h> 22#include <linux/init.h> 23#include <linux/netdevice.h> 24#include <linux/uio_driver.h> 25#include <linux/in.h> 26#include <linux/dma-mapping.h> 27#include <linux/delay.h> 28#include <linux/ethtool.h> 29#include <linux/if_vlan.h> 30#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 31#define BCM_VLAN 1 32#endif 33#include <net/ip.h> 34#include <net/tcp.h> 35#include <net/route.h> 36#include <net/ipv6.h> 37#include <net/ip6_route.h> 38#include <net/ip6_checksum.h> 39#include <scsi/iscsi_if.h> 40 41#include "cnic_if.h" 42#include "bnx2.h" 43#include "bnx2x/bnx2x_reg.h" 44#include "bnx2x/bnx2x_fw_defs.h" 45#include "bnx2x/bnx2x_hsi.h" 46#include "../scsi/bnx2i/57xx_iscsi_constants.h" 47#include "../scsi/bnx2i/57xx_iscsi_hsi.h" 48#include "cnic.h" 49#include "cnic_defs.h" 50 51#define DRV_MODULE_NAME "cnic" 52 53static char version[] __devinitdata = 54 "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; 55 56MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) " 57 "Chen (zongxi@broadcom.com"); 58MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver"); 59MODULE_LICENSE("GPL"); 60MODULE_VERSION(CNIC_MODULE_VERSION); 61 62static LIST_HEAD(cnic_dev_list); 63static DEFINE_RWLOCK(cnic_dev_lock); 64static DEFINE_MUTEX(cnic_lock); 65 66static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; 67 68static int cnic_service_bnx2(void *, void *); 69static int cnic_service_bnx2x(void *, void *); 70static int cnic_ctl(void *, struct cnic_ctl_info *); 71 72static struct cnic_ops cnic_bnx2_ops = { 73 .cnic_owner = THIS_MODULE, 74 .cnic_handler = cnic_service_bnx2, 75 .cnic_ctl = cnic_ctl, 76}; 77 78static struct cnic_ops cnic_bnx2x_ops = { 79 .cnic_owner = THIS_MODULE, 80 .cnic_handler = cnic_service_bnx2x, 81 .cnic_ctl = cnic_ctl, 82}; 83 84static void cnic_shutdown_rings(struct cnic_dev *); 85static void cnic_init_rings(struct cnic_dev *); 86static int cnic_cm_set_pg(struct cnic_sock *); 87 88static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) 89{ 90 struct cnic_dev *dev = uinfo->priv; 91 struct cnic_local *cp = dev->cnic_priv; 92 93 if (!capable(CAP_NET_ADMIN)) 94 return -EPERM; 95 96 if (cp->uio_dev != -1) 97 return -EBUSY; 98 99 rtnl_lock(); 100 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 101 rtnl_unlock(); 102 return -ENODEV; 103 } 104 105 cp->uio_dev = iminor(inode); 106 107 cnic_init_rings(dev); 108 rtnl_unlock(); 109 110 return 0; 111} 112 113static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode) 114{ 115 struct cnic_dev *dev = uinfo->priv; 116 struct cnic_local *cp = dev->cnic_priv; 117 118 cnic_shutdown_rings(dev); 119 120 cp->uio_dev = -1; 121 return 0; 122} 123 124static inline void cnic_hold(struct cnic_dev *dev) 125{ 126 atomic_inc(&dev->ref_count); 127} 128 129static inline void cnic_put(struct cnic_dev *dev) 130{ 131 atomic_dec(&dev->ref_count); 132} 133 134static inline void csk_hold(struct cnic_sock *csk) 135{ 136 atomic_inc(&csk->ref_count); 137} 138 139static inline void csk_put(struct cnic_sock *csk) 140{ 141 atomic_dec(&csk->ref_count); 142} 143 144static struct cnic_dev *cnic_from_netdev(struct net_device *netdev) 145{ 146 struct cnic_dev *cdev; 147 148 read_lock(&cnic_dev_lock); 149 list_for_each_entry(cdev, &cnic_dev_list, list) { 150 if (netdev == cdev->netdev) { 151 cnic_hold(cdev); 152 read_unlock(&cnic_dev_lock); 153 return cdev; 154 } 155 } 156 read_unlock(&cnic_dev_lock); 157 return NULL; 158} 159 160static inline void ulp_get(struct cnic_ulp_ops *ulp_ops) 161{ 162 atomic_inc(&ulp_ops->ref_count); 163} 164 165static inline void ulp_put(struct cnic_ulp_ops *ulp_ops) 166{ 167 atomic_dec(&ulp_ops->ref_count); 168} 169 170static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) 171{ 172 struct cnic_local *cp = dev->cnic_priv; 173 struct cnic_eth_dev *ethdev = cp->ethdev; 174 struct drv_ctl_info info; 175 struct drv_ctl_io *io = &info.data.io; 176 177 info.cmd = DRV_CTL_CTX_WR_CMD; 178 io->cid_addr = cid_addr; 179 io->offset = off; 180 io->data = val; 181 ethdev->drv_ctl(dev->netdev, &info); 182} 183 184static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr) 185{ 186 struct cnic_local *cp = dev->cnic_priv; 187 struct cnic_eth_dev *ethdev = cp->ethdev; 188 struct drv_ctl_info info; 189 struct drv_ctl_io *io = &info.data.io; 190 191 info.cmd = DRV_CTL_CTXTBL_WR_CMD; 192 io->offset = off; 193 io->dma_addr = addr; 194 ethdev->drv_ctl(dev->netdev, &info); 195} 196 197static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start) 198{ 199 struct cnic_local *cp = dev->cnic_priv; 200 struct cnic_eth_dev *ethdev = cp->ethdev; 201 struct drv_ctl_info info; 202 struct drv_ctl_l2_ring *ring = &info.data.ring; 203 204 if (start) 205 info.cmd = DRV_CTL_START_L2_CMD; 206 else 207 info.cmd = DRV_CTL_STOP_L2_CMD; 208 209 ring->cid = cid; 210 ring->client_id = cl_id; 211 ethdev->drv_ctl(dev->netdev, &info); 212} 213 214static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) 215{ 216 struct cnic_local *cp = dev->cnic_priv; 217 struct cnic_eth_dev *ethdev = cp->ethdev; 218 struct drv_ctl_info info; 219 struct drv_ctl_io *io = &info.data.io; 220 221 info.cmd = DRV_CTL_IO_WR_CMD; 222 io->offset = off; 223 io->data = val; 224 ethdev->drv_ctl(dev->netdev, &info); 225} 226 227static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off) 228{ 229 struct cnic_local *cp = dev->cnic_priv; 230 struct cnic_eth_dev *ethdev = cp->ethdev; 231 struct drv_ctl_info info; 232 struct drv_ctl_io *io = &info.data.io; 233 234 info.cmd = DRV_CTL_IO_RD_CMD; 235 io->offset = off; 236 ethdev->drv_ctl(dev->netdev, &info); 237 return io->data; 238} 239 240static int cnic_in_use(struct cnic_sock *csk) 241{ 242 return test_bit(SK_F_INUSE, &csk->flags); 243} 244 245static void cnic_kwq_completion(struct cnic_dev *dev, u32 count) 246{ 247 struct cnic_local *cp = dev->cnic_priv; 248 struct cnic_eth_dev *ethdev = cp->ethdev; 249 struct drv_ctl_info info; 250 251 info.cmd = DRV_CTL_COMPLETION_CMD; 252 info.data.comp.comp_count = count; 253 ethdev->drv_ctl(dev->netdev, &info); 254} 255 256static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid) 257{ 258 u32 i; 259 260 for (i = 0; i < cp->max_cid_space; i++) { 261 if (cp->ctx_tbl[i].cid == cid) { 262 *l5_cid = i; 263 return 0; 264 } 265 } 266 return -EINVAL; 267} 268 269static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, 270 struct cnic_sock *csk) 271{ 272 struct iscsi_path path_req; 273 char *buf = NULL; 274 u16 len = 0; 275 u32 msg_type = ISCSI_KEVENT_IF_DOWN; 276 struct cnic_ulp_ops *ulp_ops; 277 278 if (cp->uio_dev == -1) 279 return -ENODEV; 280 281 if (csk) { 282 len = sizeof(path_req); 283 buf = (char *) &path_req; 284 memset(&path_req, 0, len); 285 286 msg_type = ISCSI_KEVENT_PATH_REQ; 287 path_req.handle = (u64) csk->l5_cid; 288 if (test_bit(SK_F_IPV6, &csk->flags)) { 289 memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0], 290 sizeof(struct in6_addr)); 291 path_req.ip_addr_len = 16; 292 } else { 293 memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0], 294 sizeof(struct in_addr)); 295 path_req.ip_addr_len = 4; 296 } 297 path_req.vlan_id = csk->vlan_id; 298 path_req.pmtu = csk->mtu; 299 } 300 301 rcu_read_lock(); 302 ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); 303 if (ulp_ops) 304 ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len); 305 rcu_read_unlock(); 306 return 0; 307} 308 309static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, 310 char *buf, u16 len) 311{ 312 int rc = -EINVAL; 313 314 switch (msg_type) { 315 case ISCSI_UEVENT_PATH_UPDATE: { 316 struct cnic_local *cp; 317 u32 l5_cid; 318 struct cnic_sock *csk; 319 struct iscsi_path *path_resp; 320 321 if (len < sizeof(*path_resp)) 322 break; 323 324 path_resp = (struct iscsi_path *) buf; 325 cp = dev->cnic_priv; 326 l5_cid = (u32) path_resp->handle; 327 if (l5_cid >= MAX_CM_SK_TBL_SZ) 328 break; 329 330 rcu_read_lock(); 331 if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) { 332 rc = -ENODEV; 333 rcu_read_unlock(); 334 break; 335 } 336 csk = &cp->csk_tbl[l5_cid]; 337 csk_hold(csk); 338 if (cnic_in_use(csk)) { 339 memcpy(csk->ha, path_resp->mac_addr, 6); 340 if (test_bit(SK_F_IPV6, &csk->flags)) 341 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr, 342 sizeof(struct in6_addr)); 343 else 344 memcpy(&csk->src_ip[0], &path_resp->src.v4_addr, 345 sizeof(struct in_addr)); 346 if (is_valid_ether_addr(csk->ha)) 347 cnic_cm_set_pg(csk); 348 } 349 csk_put(csk); 350 rcu_read_unlock(); 351 rc = 0; 352 } 353 } 354 355 return rc; 356} 357 358static int cnic_offld_prep(struct cnic_sock *csk) 359{ 360 if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) 361 return 0; 362 363 if (!test_bit(SK_F_CONNECT_START, &csk->flags)) { 364 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 365 return 0; 366 } 367 368 return 1; 369} 370 371static int cnic_close_prep(struct cnic_sock *csk) 372{ 373 clear_bit(SK_F_CONNECT_START, &csk->flags); 374 smp_mb__after_clear_bit(); 375 376 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 377 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) 378 msleep(1); 379 380 return 1; 381 } 382 return 0; 383} 384 385static int cnic_abort_prep(struct cnic_sock *csk) 386{ 387 clear_bit(SK_F_CONNECT_START, &csk->flags); 388 smp_mb__after_clear_bit(); 389 390 while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags)) 391 msleep(1); 392 393 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 394 csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP; 395 return 1; 396 } 397 398 return 0; 399} 400 401static void cnic_uio_stop(void) 402{ 403 struct cnic_dev *dev; 404 405 read_lock(&cnic_dev_lock); 406 list_for_each_entry(dev, &cnic_dev_list, list) { 407 struct cnic_local *cp = dev->cnic_priv; 408 409 if (cp->cnic_uinfo) 410 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 411 } 412 read_unlock(&cnic_dev_lock); 413} 414 415int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) 416{ 417 struct cnic_dev *dev; 418 419 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 420 pr_err("%s: Bad type %d\n", __func__, ulp_type); 421 return -EINVAL; 422 } 423 mutex_lock(&cnic_lock); 424 if (cnic_ulp_tbl[ulp_type]) { 425 pr_err("%s: Type %d has already been registered\n", 426 __func__, ulp_type); 427 mutex_unlock(&cnic_lock); 428 return -EBUSY; 429 } 430 431 read_lock(&cnic_dev_lock); 432 list_for_each_entry(dev, &cnic_dev_list, list) { 433 struct cnic_local *cp = dev->cnic_priv; 434 435 clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]); 436 } 437 read_unlock(&cnic_dev_lock); 438 439 atomic_set(&ulp_ops->ref_count, 0); 440 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops); 441 mutex_unlock(&cnic_lock); 442 443 /* Prevent race conditions with netdev_event */ 444 rtnl_lock(); 445 read_lock(&cnic_dev_lock); 446 list_for_each_entry(dev, &cnic_dev_list, list) { 447 struct cnic_local *cp = dev->cnic_priv; 448 449 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type])) 450 ulp_ops->cnic_init(dev); 451 } 452 read_unlock(&cnic_dev_lock); 453 rtnl_unlock(); 454 455 return 0; 456} 457 458int cnic_unregister_driver(int ulp_type) 459{ 460 struct cnic_dev *dev; 461 struct cnic_ulp_ops *ulp_ops; 462 int i = 0; 463 464 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 465 pr_err("%s: Bad type %d\n", __func__, ulp_type); 466 return -EINVAL; 467 } 468 mutex_lock(&cnic_lock); 469 ulp_ops = cnic_ulp_tbl[ulp_type]; 470 if (!ulp_ops) { 471 pr_err("%s: Type %d has not been registered\n", 472 __func__, ulp_type); 473 goto out_unlock; 474 } 475 read_lock(&cnic_dev_lock); 476 list_for_each_entry(dev, &cnic_dev_list, list) { 477 struct cnic_local *cp = dev->cnic_priv; 478 479 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 480 pr_err("%s: Type %d still has devices registered\n", 481 __func__, ulp_type); 482 read_unlock(&cnic_dev_lock); 483 goto out_unlock; 484 } 485 } 486 read_unlock(&cnic_dev_lock); 487 488 if (ulp_type == CNIC_ULP_ISCSI) 489 cnic_uio_stop(); 490 491 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); 492 493 mutex_unlock(&cnic_lock); 494 synchronize_rcu(); 495 while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) { 496 msleep(100); 497 i++; 498 } 499 500 if (atomic_read(&ulp_ops->ref_count) != 0) 501 netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n"); 502 return 0; 503 504out_unlock: 505 mutex_unlock(&cnic_lock); 506 return -EINVAL; 507} 508 509static int cnic_start_hw(struct cnic_dev *); 510static void cnic_stop_hw(struct cnic_dev *); 511 512static int cnic_register_device(struct cnic_dev *dev, int ulp_type, 513 void *ulp_ctx) 514{ 515 struct cnic_local *cp = dev->cnic_priv; 516 struct cnic_ulp_ops *ulp_ops; 517 518 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 519 pr_err("%s: Bad type %d\n", __func__, ulp_type); 520 return -EINVAL; 521 } 522 mutex_lock(&cnic_lock); 523 if (cnic_ulp_tbl[ulp_type] == NULL) { 524 pr_err("%s: Driver with type %d has not been registered\n", 525 __func__, ulp_type); 526 mutex_unlock(&cnic_lock); 527 return -EAGAIN; 528 } 529 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 530 pr_err("%s: Type %d has already been registered to this device\n", 531 __func__, ulp_type); 532 mutex_unlock(&cnic_lock); 533 return -EBUSY; 534 } 535 536 clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]); 537 cp->ulp_handle[ulp_type] = ulp_ctx; 538 ulp_ops = cnic_ulp_tbl[ulp_type]; 539 rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); 540 cnic_hold(dev); 541 542 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) 543 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type])) 544 ulp_ops->cnic_start(cp->ulp_handle[ulp_type]); 545 546 mutex_unlock(&cnic_lock); 547 548 return 0; 549 550} 551EXPORT_SYMBOL(cnic_register_driver); 552 553static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) 554{ 555 struct cnic_local *cp = dev->cnic_priv; 556 int i = 0; 557 558 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) { 559 pr_err("%s: Bad type %d\n", __func__, ulp_type); 560 return -EINVAL; 561 } 562 mutex_lock(&cnic_lock); 563 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 564 rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL); 565 cnic_put(dev); 566 } else { 567 pr_err("%s: device not registered to this ulp type %d\n", 568 __func__, ulp_type); 569 mutex_unlock(&cnic_lock); 570 return -EINVAL; 571 } 572 mutex_unlock(&cnic_lock); 573 574 synchronize_rcu(); 575 576 while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) && 577 i < 20) { 578 msleep(100); 579 i++; 580 } 581 if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type])) 582 netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n"); 583 584 return 0; 585} 586EXPORT_SYMBOL(cnic_unregister_driver); 587 588static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id) 589{ 590 id_tbl->start = start_id; 591 id_tbl->max = size; 592 id_tbl->next = 0; 593 spin_lock_init(&id_tbl->lock); 594 id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL); 595 if (!id_tbl->table) 596 return -ENOMEM; 597 598 return 0; 599} 600 601static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl) 602{ 603 kfree(id_tbl->table); 604 id_tbl->table = NULL; 605} 606 607static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id) 608{ 609 int ret = -1; 610 611 id -= id_tbl->start; 612 if (id >= id_tbl->max) 613 return ret; 614 615 spin_lock(&id_tbl->lock); 616 if (!test_bit(id, id_tbl->table)) { 617 set_bit(id, id_tbl->table); 618 ret = 0; 619 } 620 spin_unlock(&id_tbl->lock); 621 return ret; 622} 623 624/* Returns -1 if not successful */ 625static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl) 626{ 627 u32 id; 628 629 spin_lock(&id_tbl->lock); 630 id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next); 631 if (id >= id_tbl->max) { 632 id = -1; 633 if (id_tbl->next != 0) { 634 id = find_first_zero_bit(id_tbl->table, id_tbl->next); 635 if (id >= id_tbl->next) 636 id = -1; 637 } 638 } 639 640 if (id < id_tbl->max) { 641 set_bit(id, id_tbl->table); 642 id_tbl->next = (id + 1) & (id_tbl->max - 1); 643 id += id_tbl->start; 644 } 645 646 spin_unlock(&id_tbl->lock); 647 648 return id; 649} 650 651static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id) 652{ 653 if (id == -1) 654 return; 655 656 id -= id_tbl->start; 657 if (id >= id_tbl->max) 658 return; 659 660 clear_bit(id, id_tbl->table); 661} 662 663static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) 664{ 665 int i; 666 667 if (!dma->pg_arr) 668 return; 669 670 for (i = 0; i < dma->num_pages; i++) { 671 if (dma->pg_arr[i]) { 672 dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE, 673 dma->pg_arr[i], dma->pg_map_arr[i]); 674 dma->pg_arr[i] = NULL; 675 } 676 } 677 if (dma->pgtbl) { 678 dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size, 679 dma->pgtbl, dma->pgtbl_map); 680 dma->pgtbl = NULL; 681 } 682 kfree(dma->pg_arr); 683 dma->pg_arr = NULL; 684 dma->num_pages = 0; 685} 686 687static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma) 688{ 689 int i; 690 u32 *page_table = dma->pgtbl; 691 692 for (i = 0; i < dma->num_pages; i++) { 693 /* Each entry needs to be in big endian format. */ 694 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 695 page_table++; 696 *page_table = (u32) dma->pg_map_arr[i]; 697 page_table++; 698 } 699} 700 701static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma) 702{ 703 int i; 704 u32 *page_table = dma->pgtbl; 705 706 for (i = 0; i < dma->num_pages; i++) { 707 /* Each entry needs to be in little endian format. */ 708 *page_table = dma->pg_map_arr[i] & 0xffffffff; 709 page_table++; 710 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32); 711 page_table++; 712 } 713} 714 715static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, 716 int pages, int use_pg_tbl) 717{ 718 int i, size; 719 struct cnic_local *cp = dev->cnic_priv; 720 721 size = pages * (sizeof(void *) + sizeof(dma_addr_t)); 722 dma->pg_arr = kzalloc(size, GFP_ATOMIC); 723 if (dma->pg_arr == NULL) 724 return -ENOMEM; 725 726 dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages); 727 dma->num_pages = pages; 728 729 for (i = 0; i < pages; i++) { 730 dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, 731 BCM_PAGE_SIZE, 732 &dma->pg_map_arr[i], 733 GFP_ATOMIC); 734 if (dma->pg_arr[i] == NULL) 735 goto error; 736 } 737 if (!use_pg_tbl) 738 return 0; 739 740 dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) & 741 ~(BCM_PAGE_SIZE - 1); 742 dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, 743 &dma->pgtbl_map, GFP_ATOMIC); 744 if (dma->pgtbl == NULL) 745 goto error; 746 747 cp->setup_pgtbl(dev, dma); 748 749 return 0; 750 751error: 752 cnic_free_dma(dev, dma); 753 return -ENOMEM; 754} 755 756static void cnic_free_context(struct cnic_dev *dev) 757{ 758 struct cnic_local *cp = dev->cnic_priv; 759 int i; 760 761 for (i = 0; i < cp->ctx_blks; i++) { 762 if (cp->ctx_arr[i].ctx) { 763 dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size, 764 cp->ctx_arr[i].ctx, 765 cp->ctx_arr[i].mapping); 766 cp->ctx_arr[i].ctx = NULL; 767 } 768 } 769} 770 771static void cnic_free_resc(struct cnic_dev *dev) 772{ 773 struct cnic_local *cp = dev->cnic_priv; 774 int i = 0; 775 776 if (cp->cnic_uinfo) { 777 while (cp->uio_dev != -1 && i < 15) { 778 msleep(100); 779 i++; 780 } 781 uio_unregister_device(cp->cnic_uinfo); 782 kfree(cp->cnic_uinfo); 783 cp->cnic_uinfo = NULL; 784 } 785 786 if (cp->l2_buf) { 787 dma_free_coherent(&dev->pcidev->dev, cp->l2_buf_size, 788 cp->l2_buf, cp->l2_buf_map); 789 cp->l2_buf = NULL; 790 } 791 792 if (cp->l2_ring) { 793 dma_free_coherent(&dev->pcidev->dev, cp->l2_ring_size, 794 cp->l2_ring, cp->l2_ring_map); 795 cp->l2_ring = NULL; 796 } 797 798 cnic_free_context(dev); 799 kfree(cp->ctx_arr); 800 cp->ctx_arr = NULL; 801 cp->ctx_blks = 0; 802 803 cnic_free_dma(dev, &cp->gbl_buf_info); 804 cnic_free_dma(dev, &cp->conn_buf_info); 805 cnic_free_dma(dev, &cp->kwq_info); 806 cnic_free_dma(dev, &cp->kwq_16_data_info); 807 cnic_free_dma(dev, &cp->kcq1.dma); 808 kfree(cp->iscsi_tbl); 809 cp->iscsi_tbl = NULL; 810 kfree(cp->ctx_tbl); 811 cp->ctx_tbl = NULL; 812 813 cnic_free_id_tbl(&cp->cid_tbl); 814} 815 816static int cnic_alloc_context(struct cnic_dev *dev) 817{ 818 struct cnic_local *cp = dev->cnic_priv; 819 820 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 821 int i, k, arr_size; 822 823 cp->ctx_blk_size = BCM_PAGE_SIZE; 824 cp->cids_per_blk = BCM_PAGE_SIZE / 128; 825 arr_size = BNX2_MAX_CID / cp->cids_per_blk * 826 sizeof(struct cnic_ctx); 827 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); 828 if (cp->ctx_arr == NULL) 829 return -ENOMEM; 830 831 k = 0; 832 for (i = 0; i < 2; i++) { 833 u32 j, reg, off, lo, hi; 834 835 if (i == 0) 836 off = BNX2_PG_CTX_MAP; 837 else 838 off = BNX2_ISCSI_CTX_MAP; 839 840 reg = cnic_reg_rd_ind(dev, off); 841 lo = reg >> 16; 842 hi = reg & 0xffff; 843 for (j = lo; j < hi; j += cp->cids_per_blk, k++) 844 cp->ctx_arr[k].cid = j; 845 } 846 847 cp->ctx_blks = k; 848 if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) { 849 cp->ctx_blks = 0; 850 return -ENOMEM; 851 } 852 853 for (i = 0; i < cp->ctx_blks; i++) { 854 cp->ctx_arr[i].ctx = 855 dma_alloc_coherent(&dev->pcidev->dev, 856 BCM_PAGE_SIZE, 857 &cp->ctx_arr[i].mapping, 858 GFP_KERNEL); 859 if (cp->ctx_arr[i].ctx == NULL) 860 return -ENOMEM; 861 } 862 } 863 return 0; 864} 865 866static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info) 867{ 868 int err, i, is_bnx2 = 0; 869 struct kcqe **kcq; 870 871 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) 872 is_bnx2 = 1; 873 874 err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2); 875 if (err) 876 return err; 877 878 kcq = (struct kcqe **) info->dma.pg_arr; 879 info->kcq = kcq; 880 881 if (is_bnx2) 882 return 0; 883 884 for (i = 0; i < KCQ_PAGE_CNT; i++) { 885 struct bnx2x_bd_chain_next *next = 886 (struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT]; 887 int j = i + 1; 888 889 if (j >= KCQ_PAGE_CNT) 890 j = 0; 891 next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32; 892 next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff; 893 } 894 return 0; 895} 896 897static int cnic_alloc_l2_rings(struct cnic_dev *dev, int pages) 898{ 899 struct cnic_local *cp = dev->cnic_priv; 900 901 cp->l2_ring_size = pages * BCM_PAGE_SIZE; 902 cp->l2_ring = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_ring_size, 903 &cp->l2_ring_map, 904 GFP_KERNEL | __GFP_COMP); 905 if (!cp->l2_ring) 906 return -ENOMEM; 907 908 cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; 909 cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size); 910 cp->l2_buf = dma_alloc_coherent(&dev->pcidev->dev, cp->l2_buf_size, 911 &cp->l2_buf_map, 912 GFP_KERNEL | __GFP_COMP); 913 if (!cp->l2_buf) 914 return -ENOMEM; 915 916 return 0; 917} 918 919static int cnic_alloc_uio(struct cnic_dev *dev) { 920 struct cnic_local *cp = dev->cnic_priv; 921 struct uio_info *uinfo; 922 int ret; 923 924 uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC); 925 if (!uinfo) 926 return -ENOMEM; 927 928 uinfo->mem[0].addr = dev->netdev->base_addr; 929 uinfo->mem[0].internal_addr = dev->regview; 930 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; 931 uinfo->mem[0].memtype = UIO_MEM_PHYS; 932 933 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 934 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & 935 PAGE_MASK; 936 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) 937 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; 938 else 939 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; 940 941 uinfo->name = "bnx2_cnic"; 942 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 943 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & 944 PAGE_MASK; 945 uinfo->mem[1].size = sizeof(struct host_def_status_block); 946 947 uinfo->name = "bnx2x_cnic"; 948 } 949 950 uinfo->mem[1].memtype = UIO_MEM_LOGICAL; 951 952 uinfo->mem[2].addr = (unsigned long) cp->l2_ring; 953 uinfo->mem[2].size = cp->l2_ring_size; 954 uinfo->mem[2].memtype = UIO_MEM_LOGICAL; 955 956 uinfo->mem[3].addr = (unsigned long) cp->l2_buf; 957 uinfo->mem[3].size = cp->l2_buf_size; 958 uinfo->mem[3].memtype = UIO_MEM_LOGICAL; 959 960 uinfo->version = CNIC_MODULE_VERSION; 961 uinfo->irq = UIO_IRQ_CUSTOM; 962 963 uinfo->open = cnic_uio_open; 964 uinfo->release = cnic_uio_close; 965 966 uinfo->priv = dev; 967 968 ret = uio_register_device(&dev->pcidev->dev, uinfo); 969 if (ret) { 970 kfree(uinfo); 971 return ret; 972 } 973 974 cp->cnic_uinfo = uinfo; 975 return 0; 976} 977 978static int cnic_alloc_bnx2_resc(struct cnic_dev *dev) 979{ 980 struct cnic_local *cp = dev->cnic_priv; 981 int ret; 982 983 ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1); 984 if (ret) 985 goto error; 986 cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr; 987 988 ret = cnic_alloc_kcq(dev, &cp->kcq1); 989 if (ret) 990 goto error; 991 992 ret = cnic_alloc_context(dev); 993 if (ret) 994 goto error; 995 996 ret = cnic_alloc_l2_rings(dev, 2); 997 if (ret) 998 goto error; 999 1000 ret = cnic_alloc_uio(dev); 1001 if (ret) 1002 goto error; 1003 1004 return 0; 1005 1006error: 1007 cnic_free_resc(dev); 1008 return ret; 1009} 1010 1011static int cnic_alloc_bnx2x_context(struct cnic_dev *dev) 1012{ 1013 struct cnic_local *cp = dev->cnic_priv; 1014 int ctx_blk_size = cp->ethdev->ctx_blk_size; 1015 int total_mem, blks, i; 1016 1017 total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space; 1018 blks = total_mem / ctx_blk_size; 1019 if (total_mem % ctx_blk_size) 1020 blks++; 1021 1022 if (blks > cp->ethdev->ctx_tbl_len) 1023 return -ENOMEM; 1024 1025 cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL); 1026 if (cp->ctx_arr == NULL) 1027 return -ENOMEM; 1028 1029 cp->ctx_blks = blks; 1030 cp->ctx_blk_size = ctx_blk_size; 1031 if (BNX2X_CHIP_IS_E1H(cp->chip_id)) 1032 cp->ctx_align = 0; 1033 else 1034 cp->ctx_align = ctx_blk_size; 1035 1036 cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE; 1037 1038 for (i = 0; i < blks; i++) { 1039 cp->ctx_arr[i].ctx = 1040 dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size, 1041 &cp->ctx_arr[i].mapping, 1042 GFP_KERNEL); 1043 if (cp->ctx_arr[i].ctx == NULL) 1044 return -ENOMEM; 1045 1046 if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) { 1047 if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) { 1048 cnic_free_context(dev); 1049 cp->ctx_blk_size += cp->ctx_align; 1050 i = -1; 1051 continue; 1052 } 1053 } 1054 } 1055 return 0; 1056} 1057 1058static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) 1059{ 1060 struct cnic_local *cp = dev->cnic_priv; 1061 struct cnic_eth_dev *ethdev = cp->ethdev; 1062 u32 start_cid = ethdev->starting_cid; 1063 int i, j, n, ret, pages; 1064 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; 1065 1066 cp->max_cid_space = MAX_ISCSI_TBL_SZ; 1067 cp->iscsi_start_cid = start_cid; 1068 if (start_cid < BNX2X_ISCSI_START_CID) { 1069 u32 delta = BNX2X_ISCSI_START_CID - start_cid; 1070 1071 cp->iscsi_start_cid = BNX2X_ISCSI_START_CID; 1072 cp->max_cid_space += delta; 1073 } 1074 1075 cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ, 1076 GFP_KERNEL); 1077 if (!cp->iscsi_tbl) 1078 goto error; 1079 1080 cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) * 1081 cp->max_cid_space, GFP_KERNEL); 1082 if (!cp->ctx_tbl) 1083 goto error; 1084 1085 for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) { 1086 cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i]; 1087 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; 1088 } 1089 1090 pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / 1091 PAGE_SIZE; 1092 1093 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); 1094 if (ret) 1095 return -ENOMEM; 1096 1097 n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; 1098 for (i = 0, j = 0; i < cp->max_cid_space; i++) { 1099 long off = CNIC_KWQ16_DATA_SIZE * (i % n); 1100 1101 cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off; 1102 cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] + 1103 off; 1104 1105 if ((i % n) == (n - 1)) 1106 j++; 1107 } 1108 1109 ret = cnic_alloc_kcq(dev, &cp->kcq1); 1110 if (ret) 1111 goto error; 1112 1113 pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS * 1114 BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE; 1115 ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1); 1116 if (ret) 1117 goto error; 1118 1119 pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; 1120 ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); 1121 if (ret) 1122 goto error; 1123 1124 ret = cnic_alloc_bnx2x_context(dev); 1125 if (ret) 1126 goto error; 1127 1128 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; 1129 1130 memset(cp->status_blk.bnx2x, 0, sizeof(*cp->status_blk.bnx2x)); 1131 1132 cp->l2_rx_ring_size = 15; 1133 1134 ret = cnic_alloc_l2_rings(dev, 4); 1135 if (ret) 1136 goto error; 1137 1138 ret = cnic_alloc_uio(dev); 1139 if (ret) 1140 goto error; 1141 1142 return 0; 1143 1144error: 1145 cnic_free_resc(dev); 1146 return -ENOMEM; 1147} 1148 1149static inline u32 cnic_kwq_avail(struct cnic_local *cp) 1150{ 1151 return cp->max_kwq_idx - 1152 ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx); 1153} 1154 1155static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 1156 u32 num_wqes) 1157{ 1158 struct cnic_local *cp = dev->cnic_priv; 1159 struct kwqe *prod_qe; 1160 u16 prod, sw_prod, i; 1161 1162 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 1163 return -EAGAIN; /* bnx2 is down */ 1164 1165 spin_lock_bh(&cp->cnic_ulp_lock); 1166 if (num_wqes > cnic_kwq_avail(cp) && 1167 !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) { 1168 spin_unlock_bh(&cp->cnic_ulp_lock); 1169 return -EAGAIN; 1170 } 1171 1172 clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); 1173 1174 prod = cp->kwq_prod_idx; 1175 sw_prod = prod & MAX_KWQ_IDX; 1176 for (i = 0; i < num_wqes; i++) { 1177 prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)]; 1178 memcpy(prod_qe, wqes[i], sizeof(struct kwqe)); 1179 prod++; 1180 sw_prod = prod & MAX_KWQ_IDX; 1181 } 1182 cp->kwq_prod_idx = prod; 1183 1184 CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx); 1185 1186 spin_unlock_bh(&cp->cnic_ulp_lock); 1187 return 0; 1188} 1189 1190static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid, 1191 union l5cm_specific_data *l5_data) 1192{ 1193 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1194 dma_addr_t map; 1195 1196 map = ctx->kwqe_data_mapping; 1197 l5_data->phy_address.lo = (u64) map & 0xffffffff; 1198 l5_data->phy_address.hi = (u64) map >> 32; 1199 return ctx->kwqe_data; 1200} 1201 1202static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid, 1203 u32 type, union l5cm_specific_data *l5_data) 1204{ 1205 struct cnic_local *cp = dev->cnic_priv; 1206 struct l5cm_spe kwqe; 1207 struct kwqe_16 *kwq[1]; 1208 int ret; 1209 1210 kwqe.hdr.conn_and_cmd_data = 1211 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | 1212 BNX2X_HW_CID(cid, cp->func))); 1213 kwqe.hdr.type = cpu_to_le16(type); 1214 kwqe.hdr.reserved = 0; 1215 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); 1216 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); 1217 1218 kwq[0] = (struct kwqe_16 *) &kwqe; 1219 1220 spin_lock_bh(&cp->cnic_ulp_lock); 1221 ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1); 1222 spin_unlock_bh(&cp->cnic_ulp_lock); 1223 1224 if (ret == 1) 1225 return 0; 1226 1227 return -EBUSY; 1228} 1229 1230static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type, 1231 struct kcqe *cqes[], u32 num_cqes) 1232{ 1233 struct cnic_local *cp = dev->cnic_priv; 1234 struct cnic_ulp_ops *ulp_ops; 1235 1236 rcu_read_lock(); 1237 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 1238 if (likely(ulp_ops)) { 1239 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], 1240 cqes, num_cqes); 1241 } 1242 rcu_read_unlock(); 1243} 1244 1245static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) 1246{ 1247 struct cnic_local *cp = dev->cnic_priv; 1248 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; 1249 int func = cp->func, pages; 1250 int hq_bds; 1251 1252 cp->num_iscsi_tasks = req1->num_tasks_per_conn; 1253 cp->num_ccells = req1->num_ccells_per_conn; 1254 cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE * 1255 cp->num_iscsi_tasks; 1256 cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * 1257 BNX2X_ISCSI_R2TQE_SIZE; 1258 cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; 1259 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; 1260 hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); 1261 cp->num_cqs = req1->num_cqs; 1262 1263 if (!dev->max_iscsi_conn) 1264 return 0; 1265 1266 /* init Tstorm RAM */ 1267 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func), 1268 req1->rq_num_wqes); 1269 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func), 1270 PAGE_SIZE); 1271 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1272 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); 1273 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + 1274 TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), 1275 req1->num_tasks_per_conn); 1276 1277 /* init Ustorm RAM */ 1278 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1279 USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func), 1280 req1->rq_buffer_size); 1281 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func), 1282 PAGE_SIZE); 1283 CNIC_WR8(dev, BAR_USTRORM_INTMEM + 1284 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); 1285 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1286 USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), 1287 req1->num_tasks_per_conn); 1288 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func), 1289 req1->rq_num_wqes); 1290 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func), 1291 req1->cq_num_wqes); 1292 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func), 1293 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); 1294 1295 /* init Xstorm RAM */ 1296 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func), 1297 PAGE_SIZE); 1298 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1299 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); 1300 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 1301 XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), 1302 req1->num_tasks_per_conn); 1303 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func), 1304 hq_bds); 1305 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func), 1306 req1->num_tasks_per_conn); 1307 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func), 1308 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS); 1309 1310 /* init Cstorm RAM */ 1311 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func), 1312 PAGE_SIZE); 1313 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 1314 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT); 1315 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 1316 CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func), 1317 req1->num_tasks_per_conn); 1318 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func), 1319 req1->cq_num_wqes); 1320 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func), 1321 hq_bds); 1322 1323 return 0; 1324} 1325 1326static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe) 1327{ 1328 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; 1329 struct cnic_local *cp = dev->cnic_priv; 1330 int func = cp->func; 1331 struct iscsi_kcqe kcqe; 1332 struct kcqe *cqes[1]; 1333 1334 memset(&kcqe, 0, sizeof(kcqe)); 1335 if (!dev->max_iscsi_conn) { 1336 kcqe.completion_status = 1337 ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED; 1338 goto done; 1339 } 1340 1341 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 1342 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]); 1343 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 1344 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4, 1345 req2->error_bit_map[1]); 1346 1347 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1348 USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn); 1349 CNIC_WR(dev, BAR_USTRORM_INTMEM + 1350 USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]); 1351 CNIC_WR(dev, BAR_USTRORM_INTMEM + 1352 USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4, 1353 req2->error_bit_map[1]); 1354 1355 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 1356 CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn); 1357 1358 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1359 1360done: 1361 kcqe.op_code = ISCSI_KCQE_OPCODE_INIT; 1362 cqes[0] = (struct kcqe *) &kcqe; 1363 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); 1364 1365 return 0; 1366} 1367 1368static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) 1369{ 1370 struct cnic_local *cp = dev->cnic_priv; 1371 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1372 1373 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) { 1374 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1375 1376 cnic_free_dma(dev, &iscsi->hq_info); 1377 cnic_free_dma(dev, &iscsi->r2tq_info); 1378 cnic_free_dma(dev, &iscsi->task_array_info); 1379 } 1380 cnic_free_id(&cp->cid_tbl, ctx->cid); 1381 ctx->cid = 0; 1382} 1383 1384static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) 1385{ 1386 u32 cid; 1387 int ret, pages; 1388 struct cnic_local *cp = dev->cnic_priv; 1389 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1390 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1391 1392 cid = cnic_alloc_new_id(&cp->cid_tbl); 1393 if (cid == -1) { 1394 ret = -ENOMEM; 1395 goto error; 1396 } 1397 1398 ctx->cid = cid; 1399 pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; 1400 1401 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); 1402 if (ret) 1403 goto error; 1404 1405 pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; 1406 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); 1407 if (ret) 1408 goto error; 1409 1410 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; 1411 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); 1412 if (ret) 1413 goto error; 1414 1415 return 0; 1416 1417error: 1418 cnic_free_bnx2x_conn_resc(dev, l5_cid); 1419 return ret; 1420} 1421 1422static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init, 1423 struct regpair *ctx_addr) 1424{ 1425 struct cnic_local *cp = dev->cnic_priv; 1426 struct cnic_eth_dev *ethdev = cp->ethdev; 1427 int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk; 1428 int off = (cid - ethdev->starting_cid) % cp->cids_per_blk; 1429 unsigned long align_off = 0; 1430 dma_addr_t ctx_map; 1431 void *ctx; 1432 1433 if (cp->ctx_align) { 1434 unsigned long mask = cp->ctx_align - 1; 1435 1436 if (cp->ctx_arr[blk].mapping & mask) 1437 align_off = cp->ctx_align - 1438 (cp->ctx_arr[blk].mapping & mask); 1439 } 1440 ctx_map = cp->ctx_arr[blk].mapping + align_off + 1441 (off * BNX2X_CONTEXT_MEM_SIZE); 1442 ctx = cp->ctx_arr[blk].ctx + align_off + 1443 (off * BNX2X_CONTEXT_MEM_SIZE); 1444 if (init) 1445 memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE); 1446 1447 ctx_addr->lo = ctx_map & 0xffffffff; 1448 ctx_addr->hi = (u64) ctx_map >> 32; 1449 return ctx; 1450} 1451 1452static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], 1453 u32 num) 1454{ 1455 struct cnic_local *cp = dev->cnic_priv; 1456 struct iscsi_kwqe_conn_offload1 *req1 = 1457 (struct iscsi_kwqe_conn_offload1 *) wqes[0]; 1458 struct iscsi_kwqe_conn_offload2 *req2 = 1459 (struct iscsi_kwqe_conn_offload2 *) wqes[1]; 1460 struct iscsi_kwqe_conn_offload3 *req3; 1461 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id]; 1462 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1463 u32 cid = ctx->cid; 1464 u32 hw_cid = BNX2X_HW_CID(cid, cp->func); 1465 struct iscsi_context *ictx; 1466 struct regpair context_addr; 1467 int i, j, n = 2, n_max; 1468 1469 ctx->ctx_flags = 0; 1470 if (!req2->num_additional_wqes) 1471 return -EINVAL; 1472 1473 n_max = req2->num_additional_wqes + 2; 1474 1475 ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr); 1476 if (ictx == NULL) 1477 return -ENOMEM; 1478 1479 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; 1480 1481 ictx->xstorm_ag_context.hq_prod = 1; 1482 1483 ictx->xstorm_st_context.iscsi.first_burst_length = 1484 ISCSI_DEF_FIRST_BURST_LEN; 1485 ictx->xstorm_st_context.iscsi.max_send_pdu_length = 1486 ISCSI_DEF_MAX_RECV_SEG_LEN; 1487 ictx->xstorm_st_context.iscsi.sq_pbl_base.lo = 1488 req1->sq_page_table_addr_lo; 1489 ictx->xstorm_st_context.iscsi.sq_pbl_base.hi = 1490 req1->sq_page_table_addr_hi; 1491 ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi; 1492 ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo; 1493 ictx->xstorm_st_context.iscsi.hq_pbl_base.lo = 1494 iscsi->hq_info.pgtbl_map & 0xffffffff; 1495 ictx->xstorm_st_context.iscsi.hq_pbl_base.hi = 1496 (u64) iscsi->hq_info.pgtbl_map >> 32; 1497 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo = 1498 iscsi->hq_info.pgtbl[0]; 1499 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi = 1500 iscsi->hq_info.pgtbl[1]; 1501 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo = 1502 iscsi->r2tq_info.pgtbl_map & 0xffffffff; 1503 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi = 1504 (u64) iscsi->r2tq_info.pgtbl_map >> 32; 1505 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo = 1506 iscsi->r2tq_info.pgtbl[0]; 1507 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi = 1508 iscsi->r2tq_info.pgtbl[1]; 1509 ictx->xstorm_st_context.iscsi.task_pbl_base.lo = 1510 iscsi->task_array_info.pgtbl_map & 0xffffffff; 1511 ictx->xstorm_st_context.iscsi.task_pbl_base.hi = 1512 (u64) iscsi->task_array_info.pgtbl_map >> 32; 1513 ictx->xstorm_st_context.iscsi.task_pbl_cache_idx = 1514 BNX2X_ISCSI_PBL_NOT_CACHED; 1515 ictx->xstorm_st_context.iscsi.flags.flags |= 1516 XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA; 1517 ictx->xstorm_st_context.iscsi.flags.flags |= 1518 XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T; 1519 1520 ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; 1521 /* TSTORM requires the base address of RQ DB & not PTE */ 1522 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = 1523 req2->rq_page_table_addr_lo & PAGE_MASK; 1524 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = 1525 req2->rq_page_table_addr_hi; 1526 ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; 1527 ictx->tstorm_st_context.tcp.cwnd = 0x5A8; 1528 ictx->tstorm_st_context.tcp.flags2 |= 1529 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN; 1530 1531 ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG; 1532 1533 ictx->ustorm_st_context.ring.rq.pbl_base.lo = 1534 req2->rq_page_table_addr_lo; 1535 ictx->ustorm_st_context.ring.rq.pbl_base.hi = 1536 req2->rq_page_table_addr_hi; 1537 ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi; 1538 ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo; 1539 ictx->ustorm_st_context.ring.r2tq.pbl_base.lo = 1540 iscsi->r2tq_info.pgtbl_map & 0xffffffff; 1541 ictx->ustorm_st_context.ring.r2tq.pbl_base.hi = 1542 (u64) iscsi->r2tq_info.pgtbl_map >> 32; 1543 ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo = 1544 iscsi->r2tq_info.pgtbl[0]; 1545 ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi = 1546 iscsi->r2tq_info.pgtbl[1]; 1547 ictx->ustorm_st_context.ring.cq_pbl_base.lo = 1548 req1->cq_page_table_addr_lo; 1549 ictx->ustorm_st_context.ring.cq_pbl_base.hi = 1550 req1->cq_page_table_addr_hi; 1551 ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN; 1552 ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi; 1553 ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo; 1554 ictx->ustorm_st_context.task_pbe_cache_index = 1555 BNX2X_ISCSI_PBL_NOT_CACHED; 1556 ictx->ustorm_st_context.task_pdu_cache_index = 1557 BNX2X_ISCSI_PDU_HEADER_NOT_CACHED; 1558 1559 for (i = 1, j = 1; i < cp->num_cqs; i++, j++) { 1560 if (j == 3) { 1561 if (n >= n_max) 1562 break; 1563 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++]; 1564 j = 0; 1565 } 1566 ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN; 1567 ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo = 1568 req3->qp_first_pte[j].hi; 1569 ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi = 1570 req3->qp_first_pte[j].lo; 1571 } 1572 1573 ictx->ustorm_st_context.task_pbl_base.lo = 1574 iscsi->task_array_info.pgtbl_map & 0xffffffff; 1575 ictx->ustorm_st_context.task_pbl_base.hi = 1576 (u64) iscsi->task_array_info.pgtbl_map >> 32; 1577 ictx->ustorm_st_context.tce_phy_addr.lo = 1578 iscsi->task_array_info.pgtbl[0]; 1579 ictx->ustorm_st_context.tce_phy_addr.hi = 1580 iscsi->task_array_info.pgtbl[1]; 1581 ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; 1582 ictx->ustorm_st_context.num_cqs = cp->num_cqs; 1583 ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN; 1584 ictx->ustorm_st_context.negotiated_rx_and_flags |= 1585 ISCSI_DEF_MAX_BURST_LEN; 1586 ictx->ustorm_st_context.negotiated_rx |= 1587 ISCSI_DEFAULT_MAX_OUTSTANDING_R2T << 1588 USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT; 1589 1590 ictx->cstorm_st_context.hq_pbl_base.lo = 1591 iscsi->hq_info.pgtbl_map & 0xffffffff; 1592 ictx->cstorm_st_context.hq_pbl_base.hi = 1593 (u64) iscsi->hq_info.pgtbl_map >> 32; 1594 ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0]; 1595 ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1]; 1596 ictx->cstorm_st_context.task_pbl_base.lo = 1597 iscsi->task_array_info.pgtbl_map & 0xffffffff; 1598 ictx->cstorm_st_context.task_pbl_base.hi = 1599 (u64) iscsi->task_array_info.pgtbl_map >> 32; 1600 /* CSTORM and USTORM initialization is different, CSTORM requires 1601 * CQ DB base & not PTE addr */ 1602 ictx->cstorm_st_context.cq_db_base.lo = 1603 req1->cq_page_table_addr_lo & PAGE_MASK; 1604 ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; 1605 ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; 1606 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; 1607 for (i = 0; i < cp->num_cqs; i++) { 1608 ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] = 1609 ISCSI_INITIAL_SN; 1610 ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] = 1611 ISCSI_INITIAL_SN; 1612 } 1613 1614 ictx->xstorm_ag_context.cdu_reserved = 1615 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, 1616 ISCSI_CONNECTION_TYPE); 1617 ictx->ustorm_ag_context.cdu_usage = 1618 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, 1619 ISCSI_CONNECTION_TYPE); 1620 return 0; 1621 1622} 1623 1624static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], 1625 u32 num, int *work) 1626{ 1627 struct iscsi_kwqe_conn_offload1 *req1; 1628 struct iscsi_kwqe_conn_offload2 *req2; 1629 struct cnic_local *cp = dev->cnic_priv; 1630 struct iscsi_kcqe kcqe; 1631 struct kcqe *cqes[1]; 1632 u32 l5_cid; 1633 int ret; 1634 1635 if (num < 2) { 1636 *work = num; 1637 return -EINVAL; 1638 } 1639 1640 req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0]; 1641 req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1]; 1642 if ((num - 2) < req2->num_additional_wqes) { 1643 *work = num; 1644 return -EINVAL; 1645 } 1646 *work = 2 + req2->num_additional_wqes;; 1647 1648 l5_cid = req1->iscsi_conn_id; 1649 if (l5_cid >= MAX_ISCSI_TBL_SZ) 1650 return -EINVAL; 1651 1652 memset(&kcqe, 0, sizeof(kcqe)); 1653 kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN; 1654 kcqe.iscsi_conn_id = l5_cid; 1655 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; 1656 1657 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) { 1658 atomic_dec(&cp->iscsi_conn); 1659 ret = 0; 1660 goto done; 1661 } 1662 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); 1663 if (ret) { 1664 atomic_dec(&cp->iscsi_conn); 1665 ret = 0; 1666 goto done; 1667 } 1668 ret = cnic_setup_bnx2x_ctx(dev, wqes, num); 1669 if (ret < 0) { 1670 cnic_free_bnx2x_conn_resc(dev, l5_cid); 1671 atomic_dec(&cp->iscsi_conn); 1672 goto done; 1673 } 1674 1675 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1676 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid, 1677 cp->func); 1678 1679done: 1680 cqes[0] = (struct kcqe *) &kcqe; 1681 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); 1682 return ret; 1683} 1684 1685 1686static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe) 1687{ 1688 struct cnic_local *cp = dev->cnic_priv; 1689 struct iscsi_kwqe_conn_update *req = 1690 (struct iscsi_kwqe_conn_update *) kwqe; 1691 void *data; 1692 union l5cm_specific_data l5_data; 1693 u32 l5_cid, cid = BNX2X_SW_CID(req->context_id); 1694 int ret; 1695 1696 if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0) 1697 return -EINVAL; 1698 1699 data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 1700 if (!data) 1701 return -ENOMEM; 1702 1703 memcpy(data, kwqe, sizeof(struct kwqe)); 1704 1705 ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN, 1706 req->context_id, ISCSI_CONNECTION_TYPE, &l5_data); 1707 return ret; 1708} 1709 1710static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 1711{ 1712 struct cnic_local *cp = dev->cnic_priv; 1713 struct iscsi_kwqe_conn_destroy *req = 1714 (struct iscsi_kwqe_conn_destroy *) kwqe; 1715 union l5cm_specific_data l5_data; 1716 u32 l5_cid = req->reserved0; 1717 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1718 int ret = 0; 1719 struct iscsi_kcqe kcqe; 1720 struct kcqe *cqes[1]; 1721 1722 if (!(ctx->ctx_flags & CTX_FL_OFFLD_START)) 1723 goto skip_cfc_delete; 1724 1725 while (!time_after(jiffies, ctx->timestamp + (2 * HZ))) 1726 msleep(250); 1727 1728 init_waitqueue_head(&ctx->waitq); 1729 ctx->wait_cond = 0; 1730 memset(&l5_data, 0, sizeof(l5_data)); 1731 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, 1732 req->context_id, 1733 ETH_CONNECTION_TYPE | 1734 (1 << SPE_HDR_COMMON_RAMROD_SHIFT), 1735 &l5_data); 1736 if (ret == 0) 1737 wait_event(ctx->waitq, ctx->wait_cond); 1738 1739skip_cfc_delete: 1740 cnic_free_bnx2x_conn_resc(dev, l5_cid); 1741 1742 atomic_dec(&cp->iscsi_conn); 1743 1744 memset(&kcqe, 0, sizeof(kcqe)); 1745 kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN; 1746 kcqe.iscsi_conn_id = l5_cid; 1747 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS; 1748 kcqe.iscsi_conn_context_id = req->context_id; 1749 1750 cqes[0] = (struct kcqe *) &kcqe; 1751 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1); 1752 1753 return ret; 1754} 1755 1756static void cnic_init_storm_conn_bufs(struct cnic_dev *dev, 1757 struct l4_kwq_connect_req1 *kwqe1, 1758 struct l4_kwq_connect_req3 *kwqe3, 1759 struct l5cm_active_conn_buffer *conn_buf) 1760{ 1761 struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf; 1762 struct l5cm_xstorm_conn_buffer *xstorm_buf = 1763 &conn_buf->xstorm_conn_buffer; 1764 struct l5cm_tstorm_conn_buffer *tstorm_buf = 1765 &conn_buf->tstorm_conn_buffer; 1766 struct regpair context_addr; 1767 u32 cid = BNX2X_SW_CID(kwqe1->cid); 1768 struct in6_addr src_ip, dst_ip; 1769 int i; 1770 u32 *addrp; 1771 1772 addrp = (u32 *) &conn_addr->local_ip_addr; 1773 for (i = 0; i < 4; i++, addrp++) 1774 src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); 1775 1776 addrp = (u32 *) &conn_addr->remote_ip_addr; 1777 for (i = 0; i < 4; i++, addrp++) 1778 dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); 1779 1780 cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr); 1781 1782 xstorm_buf->context_addr.hi = context_addr.hi; 1783 xstorm_buf->context_addr.lo = context_addr.lo; 1784 xstorm_buf->mss = 0xffff; 1785 xstorm_buf->rcv_buf = kwqe3->rcv_buf; 1786 if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE) 1787 xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE; 1788 xstorm_buf->pseudo_header_checksum = 1789 swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); 1790 1791 if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK)) 1792 tstorm_buf->params |= 1793 L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE; 1794 if (kwqe3->ka_timeout) { 1795 tstorm_buf->ka_enable = 1; 1796 tstorm_buf->ka_timeout = kwqe3->ka_timeout; 1797 tstorm_buf->ka_interval = kwqe3->ka_interval; 1798 tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count; 1799 } 1800 tstorm_buf->rcv_buf = kwqe3->rcv_buf; 1801 tstorm_buf->snd_buf = kwqe3->snd_buf; 1802 tstorm_buf->max_rt_time = 0xffffffff; 1803} 1804 1805static void cnic_init_bnx2x_mac(struct cnic_dev *dev) 1806{ 1807 struct cnic_local *cp = dev->cnic_priv; 1808 int func = CNIC_FUNC(cp); 1809 u8 *mac = dev->mac_addr; 1810 1811 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1812 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]); 1813 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1814 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]); 1815 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1816 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]); 1817 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1818 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]); 1819 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1820 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]); 1821 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1822 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]); 1823 1824 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1825 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]); 1826 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1827 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1, 1828 mac[4]); 1829 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1830 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]); 1831 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1832 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1, 1833 mac[2]); 1834 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1835 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2, 1836 mac[1]); 1837 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1838 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3, 1839 mac[0]); 1840} 1841 1842static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) 1843{ 1844 struct cnic_local *cp = dev->cnic_priv; 1845 u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; 1846 u16 tstorm_flags = 0; 1847 1848 if (tcp_ts) { 1849 xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED; 1850 tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED; 1851 } 1852 1853 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1854 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags); 1855 1856 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + 1857 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags); 1858} 1859 1860static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], 1861 u32 num, int *work) 1862{ 1863 struct cnic_local *cp = dev->cnic_priv; 1864 struct l4_kwq_connect_req1 *kwqe1 = 1865 (struct l4_kwq_connect_req1 *) wqes[0]; 1866 struct l4_kwq_connect_req3 *kwqe3; 1867 struct l5cm_active_conn_buffer *conn_buf; 1868 struct l5cm_conn_addr_params *conn_addr; 1869 union l5cm_specific_data l5_data; 1870 u32 l5_cid = kwqe1->pg_cid; 1871 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 1872 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1873 int ret; 1874 1875 if (num < 2) { 1876 *work = num; 1877 return -EINVAL; 1878 } 1879 1880 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) 1881 *work = 3; 1882 else 1883 *work = 2; 1884 1885 if (num < *work) { 1886 *work = num; 1887 return -EINVAL; 1888 } 1889 1890 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { 1891 netdev_err(dev->netdev, "conn_buf size too big\n"); 1892 return -ENOMEM; 1893 } 1894 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 1895 if (!conn_buf) 1896 return -ENOMEM; 1897 1898 memset(conn_buf, 0, sizeof(*conn_buf)); 1899 1900 conn_addr = &conn_buf->conn_addr_buf; 1901 conn_addr->remote_addr_0 = csk->ha[0]; 1902 conn_addr->remote_addr_1 = csk->ha[1]; 1903 conn_addr->remote_addr_2 = csk->ha[2]; 1904 conn_addr->remote_addr_3 = csk->ha[3]; 1905 conn_addr->remote_addr_4 = csk->ha[4]; 1906 conn_addr->remote_addr_5 = csk->ha[5]; 1907 1908 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { 1909 struct l4_kwq_connect_req2 *kwqe2 = 1910 (struct l4_kwq_connect_req2 *) wqes[1]; 1911 1912 conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; 1913 conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; 1914 conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; 1915 1916 conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; 1917 conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; 1918 conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; 1919 conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION; 1920 } 1921 kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; 1922 1923 conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; 1924 conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; 1925 conn_addr->local_tcp_port = kwqe1->src_port; 1926 conn_addr->remote_tcp_port = kwqe1->dst_port; 1927 1928 conn_addr->pmtu = kwqe3->pmtu; 1929 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); 1930 1931 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 1932 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id); 1933 1934 cnic_bnx2x_set_tcp_timestamp(dev, 1935 kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP); 1936 1937 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, 1938 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); 1939 if (!ret) 1940 ctx->ctx_flags |= CTX_FL_OFFLD_START; 1941 1942 return ret; 1943} 1944 1945static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) 1946{ 1947 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; 1948 union l5cm_specific_data l5_data; 1949 int ret; 1950 1951 memset(&l5_data, 0, sizeof(l5_data)); 1952 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, 1953 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 1954 return ret; 1955} 1956 1957static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) 1958{ 1959 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; 1960 union l5cm_specific_data l5_data; 1961 int ret; 1962 1963 memset(&l5_data, 0, sizeof(l5_data)); 1964 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, 1965 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 1966 return ret; 1967} 1968static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) 1969{ 1970 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; 1971 struct l4_kcq kcqe; 1972 struct kcqe *cqes[1]; 1973 1974 memset(&kcqe, 0, sizeof(kcqe)); 1975 kcqe.pg_host_opaque = req->host_opaque; 1976 kcqe.pg_cid = req->host_opaque; 1977 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; 1978 cqes[0] = (struct kcqe *) &kcqe; 1979 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 1980 return 0; 1981} 1982 1983static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) 1984{ 1985 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; 1986 struct l4_kcq kcqe; 1987 struct kcqe *cqes[1]; 1988 1989 memset(&kcqe, 0, sizeof(kcqe)); 1990 kcqe.pg_host_opaque = req->pg_host_opaque; 1991 kcqe.pg_cid = req->pg_cid; 1992 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; 1993 cqes[0] = (struct kcqe *) &kcqe; 1994 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 1995 return 0; 1996} 1997 1998static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 1999 u32 num_wqes) 2000{ 2001 int i, work, ret; 2002 u32 opcode; 2003 struct kwqe *kwqe; 2004 2005 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2006 return -EAGAIN; /* bnx2 is down */ 2007 2008 for (i = 0; i < num_wqes; ) { 2009 kwqe = wqes[i]; 2010 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2011 work = 1; 2012 2013 switch (opcode) { 2014 case ISCSI_KWQE_OPCODE_INIT1: 2015 ret = cnic_bnx2x_iscsi_init1(dev, kwqe); 2016 break; 2017 case ISCSI_KWQE_OPCODE_INIT2: 2018 ret = cnic_bnx2x_iscsi_init2(dev, kwqe); 2019 break; 2020 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: 2021 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], 2022 num_wqes - i, &work); 2023 break; 2024 case ISCSI_KWQE_OPCODE_UPDATE_CONN: 2025 ret = cnic_bnx2x_iscsi_update(dev, kwqe); 2026 break; 2027 case ISCSI_KWQE_OPCODE_DESTROY_CONN: 2028 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); 2029 break; 2030 case L4_KWQE_OPCODE_VALUE_CONNECT1: 2031 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, 2032 &work); 2033 break; 2034 case L4_KWQE_OPCODE_VALUE_CLOSE: 2035 ret = cnic_bnx2x_close(dev, kwqe); 2036 break; 2037 case L4_KWQE_OPCODE_VALUE_RESET: 2038 ret = cnic_bnx2x_reset(dev, kwqe); 2039 break; 2040 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: 2041 ret = cnic_bnx2x_offload_pg(dev, kwqe); 2042 break; 2043 case L4_KWQE_OPCODE_VALUE_UPDATE_PG: 2044 ret = cnic_bnx2x_update_pg(dev, kwqe); 2045 break; 2046 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: 2047 ret = 0; 2048 break; 2049 default: 2050 ret = 0; 2051 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2052 opcode); 2053 break; 2054 } 2055 if (ret < 0) 2056 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2057 opcode); 2058 i += work; 2059 } 2060 return 0; 2061} 2062 2063static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2064{ 2065 struct cnic_local *cp = dev->cnic_priv; 2066 int i, j; 2067 2068 i = 0; 2069 j = 1; 2070 while (num_cqes) { 2071 struct cnic_ulp_ops *ulp_ops; 2072 int ulp_type; 2073 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; 2074 u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK; 2075 2076 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) 2077 cnic_kwq_completion(dev, 1); 2078 2079 while (j < num_cqes) { 2080 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; 2081 2082 if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer) 2083 break; 2084 2085 if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) 2086 cnic_kwq_completion(dev, 1); 2087 j++; 2088 } 2089 2090 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA) 2091 ulp_type = CNIC_ULP_RDMA; 2092 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) 2093 ulp_type = CNIC_ULP_ISCSI; 2094 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) 2095 ulp_type = CNIC_ULP_L4; 2096 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) 2097 goto end; 2098 else { 2099 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n", 2100 kcqe_op_flag); 2101 goto end; 2102 } 2103 2104 rcu_read_lock(); 2105 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 2106 if (likely(ulp_ops)) { 2107 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], 2108 cp->completed_kcq + i, j); 2109 } 2110 rcu_read_unlock(); 2111end: 2112 num_cqes -= j; 2113 i += j; 2114 j = 1; 2115 } 2116} 2117 2118static u16 cnic_bnx2_next_idx(u16 idx) 2119{ 2120 return idx + 1; 2121} 2122 2123static u16 cnic_bnx2_hw_idx(u16 idx) 2124{ 2125 return idx; 2126} 2127 2128static u16 cnic_bnx2x_next_idx(u16 idx) 2129{ 2130 idx++; 2131 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 2132 idx++; 2133 2134 return idx; 2135} 2136 2137static u16 cnic_bnx2x_hw_idx(u16 idx) 2138{ 2139 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT) 2140 idx++; 2141 return idx; 2142} 2143 2144static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) 2145{ 2146 struct cnic_local *cp = dev->cnic_priv; 2147 u16 i, ri, hw_prod, last; 2148 struct kcqe *kcqe; 2149 int kcqe_cnt = 0, last_cnt = 0; 2150 2151 i = ri = last = info->sw_prod_idx; 2152 ri &= MAX_KCQ_IDX; 2153 hw_prod = *info->hw_prod_idx_ptr; 2154 hw_prod = cp->hw_idx(hw_prod); 2155 2156 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { 2157 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; 2158 cp->completed_kcq[kcqe_cnt++] = kcqe; 2159 i = cp->next_idx(i); 2160 ri = i & MAX_KCQ_IDX; 2161 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { 2162 last_cnt = kcqe_cnt; 2163 last = i; 2164 } 2165 } 2166 2167 info->sw_prod_idx = last; 2168 return last_cnt; 2169} 2170 2171static int cnic_l2_completion(struct cnic_local *cp) 2172{ 2173 u16 hw_cons, sw_cons; 2174 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) 2175 (cp->l2_ring + (2 * BCM_PAGE_SIZE)); 2176 u32 cmd; 2177 int comp = 0; 2178 2179 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) 2180 return 0; 2181 2182 hw_cons = *cp->rx_cons_ptr; 2183 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT) 2184 hw_cons++; 2185 2186 sw_cons = cp->rx_cons; 2187 while (sw_cons != hw_cons) { 2188 u8 cqe_fp_flags; 2189 2190 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; 2191 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 2192 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) { 2193 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); 2194 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT; 2195 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP || 2196 cmd == RAMROD_CMD_ID_ETH_HALT) 2197 comp++; 2198 } 2199 sw_cons = BNX2X_NEXT_RCQE(sw_cons); 2200 } 2201 return comp; 2202} 2203 2204static void cnic_chk_pkt_rings(struct cnic_local *cp) 2205{ 2206 u16 rx_cons = *cp->rx_cons_ptr; 2207 u16 tx_cons = *cp->tx_cons_ptr; 2208 int comp = 0; 2209 2210 if (!test_bit(CNIC_F_CNIC_UP, &cp->dev->flags)) 2211 return; 2212 2213 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2214 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 2215 comp = cnic_l2_completion(cp); 2216 2217 cp->tx_cons = tx_cons; 2218 cp->rx_cons = rx_cons; 2219 2220 uio_event_notify(cp->cnic_uinfo); 2221 } 2222 if (comp) 2223 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 2224} 2225 2226static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) 2227{ 2228 struct cnic_local *cp = dev->cnic_priv; 2229 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2230 int kcqe_cnt; 2231 2232 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2233 2234 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { 2235 2236 service_kcqes(dev, kcqe_cnt); 2237 2238 /* Tell compiler that status_blk fields can change. */ 2239 barrier(); 2240 if (status_idx != *cp->kcq1.status_idx_ptr) { 2241 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2242 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2243 } else 2244 break; 2245 } 2246 2247 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); 2248 2249 cnic_chk_pkt_rings(cp); 2250 2251 return status_idx; 2252} 2253 2254static int cnic_service_bnx2(void *data, void *status_blk) 2255{ 2256 struct cnic_dev *dev = data; 2257 struct cnic_local *cp = dev->cnic_priv; 2258 u32 status_idx = *cp->kcq1.status_idx_ptr; 2259 2260 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2261 return status_idx; 2262 2263 return cnic_service_bnx2_queues(dev); 2264} 2265 2266static void cnic_service_bnx2_msix(unsigned long data) 2267{ 2268 struct cnic_dev *dev = (struct cnic_dev *) data; 2269 struct cnic_local *cp = dev->cnic_priv; 2270 2271 cp->last_status_idx = cnic_service_bnx2_queues(dev); 2272 2273 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 2274 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 2275} 2276 2277static void cnic_doirq(struct cnic_dev *dev) 2278{ 2279 struct cnic_local *cp = dev->cnic_priv; 2280 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; 2281 2282 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2283 prefetch(cp->status_blk.gen); 2284 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2285 2286 tasklet_schedule(&cp->cnic_irq_task); 2287 } 2288} 2289 2290static irqreturn_t cnic_irq(int irq, void *dev_instance) 2291{ 2292 struct cnic_dev *dev = dev_instance; 2293 struct cnic_local *cp = dev->cnic_priv; 2294 2295 if (cp->ack_int) 2296 cp->ack_int(dev); 2297 2298 cnic_doirq(dev); 2299 2300 return IRQ_HANDLED; 2301} 2302 2303static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, 2304 u16 index, u8 op, u8 update) 2305{ 2306 struct cnic_local *cp = dev->cnic_priv; 2307 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 + 2308 COMMAND_REG_INT_ACK); 2309 struct igu_ack_register igu_ack; 2310 2311 igu_ack.status_block_index = index; 2312 igu_ack.sb_id_and_flags = 2313 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | 2314 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | 2315 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | 2316 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); 2317 2318 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); 2319} 2320 2321static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) 2322{ 2323 struct cnic_local *cp = dev->cnic_priv; 2324 2325 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0, 2326 IGU_INT_DISABLE, 0); 2327} 2328 2329static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) 2330{ 2331 u32 last_status = *info->status_idx_ptr; 2332 int kcqe_cnt; 2333 2334 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 2335 2336 service_kcqes(dev, kcqe_cnt); 2337 2338 /* Tell compiler that sblk fields can change. */ 2339 barrier(); 2340 if (last_status == *info->status_idx_ptr) 2341 break; 2342 2343 last_status = *info->status_idx_ptr; 2344 } 2345 return last_status; 2346} 2347 2348static void cnic_service_bnx2x_bh(unsigned long data) 2349{ 2350 struct cnic_dev *dev = (struct cnic_dev *) data; 2351 struct cnic_local *cp = dev->cnic_priv; 2352 u32 status_idx; 2353 2354 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2355 return; 2356 2357 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2358 2359 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2360 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 2361 status_idx, IGU_INT_ENABLE, 1); 2362} 2363 2364static int cnic_service_bnx2x(void *data, void *status_blk) 2365{ 2366 struct cnic_dev *dev = data; 2367 struct cnic_local *cp = dev->cnic_priv; 2368 2369 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 2370 cnic_doirq(dev); 2371 2372 cnic_chk_pkt_rings(cp); 2373 2374 return 0; 2375} 2376 2377static void cnic_ulp_stop(struct cnic_dev *dev) 2378{ 2379 struct cnic_local *cp = dev->cnic_priv; 2380 int if_type; 2381 2382 if (cp->cnic_uinfo) 2383 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 2384 2385 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 2386 struct cnic_ulp_ops *ulp_ops; 2387 2388 mutex_lock(&cnic_lock); 2389 ulp_ops = cp->ulp_ops[if_type]; 2390 if (!ulp_ops) { 2391 mutex_unlock(&cnic_lock); 2392 continue; 2393 } 2394 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2395 mutex_unlock(&cnic_lock); 2396 2397 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) 2398 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); 2399 2400 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2401 } 2402} 2403 2404static void cnic_ulp_start(struct cnic_dev *dev) 2405{ 2406 struct cnic_local *cp = dev->cnic_priv; 2407 int if_type; 2408 2409 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 2410 struct cnic_ulp_ops *ulp_ops; 2411 2412 mutex_lock(&cnic_lock); 2413 ulp_ops = cp->ulp_ops[if_type]; 2414 if (!ulp_ops || !ulp_ops->cnic_start) { 2415 mutex_unlock(&cnic_lock); 2416 continue; 2417 } 2418 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2419 mutex_unlock(&cnic_lock); 2420 2421 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) 2422 ulp_ops->cnic_start(cp->ulp_handle[if_type]); 2423 2424 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 2425 } 2426} 2427 2428static int cnic_ctl(void *data, struct cnic_ctl_info *info) 2429{ 2430 struct cnic_dev *dev = data; 2431 2432 switch (info->cmd) { 2433 case CNIC_CTL_STOP_CMD: 2434 cnic_hold(dev); 2435 2436 cnic_ulp_stop(dev); 2437 cnic_stop_hw(dev); 2438 2439 cnic_put(dev); 2440 break; 2441 case CNIC_CTL_START_CMD: 2442 cnic_hold(dev); 2443 2444 if (!cnic_start_hw(dev)) 2445 cnic_ulp_start(dev); 2446 2447 cnic_put(dev); 2448 break; 2449 case CNIC_CTL_COMPLETION_CMD: { 2450 u32 cid = BNX2X_SW_CID(info->data.comp.cid); 2451 u32 l5_cid; 2452 struct cnic_local *cp = dev->cnic_priv; 2453 2454 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { 2455 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 2456 2457 ctx->wait_cond = 1; 2458 wake_up(&ctx->waitq); 2459 } 2460 break; 2461 } 2462 default: 2463 return -EINVAL; 2464 } 2465 return 0; 2466} 2467 2468static void cnic_ulp_init(struct cnic_dev *dev) 2469{ 2470 int i; 2471 struct cnic_local *cp = dev->cnic_priv; 2472 2473 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 2474 struct cnic_ulp_ops *ulp_ops; 2475 2476 mutex_lock(&cnic_lock); 2477 ulp_ops = cnic_ulp_tbl[i]; 2478 if (!ulp_ops || !ulp_ops->cnic_init) { 2479 mutex_unlock(&cnic_lock); 2480 continue; 2481 } 2482 ulp_get(ulp_ops); 2483 mutex_unlock(&cnic_lock); 2484 2485 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) 2486 ulp_ops->cnic_init(dev); 2487 2488 ulp_put(ulp_ops); 2489 } 2490} 2491 2492static void cnic_ulp_exit(struct cnic_dev *dev) 2493{ 2494 int i; 2495 struct cnic_local *cp = dev->cnic_priv; 2496 2497 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 2498 struct cnic_ulp_ops *ulp_ops; 2499 2500 mutex_lock(&cnic_lock); 2501 ulp_ops = cnic_ulp_tbl[i]; 2502 if (!ulp_ops || !ulp_ops->cnic_exit) { 2503 mutex_unlock(&cnic_lock); 2504 continue; 2505 } 2506 ulp_get(ulp_ops); 2507 mutex_unlock(&cnic_lock); 2508 2509 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) 2510 ulp_ops->cnic_exit(dev); 2511 2512 ulp_put(ulp_ops); 2513 } 2514} 2515 2516static int cnic_cm_offload_pg(struct cnic_sock *csk) 2517{ 2518 struct cnic_dev *dev = csk->dev; 2519 struct l4_kwq_offload_pg *l4kwqe; 2520 struct kwqe *wqes[1]; 2521 2522 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; 2523 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2524 wqes[0] = (struct kwqe *) l4kwqe; 2525 2526 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; 2527 l4kwqe->flags = 2528 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; 2529 l4kwqe->l2hdr_nbytes = ETH_HLEN; 2530 2531 l4kwqe->da0 = csk->ha[0]; 2532 l4kwqe->da1 = csk->ha[1]; 2533 l4kwqe->da2 = csk->ha[2]; 2534 l4kwqe->da3 = csk->ha[3]; 2535 l4kwqe->da4 = csk->ha[4]; 2536 l4kwqe->da5 = csk->ha[5]; 2537 2538 l4kwqe->sa0 = dev->mac_addr[0]; 2539 l4kwqe->sa1 = dev->mac_addr[1]; 2540 l4kwqe->sa2 = dev->mac_addr[2]; 2541 l4kwqe->sa3 = dev->mac_addr[3]; 2542 l4kwqe->sa4 = dev->mac_addr[4]; 2543 l4kwqe->sa5 = dev->mac_addr[5]; 2544 2545 l4kwqe->etype = ETH_P_IP; 2546 l4kwqe->ipid_start = DEF_IPID_START; 2547 l4kwqe->host_opaque = csk->l5_cid; 2548 2549 if (csk->vlan_id) { 2550 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; 2551 l4kwqe->vlan_tag = csk->vlan_id; 2552 l4kwqe->l2hdr_nbytes += 4; 2553 } 2554 2555 return dev->submit_kwqes(dev, wqes, 1); 2556} 2557 2558static int cnic_cm_update_pg(struct cnic_sock *csk) 2559{ 2560 struct cnic_dev *dev = csk->dev; 2561 struct l4_kwq_update_pg *l4kwqe; 2562 struct kwqe *wqes[1]; 2563 2564 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; 2565 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2566 wqes[0] = (struct kwqe *) l4kwqe; 2567 2568 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; 2569 l4kwqe->flags = 2570 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; 2571 l4kwqe->pg_cid = csk->pg_cid; 2572 2573 l4kwqe->da0 = csk->ha[0]; 2574 l4kwqe->da1 = csk->ha[1]; 2575 l4kwqe->da2 = csk->ha[2]; 2576 l4kwqe->da3 = csk->ha[3]; 2577 l4kwqe->da4 = csk->ha[4]; 2578 l4kwqe->da5 = csk->ha[5]; 2579 2580 l4kwqe->pg_host_opaque = csk->l5_cid; 2581 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; 2582 2583 return dev->submit_kwqes(dev, wqes, 1); 2584} 2585 2586static int cnic_cm_upload_pg(struct cnic_sock *csk) 2587{ 2588 struct cnic_dev *dev = csk->dev; 2589 struct l4_kwq_upload *l4kwqe; 2590 struct kwqe *wqes[1]; 2591 2592 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; 2593 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2594 wqes[0] = (struct kwqe *) l4kwqe; 2595 2596 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; 2597 l4kwqe->flags = 2598 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; 2599 l4kwqe->cid = csk->pg_cid; 2600 2601 return dev->submit_kwqes(dev, wqes, 1); 2602} 2603 2604static int cnic_cm_conn_req(struct cnic_sock *csk) 2605{ 2606 struct cnic_dev *dev = csk->dev; 2607 struct l4_kwq_connect_req1 *l4kwqe1; 2608 struct l4_kwq_connect_req2 *l4kwqe2; 2609 struct l4_kwq_connect_req3 *l4kwqe3; 2610 struct kwqe *wqes[3]; 2611 u8 tcp_flags = 0; 2612 int num_wqes = 2; 2613 2614 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; 2615 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; 2616 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; 2617 memset(l4kwqe1, 0, sizeof(*l4kwqe1)); 2618 memset(l4kwqe2, 0, sizeof(*l4kwqe2)); 2619 memset(l4kwqe3, 0, sizeof(*l4kwqe3)); 2620 2621 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; 2622 l4kwqe3->flags = 2623 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; 2624 l4kwqe3->ka_timeout = csk->ka_timeout; 2625 l4kwqe3->ka_interval = csk->ka_interval; 2626 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; 2627 l4kwqe3->tos = csk->tos; 2628 l4kwqe3->ttl = csk->ttl; 2629 l4kwqe3->snd_seq_scale = csk->snd_seq_scale; 2630 l4kwqe3->pmtu = csk->mtu; 2631 l4kwqe3->rcv_buf = csk->rcv_buf; 2632 l4kwqe3->snd_buf = csk->snd_buf; 2633 l4kwqe3->seed = csk->seed; 2634 2635 wqes[0] = (struct kwqe *) l4kwqe1; 2636 if (test_bit(SK_F_IPV6, &csk->flags)) { 2637 wqes[1] = (struct kwqe *) l4kwqe2; 2638 wqes[2] = (struct kwqe *) l4kwqe3; 2639 num_wqes = 3; 2640 2641 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; 2642 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; 2643 l4kwqe2->flags = 2644 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | 2645 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; 2646 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); 2647 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); 2648 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); 2649 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); 2650 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); 2651 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); 2652 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - 2653 sizeof(struct tcphdr); 2654 } else { 2655 wqes[1] = (struct kwqe *) l4kwqe3; 2656 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - 2657 sizeof(struct tcphdr); 2658 } 2659 2660 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; 2661 l4kwqe1->flags = 2662 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | 2663 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; 2664 l4kwqe1->cid = csk->cid; 2665 l4kwqe1->pg_cid = csk->pg_cid; 2666 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); 2667 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); 2668 l4kwqe1->src_port = be16_to_cpu(csk->src_port); 2669 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); 2670 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) 2671 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; 2672 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) 2673 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; 2674 if (csk->tcp_flags & SK_TCP_NAGLE) 2675 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; 2676 if (csk->tcp_flags & SK_TCP_TIMESTAMP) 2677 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; 2678 if (csk->tcp_flags & SK_TCP_SACK) 2679 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; 2680 if (csk->tcp_flags & SK_TCP_SEG_SCALING) 2681 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; 2682 2683 l4kwqe1->tcp_flags = tcp_flags; 2684 2685 return dev->submit_kwqes(dev, wqes, num_wqes); 2686} 2687 2688static int cnic_cm_close_req(struct cnic_sock *csk) 2689{ 2690 struct cnic_dev *dev = csk->dev; 2691 struct l4_kwq_close_req *l4kwqe; 2692 struct kwqe *wqes[1]; 2693 2694 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; 2695 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2696 wqes[0] = (struct kwqe *) l4kwqe; 2697 2698 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; 2699 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; 2700 l4kwqe->cid = csk->cid; 2701 2702 return dev->submit_kwqes(dev, wqes, 1); 2703} 2704 2705static int cnic_cm_abort_req(struct cnic_sock *csk) 2706{ 2707 struct cnic_dev *dev = csk->dev; 2708 struct l4_kwq_reset_req *l4kwqe; 2709 struct kwqe *wqes[1]; 2710 2711 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; 2712 memset(l4kwqe, 0, sizeof(*l4kwqe)); 2713 wqes[0] = (struct kwqe *) l4kwqe; 2714 2715 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; 2716 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; 2717 l4kwqe->cid = csk->cid; 2718 2719 return dev->submit_kwqes(dev, wqes, 1); 2720} 2721 2722static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, 2723 u32 l5_cid, struct cnic_sock **csk, void *context) 2724{ 2725 struct cnic_local *cp = dev->cnic_priv; 2726 struct cnic_sock *csk1; 2727 2728 if (l5_cid >= MAX_CM_SK_TBL_SZ) 2729 return -EINVAL; 2730 2731 csk1 = &cp->csk_tbl[l5_cid]; 2732 if (atomic_read(&csk1->ref_count)) 2733 return -EAGAIN; 2734 2735 if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) 2736 return -EBUSY; 2737 2738 csk1->dev = dev; 2739 csk1->cid = cid; 2740 csk1->l5_cid = l5_cid; 2741 csk1->ulp_type = ulp_type; 2742 csk1->context = context; 2743 2744 csk1->ka_timeout = DEF_KA_TIMEOUT; 2745 csk1->ka_interval = DEF_KA_INTERVAL; 2746 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; 2747 csk1->tos = DEF_TOS; 2748 csk1->ttl = DEF_TTL; 2749 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; 2750 csk1->rcv_buf = DEF_RCV_BUF; 2751 csk1->snd_buf = DEF_SND_BUF; 2752 csk1->seed = DEF_SEED; 2753 2754 *csk = csk1; 2755 return 0; 2756} 2757 2758static void cnic_cm_cleanup(struct cnic_sock *csk) 2759{ 2760 if (csk->src_port) { 2761 struct cnic_dev *dev = csk->dev; 2762 struct cnic_local *cp = dev->cnic_priv; 2763 2764 cnic_free_id(&cp->csk_port_tbl, csk->src_port); 2765 csk->src_port = 0; 2766 } 2767} 2768 2769static void cnic_close_conn(struct cnic_sock *csk) 2770{ 2771 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { 2772 cnic_cm_upload_pg(csk); 2773 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 2774 } 2775 cnic_cm_cleanup(csk); 2776} 2777 2778static int cnic_cm_destroy(struct cnic_sock *csk) 2779{ 2780 if (!cnic_in_use(csk)) 2781 return -EINVAL; 2782 2783 csk_hold(csk); 2784 clear_bit(SK_F_INUSE, &csk->flags); 2785 smp_mb__after_clear_bit(); 2786 while (atomic_read(&csk->ref_count) != 1) 2787 msleep(1); 2788 cnic_cm_cleanup(csk); 2789 2790 csk->flags = 0; 2791 csk_put(csk); 2792 return 0; 2793} 2794 2795static inline u16 cnic_get_vlan(struct net_device *dev, 2796 struct net_device **vlan_dev) 2797{ 2798 if (dev->priv_flags & IFF_802_1Q_VLAN) { 2799 *vlan_dev = vlan_dev_real_dev(dev); 2800 return vlan_dev_vlan_id(dev); 2801 } 2802 *vlan_dev = dev; 2803 return 0; 2804} 2805 2806static int cnic_get_v4_route(struct sockaddr_in *dst_addr, 2807 struct dst_entry **dst) 2808{ 2809#if defined(CONFIG_INET) 2810 struct flowi fl; 2811 int err; 2812 struct rtable *rt; 2813 2814 memset(&fl, 0, sizeof(fl)); 2815 fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr; 2816 2817 err = ip_route_output_key(&init_net, &rt, &fl); 2818 if (!err) 2819 *dst = &rt->dst; 2820 return err; 2821#else 2822 return -ENETUNREACH; 2823#endif 2824} 2825 2826static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, 2827 struct dst_entry **dst) 2828{ 2829#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) 2830 struct flowi fl; 2831 2832 memset(&fl, 0, sizeof(fl)); 2833 ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr); 2834 if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL) 2835 fl.oif = dst_addr->sin6_scope_id; 2836 2837 *dst = ip6_route_output(&init_net, NULL, &fl); 2838 if (*dst) 2839 return 0; 2840#endif 2841 2842 return -ENETUNREACH; 2843} 2844 2845static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, 2846 int ulp_type) 2847{ 2848 struct cnic_dev *dev = NULL; 2849 struct dst_entry *dst; 2850 struct net_device *netdev = NULL; 2851 int err = -ENETUNREACH; 2852 2853 if (dst_addr->sin_family == AF_INET) 2854 err = cnic_get_v4_route(dst_addr, &dst); 2855 else if (dst_addr->sin_family == AF_INET6) { 2856 struct sockaddr_in6 *dst_addr6 = 2857 (struct sockaddr_in6 *) dst_addr; 2858 2859 err = cnic_get_v6_route(dst_addr6, &dst); 2860 } else 2861 return NULL; 2862 2863 if (err) 2864 return NULL; 2865 2866 if (!dst->dev) 2867 goto done; 2868 2869 cnic_get_vlan(dst->dev, &netdev); 2870 2871 dev = cnic_from_netdev(netdev); 2872 2873done: 2874 dst_release(dst); 2875 if (dev) 2876 cnic_put(dev); 2877 return dev; 2878} 2879 2880static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 2881{ 2882 struct cnic_dev *dev = csk->dev; 2883 struct cnic_local *cp = dev->cnic_priv; 2884 2885 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); 2886} 2887 2888static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 2889{ 2890 struct cnic_dev *dev = csk->dev; 2891 struct cnic_local *cp = dev->cnic_priv; 2892 int is_v6, rc = 0; 2893 struct dst_entry *dst = NULL; 2894 struct net_device *realdev; 2895 u32 local_port; 2896 2897 if (saddr->local.v6.sin6_family == AF_INET6 && 2898 saddr->remote.v6.sin6_family == AF_INET6) 2899 is_v6 = 1; 2900 else if (saddr->local.v4.sin_family == AF_INET && 2901 saddr->remote.v4.sin_family == AF_INET) 2902 is_v6 = 0; 2903 else 2904 return -EINVAL; 2905 2906 clear_bit(SK_F_IPV6, &csk->flags); 2907 2908 if (is_v6) { 2909 set_bit(SK_F_IPV6, &csk->flags); 2910 cnic_get_v6_route(&saddr->remote.v6, &dst); 2911 2912 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, 2913 sizeof(struct in6_addr)); 2914 csk->dst_port = saddr->remote.v6.sin6_port; 2915 local_port = saddr->local.v6.sin6_port; 2916 2917 } else { 2918 cnic_get_v4_route(&saddr->remote.v4, &dst); 2919 2920 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; 2921 csk->dst_port = saddr->remote.v4.sin_port; 2922 local_port = saddr->local.v4.sin_port; 2923 } 2924 2925 csk->vlan_id = 0; 2926 csk->mtu = dev->netdev->mtu; 2927 if (dst && dst->dev) { 2928 u16 vlan = cnic_get_vlan(dst->dev, &realdev); 2929 if (realdev == dev->netdev) { 2930 csk->vlan_id = vlan; 2931 csk->mtu = dst_mtu(dst); 2932 } 2933 } 2934 2935 if (local_port >= CNIC_LOCAL_PORT_MIN && 2936 local_port < CNIC_LOCAL_PORT_MAX) { 2937 if (cnic_alloc_id(&cp->csk_port_tbl, local_port)) 2938 local_port = 0; 2939 } else 2940 local_port = 0; 2941 2942 if (!local_port) { 2943 local_port = cnic_alloc_new_id(&cp->csk_port_tbl); 2944 if (local_port == -1) { 2945 rc = -ENOMEM; 2946 goto err_out; 2947 } 2948 } 2949 csk->src_port = local_port; 2950 2951err_out: 2952 dst_release(dst); 2953 return rc; 2954} 2955 2956static void cnic_init_csk_state(struct cnic_sock *csk) 2957{ 2958 csk->state = 0; 2959 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 2960 clear_bit(SK_F_CLOSING, &csk->flags); 2961} 2962 2963static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 2964{ 2965 int err = 0; 2966 2967 if (!cnic_in_use(csk)) 2968 return -EINVAL; 2969 2970 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) 2971 return -EINVAL; 2972 2973 cnic_init_csk_state(csk); 2974 2975 err = cnic_get_route(csk, saddr); 2976 if (err) 2977 goto err_out; 2978 2979 err = cnic_resolve_addr(csk, saddr); 2980 if (!err) 2981 return 0; 2982 2983err_out: 2984 clear_bit(SK_F_CONNECT_START, &csk->flags); 2985 return err; 2986} 2987 2988static int cnic_cm_abort(struct cnic_sock *csk) 2989{ 2990 struct cnic_local *cp = csk->dev->cnic_priv; 2991 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP; 2992 2993 if (!cnic_in_use(csk)) 2994 return -EINVAL; 2995 2996 if (cnic_abort_prep(csk)) 2997 return cnic_cm_abort_req(csk); 2998 2999 /* Getting here means that we haven't started connect, or 3000 * connect was not successful. 3001 */ 3002 3003 cp->close_conn(csk, opcode); 3004 if (csk->state != opcode) 3005 return -EALREADY; 3006 3007 return 0; 3008} 3009 3010static int cnic_cm_close(struct cnic_sock *csk) 3011{ 3012 if (!cnic_in_use(csk)) 3013 return -EINVAL; 3014 3015 if (cnic_close_prep(csk)) { 3016 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3017 return cnic_cm_close_req(csk); 3018 } else { 3019 return -EALREADY; 3020 } 3021 return 0; 3022} 3023 3024static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, 3025 u8 opcode) 3026{ 3027 struct cnic_ulp_ops *ulp_ops; 3028 int ulp_type = csk->ulp_type; 3029 3030 rcu_read_lock(); 3031 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 3032 if (ulp_ops) { 3033 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) 3034 ulp_ops->cm_connect_complete(csk); 3035 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) 3036 ulp_ops->cm_close_complete(csk); 3037 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) 3038 ulp_ops->cm_remote_abort(csk); 3039 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) 3040 ulp_ops->cm_abort_complete(csk); 3041 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) 3042 ulp_ops->cm_remote_close(csk); 3043 } 3044 rcu_read_unlock(); 3045} 3046 3047static int cnic_cm_set_pg(struct cnic_sock *csk) 3048{ 3049 if (cnic_offld_prep(csk)) { 3050 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3051 cnic_cm_update_pg(csk); 3052 else 3053 cnic_cm_offload_pg(csk); 3054 } 3055 return 0; 3056} 3057 3058static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) 3059{ 3060 struct cnic_local *cp = dev->cnic_priv; 3061 u32 l5_cid = kcqe->pg_host_opaque; 3062 u8 opcode = kcqe->op_code; 3063 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 3064 3065 csk_hold(csk); 3066 if (!cnic_in_use(csk)) 3067 goto done; 3068 3069 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3070 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3071 goto done; 3072 } 3073 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */ 3074 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) { 3075 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3076 cnic_cm_upcall(cp, csk, 3077 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3078 goto done; 3079 } 3080 3081 csk->pg_cid = kcqe->pg_cid; 3082 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3083 cnic_cm_conn_req(csk); 3084 3085done: 3086 csk_put(csk); 3087} 3088 3089static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) 3090{ 3091 struct cnic_local *cp = dev->cnic_priv; 3092 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; 3093 u8 opcode = l4kcqe->op_code; 3094 u32 l5_cid; 3095 struct cnic_sock *csk; 3096 3097 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || 3098 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3099 cnic_cm_process_offld_pg(dev, l4kcqe); 3100 return; 3101 } 3102 3103 l5_cid = l4kcqe->conn_id; 3104 if (opcode & 0x80) 3105 l5_cid = l4kcqe->cid; 3106 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3107 return; 3108 3109 csk = &cp->csk_tbl[l5_cid]; 3110 csk_hold(csk); 3111 3112 if (!cnic_in_use(csk)) { 3113 csk_put(csk); 3114 return; 3115 } 3116 3117 switch (opcode) { 3118 case L5CM_RAMROD_CMD_ID_TCP_CONNECT: 3119 if (l4kcqe->status != 0) { 3120 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3121 cnic_cm_upcall(cp, csk, 3122 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3123 } 3124 break; 3125 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: 3126 if (l4kcqe->status == 0) 3127 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); 3128 3129 smp_mb__before_clear_bit(); 3130 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3131 cnic_cm_upcall(cp, csk, opcode); 3132 break; 3133 3134 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3135 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3136 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3137 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3138 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 3139 cp->close_conn(csk, opcode); 3140 break; 3141 3142 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: 3143 cnic_cm_upcall(cp, csk, opcode); 3144 break; 3145 } 3146 csk_put(csk); 3147} 3148 3149static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) 3150{ 3151 struct cnic_dev *dev = data; 3152 int i; 3153 3154 for (i = 0; i < num; i++) 3155 cnic_cm_process_kcqe(dev, kcqe[i]); 3156} 3157 3158static struct cnic_ulp_ops cm_ulp_ops = { 3159 .indicate_kcqes = cnic_cm_indicate_kcqe, 3160}; 3161 3162static void cnic_cm_free_mem(struct cnic_dev *dev) 3163{ 3164 struct cnic_local *cp = dev->cnic_priv; 3165 3166 kfree(cp->csk_tbl); 3167 cp->csk_tbl = NULL; 3168 cnic_free_id_tbl(&cp->csk_port_tbl); 3169} 3170 3171static int cnic_cm_alloc_mem(struct cnic_dev *dev) 3172{ 3173 struct cnic_local *cp = dev->cnic_priv; 3174 3175 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, 3176 GFP_KERNEL); 3177 if (!cp->csk_tbl) 3178 return -ENOMEM; 3179 3180 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, 3181 CNIC_LOCAL_PORT_MIN)) { 3182 cnic_cm_free_mem(dev); 3183 return -ENOMEM; 3184 } 3185 return 0; 3186} 3187 3188static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) 3189{ 3190 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 3191 /* Unsolicited RESET_COMP or RESET_RECEIVED */ 3192 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED; 3193 csk->state = opcode; 3194 } 3195 3196 /* 1. If event opcode matches the expected event in csk->state 3197 * 2. If the expected event is CLOSE_COMP, we accept any event 3198 * 3. If the expected event is 0, meaning the connection was never 3199 * never established, we accept the opcode from cm_abort. 3200 */ 3201 if (opcode == csk->state || csk->state == 0 || 3202 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) { 3203 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) { 3204 if (csk->state == 0) 3205 csk->state = opcode; 3206 return 1; 3207 } 3208 } 3209 return 0; 3210} 3211 3212static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) 3213{ 3214 struct cnic_dev *dev = csk->dev; 3215 struct cnic_local *cp = dev->cnic_priv; 3216 3217 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) { 3218 cnic_cm_upcall(cp, csk, opcode); 3219 return; 3220 } 3221 3222 clear_bit(SK_F_CONNECT_START, &csk->flags); 3223 cnic_close_conn(csk); 3224 csk->state = opcode; 3225 cnic_cm_upcall(cp, csk, opcode); 3226} 3227 3228static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) 3229{ 3230} 3231 3232static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) 3233{ 3234 u32 seed; 3235 3236 get_random_bytes(&seed, 4); 3237 cnic_ctx_wr(dev, 45, 0, seed); 3238 return 0; 3239} 3240 3241static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode) 3242{ 3243 struct cnic_dev *dev = csk->dev; 3244 struct cnic_local *cp = dev->cnic_priv; 3245 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid]; 3246 union l5cm_specific_data l5_data; 3247 u32 cmd = 0; 3248 int close_complete = 0; 3249 3250 switch (opcode) { 3251 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 3252 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3253 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3254 if (cnic_ready_to_close(csk, opcode)) { 3255 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3256 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE; 3257 else 3258 close_complete = 1; 3259 } 3260 break; 3261 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 3262 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD; 3263 break; 3264 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 3265 close_complete = 1; 3266 break; 3267 } 3268 if (cmd) { 3269 memset(&l5_data, 0, sizeof(l5_data)); 3270 3271 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE, 3272 &l5_data); 3273 } else if (close_complete) { 3274 ctx->timestamp = jiffies; 3275 cnic_close_conn(csk); 3276 cnic_cm_upcall(cp, csk, csk->state); 3277 } 3278} 3279 3280static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev) 3281{ 3282} 3283 3284static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) 3285{ 3286 struct cnic_local *cp = dev->cnic_priv; 3287 int func = CNIC_FUNC(cp); 3288 3289 cnic_init_bnx2x_mac(dev); 3290 cnic_bnx2x_set_tcp_timestamp(dev, 1); 3291 3292 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 3293 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0); 3294 3295 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3296 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1); 3297 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3298 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func), 3299 DEF_MAX_DA_COUNT); 3300 3301 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3302 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL); 3303 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3304 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS); 3305 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 3306 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2); 3307 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 3308 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER); 3309 3310 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func), 3311 DEF_MAX_CWND); 3312 return 0; 3313} 3314 3315static int cnic_cm_open(struct cnic_dev *dev) 3316{ 3317 struct cnic_local *cp = dev->cnic_priv; 3318 int err; 3319 3320 err = cnic_cm_alloc_mem(dev); 3321 if (err) 3322 return err; 3323 3324 err = cp->start_cm(dev); 3325 3326 if (err) 3327 goto err_out; 3328 3329 dev->cm_create = cnic_cm_create; 3330 dev->cm_destroy = cnic_cm_destroy; 3331 dev->cm_connect = cnic_cm_connect; 3332 dev->cm_abort = cnic_cm_abort; 3333 dev->cm_close = cnic_cm_close; 3334 dev->cm_select_dev = cnic_cm_select_dev; 3335 3336 cp->ulp_handle[CNIC_ULP_L4] = dev; 3337 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); 3338 return 0; 3339 3340err_out: 3341 cnic_cm_free_mem(dev); 3342 return err; 3343} 3344 3345static int cnic_cm_shutdown(struct cnic_dev *dev) 3346{ 3347 struct cnic_local *cp = dev->cnic_priv; 3348 int i; 3349 3350 cp->stop_cm(dev); 3351 3352 if (!cp->csk_tbl) 3353 return 0; 3354 3355 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { 3356 struct cnic_sock *csk = &cp->csk_tbl[i]; 3357 3358 clear_bit(SK_F_INUSE, &csk->flags); 3359 cnic_cm_cleanup(csk); 3360 } 3361 cnic_cm_free_mem(dev); 3362 3363 return 0; 3364} 3365 3366static void cnic_init_context(struct cnic_dev *dev, u32 cid) 3367{ 3368 u32 cid_addr; 3369 int i; 3370 3371 cid_addr = GET_CID_ADDR(cid); 3372 3373 for (i = 0; i < CTX_SIZE; i += 4) 3374 cnic_ctx_wr(dev, cid_addr, i, 0); 3375} 3376 3377static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) 3378{ 3379 struct cnic_local *cp = dev->cnic_priv; 3380 int ret = 0, i; 3381 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; 3382 3383 if (CHIP_NUM(cp) != CHIP_NUM_5709) 3384 return 0; 3385 3386 for (i = 0; i < cp->ctx_blks; i++) { 3387 int j; 3388 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; 3389 u32 val; 3390 3391 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); 3392 3393 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, 3394 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); 3395 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, 3396 (u64) cp->ctx_arr[i].mapping >> 32); 3397 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | 3398 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 3399 for (j = 0; j < 10; j++) { 3400 3401 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); 3402 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) 3403 break; 3404 udelay(5); 3405 } 3406 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { 3407 ret = -EBUSY; 3408 break; 3409 } 3410 } 3411 return ret; 3412} 3413 3414static void cnic_free_irq(struct cnic_dev *dev) 3415{ 3416 struct cnic_local *cp = dev->cnic_priv; 3417 struct cnic_eth_dev *ethdev = cp->ethdev; 3418 3419 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3420 cp->disable_int_sync(dev); 3421 tasklet_disable(&cp->cnic_irq_task); 3422 free_irq(ethdev->irq_arr[0].vector, dev); 3423 } 3424} 3425 3426static int cnic_init_bnx2_irq(struct cnic_dev *dev) 3427{ 3428 struct cnic_local *cp = dev->cnic_priv; 3429 struct cnic_eth_dev *ethdev = cp->ethdev; 3430 3431 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3432 int err, i = 0; 3433 int sblk_num = cp->status_blk_num; 3434 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + 3435 BNX2_HC_SB_CONFIG_1; 3436 3437 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); 3438 3439 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); 3440 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); 3441 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); 3442 3443 cp->last_status_idx = cp->status_blk.bnx2->status_idx; 3444 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, 3445 (unsigned long) dev); 3446 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, 3447 "cnic", dev); 3448 if (err) { 3449 tasklet_disable(&cp->cnic_irq_task); 3450 return err; 3451 } 3452 while (cp->status_blk.bnx2->status_completion_producer_index && 3453 i < 10) { 3454 CNIC_WR(dev, BNX2_HC_COALESCE_NOW, 3455 1 << (11 + sblk_num)); 3456 udelay(10); 3457 i++; 3458 barrier(); 3459 } 3460 if (cp->status_blk.bnx2->status_completion_producer_index) { 3461 cnic_free_irq(dev); 3462 goto failed; 3463 } 3464 3465 } else { 3466 struct status_block *sblk = cp->status_blk.gen; 3467 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); 3468 int i = 0; 3469 3470 while (sblk->status_completion_producer_index && i < 10) { 3471 CNIC_WR(dev, BNX2_HC_COMMAND, 3472 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 3473 udelay(10); 3474 i++; 3475 barrier(); 3476 } 3477 if (sblk->status_completion_producer_index) 3478 goto failed; 3479 3480 } 3481 return 0; 3482 3483failed: 3484 netdev_err(dev->netdev, "KCQ index not resetting to 0\n"); 3485 return -EBUSY; 3486} 3487 3488static void cnic_enable_bnx2_int(struct cnic_dev *dev) 3489{ 3490 struct cnic_local *cp = dev->cnic_priv; 3491 struct cnic_eth_dev *ethdev = cp->ethdev; 3492 3493 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 3494 return; 3495 3496 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 3497 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 3498} 3499 3500static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 3501{ 3502 struct cnic_local *cp = dev->cnic_priv; 3503 struct cnic_eth_dev *ethdev = cp->ethdev; 3504 3505 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 3506 return; 3507 3508 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 3509 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 3510 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); 3511 synchronize_irq(ethdev->irq_arr[0].vector); 3512} 3513 3514static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) 3515{ 3516 struct cnic_local *cp = dev->cnic_priv; 3517 struct cnic_eth_dev *ethdev = cp->ethdev; 3518 u32 cid_addr, tx_cid, sb_id; 3519 u32 val, offset0, offset1, offset2, offset3; 3520 int i; 3521 struct tx_bd *txbd; 3522 dma_addr_t buf_map; 3523 struct status_block *s_blk = cp->status_blk.gen; 3524 3525 sb_id = cp->status_blk_num; 3526 tx_cid = 20; 3527 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; 3528 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3529 struct status_block_msix *sblk = cp->status_blk.bnx2; 3530 3531 tx_cid = TX_TSS_CID + sb_id - 1; 3532 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | 3533 (TX_TSS_CID << 7)); 3534 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; 3535 } 3536 cp->tx_cons = *cp->tx_cons_ptr; 3537 3538 cid_addr = GET_CID_ADDR(tx_cid); 3539 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 3540 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; 3541 3542 for (i = 0; i < PHY_CTX_SIZE; i += 4) 3543 cnic_ctx_wr(dev, cid_addr2, i, 0); 3544 3545 offset0 = BNX2_L2CTX_TYPE_XI; 3546 offset1 = BNX2_L2CTX_CMD_TYPE_XI; 3547 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; 3548 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; 3549 } else { 3550 cnic_init_context(dev, tx_cid); 3551 cnic_init_context(dev, tx_cid + 1); 3552 3553 offset0 = BNX2_L2CTX_TYPE; 3554 offset1 = BNX2_L2CTX_CMD_TYPE; 3555 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; 3556 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; 3557 } 3558 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; 3559 cnic_ctx_wr(dev, cid_addr, offset0, val); 3560 3561 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 3562 cnic_ctx_wr(dev, cid_addr, offset1, val); 3563 3564 txbd = (struct tx_bd *) cp->l2_ring; 3565 3566 buf_map = cp->l2_buf_map; 3567 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { 3568 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; 3569 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 3570 } 3571 val = (u64) cp->l2_ring_map >> 32; 3572 cnic_ctx_wr(dev, cid_addr, offset2, val); 3573 txbd->tx_bd_haddr_hi = val; 3574 3575 val = (u64) cp->l2_ring_map & 0xffffffff; 3576 cnic_ctx_wr(dev, cid_addr, offset3, val); 3577 txbd->tx_bd_haddr_lo = val; 3578} 3579 3580static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) 3581{ 3582 struct cnic_local *cp = dev->cnic_priv; 3583 struct cnic_eth_dev *ethdev = cp->ethdev; 3584 u32 cid_addr, sb_id, val, coal_reg, coal_val; 3585 int i; 3586 struct rx_bd *rxbd; 3587 struct status_block *s_blk = cp->status_blk.gen; 3588 3589 sb_id = cp->status_blk_num; 3590 cnic_init_context(dev, 2); 3591 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; 3592 coal_reg = BNX2_HC_COMMAND; 3593 coal_val = CNIC_RD(dev, coal_reg); 3594 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3595 struct status_block_msix *sblk = cp->status_blk.bnx2; 3596 3597 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; 3598 coal_reg = BNX2_HC_COALESCE_NOW; 3599 coal_val = 1 << (11 + sb_id); 3600 } 3601 i = 0; 3602 while (!(*cp->rx_cons_ptr != 0) && i < 10) { 3603 CNIC_WR(dev, coal_reg, coal_val); 3604 udelay(10); 3605 i++; 3606 barrier(); 3607 } 3608 cp->rx_cons = *cp->rx_cons_ptr; 3609 3610 cid_addr = GET_CID_ADDR(2); 3611 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 3612 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 3613 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); 3614 3615 if (sb_id == 0) 3616 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; 3617 else 3618 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); 3619 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 3620 3621 rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE); 3622 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { 3623 dma_addr_t buf_map; 3624 int n = (i % cp->l2_rx_ring_size) + 1; 3625 3626 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); 3627 rxbd->rx_bd_len = cp->l2_single_buf_size; 3628 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; 3629 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; 3630 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 3631 } 3632 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; 3633 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 3634 rxbd->rx_bd_haddr_hi = val; 3635 3636 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff; 3637 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 3638 rxbd->rx_bd_haddr_lo = val; 3639 3640 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); 3641 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); 3642} 3643 3644static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) 3645{ 3646 struct kwqe *wqes[1], l2kwqe; 3647 3648 memset(&l2kwqe, 0, sizeof(l2kwqe)); 3649 wqes[0] = &l2kwqe; 3650 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) | 3651 (L2_KWQE_OPCODE_VALUE_FLUSH << 3652 KWQE_OPCODE_SHIFT) | 2; 3653 dev->submit_kwqes(dev, wqes, 1); 3654} 3655 3656static void cnic_set_bnx2_mac(struct cnic_dev *dev) 3657{ 3658 struct cnic_local *cp = dev->cnic_priv; 3659 u32 val; 3660 3661 val = cp->func << 2; 3662 3663 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); 3664 3665 val = cnic_reg_rd_ind(dev, cp->shmem_base + 3666 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); 3667 dev->mac_addr[0] = (u8) (val >> 8); 3668 dev->mac_addr[1] = (u8) val; 3669 3670 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); 3671 3672 val = cnic_reg_rd_ind(dev, cp->shmem_base + 3673 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); 3674 dev->mac_addr[2] = (u8) (val >> 24); 3675 dev->mac_addr[3] = (u8) (val >> 16); 3676 dev->mac_addr[4] = (u8) (val >> 8); 3677 dev->mac_addr[5] = (u8) val; 3678 3679 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); 3680 3681 val = 4 | BNX2_RPM_SORT_USER2_BC_EN; 3682 if (CHIP_NUM(cp) != CHIP_NUM_5709) 3683 val |= BNX2_RPM_SORT_USER2_PROM_VLAN; 3684 3685 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); 3686 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); 3687 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); 3688} 3689 3690static int cnic_start_bnx2_hw(struct cnic_dev *dev) 3691{ 3692 struct cnic_local *cp = dev->cnic_priv; 3693 struct cnic_eth_dev *ethdev = cp->ethdev; 3694 struct status_block *sblk = cp->status_blk.gen; 3695 u32 val, kcq_cid_addr, kwq_cid_addr; 3696 int err; 3697 3698 cnic_set_bnx2_mac(dev); 3699 3700 val = CNIC_RD(dev, BNX2_MQ_CONFIG); 3701 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 3702 if (BCM_PAGE_BITS > 12) 3703 val |= (12 - 8) << 4; 3704 else 3705 val |= (BCM_PAGE_BITS - 8) << 4; 3706 3707 CNIC_WR(dev, BNX2_MQ_CONFIG, val); 3708 3709 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8); 3710 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220); 3711 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220); 3712 3713 err = cnic_setup_5709_context(dev, 1); 3714 if (err) 3715 return err; 3716 3717 cnic_init_context(dev, KWQ_CID); 3718 cnic_init_context(dev, KCQ_CID); 3719 3720 kwq_cid_addr = GET_CID_ADDR(KWQ_CID); 3721 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; 3722 3723 cp->max_kwq_idx = MAX_KWQ_IDX; 3724 cp->kwq_prod_idx = 0; 3725 cp->kwq_con_idx = 0; 3726 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); 3727 3728 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708) 3729 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15; 3730 else 3731 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index; 3732 3733 /* Initialize the kernel work queue context. */ 3734 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 3735 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 3736 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); 3737 3738 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; 3739 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 3740 3741 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; 3742 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 3743 3744 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); 3745 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 3746 3747 val = (u32) cp->kwq_info.pgtbl_map; 3748 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 3749 3750 kcq_cid_addr = GET_CID_ADDR(KCQ_CID); 3751 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; 3752 3753 cp->kcq1.sw_prod_idx = 0; 3754 cp->kcq1.hw_prod_idx_ptr = 3755 (u16 *) &sblk->status_completion_producer_index; 3756 3757 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx; 3758 3759 /* Initialize the kernel complete queue context. */ 3760 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 3761 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 3762 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); 3763 3764 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; 3765 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 3766 3767 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; 3768 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 3769 3770 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); 3771 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); 3772 3773 val = (u32) cp->kcq1.dma.pgtbl_map; 3774 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); 3775 3776 cp->int_num = 0; 3777 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3778 struct status_block_msix *msblk = cp->status_blk.bnx2; 3779 u32 sb_id = cp->status_blk_num; 3780 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); 3781 3782 cp->kcq1.hw_prod_idx_ptr = 3783 (u16 *) &msblk->status_completion_producer_index; 3784 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx; 3785 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index; 3786 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; 3787 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 3788 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); 3789 } 3790 3791 /* Enable Commnad Scheduler notification when we write to the 3792 * host producer index of the kernel contexts. */ 3793 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2); 3794 3795 /* Enable Command Scheduler notification when we write to either 3796 * the Send Queue or Receive Queue producer indexes of the kernel 3797 * bypass contexts. */ 3798 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7); 3799 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7); 3800 3801 /* Notify COM when the driver post an application buffer. */ 3802 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000); 3803 3804 /* Set the CP and COM doorbells. These two processors polls the 3805 * doorbell for a non zero value before running. This must be done 3806 * after setting up the kernel queue contexts. */ 3807 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1); 3808 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1); 3809 3810 cnic_init_bnx2_tx_ring(dev); 3811 cnic_init_bnx2_rx_ring(dev); 3812 3813 err = cnic_init_bnx2_irq(dev); 3814 if (err) { 3815 netdev_err(dev->netdev, "cnic_init_irq failed\n"); 3816 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 3817 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); 3818 return err; 3819 } 3820 3821 return 0; 3822} 3823 3824static void cnic_setup_bnx2x_context(struct cnic_dev *dev) 3825{ 3826 struct cnic_local *cp = dev->cnic_priv; 3827 struct cnic_eth_dev *ethdev = cp->ethdev; 3828 u32 start_offset = ethdev->ctx_tbl_offset; 3829 int i; 3830 3831 for (i = 0; i < cp->ctx_blks; i++) { 3832 struct cnic_ctx *ctx = &cp->ctx_arr[i]; 3833 dma_addr_t map = ctx->mapping; 3834 3835 if (cp->ctx_align) { 3836 unsigned long mask = cp->ctx_align - 1; 3837 3838 map = (map + mask) & ~mask; 3839 } 3840 3841 cnic_ctx_tbl_wr(dev, start_offset + i, map); 3842 } 3843} 3844 3845static int cnic_init_bnx2x_irq(struct cnic_dev *dev) 3846{ 3847 struct cnic_local *cp = dev->cnic_priv; 3848 struct cnic_eth_dev *ethdev = cp->ethdev; 3849 int err = 0; 3850 3851 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh, 3852 (unsigned long) dev); 3853 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 3854 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, 3855 "cnic", dev); 3856 if (err) 3857 tasklet_disable(&cp->cnic_irq_task); 3858 } 3859 return err; 3860} 3861 3862static void cnic_enable_bnx2x_int(struct cnic_dev *dev) 3863{ 3864 struct cnic_local *cp = dev->cnic_priv; 3865 u8 sb_id = cp->status_blk_num; 3866 int port = CNIC_PORT(cp); 3867 3868 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 3869 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, 3870 HC_INDEX_C_ISCSI_EQ_CONS), 3871 64 / 12); 3872 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 3873 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, 3874 HC_INDEX_C_ISCSI_EQ_CONS), 0); 3875} 3876 3877static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev) 3878{ 3879} 3880 3881static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev) 3882{ 3883 struct cnic_local *cp = dev->cnic_priv; 3884 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring; 3885 struct eth_context *context; 3886 struct regpair context_addr; 3887 dma_addr_t buf_map; 3888 int func = CNIC_FUNC(cp); 3889 int port = CNIC_PORT(cp); 3890 int i; 3891 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 3892 u32 val; 3893 3894 memset(txbd, 0, BCM_PAGE_SIZE); 3895 3896 buf_map = cp->l2_buf_map; 3897 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { 3898 struct eth_tx_start_bd *start_bd = &txbd->start_bd; 3899 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); 3900 3901 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 3902 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 3903 reg_bd->addr_hi = start_bd->addr_hi; 3904 reg_bd->addr_lo = start_bd->addr_lo + 0x10; 3905 start_bd->nbytes = cpu_to_le16(0x10); 3906 start_bd->nbd = cpu_to_le16(3); 3907 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 3908 start_bd->general_data = (UNICAST_ADDRESS << 3909 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 3910 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 3911 3912 } 3913 context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr); 3914 3915 val = (u64) cp->l2_ring_map >> 32; 3916 txbd->next_bd.addr_hi = cpu_to_le32(val); 3917 3918 context->xstorm_st_context.tx_bd_page_base_hi = val; 3919 3920 val = (u64) cp->l2_ring_map & 0xffffffff; 3921 txbd->next_bd.addr_lo = cpu_to_le32(val); 3922 3923 context->xstorm_st_context.tx_bd_page_base_lo = val; 3924 3925 context->cstorm_st_context.sb_index_number = 3926 HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS; 3927 context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID; 3928 3929 if (cli < MAX_X_STAT_COUNTER_ID) 3930 context->xstorm_st_context.statistics_data = cli | 3931 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE; 3932 3933 context->xstorm_ag_context.cdu_reserved = 3934 CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func), 3935 CDU_REGION_NUMBER_XCM_AG, 3936 ETH_CONNECTION_TYPE); 3937 3938 /* reset xstorm per client statistics */ 3939 if (cli < MAX_X_STAT_COUNTER_ID) { 3940 val = BAR_XSTRORM_INTMEM + 3941 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 3942 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) 3943 CNIC_WR(dev, val + i * 4, 0); 3944 } 3945 3946 cp->tx_cons_ptr = 3947 &cp->bnx2x_def_status_blk->c_def_status_block.index_values[ 3948 HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS]; 3949} 3950 3951static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev) 3952{ 3953 struct cnic_local *cp = dev->cnic_priv; 3954 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring + 3955 BCM_PAGE_SIZE); 3956 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) 3957 (cp->l2_ring + (2 * BCM_PAGE_SIZE)); 3958 struct eth_context *context; 3959 struct regpair context_addr; 3960 int i; 3961 int port = CNIC_PORT(cp); 3962 int func = CNIC_FUNC(cp); 3963 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 3964 u32 val; 3965 struct tstorm_eth_client_config tstorm_client = {0}; 3966 3967 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { 3968 dma_addr_t buf_map; 3969 int n = (i % cp->l2_rx_ring_size) + 1; 3970 3971 buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); 3972 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 3973 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 3974 } 3975 context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr); 3976 3977 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; 3978 rxbd->addr_hi = cpu_to_le32(val); 3979 3980 context->ustorm_st_context.common.bd_page_base_hi = val; 3981 3982 val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff; 3983 rxbd->addr_lo = cpu_to_le32(val); 3984 3985 context->ustorm_st_context.common.bd_page_base_lo = val; 3986 3987 context->ustorm_st_context.common.sb_index_numbers = 3988 BNX2X_ISCSI_RX_SB_INDEX_NUM; 3989 context->ustorm_st_context.common.clientId = cli; 3990 context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID; 3991 if (cli < MAX_U_STAT_COUNTER_ID) { 3992 context->ustorm_st_context.common.flags = 3993 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS; 3994 context->ustorm_st_context.common.statistics_counter_id = cli; 3995 } 3996 context->ustorm_st_context.common.mc_alignment_log_size = 0; 3997 context->ustorm_st_context.common.bd_buff_size = 3998 cp->l2_single_buf_size; 3999 4000 context->ustorm_ag_context.cdu_usage = 4001 CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func), 4002 CDU_REGION_NUMBER_UCM_AG, 4003 ETH_CONNECTION_TYPE); 4004 4005 rxcqe += BNX2X_MAX_RCQ_DESC_CNT; 4006 val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32; 4007 rxcqe->addr_hi = cpu_to_le32(val); 4008 4009 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4010 USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val); 4011 4012 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4013 USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val); 4014 4015 val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; 4016 rxcqe->addr_lo = cpu_to_le32(val); 4017 4018 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4019 USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val); 4020 4021 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4022 USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val); 4023 4024 /* client tstorm info */ 4025 tstorm_client.mtu = cp->l2_single_buf_size - 14; 4026 tstorm_client.config_flags = TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE; 4027 4028 if (cli < MAX_T_STAT_COUNTER_ID) { 4029 tstorm_client.config_flags |= 4030 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; 4031 tstorm_client.statistics_counter_id = cli; 4032 } 4033 4034 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4035 TSTORM_CLIENT_CONFIG_OFFSET(port, cli), 4036 ((u32 *)&tstorm_client)[0]); 4037 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4038 TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4, 4039 ((u32 *)&tstorm_client)[1]); 4040 4041 /* reset tstorm per client statistics */ 4042 if (cli < MAX_T_STAT_COUNTER_ID) { 4043 4044 val = BAR_TSTRORM_INTMEM + 4045 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4046 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) 4047 CNIC_WR(dev, val + i * 4, 0); 4048 } 4049 4050 /* reset ustorm per client statistics */ 4051 if (cli < MAX_U_STAT_COUNTER_ID) { 4052 val = BAR_USTRORM_INTMEM + 4053 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli); 4054 for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++) 4055 CNIC_WR(dev, val + i * 4, 0); 4056 } 4057 4058 cp->rx_cons_ptr = 4059 &cp->bnx2x_def_status_blk->u_def_status_block.index_values[ 4060 HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS]; 4061} 4062 4063static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) 4064{ 4065 struct cnic_local *cp = dev->cnic_priv; 4066 u32 base, addr, val; 4067 int port = CNIC_PORT(cp); 4068 4069 dev->max_iscsi_conn = 0; 4070 base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR); 4071 if (base < 0xa0000 || base >= 0xc0000) 4072 return; 4073 4074 addr = BNX2X_SHMEM_ADDR(base, 4075 dev_info.port_hw_config[port].iscsi_mac_upper); 4076 4077 val = CNIC_RD(dev, addr); 4078 4079 dev->mac_addr[0] = (u8) (val >> 8); 4080 dev->mac_addr[1] = (u8) val; 4081 4082 addr = BNX2X_SHMEM_ADDR(base, 4083 dev_info.port_hw_config[port].iscsi_mac_lower); 4084 4085 val = CNIC_RD(dev, addr); 4086 4087 dev->mac_addr[2] = (u8) (val >> 24); 4088 dev->mac_addr[3] = (u8) (val >> 16); 4089 dev->mac_addr[4] = (u8) (val >> 8); 4090 dev->mac_addr[5] = (u8) val; 4091 4092 addr = BNX2X_SHMEM_ADDR(base, validity_map[port]); 4093 val = CNIC_RD(dev, addr); 4094 4095 if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) { 4096 u16 val16; 4097 4098 addr = BNX2X_SHMEM_ADDR(base, 4099 drv_lic_key[port].max_iscsi_init_conn); 4100 val16 = CNIC_RD16(dev, addr); 4101 4102 if (val16) 4103 val16 ^= 0x1e1e; 4104 dev->max_iscsi_conn = val16; 4105 } 4106 if (BNX2X_CHIP_IS_E1H(cp->chip_id)) { 4107 int func = CNIC_FUNC(cp); 4108 4109 addr = BNX2X_SHMEM_ADDR(base, 4110 mf_cfg.func_mf_config[func].e1hov_tag); 4111 val = CNIC_RD(dev, addr); 4112 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 4113 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 4114 addr = BNX2X_SHMEM_ADDR(base, 4115 mf_cfg.func_mf_config[func].config); 4116 val = CNIC_RD(dev, addr); 4117 val &= FUNC_MF_CFG_PROTOCOL_MASK; 4118 if (val != FUNC_MF_CFG_PROTOCOL_ISCSI) 4119 dev->max_iscsi_conn = 0; 4120 } 4121 } 4122} 4123 4124static int cnic_start_bnx2x_hw(struct cnic_dev *dev) 4125{ 4126 struct cnic_local *cp = dev->cnic_priv; 4127 int func = CNIC_FUNC(cp), ret, i; 4128 int port = CNIC_PORT(cp); 4129 u16 eq_idx; 4130 u8 sb_id = cp->status_blk_num; 4131 4132 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, 4133 cp->iscsi_start_cid); 4134 4135 if (ret) 4136 return -ENOMEM; 4137 4138 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + 4139 CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0); 4140 cp->kcq1.sw_prod_idx = 0; 4141 4142 cp->kcq1.hw_prod_idx_ptr = 4143 &cp->status_blk.bnx2x->c_status_block.index_values[ 4144 HC_INDEX_C_ISCSI_EQ_CONS]; 4145 cp->kcq1.status_idx_ptr = 4146 &cp->status_blk.bnx2x->c_status_block.status_block_index; 4147 4148 cnic_get_bnx2x_iscsi_info(dev); 4149 4150 /* Only 1 EQ */ 4151 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); 4152 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4153 CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0); 4154 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4155 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0), 4156 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff); 4157 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4158 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4, 4159 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); 4160 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4161 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0), 4162 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); 4163 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4164 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4, 4165 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); 4166 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4167 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1); 4168 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4169 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num); 4170 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4171 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0), 4172 HC_INDEX_C_ISCSI_EQ_CONS); 4173 4174 for (i = 0; i < cp->conn_buf_info.num_pages; i++) { 4175 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4176 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i), 4177 cp->conn_buf_info.pgtbl[2 * i]); 4178 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 4179 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4, 4180 cp->conn_buf_info.pgtbl[(2 * i) + 1]); 4181 } 4182 4183 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4184 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func), 4185 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); 4186 CNIC_WR(dev, BAR_USTRORM_INTMEM + 4187 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4, 4188 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); 4189 4190 cnic_setup_bnx2x_context(dev); 4191 4192 eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM + 4193 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) + 4194 offsetof(struct cstorm_status_block_c, 4195 index_values[HC_INDEX_C_ISCSI_EQ_CONS])); 4196 if (eq_idx != 0) { 4197 netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx); 4198 return -EBUSY; 4199 } 4200 ret = cnic_init_bnx2x_irq(dev); 4201 if (ret) 4202 return ret; 4203 4204 cnic_init_bnx2x_tx_ring(dev); 4205 cnic_init_bnx2x_rx_ring(dev); 4206 4207 return 0; 4208} 4209 4210static void cnic_init_rings(struct cnic_dev *dev) 4211{ 4212 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 4213 cnic_init_bnx2_tx_ring(dev); 4214 cnic_init_bnx2_rx_ring(dev); 4215 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4216 struct cnic_local *cp = dev->cnic_priv; 4217 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4218 union l5cm_specific_data l5_data; 4219 struct ustorm_eth_rx_producers rx_prods = {0}; 4220 u32 off, i; 4221 4222 rx_prods.bd_prod = 0; 4223 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; 4224 barrier(); 4225 4226 off = BAR_USTRORM_INTMEM + 4227 USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli); 4228 4229 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 4230 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 4231 4232 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 4233 4234 cnic_init_bnx2x_tx_ring(dev); 4235 cnic_init_bnx2x_rx_ring(dev); 4236 4237 l5_data.phy_address.lo = cli; 4238 l5_data.phy_address.hi = 0; 4239 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 4240 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); 4241 i = 0; 4242 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 4243 ++i < 10) 4244 msleep(1); 4245 4246 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 4247 netdev_err(dev->netdev, 4248 "iSCSI CLIENT_SETUP did not complete\n"); 4249 cnic_kwq_completion(dev, 1); 4250 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1); 4251 } 4252} 4253 4254static void cnic_shutdown_rings(struct cnic_dev *dev) 4255{ 4256 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 4257 cnic_shutdown_bnx2_rx_ring(dev); 4258 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4259 struct cnic_local *cp = dev->cnic_priv; 4260 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4261 union l5cm_specific_data l5_data; 4262 int i; 4263 4264 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0); 4265 4266 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 4267 4268 l5_data.phy_address.lo = cli; 4269 l5_data.phy_address.hi = 0; 4270 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, 4271 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); 4272 i = 0; 4273 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 4274 ++i < 10) 4275 msleep(1); 4276 4277 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 4278 netdev_err(dev->netdev, 4279 "iSCSI CLIENT_HALT did not complete\n"); 4280 cnic_kwq_completion(dev, 1); 4281 4282 memset(&l5_data, 0, sizeof(l5_data)); 4283 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, 4284 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE | 4285 (1 << SPE_HDR_COMMON_RAMROD_SHIFT), &l5_data); 4286 msleep(10); 4287 } 4288} 4289 4290static int cnic_register_netdev(struct cnic_dev *dev) 4291{ 4292 struct cnic_local *cp = dev->cnic_priv; 4293 struct cnic_eth_dev *ethdev = cp->ethdev; 4294 int err; 4295 4296 if (!ethdev) 4297 return -ENODEV; 4298 4299 if (ethdev->drv_state & CNIC_DRV_STATE_REGD) 4300 return 0; 4301 4302 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); 4303 if (err) 4304 netdev_err(dev->netdev, "register_cnic failed\n"); 4305 4306 return err; 4307} 4308 4309static void cnic_unregister_netdev(struct cnic_dev *dev) 4310{ 4311 struct cnic_local *cp = dev->cnic_priv; 4312 struct cnic_eth_dev *ethdev = cp->ethdev; 4313 4314 if (!ethdev) 4315 return; 4316 4317 ethdev->drv_unregister_cnic(dev->netdev); 4318} 4319 4320static int cnic_start_hw(struct cnic_dev *dev) 4321{ 4322 struct cnic_local *cp = dev->cnic_priv; 4323 struct cnic_eth_dev *ethdev = cp->ethdev; 4324 int err; 4325 4326 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) 4327 return -EALREADY; 4328 4329 dev->regview = ethdev->io_base; 4330 cp->chip_id = ethdev->chip_id; 4331 pci_dev_get(dev->pcidev); 4332 cp->func = PCI_FUNC(dev->pcidev->devfn); 4333 cp->status_blk.gen = ethdev->irq_arr[0].status_blk; 4334 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; 4335 4336 err = cp->alloc_resc(dev); 4337 if (err) { 4338 netdev_err(dev->netdev, "allocate resource failure\n"); 4339 goto err1; 4340 } 4341 4342 err = cp->start_hw(dev); 4343 if (err) 4344 goto err1; 4345 4346 err = cnic_cm_open(dev); 4347 if (err) 4348 goto err1; 4349 4350 set_bit(CNIC_F_CNIC_UP, &dev->flags); 4351 4352 cp->enable_int(dev); 4353 4354 return 0; 4355 4356err1: 4357 cp->free_resc(dev); 4358 pci_dev_put(dev->pcidev); 4359 return err; 4360} 4361 4362static void cnic_stop_bnx2_hw(struct cnic_dev *dev) 4363{ 4364 cnic_disable_bnx2_int_sync(dev); 4365 4366 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 4367 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); 4368 4369 cnic_init_context(dev, KWQ_CID); 4370 cnic_init_context(dev, KCQ_CID); 4371 4372 cnic_setup_5709_context(dev, 0); 4373 cnic_free_irq(dev); 4374 4375 cnic_free_resc(dev); 4376} 4377 4378 4379static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) 4380{ 4381 struct cnic_local *cp = dev->cnic_priv; 4382 u8 sb_id = cp->status_blk_num; 4383 int port = CNIC_PORT(cp); 4384 4385 cnic_free_irq(dev); 4386 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 4387 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) + 4388 offsetof(struct cstorm_status_block_c, 4389 index_values[HC_INDEX_C_ISCSI_EQ_CONS]), 4390 0); 4391 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 4392 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->func, 0), 0); 4393 CNIC_WR16(dev, cp->kcq1.io_addr, 0); 4394 cnic_free_resc(dev); 4395} 4396 4397static void cnic_stop_hw(struct cnic_dev *dev) 4398{ 4399 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 4400 struct cnic_local *cp = dev->cnic_priv; 4401 int i = 0; 4402 4403 /* Need to wait for the ring shutdown event to complete 4404 * before clearing the CNIC_UP flag. 4405 */ 4406 while (cp->uio_dev != -1 && i < 15) { 4407 msleep(100); 4408 i++; 4409 } 4410 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 4411 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); 4412 synchronize_rcu(); 4413 cnic_cm_shutdown(dev); 4414 cp->stop_hw(dev); 4415 pci_dev_put(dev->pcidev); 4416 } 4417} 4418 4419static void cnic_free_dev(struct cnic_dev *dev) 4420{ 4421 int i = 0; 4422 4423 while ((atomic_read(&dev->ref_count) != 0) && i < 10) { 4424 msleep(100); 4425 i++; 4426 } 4427 if (atomic_read(&dev->ref_count) != 0) 4428 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n"); 4429 4430 netdev_info(dev->netdev, "Removed CNIC device\n"); 4431 dev_put(dev->netdev); 4432 kfree(dev); 4433} 4434 4435static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, 4436 struct pci_dev *pdev) 4437{ 4438 struct cnic_dev *cdev; 4439 struct cnic_local *cp; 4440 int alloc_size; 4441 4442 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); 4443 4444 cdev = kzalloc(alloc_size , GFP_KERNEL); 4445 if (cdev == NULL) { 4446 netdev_err(dev, "allocate dev struct failure\n"); 4447 return NULL; 4448 } 4449 4450 cdev->netdev = dev; 4451 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); 4452 cdev->register_device = cnic_register_device; 4453 cdev->unregister_device = cnic_unregister_device; 4454 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; 4455 4456 cp = cdev->cnic_priv; 4457 cp->dev = cdev; 4458 cp->uio_dev = -1; 4459 cp->l2_single_buf_size = 0x400; 4460 cp->l2_rx_ring_size = 3; 4461 4462 spin_lock_init(&cp->cnic_ulp_lock); 4463 4464 netdev_info(dev, "Added CNIC device\n"); 4465 4466 return cdev; 4467} 4468 4469static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) 4470{ 4471 struct pci_dev *pdev; 4472 struct cnic_dev *cdev; 4473 struct cnic_local *cp; 4474 struct cnic_eth_dev *ethdev = NULL; 4475 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 4476 4477 probe = symbol_get(bnx2_cnic_probe); 4478 if (probe) { 4479 ethdev = (*probe)(dev); 4480 symbol_put(bnx2_cnic_probe); 4481 } 4482 if (!ethdev) 4483 return NULL; 4484 4485 pdev = ethdev->pdev; 4486 if (!pdev) 4487 return NULL; 4488 4489 dev_hold(dev); 4490 pci_dev_get(pdev); 4491 if (pdev->device == PCI_DEVICE_ID_NX2_5709 || 4492 pdev->device == PCI_DEVICE_ID_NX2_5709S) { 4493 u8 rev; 4494 4495 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); 4496 if (rev < 0x10) { 4497 pci_dev_put(pdev); 4498 goto cnic_err; 4499 } 4500 } 4501 pci_dev_put(pdev); 4502 4503 cdev = cnic_alloc_dev(dev, pdev); 4504 if (cdev == NULL) 4505 goto cnic_err; 4506 4507 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); 4508 cdev->submit_kwqes = cnic_submit_bnx2_kwqes; 4509 4510 cp = cdev->cnic_priv; 4511 cp->ethdev = ethdev; 4512 cdev->pcidev = pdev; 4513 4514 cp->cnic_ops = &cnic_bnx2_ops; 4515 cp->start_hw = cnic_start_bnx2_hw; 4516 cp->stop_hw = cnic_stop_bnx2_hw; 4517 cp->setup_pgtbl = cnic_setup_page_tbl; 4518 cp->alloc_resc = cnic_alloc_bnx2_resc; 4519 cp->free_resc = cnic_free_resc; 4520 cp->start_cm = cnic_cm_init_bnx2_hw; 4521 cp->stop_cm = cnic_cm_stop_bnx2_hw; 4522 cp->enable_int = cnic_enable_bnx2_int; 4523 cp->disable_int_sync = cnic_disable_bnx2_int_sync; 4524 cp->close_conn = cnic_close_bnx2_conn; 4525 cp->next_idx = cnic_bnx2_next_idx; 4526 cp->hw_idx = cnic_bnx2_hw_idx; 4527 return cdev; 4528 4529cnic_err: 4530 dev_put(dev); 4531 return NULL; 4532} 4533 4534static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) 4535{ 4536 struct pci_dev *pdev; 4537 struct cnic_dev *cdev; 4538 struct cnic_local *cp; 4539 struct cnic_eth_dev *ethdev = NULL; 4540 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL; 4541 4542 probe = symbol_get(bnx2x_cnic_probe); 4543 if (probe) { 4544 ethdev = (*probe)(dev); 4545 symbol_put(bnx2x_cnic_probe); 4546 } 4547 if (!ethdev) 4548 return NULL; 4549 4550 pdev = ethdev->pdev; 4551 if (!pdev) 4552 return NULL; 4553 4554 dev_hold(dev); 4555 cdev = cnic_alloc_dev(dev, pdev); 4556 if (cdev == NULL) { 4557 dev_put(dev); 4558 return NULL; 4559 } 4560 4561 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); 4562 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; 4563 4564 cp = cdev->cnic_priv; 4565 cp->ethdev = ethdev; 4566 cdev->pcidev = pdev; 4567 4568 cp->cnic_ops = &cnic_bnx2x_ops; 4569 cp->start_hw = cnic_start_bnx2x_hw; 4570 cp->stop_hw = cnic_stop_bnx2x_hw; 4571 cp->setup_pgtbl = cnic_setup_page_tbl_le; 4572 cp->alloc_resc = cnic_alloc_bnx2x_resc; 4573 cp->free_resc = cnic_free_resc; 4574 cp->start_cm = cnic_cm_init_bnx2x_hw; 4575 cp->stop_cm = cnic_cm_stop_bnx2x_hw; 4576 cp->enable_int = cnic_enable_bnx2x_int; 4577 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; 4578 cp->ack_int = cnic_ack_bnx2x_msix; 4579 cp->close_conn = cnic_close_bnx2x_conn; 4580 cp->next_idx = cnic_bnx2x_next_idx; 4581 cp->hw_idx = cnic_bnx2x_hw_idx; 4582 return cdev; 4583} 4584 4585static struct cnic_dev *is_cnic_dev(struct net_device *dev) 4586{ 4587 struct ethtool_drvinfo drvinfo; 4588 struct cnic_dev *cdev = NULL; 4589 4590 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { 4591 memset(&drvinfo, 0, sizeof(drvinfo)); 4592 dev->ethtool_ops->get_drvinfo(dev, &drvinfo); 4593 4594 if (!strcmp(drvinfo.driver, "bnx2")) 4595 cdev = init_bnx2_cnic(dev); 4596 if (!strcmp(drvinfo.driver, "bnx2x")) 4597 cdev = init_bnx2x_cnic(dev); 4598 if (cdev) { 4599 write_lock(&cnic_dev_lock); 4600 list_add(&cdev->list, &cnic_dev_list); 4601 write_unlock(&cnic_dev_lock); 4602 } 4603 } 4604 return cdev; 4605} 4606 4607/** 4608 * netdev event handler 4609 */ 4610static int cnic_netdev_event(struct notifier_block *this, unsigned long event, 4611 void *ptr) 4612{ 4613 struct net_device *netdev = ptr; 4614 struct cnic_dev *dev; 4615 int if_type; 4616 int new_dev = 0; 4617 4618 dev = cnic_from_netdev(netdev); 4619 4620 if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) { 4621 /* Check for the hot-plug device */ 4622 dev = is_cnic_dev(netdev); 4623 if (dev) { 4624 new_dev = 1; 4625 cnic_hold(dev); 4626 } 4627 } 4628 if (dev) { 4629 struct cnic_local *cp = dev->cnic_priv; 4630 4631 if (new_dev) 4632 cnic_ulp_init(dev); 4633 else if (event == NETDEV_UNREGISTER) 4634 cnic_ulp_exit(dev); 4635 4636 if (event == NETDEV_UP) { 4637 if (cnic_register_netdev(dev) != 0) { 4638 cnic_put(dev); 4639 goto done; 4640 } 4641 if (!cnic_start_hw(dev)) 4642 cnic_ulp_start(dev); 4643 } 4644 4645 rcu_read_lock(); 4646 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 4647 struct cnic_ulp_ops *ulp_ops; 4648 void *ctx; 4649 4650 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 4651 if (!ulp_ops || !ulp_ops->indicate_netevent) 4652 continue; 4653 4654 ctx = cp->ulp_handle[if_type]; 4655 4656 ulp_ops->indicate_netevent(ctx, event); 4657 } 4658 rcu_read_unlock(); 4659 4660 if (event == NETDEV_GOING_DOWN) { 4661 cnic_ulp_stop(dev); 4662 cnic_stop_hw(dev); 4663 cnic_unregister_netdev(dev); 4664 } else if (event == NETDEV_UNREGISTER) { 4665 write_lock(&cnic_dev_lock); 4666 list_del_init(&dev->list); 4667 write_unlock(&cnic_dev_lock); 4668 4669 cnic_put(dev); 4670 cnic_free_dev(dev); 4671 goto done; 4672 } 4673 cnic_put(dev); 4674 } 4675done: 4676 return NOTIFY_DONE; 4677} 4678 4679static struct notifier_block cnic_netdev_notifier = { 4680 .notifier_call = cnic_netdev_event 4681}; 4682 4683static void cnic_release(void) 4684{ 4685 struct cnic_dev *dev; 4686 4687 while (!list_empty(&cnic_dev_list)) { 4688 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); 4689 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 4690 cnic_ulp_stop(dev); 4691 cnic_stop_hw(dev); 4692 } 4693 4694 cnic_ulp_exit(dev); 4695 cnic_unregister_netdev(dev); 4696 list_del_init(&dev->list); 4697 cnic_free_dev(dev); 4698 } 4699} 4700 4701static int __init cnic_init(void) 4702{ 4703 int rc = 0; 4704 4705 pr_info("%s", version); 4706 4707 rc = register_netdevice_notifier(&cnic_netdev_notifier); 4708 if (rc) { 4709 cnic_release(); 4710 return rc; 4711 } 4712 4713 return 0; 4714} 4715 4716static void __exit cnic_exit(void) 4717{ 4718 unregister_netdevice_notifier(&cnic_netdev_notifier); 4719 cnic_release(); 4720} 4721 4722module_init(cnic_init); 4723module_exit(cnic_exit); 4724