/*
 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;

static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}

/**
 * cxgb3_register_client - register an offload client
 * @client: the client
 *
 * Add the client to the client list, and call back the client for each
 * activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);

/**
 * cxgb3_unregister_client - unregister an offload client
 * @client: the client
 *
 * Remove the client from the client list, and call back the client for
 * each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

/**
 * cxgb3_add_clients - activate registered clients for an offload device
 * @tdev: the offload device
 *
 * Calls back all registered clients once an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

/**
 * cxgb3_remove_clients - deactivate registered clients for an offload device
 * @tdev: the offload device
 *
 * Calls back all registered clients once an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->event_handler)
			client->event_handler(tdev, event, port);
	}
	mutex_unlock(&cxgb3_db_lock);
}

static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		struct vlan_group *grp;
		struct net_device *dev = adapter->port[i];
		const struct port_info *p = netdev_priv(dev);

		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
			if (vlan && vlan != VLAN_VID_MASK) {
				grp = p->vlan_grp;
				dev = NULL;
				if (grp)
					dev = vlan_group_get_device(grp, vlan);
			} else
				while (dev->master)
					dev = dev->master;
			return dev;
		}
	}
	return NULL;
}

static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int i;
	int ret = 0;
	unsigned int val = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);

		val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
		for (i = 0; i < 4; i++, val >>= 8)
			uiip->pgsz_factor[i] = val & 0xFF;

		val = t3_read_reg(adapter, A_TP_PARA_REG7);
		uiip->max_txsz =
		uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0) & M_PMMAXXFERLEN0,
				     (val >> S_PMMAXXFERLEN1) & M_PMMAXXFERLEN1);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
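		 * Both limits are applied just below: the page size comes
		 * from tp.tx_pg_size and the FIFO size is read from the
		 * upper bits of A_PM1_TX_CFG.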
		 */
		val = min(adapter->params.tp.tx_pg_size,
			  t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		uiip->max_txsz = min(val, uiip->max_txsz);

		/* set MaxRxData to 16224 */
		val = t3_read_reg(adapter, A_TP_PARA_REG2);
		if ((val >> S_MAXRXDATA) != 0x3f60) {
			val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
			val |= V_MAXRXDATA(0x3f60);
			printk(KERN_INFO
			       "%s, iscsi set MaxRxData to 16224 (0x%x).\n",
			       adapter->name, val);
			t3_write_reg(adapter, A_TP_PARA_REG2, val);
		}

		/*
		 * On rx, the iscsi pdu has to be < rx page size and the
		 * max rx data length programmed in TP.
		 */
		val = min(adapter->params.tp.rx_pg_size,
			  ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
			   S_MAXRXDATA) & M_MAXRXDATA);
		uiip->max_rxsz = min(val, uiip->max_rxsz);
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		/* program the ddp page sizes */
		for (i = 0; i < 4; i++)
			val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
		if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
			printk(KERN_INFO
			       "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
			       adapter->name, val, uiip->pgsz_factor[0],
			       uiip->pgsz_factor[1], uiip->pgsz_factor[2],
			       uiip->pgsz_factor[3]);
			t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *rdma = data;
		struct pci_dev *pdev = adapter->pdev;

		rdma->udbell_physbase = pci_resource_start(pdev, 2);
		rdma->udbell_len = pci_resource_len(pdev, 2);
		rdma->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		rdma->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		rdma->pdev = pdev;
		break;
	}
	case RDMA_CQ_OP: {
		unsigned long flags;
		struct rdma_cq_op *rdma = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
					rdma->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
				     (u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, rdma->id,
					  rdma->base_addr, rdma->size,
					  ASYNC_NOTIF_RSPQ,
					  rdma->ovfl_mode, rdma->credits,
					  rdma->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret =
		    t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_RDMA,
					 ASYNC_NOTIF_RSPQ,
					 rdma->base_addr, rdma->size,
					 FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MIB: {
		spin_lock(&adapter->stats_lock);
		t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	struct ofld_page_info *rx_page_info;
	struct tp_params *tp = &adapter->params.tp;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
	case RDMA_GET_MIB:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	case GET_RX_PAGE_INFO:
		rx_page_info = data;
		rx_page_info->page_size = tp->rx_pg_size;
		rx_page_info->num = tp->rx_num_pgs;
		break;
	case GET_ISCSI_IPV4ADDR: {
		struct iscsi_ipv4addr *p = data;
		struct port_info *pi = netdev_priv(p->dev);
		p->ipv4addr = pi->iscsi_ipv4addr;
		break;
	}
	case GET_EMBEDDED_INFO: {
		struct ch_embedded_info *e = data;

		spin_lock(&adapter->stats_lock);
		t3_get_fw_version(adapter, &e->fw_vers);
		t3_get_tp_version(adapter, &e->tp_vers);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is setup.  This complains and drops the packet as it isn't
 * normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);

void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL);
		if (!skb)
			skb = td->nofail_skb;
		if (!skb) {
			spin_lock_bh(&td->tid_release_lock);
			p->ctx = (void *)td->tid_release_list;
			td->tid_release_list = (struct t3c_tid_entry *)p;
			break;
		}
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		if (skb == td->nofail_skb)
			td->nofail_skb =
				alloc_skb(sizeof(struct cpl_tid_release),
					  GFP_KERNEL);
		spin_lock_bh(&td->tid_release_lock);
	}
	td->release_list_incomplete = (td->tid_release_list == NULL) ?
	    0 : 1;
	spin_unlock_bh(&td->tid_release_lock);

	if (!td->nofail_skb)
		td->nofail_skb =
			alloc_skb(sizeof(struct cpl_tid_release),
				  GFP_KERNEL);
}

/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	p->client = NULL;
	td->tid_release_list = p;
	if (!p->ctx || td->release_list_incomplete)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree &&
	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
	    t->ntids) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);

int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);

/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);

static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_rte_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
	    t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
								    t3c_tid->
								    ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode] (dev, skb,
							     t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk("%s: passive open TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_stid(t, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.
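 * When the input skb is reused its reference count is bumped with skb_get(),
 * so the caller's reference remains valid.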
 * Note that this operation does not destroy the original skb data even if it
 * decides to reuse the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
					       gfp_t gfp)
{
	if (likely(!skb_cloned(skb))) {
		BUG_ON(skb->len < len);
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}

static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb;
		unsigned int tid = GET_TID(req);
		u8 cmd = req->status;

		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
			goto out;

		reply_skb = cxgb3_get_cpl_reply_skb(skb,
						    sizeof(struct
							   cpl_abort_rpl),
						    GFP_ATOMIC);

		if (!reply_skb) {
			printk("do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		reply_skb->priority = CPL_PRIORITY_DATA;
		__skb_trim(reply_skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
		rpl->cmd = cmd;
		cxgb3_ofld_send(dev, reply_skb);
out:
		return CPL_RET_BUF_DONE;
	}
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		printk("%s: active establish TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_atid(t, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	netif_receive_skb(skb);
	return 0;
}

/*
 * That skb had better have come from process_responses(), where we abuse
 * ->priority and ->csum to carry our data.  NB: if we get to per-arch
 * ->csum, things might get really interesting here.
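 * Concretely: get_hwtid() below recovers the hardware TID from bits 27..8
 * of ->priority, and get_opcode() pulls the CPL opcode out of ->csum via
 * G_OPCODE().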
 */

static inline u32 get_hwtid(struct sk_buff *skb)
{
	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

static inline u32 get_opcode(struct sk_buff *skb)
{
	return G_OPCODE(ntohl((__force __be32)skb->csum));
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = get_hwtid(skb);
	unsigned int opcode = get_opcode(skb);
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode] (dev, skb,
							  t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE: {
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case NETEVENT_PMTU_UPDATE:
		break;
	case NETEVENT_REDIRECT: {
		struct netevent_redirect *nr = ctx;
		cxgb_redirect(nr->old, nr->new);
		cxgb_neigh_update(nr->new->neighbour);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
	       *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		printk(KERN_ERR "T3C: handler registration for "
		       "opcode %x failed\n", opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);

/*
 * T3CDEV's receive method.
 */
int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = get_opcode(skb);
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			printk(KERN_ERR "%s: CPL message (opcode %u) had "
			       "unknown TID %u\n", dev->name, opcode,
			       G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}

/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
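 * Bottom halves are disabled around the ->send call so the offload send path
 * behaves consistently regardless of the caller's context.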
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);

static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}

void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	if (dev && is_offloading(dev)) {
		struct t3cdev *tdev = dev2t3cdev(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}

static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}

void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = old->neighbour->dev;
	newdev = new->neighbour->dev;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		printk(KERN_WARNING "%s: Redirect to non-offload "
		       "device ignored.\n", __func__);
		return;
	}
	tdev = dev2t3cdev(olddev);
	BUG_ON(!tdev);
	if (tdev != dev2t3cdev(newdev)) {
		printk(KERN_WARNING "%s: Redirect to different "
		       "offload device ignored.\n", __func__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new->neighbour, newdev);
	if (!e) {
		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
		       __func__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te && te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(L2DATA(tdev), e);
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (!p)
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}

/*
 * Free memory allocated through cxgb_alloc_mem().
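 * Uses is_vmalloc_addr() to pick vfree() or kfree() to match how the chunk
 * was obtained.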
 */
void cxgb_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}

static void free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	L2DATA(dev) = t3_init_l2t(l2t_capacity);
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
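	/*
	 * nofail_skb backstops the TID-release worker: if alloc_skb() fails
	 * there, this pre-allocated buffer is used instead so TID releases
	 * can still make progress (see t3_process_tid_release_list()).
	 */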
	t->release_list_incomplete = 0;

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	L2DATA(dev) = NULL;
out_free:
	kfree(t);
	return err;
}

void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	t3_free_l2t(L2DATA(tdev));
	L2DATA(tdev) = NULL;
	if (t->nofail_skb)
		kfree_skb(t->nofail_skb);
	kfree(t);
}

static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline int adap2type(struct adapter *adapter)
{
	int type = 0;

	switch (adapter->params.rev) {
	case T3_REV_A:
		type = T3A;
		break;
	case T3_REV_B:
	case T3_REV_B2:
		type = T3B;
		break;
	case T3_REV_C:
		type = T3C;
		break;
	}
	return type;
}

void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adap2type(adapter);

	register_tdev(tdev);
}

void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}

void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}
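
/*
 * Usage sketch (hypothetical, not part of this driver): an upper-layer
 * protocol module would typically define a cxgb3_client and register it so
 * its callbacks run for every activated offload device.  The my_ulp_* names
 * below are illustrative assumptions only:
 *
 *	static void my_ulp_add(struct t3cdev *tdev)
 *	{
 *		// claim per-device resources; query limits via tdev->ctl(),
 *		// e.g. tdev->ctl(tdev, GET_WR_LEN, &wr_len);
 *	}
 *
 *	static struct cxgb3_client my_ulp_client = {
 *		.add	= my_ulp_add,
 *		.remove	= my_ulp_remove,
 *	};
 *
 *	cxgb3_register_client(&my_ulp_client);		// from module init
 *	cxgb3_unregister_client(&my_ulp_client);	// from module exit
 */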