/*
 * Copyright (c) 2006-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <asm/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x100000;

static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map));
}

/**
 * cxgb3_register_client - register an offload client
 * @client: the client
 *
 * Add the client to the client list and call back the client for
 * each activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);
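
/*
 * Illustrative sketch only (not part of the driver): an upper-layer driver
 * typically embeds its callbacks in a static struct cxgb3_client and hands
 * it to cxgb3_register_client() from its module init.  The my_* names below
 * are hypothetical.
 *
 *	static struct cxgb3_client my_client = {
 *		.name	= "my_ulp",
 *		.add	= my_add_tdev,		// called per active t3cdev
 *		.remove	= my_remove_tdev,
 *	};
 *
 *	cxgb3_register_client(&my_client);
 */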

/**
 * cxgb3_unregister_client - unregister an offload client
 * @client: the client
 *
 * Remove the client from the client list and call back the client for
 * each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

/**
 * cxgb3_add_clients - activate registered clients for an offload device
 * @tdev: the offload device
 *
 * Call back all registered clients when an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

/**
 * cxgb3_remove_clients - deactivate registered clients
 * for an offload device
 * @tdev: the offload device
 *
 * Call back all registered clients when an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		struct vlan_group *grp;
		struct net_device *dev = adapter->port[i];
		const struct port_info *p = netdev_priv(dev);

		if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
			if (vlan && vlan != VLAN_VID_MASK) {
				grp = p->vlan_grp;
				dev = NULL;
				if (grp)
					dev = vlan_group_get_device(grp, vlan);
			} else
				while (dev->master)
					dev = dev->master;
			return dev;
		}
	}
	return NULL;
}

static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int ret = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		uiip->max_txsz = min(adapter->params.tp.tx_pg_size,
				     t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		/*
		 * On rx, the iscsi pdu has to be < rx page size and the
		 * whole pdu + cpl headers have to fit into one sge buffer.
		 */
		uiip->max_rxsz = min_t(unsigned int,
				       adapter->params.tp.rx_pg_size,
				       (adapter->sge.qs[0].fl[1].buf_size -
					sizeof(struct cpl_rx_data) * 2 -
					sizeof(struct cpl_rx_data_ddp)));
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}
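
/*
 * Usage note (illustrative sketch, not part of this file): a ULP driver
 * reaches the handler above indirectly through the device's control hook,
 * e.g.
 *
 *	struct ulp_iscsi_info info;
 *
 *	if (tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &info) == 0)
 *		;	// info.llimit/info.ulimit bound the iSCSI region
 *
 * cxgb_offload_ctl() below routes ULP_ISCSI_* requests here once offload
 * is running.
 */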

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *req = data;
		struct pci_dev *pdev = adapter->pdev;

		req->udbell_physbase = pci_resource_start(pdev, 2);
		req->udbell_len = pci_resource_len(pdev, 2);
		req->tpt_base = t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		req->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		req->pbl_base = t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		req->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		req->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		req->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		req->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		req->pdev = pdev;
		break;
	}
	case RDMA_CQ_OP: {
		unsigned long flags;
		struct rdma_cq_op *req = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, req->id, req->op,
					req->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
				     (u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *req = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, req->id, req->base_addr,
					  req->size, ASYNC_NOTIF_RSPQ,
					  req->ovfl_mode, req->credits,
					  req->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *req = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_RDMA, ASYNC_NOTIF_RSPQ,
					 req->base_addr, req->size,
					 FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}
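
/*
 * Control operation dispatcher for a t3cdev.  Simple queries are answered
 * inline; ULP iSCSI and RDMA requests are forwarded to the helpers above,
 * which additionally require the offload capability to be running
 * (-EAGAIN otherwise).
 */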
static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/*
 * Dummy handler for Rx offload packets in case we get an offload packet
 * before proper processing is set up.  This complains and drops the packet
 * as it isn't normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n",
	       n, ntohl(*(__be32 *)skbs[0]->data));
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);

void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);
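
/*
 * TID release path: cxgb3_remove_tid() frees a HW TID by sending a
 * CPL_TID_RELEASE message.  When it cannot allocate the skb atomically,
 * the entry is chained onto a per-device release list (reusing the ctx
 * field as the next pointer) and a work item replays the releases from
 * process context, where it may sleep for memory.
 */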

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = (struct t3c_tid_entry *)p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL | __GFP_NOFAIL);
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		spin_lock_bh(&td->tid_release_lock);
	}
	spin_unlock_bh(&td->tid_release_lock);
}

/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	p->client = NULL;
	td->tid_release_list = p;
	if (!p->ctx)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);
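
/*
 * Illustrative lifecycle of an active-open connection's TIDs (sketch only,
 * error handling omitted): the client reserves an atid, transfers its
 * context to the HW tid once the connection is established, and finally
 * releases both.
 *
 *	atid = cxgb3_alloc_atid(tdev, &my_client, ctx);
 *	...			// CPL_ACT_ESTABLISH arrives with the HW tid
 *	cxgb3_insert_tid(tdev, &my_client, ctx, hwtid);
 *	cxgb3_free_atid(tdev, atid);
 *	...			// connection tear-down
 *	cxgb3_remove_tid(tdev, ctx, hwtid);
 */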

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree &&
	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
	    t->ntids) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);

int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);

static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		printk(KERN_ERR
		       "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
	    t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
								    t3c_tid->
								    ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}
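
/*
 * The handlers below share one dispatch pattern: recover the TID from the
 * CPL header, look up the owning t3c_tid_entry, and hand the skb to the
 * client's per-opcode handler.  If no client claims the TID the message is
 * logged and dropped (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG).
 */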
static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode] (dev, skb,
							     t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.  Note that this
 * operation does not destroy the original skb data even if it decides to reuse
 * the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
					       int gfp)
{
	if (likely(!skb_cloned(skb))) {
		BUG_ON(skb->len < len);
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}
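
/*
 * Even when no client owns the aborted TID the hardware still expects a
 * CPL_ABORT_RPL, so the fallback below builds one (reusing the request skb
 * when possible).  Negative-advice statuses are advisory and get no reply.
 */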
static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb;
		unsigned int tid = GET_TID(req);
		u8 cmd = req->status;

		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
			goto out;

		reply_skb = cxgb3_get_cpl_reply_skb(skb,
						    sizeof(struct
							   cpl_abort_rpl),
						    GFP_ATOMIC);

		if (!reply_skb) {
			printk(KERN_ERR
			       "do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		reply_skb->priority = CPL_PRIORITY_DATA;
		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
		rpl->cmd = cmd;
		cxgb3_ofld_send(dev, reply_skb);
out:
		return CPL_RET_BUF_DONE;
	}
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	netif_receive_skb(skb);
	return 0;
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
	unsigned int opcode = G_OPCODE(ntohl(skb->csum));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode] (dev, skb,
							  t3c_tid->ctx);
	} else {
		printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE): {
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_PMTU_UPDATE):
		break;
	case (NETEVENT_REDIRECT): {
		struct netevent_redirect *nr = ctx;
		cxgb_redirect(nr->old, nr->new);
		cxgb_neigh_update(nr->new->neighbour);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
	       *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		printk(KERN_ERR "T3C: handler registration for "
		       "opcode %x failed\n", opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);
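
/*
 * Illustrative use of the dispatch table (sketch, not part of this file):
 * a module that wants raw CPL_ISCSI_HDR messages could install its own
 * top-level handler, my_iscsi_hdr_handler being hypothetical:
 *
 *	t3_register_cpl_handler(CPL_ISCSI_HDR, my_iscsi_hdr_handler);
 *
 * Passing NULL restores do_bad_cpl for that opcode.
 */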

/*
 * T3CDEV's receive method.
 */
int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = G_OPCODE(ntohl(skb->csum));
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			printk(KERN_ERR "%s: CPL message (opcode %u) had "
			       "unknown TID %u\n", dev->name, opcode,
			       G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}

/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);

static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}

void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = T3CDEV(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}

static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__);
		return;
	}
	skb->priority = CPL_PRIORITY_CONTROL;
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_L2T_IX);
	req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
	req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
	tdev->send(tdev, skb);
}
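
/*
 * Handle a dst redirect between two offload-capable interfaces on the same
 * adapter: install an L2T entry for the new neighbour, then let every client
 * that owns a TID decide whether its connection's TCB should be rewritten to
 * the new L2T index.
 */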
void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
	struct net_device *olddev, *newdev;
	struct tid_info *ti;
	struct t3cdev *tdev;
	u32 tid;
	int update_tcb;
	struct l2t_entry *e;
	struct t3c_tid_entry *te;

	olddev = old->neighbour->dev;
	newdev = new->neighbour->dev;
	if (!is_offloading(olddev))
		return;
	if (!is_offloading(newdev)) {
		printk(KERN_WARNING "%s: Redirect to non-offload "
		       "device ignored.\n", __FUNCTION__);
		return;
	}
	tdev = T3CDEV(olddev);
	BUG_ON(!tdev);
	if (tdev != T3CDEV(newdev)) {
		printk(KERN_WARNING "%s: Redirect to different "
		       "offload device ignored.\n", __FUNCTION__);
		return;
	}

	/* Add new L2T entry */
	e = t3_l2t_get(tdev, new->neighbour, newdev);
	if (!e) {
		printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
		       __FUNCTION__);
		return;
	}

	/* Walk tid table and notify clients of dst change. */
	ti = &(T3C_DATA(tdev))->tid_maps;
	for (tid = 0; tid < ti->ntids; tid++) {
		te = lookup_tid(ti, tid);
		BUG_ON(!te);
		if (te && te->ctx && te->client && te->client->redirect) {
			update_tcb = te->client->redirect(te->ctx, old, new, e);
			if (update_tcb) {
				l2t_hold(L2DATA(tdev), e);
				set_l2t_ix(tdev, tid, e);
			}
		}
	}
	l2t_release(L2DATA(tdev), e);
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *cxgb_alloc_mem(unsigned long size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (!p)
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}

/*
 * Free memory allocated through cxgb_alloc_mem().
 */
void cxgb_free_mem(void *addr)
{
	unsigned long p = (unsigned long)addr;

	if (p >= VMALLOC_START && p < VMALLOC_END)
		vfree(addr);
	else
		kfree(addr);
}
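
/*
 * The three TID tables live in one contiguous allocation:
 *
 *	tid_tab[ntids] | stid_tab[nstids] | atid_tab[natids]
 *
 * so freeing t->tid_tab releases all of them (see free_tid_maps()).
 */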
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = cxgb_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}

static void free_tid_maps(struct tid_info *t)
{
	cxgb_free_mem(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;

	t = kcalloc(1, sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	L2DATA(dev) = t3_init_l2t(l2t_capacity);
	if (!L2DATA(dev))
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	add_adapter(adapter);
	return 0;

out_free_l2t:
	t3_free_l2t(L2DATA(dev));
	L2DATA(dev) = NULL;
out_free:
	kfree(t);
	return err;
}

void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	t3_free_l2t(L2DATA(tdev));
	L2DATA(tdev) = NULL;
	kfree(t);
}

static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}
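
/*
 * Per-adapter bring-up: cxgb3_adapter_ofld() publishes the t3cdev with dummy
 * receive ops so stray offload traffic is complained about and dropped until
 * cxgb3_offload_activate() installs process_rx() and the L2T/TID state.
 */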
void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adapter->params.rev == 0 ? T3A : T3B;

	register_tdev(tdev);
}

void __devexit cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}

void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}