1/* 2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32#include <linux/module.h> 33#include <linux/list.h> 34#include <linux/workqueue.h> 35#include <linux/skbuff.h> 36#include <linux/timer.h> 37#include <linux/notifier.h> 38 39#include <net/neighbour.h> 40#include <net/netevent.h> 41#include <net/route.h> 42 43#include "tcb.h" 44#include "cxgb3_offload.h" 45#include "iwch.h" 46#include "iwch_provider.h" 47#include "iwch_cm.h" 48 49static char *states[] = { 50 "idle", 51 "listen", 52 "connecting", 53 "mpa_wait_req", 54 "mpa_req_sent", 55 "mpa_req_rcvd", 56 "mpa_rep_sent", 57 "fpdu_mode", 58 "aborting", 59 "closing", 60 "moribund", 61 "dead", 62 NULL, 63}; 64 65static int ep_timeout_secs = 10; 66module_param(ep_timeout_secs, int, 0444); 67MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " 68 "in seconds (default=10)"); 69 70static int mpa_rev = 1; 71module_param(mpa_rev, int, 0444); 72MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " 73 "1 is spec compliant. (default=1)"); 74 75static int markers_enabled = 0; 76module_param(markers_enabled, int, 0444); 77MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); 78 79static int crc_enabled = 1; 80module_param(crc_enabled, int, 0444); 81MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); 82 83static int rcv_win = 256 * 1024; 84module_param(rcv_win, int, 0444); 85MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256)"); 86 87static int snd_win = 32 * 1024; 88module_param(snd_win, int, 0444); 89MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)"); 90 91static unsigned int nocong = 0; 92module_param(nocong, uint, 0444); 93MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)"); 94 95static unsigned int cong_flavor = 1; 96module_param(cong_flavor, uint, 0444); 97MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)"); 98 99static void process_work(struct work_struct *work); 100static struct workqueue_struct *workq; 101static DECLARE_WORK(skb_work, process_work); 102 103static struct sk_buff_head rxq; 104static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS]; 105 106static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); 107static void ep_timeout(unsigned long arg); 108static void connect_reply_upcall(struct iwch_ep *ep, int status); 109 110static void start_ep_timer(struct iwch_ep *ep) 111{ 112 PDBG("%s ep %p\n", __FUNCTION__, ep); 113 if (timer_pending(&ep->timer)) { 114 PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep); 115 del_timer_sync(&ep->timer); 116 } else 117 get_ep(&ep->com); 118 ep->timer.expires = jiffies + ep_timeout_secs * HZ; 119 ep->timer.data = (unsigned long)ep; 120 ep->timer.function = ep_timeout; 121 add_timer(&ep->timer); 122} 123 124static void stop_ep_timer(struct iwch_ep *ep) 125{ 126 PDBG("%s ep %p\n", __FUNCTION__, ep); 127 del_timer_sync(&ep->timer); 128 put_ep(&ep->com); 129} 130 131static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) 132{ 133 struct cpl_tid_release *req; 134 135 skb = get_skb(skb, sizeof *req, GFP_KERNEL); 136 if (!skb) 137 return; 138 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); 139 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 140 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); 141 skb->priority = CPL_PRIORITY_SETUP; 142 tdev->send(tdev, skb); 143 return; 144} 145 146int iwch_quiesce_tid(struct iwch_ep *ep) 147{ 148 struct cpl_set_tcb_field *req; 149 struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 150 151 if (!skb) 152 return -ENOMEM; 153 req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req)); 154 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 155 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); 156 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid)); 157 req->reply = 0; 158 req->cpu_idx = 0; 159 req->word = htons(W_TCB_RX_QUIESCE); 160 req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE); 161 req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE); 162 163 skb->priority = CPL_PRIORITY_DATA; 164 ep->com.tdev->send(ep->com.tdev, skb); 165 return 0; 166} 167 168int iwch_resume_tid(struct iwch_ep *ep) 169{ 170 struct cpl_set_tcb_field *req; 171 struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 172 173 if (!skb) 174 return -ENOMEM; 175 req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req)); 176 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 177 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); 178 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid)); 179 req->reply = 0; 180 req->cpu_idx = 0; 181 req->word = htons(W_TCB_RX_QUIESCE); 182 req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE); 183 req->val = 0; 184 185 skb->priority = CPL_PRIORITY_DATA; 186 ep->com.tdev->send(ep->com.tdev, skb); 187 return 0; 188} 189 190static void set_emss(struct iwch_ep *ep, u16 opt) 191{ 192 PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt); 193 ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40; 194 if (G_TCPOPT_TSTAMP(opt)) 195 ep->emss -= 12; 196 if (ep->emss < 128) 197 ep->emss = 128; 198 PDBG("emss=%d\n", ep->emss); 199} 200 201static enum iwch_ep_state state_read(struct iwch_ep_common *epc) 202{ 203 unsigned long flags; 204 enum iwch_ep_state state; 205 206 spin_lock_irqsave(&epc->lock, flags); 207 state = epc->state; 208 spin_unlock_irqrestore(&epc->lock, flags); 209 return state; 210} 211 212static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new) 213{ 214 epc->state = new; 215} 216 217static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new) 218{ 219 unsigned long flags; 220 221 spin_lock_irqsave(&epc->lock, flags); 222 PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]); 223 __state_set(epc, new); 224 spin_unlock_irqrestore(&epc->lock, flags); 225 return; 226} 227 228static void *alloc_ep(int size, gfp_t gfp) 229{ 230 struct iwch_ep_common *epc; 231 232 epc = kmalloc(size, gfp); 233 if (epc) { 234 memset(epc, 0, size); 235 kref_init(&epc->kref); 236 spin_lock_init(&epc->lock); 237 init_waitqueue_head(&epc->waitq); 238 } 239 PDBG("%s alloc ep %p\n", __FUNCTION__, epc); 240 return epc; 241} 242 243void __free_ep(struct kref *kref) 244{ 245 struct iwch_ep_common *epc; 246 epc = container_of(kref, struct iwch_ep_common, kref); 247 PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]); 248 kfree(epc); 249} 250 251static void release_ep_resources(struct iwch_ep *ep) 252{ 253 PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid); 254 cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); 255 dst_release(ep->dst); 256 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 257 if (ep->com.tdev->type == T3B) 258 release_tid(ep->com.tdev, ep->hwtid, NULL); 259 put_ep(&ep->com); 260} 261 262static void process_work(struct work_struct *work) 263{ 264 struct sk_buff *skb = NULL; 265 void *ep; 266 struct t3cdev *tdev; 267 int ret; 268 269 while ((skb = skb_dequeue(&rxq))) { 270 ep = *((void **) (skb->cb)); 271 tdev = *((struct t3cdev **) (skb->cb + sizeof(void *))); 272 ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep); 273 if (ret & CPL_RET_BUF_DONE) 274 kfree_skb(skb); 275 276 /* 277 * ep was referenced in sched(), and is freed here. 278 */ 279 put_ep((struct iwch_ep_common *)ep); 280 } 281} 282 283static int status2errno(int status) 284{ 285 switch (status) { 286 case CPL_ERR_NONE: 287 return 0; 288 case CPL_ERR_CONN_RESET: 289 return -ECONNRESET; 290 case CPL_ERR_ARP_MISS: 291 return -EHOSTUNREACH; 292 case CPL_ERR_CONN_TIMEDOUT: 293 return -ETIMEDOUT; 294 case CPL_ERR_TCAM_FULL: 295 return -ENOMEM; 296 case CPL_ERR_CONN_EXIST: 297 return -EADDRINUSE; 298 default: 299 return -EIO; 300 } 301} 302 303/* 304 * Try and reuse skbs already allocated... 305 */ 306static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) 307{ 308 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { 309 skb_trim(skb, 0); 310 skb_get(skb); 311 } else { 312 skb = alloc_skb(len, gfp); 313 } 314 return skb; 315} 316 317static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip, 318 __be32 peer_ip, __be16 local_port, 319 __be16 peer_port, u8 tos) 320{ 321 struct rtable *rt; 322 struct flowi fl = { 323 .oif = 0, 324 .nl_u = { 325 .ip4_u = { 326 .daddr = peer_ip, 327 .saddr = local_ip, 328 .tos = tos} 329 }, 330 .proto = IPPROTO_TCP, 331 .uli_u = { 332 .ports = { 333 .sport = local_port, 334 .dport = peer_port} 335 } 336 }; 337 338 if (ip_route_output_flow(&rt, &fl, NULL, 0)) 339 return NULL; 340 return rt; 341} 342 343static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu) 344{ 345 int i = 0; 346 347 while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu) 348 ++i; 349 return i; 350} 351 352static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb) 353{ 354 PDBG("%s t3cdev %p\n", __FUNCTION__, dev); 355 kfree_skb(skb); 356} 357 358/* 359 * Handle an ARP failure for an active open. 360 */ 361static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb) 362{ 363 printk(KERN_ERR MOD "ARP failure duing connect\n"); 364 kfree_skb(skb); 365} 366 367/* 368 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant 369 * and send it along. 370 */ 371static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb) 372{ 373 struct cpl_abort_req *req = cplhdr(skb); 374 375 PDBG("%s t3cdev %p\n", __FUNCTION__, dev); 376 req->cmd = CPL_ABORT_NO_RST; 377 cxgb3_ofld_send(dev, skb); 378} 379 380static int send_halfclose(struct iwch_ep *ep, gfp_t gfp) 381{ 382 struct cpl_close_con_req *req; 383 struct sk_buff *skb; 384 385 PDBG("%s ep %p\n", __FUNCTION__, ep); 386 skb = get_skb(NULL, sizeof(*req), gfp); 387 if (!skb) { 388 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__); 389 return -ENOMEM; 390 } 391 skb->priority = CPL_PRIORITY_DATA; 392 set_arp_failure_handler(skb, arp_failure_discard); 393 req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req)); 394 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); 395 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); 396 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid)); 397 l2t_send(ep->com.tdev, skb, ep->l2t); 398 return 0; 399} 400 401static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) 402{ 403 struct cpl_abort_req *req; 404 405 PDBG("%s ep %p\n", __FUNCTION__, ep); 406 skb = get_skb(skb, sizeof(*req), gfp); 407 if (!skb) { 408 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 409 __FUNCTION__); 410 return -ENOMEM; 411 } 412 skb->priority = CPL_PRIORITY_DATA; 413 set_arp_failure_handler(skb, abort_arp_failure); 414 req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req)); 415 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); 416 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); 417 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 418 req->cmd = CPL_ABORT_SEND_RST; 419 l2t_send(ep->com.tdev, skb, ep->l2t); 420 return 0; 421} 422 423static int send_connect(struct iwch_ep *ep) 424{ 425 struct cpl_act_open_req *req; 426 struct sk_buff *skb; 427 u32 opt0h, opt0l, opt2; 428 unsigned int mtu_idx; 429 int wscale; 430 431 PDBG("%s ep %p\n", __FUNCTION__, ep); 432 433 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 434 if (!skb) { 435 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 436 __FUNCTION__); 437 return -ENOMEM; 438 } 439 mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst)); 440 wscale = compute_wscale(rcv_win); 441 opt0h = V_NAGLE(0) | 442 V_NO_CONG(nocong) | 443 V_KEEP_ALIVE(1) | 444 F_TCAM_BYPASS | 445 V_WND_SCALE(wscale) | 446 V_MSS_IDX(mtu_idx) | 447 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 448 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 449 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 450 skb->priority = CPL_PRIORITY_SETUP; 451 set_arp_failure_handler(skb, act_open_req_arp_failure); 452 453 req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req)); 454 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 455 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid)); 456 req->local_port = ep->com.local_addr.sin_port; 457 req->peer_port = ep->com.remote_addr.sin_port; 458 req->local_ip = ep->com.local_addr.sin_addr.s_addr; 459 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; 460 req->opt0h = htonl(opt0h); 461 req->opt0l = htonl(opt0l); 462 req->params = 0; 463 req->opt2 = htonl(opt2); 464 l2t_send(ep->com.tdev, skb, ep->l2t); 465 return 0; 466} 467 468static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) 469{ 470 int mpalen; 471 struct tx_data_wr *req; 472 struct mpa_message *mpa; 473 int len; 474 475 PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen); 476 477 BUG_ON(skb_cloned(skb)); 478 479 mpalen = sizeof(*mpa) + ep->plen; 480 if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) { 481 kfree_skb(skb); 482 skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL); 483 if (!skb) { 484 connect_reply_upcall(ep, -ENOMEM); 485 return; 486 } 487 } 488 skb_trim(skb, 0); 489 skb_reserve(skb, sizeof(*req)); 490 skb_put(skb, mpalen); 491 skb->priority = CPL_PRIORITY_DATA; 492 mpa = (struct mpa_message *) skb->data; 493 memset(mpa, 0, sizeof(*mpa)); 494 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 495 mpa->flags = (crc_enabled ? MPA_CRC : 0) | 496 (markers_enabled ? MPA_MARKERS : 0); 497 mpa->private_data_size = htons(ep->plen); 498 mpa->revision = mpa_rev; 499 500 if (ep->plen) 501 memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); 502 503 /* 504 * Reference the mpa skb. This ensures the data area 505 * will remain in memory until the hw acks the tx. 506 * Function tx_ack() will deref it. 507 */ 508 skb_get(skb); 509 set_arp_failure_handler(skb, arp_failure_discard); 510 skb_reset_transport_header(skb); 511 len = skb->len; 512 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 513 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); 514 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); 515 req->len = htonl(len); 516 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | 517 V_TX_SNDBUF(snd_win>>15)); 518 req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT); 519 req->sndseq = htonl(ep->snd_seq); 520 BUG_ON(ep->mpa_skb); 521 ep->mpa_skb = skb; 522 l2t_send(ep->com.tdev, skb, ep->l2t); 523 start_ep_timer(ep); 524 state_set(&ep->com, MPA_REQ_SENT); 525 return; 526} 527 528static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen) 529{ 530 int mpalen; 531 struct tx_data_wr *req; 532 struct mpa_message *mpa; 533 struct sk_buff *skb; 534 535 PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen); 536 537 mpalen = sizeof(*mpa) + plen; 538 539 skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL); 540 if (!skb) { 541 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__); 542 return -ENOMEM; 543 } 544 skb_reserve(skb, sizeof(*req)); 545 mpa = (struct mpa_message *) skb_put(skb, mpalen); 546 memset(mpa, 0, sizeof(*mpa)); 547 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 548 mpa->flags = MPA_REJECT; 549 mpa->revision = mpa_rev; 550 mpa->private_data_size = htons(plen); 551 if (plen) 552 memcpy(mpa->private_data, pdata, plen); 553 554 /* 555 * Reference the mpa skb again. This ensures the data area 556 * will remain in memory until the hw acks the tx. 557 * Function tx_ack() will deref it. 558 */ 559 skb_get(skb); 560 skb->priority = CPL_PRIORITY_DATA; 561 set_arp_failure_handler(skb, arp_failure_discard); 562 skb_reset_transport_header(skb); 563 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 564 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); 565 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); 566 req->len = htonl(mpalen); 567 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | 568 V_TX_SNDBUF(snd_win>>15)); 569 req->flags = htonl(F_TX_IMM_ACK|F_TX_INIT); 570 req->sndseq = htonl(ep->snd_seq); 571 BUG_ON(ep->mpa_skb); 572 ep->mpa_skb = skb; 573 l2t_send(ep->com.tdev, skb, ep->l2t); 574 return 0; 575} 576 577static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) 578{ 579 int mpalen; 580 struct tx_data_wr *req; 581 struct mpa_message *mpa; 582 int len; 583 struct sk_buff *skb; 584 585 PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen); 586 587 mpalen = sizeof(*mpa) + plen; 588 589 skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL); 590 if (!skb) { 591 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__); 592 return -ENOMEM; 593 } 594 skb->priority = CPL_PRIORITY_DATA; 595 skb_reserve(skb, sizeof(*req)); 596 mpa = (struct mpa_message *) skb_put(skb, mpalen); 597 memset(mpa, 0, sizeof(*mpa)); 598 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 599 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 600 (markers_enabled ? MPA_MARKERS : 0); 601 mpa->revision = mpa_rev; 602 mpa->private_data_size = htons(plen); 603 if (plen) 604 memcpy(mpa->private_data, pdata, plen); 605 606 /* 607 * Reference the mpa skb. This ensures the data area 608 * will remain in memory until the hw acks the tx. 609 * Function tx_ack() will deref it. 610 */ 611 skb_get(skb); 612 set_arp_failure_handler(skb, arp_failure_discard); 613 skb_reset_transport_header(skb); 614 len = skb->len; 615 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 616 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); 617 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); 618 req->len = htonl(len); 619 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | 620 V_TX_SNDBUF(snd_win>>15)); 621 req->flags = htonl(F_TX_MORE | F_TX_IMM_ACK | F_TX_INIT); 622 req->sndseq = htonl(ep->snd_seq); 623 ep->mpa_skb = skb; 624 state_set(&ep->com, MPA_REP_SENT); 625 l2t_send(ep->com.tdev, skb, ep->l2t); 626 return 0; 627} 628 629static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 630{ 631 struct iwch_ep *ep = ctx; 632 struct cpl_act_establish *req = cplhdr(skb); 633 unsigned int tid = GET_TID(req); 634 635 PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid); 636 637 dst_confirm(ep->dst); 638 639 /* setup the hwtid for this connection */ 640 ep->hwtid = tid; 641 cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid); 642 643 ep->snd_seq = ntohl(req->snd_isn); 644 645 set_emss(ep, ntohs(req->tcp_opt)); 646 647 /* dealloc the atid */ 648 cxgb3_free_atid(ep->com.tdev, ep->atid); 649 650 /* start MPA negotiation */ 651 send_mpa_req(ep, skb); 652 653 return 0; 654} 655 656static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) 657{ 658 PDBG("%s ep %p\n", __FILE__, ep); 659 state_set(&ep->com, ABORTING); 660 send_abort(ep, skb, gfp); 661} 662 663static void close_complete_upcall(struct iwch_ep *ep) 664{ 665 struct iw_cm_event event; 666 667 PDBG("%s ep %p\n", __FUNCTION__, ep); 668 memset(&event, 0, sizeof(event)); 669 event.event = IW_CM_EVENT_CLOSE; 670 if (ep->com.cm_id) { 671 PDBG("close complete delivered ep %p cm_id %p tid %d\n", 672 ep, ep->com.cm_id, ep->hwtid); 673 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 674 ep->com.cm_id->rem_ref(ep->com.cm_id); 675 ep->com.cm_id = NULL; 676 ep->com.qp = NULL; 677 } 678} 679 680static void peer_close_upcall(struct iwch_ep *ep) 681{ 682 struct iw_cm_event event; 683 684 PDBG("%s ep %p\n", __FUNCTION__, ep); 685 memset(&event, 0, sizeof(event)); 686 event.event = IW_CM_EVENT_DISCONNECT; 687 if (ep->com.cm_id) { 688 PDBG("peer close delivered ep %p cm_id %p tid %d\n", 689 ep, ep->com.cm_id, ep->hwtid); 690 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 691 } 692} 693 694static void peer_abort_upcall(struct iwch_ep *ep) 695{ 696 struct iw_cm_event event; 697 698 PDBG("%s ep %p\n", __FUNCTION__, ep); 699 memset(&event, 0, sizeof(event)); 700 event.event = IW_CM_EVENT_CLOSE; 701 event.status = -ECONNRESET; 702 if (ep->com.cm_id) { 703 PDBG("abort delivered ep %p cm_id %p tid %d\n", ep, 704 ep->com.cm_id, ep->hwtid); 705 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 706 ep->com.cm_id->rem_ref(ep->com.cm_id); 707 ep->com.cm_id = NULL; 708 ep->com.qp = NULL; 709 } 710} 711 712static void connect_reply_upcall(struct iwch_ep *ep, int status) 713{ 714 struct iw_cm_event event; 715 716 PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status); 717 memset(&event, 0, sizeof(event)); 718 event.event = IW_CM_EVENT_CONNECT_REPLY; 719 event.status = status; 720 event.local_addr = ep->com.local_addr; 721 event.remote_addr = ep->com.remote_addr; 722 723 if ((status == 0) || (status == -ECONNREFUSED)) { 724 event.private_data_len = ep->plen; 725 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 726 } 727 if (ep->com.cm_id) { 728 PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep, 729 ep->hwtid, status); 730 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 731 } 732 if (status < 0) { 733 ep->com.cm_id->rem_ref(ep->com.cm_id); 734 ep->com.cm_id = NULL; 735 ep->com.qp = NULL; 736 } 737} 738 739static void connect_request_upcall(struct iwch_ep *ep) 740{ 741 struct iw_cm_event event; 742 743 PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid); 744 memset(&event, 0, sizeof(event)); 745 event.event = IW_CM_EVENT_CONNECT_REQUEST; 746 event.local_addr = ep->com.local_addr; 747 event.remote_addr = ep->com.remote_addr; 748 event.private_data_len = ep->plen; 749 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 750 event.provider_data = ep; 751 if (state_read(&ep->parent_ep->com) != DEAD) 752 ep->parent_ep->com.cm_id->event_handler( 753 ep->parent_ep->com.cm_id, 754 &event); 755 put_ep(&ep->parent_ep->com); 756 ep->parent_ep = NULL; 757} 758 759static void established_upcall(struct iwch_ep *ep) 760{ 761 struct iw_cm_event event; 762 763 PDBG("%s ep %p\n", __FUNCTION__, ep); 764 memset(&event, 0, sizeof(event)); 765 event.event = IW_CM_EVENT_ESTABLISHED; 766 if (ep->com.cm_id) { 767 PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid); 768 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 769 } 770} 771 772static int update_rx_credits(struct iwch_ep *ep, u32 credits) 773{ 774 struct cpl_rx_data_ack *req; 775 struct sk_buff *skb; 776 777 PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits); 778 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 779 if (!skb) { 780 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); 781 return 0; 782 } 783 784 req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req)); 785 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 786 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); 787 req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1)); 788 skb->priority = CPL_PRIORITY_ACK; 789 ep->com.tdev->send(ep->com.tdev, skb); 790 return credits; 791} 792 793static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb) 794{ 795 struct mpa_message *mpa; 796 u16 plen; 797 struct iwch_qp_attributes attrs; 798 enum iwch_qp_attr_mask mask; 799 int err; 800 801 PDBG("%s ep %p\n", __FUNCTION__, ep); 802 803 /* 804 * Stop mpa timer. If it expired, then the state has 805 * changed and we bail since ep_timeout already aborted 806 * the connection. 807 */ 808 stop_ep_timer(ep); 809 if (state_read(&ep->com) != MPA_REQ_SENT) 810 return; 811 812 /* 813 * If we get more than the supported amount of private data 814 * then we must fail this connection. 815 */ 816 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 817 err = -EINVAL; 818 goto err; 819 } 820 821 /* 822 * copy the new data into our accumulation buffer. 823 */ 824 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 825 skb->len); 826 ep->mpa_pkt_len += skb->len; 827 828 /* 829 * if we don't even have the mpa message, then bail. 830 */ 831 if (ep->mpa_pkt_len < sizeof(*mpa)) 832 return; 833 mpa = (struct mpa_message *) ep->mpa_pkt; 834 835 /* Validate MPA header. */ 836 if (mpa->revision != mpa_rev) { 837 err = -EPROTO; 838 goto err; 839 } 840 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 841 err = -EPROTO; 842 goto err; 843 } 844 845 plen = ntohs(mpa->private_data_size); 846 847 /* 848 * Fail if there's too much private data. 849 */ 850 if (plen > MPA_MAX_PRIVATE_DATA) { 851 err = -EPROTO; 852 goto err; 853 } 854 855 /* 856 * If plen does not account for pkt size 857 */ 858 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 859 err = -EPROTO; 860 goto err; 861 } 862 863 ep->plen = (u8) plen; 864 865 /* 866 * If we don't have all the pdata yet, then bail. 867 * We'll continue process when more data arrives. 868 */ 869 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 870 return; 871 872 if (mpa->flags & MPA_REJECT) { 873 err = -ECONNREFUSED; 874 goto err; 875 } 876 877 /* 878 * If we get here we have accumulated the entire mpa 879 * start reply message including private data. And 880 * the MPA header is valid. 881 */ 882 state_set(&ep->com, FPDU_MODE); 883 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 884 ep->mpa_attr.recv_marker_enabled = markers_enabled; 885 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 886 ep->mpa_attr.version = mpa_rev; 887 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 888 "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__, 889 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 890 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); 891 892 attrs.mpa_attr = ep->mpa_attr; 893 attrs.max_ird = ep->ird; 894 attrs.max_ord = ep->ord; 895 attrs.llp_stream_handle = ep; 896 attrs.next_state = IWCH_QP_STATE_RTS; 897 898 mask = IWCH_QP_ATTR_NEXT_STATE | 899 IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR | 900 IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD; 901 902 /* bind QP and TID with INIT_WR */ 903 err = iwch_modify_qp(ep->com.qp->rhp, 904 ep->com.qp, mask, &attrs, 1); 905 if (!err) 906 goto out; 907err: 908 abort_connection(ep, skb, GFP_KERNEL); 909out: 910 connect_reply_upcall(ep, err); 911 return; 912} 913 914static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb) 915{ 916 struct mpa_message *mpa; 917 u16 plen; 918 919 PDBG("%s ep %p\n", __FUNCTION__, ep); 920 921 /* 922 * Stop mpa timer. If it expired, then the state has 923 * changed and we bail since ep_timeout already aborted 924 * the connection. 925 */ 926 stop_ep_timer(ep); 927 if (state_read(&ep->com) != MPA_REQ_WAIT) 928 return; 929 930 /* 931 * If we get more than the supported amount of private data 932 * then we must fail this connection. 933 */ 934 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 935 abort_connection(ep, skb, GFP_KERNEL); 936 return; 937 } 938 939 PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__); 940 941 /* 942 * Copy the new data into our accumulation buffer. 943 */ 944 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 945 skb->len); 946 ep->mpa_pkt_len += skb->len; 947 948 /* 949 * If we don't even have the mpa message, then bail. 950 * We'll continue process when more data arrives. 951 */ 952 if (ep->mpa_pkt_len < sizeof(*mpa)) 953 return; 954 PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__); 955 mpa = (struct mpa_message *) ep->mpa_pkt; 956 957 /* 958 * Validate MPA Header. 959 */ 960 if (mpa->revision != mpa_rev) { 961 abort_connection(ep, skb, GFP_KERNEL); 962 return; 963 } 964 965 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { 966 abort_connection(ep, skb, GFP_KERNEL); 967 return; 968 } 969 970 plen = ntohs(mpa->private_data_size); 971 972 /* 973 * Fail if there's too much private data. 974 */ 975 if (plen > MPA_MAX_PRIVATE_DATA) { 976 abort_connection(ep, skb, GFP_KERNEL); 977 return; 978 } 979 980 /* 981 * If plen does not account for pkt size 982 */ 983 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 984 abort_connection(ep, skb, GFP_KERNEL); 985 return; 986 } 987 ep->plen = (u8) plen; 988 989 /* 990 * If we don't have all the pdata yet, then bail. 991 */ 992 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 993 return; 994 995 /* 996 * If we get here we have accumulated the entire mpa 997 * start reply message including private data. 998 */ 999 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1000 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1001 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1002 ep->mpa_attr.version = mpa_rev; 1003 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1004 "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__, 1005 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1006 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); 1007 1008 state_set(&ep->com, MPA_REQ_RCVD); 1009 1010 /* drive upcall */ 1011 connect_request_upcall(ep); 1012 return; 1013} 1014 1015static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1016{ 1017 struct iwch_ep *ep = ctx; 1018 struct cpl_rx_data *hdr = cplhdr(skb); 1019 unsigned int dlen = ntohs(hdr->len); 1020 1021 PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen); 1022 1023 skb_pull(skb, sizeof(*hdr)); 1024 skb_trim(skb, dlen); 1025 1026 switch (state_read(&ep->com)) { 1027 case MPA_REQ_SENT: 1028 process_mpa_reply(ep, skb); 1029 break; 1030 case MPA_REQ_WAIT: 1031 process_mpa_request(ep, skb); 1032 break; 1033 case MPA_REP_SENT: 1034 break; 1035 default: 1036 printk(KERN_ERR MOD "%s Unexpected streaming data." 1037 " ep %p state %d tid %d\n", 1038 __FUNCTION__, ep, state_read(&ep->com), ep->hwtid); 1039 1040 /* 1041 * The ep will timeout and inform the ULP of the failure. 1042 * See ep_timeout(). 1043 */ 1044 break; 1045 } 1046 1047 /* update RX credits */ 1048 update_rx_credits(ep, dlen); 1049 1050 return CPL_RET_BUF_DONE; 1051} 1052 1053/* 1054 * Upcall from the adapter indicating data has been transmitted. 1055 * For us its just the single MPA request or reply. We can now free 1056 * the skb holding the mpa message. 1057 */ 1058static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1059{ 1060 struct iwch_ep *ep = ctx; 1061 struct cpl_wr_ack *hdr = cplhdr(skb); 1062 unsigned int credits = ntohs(hdr->credits); 1063 enum iwch_qp_attr_mask mask; 1064 1065 PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits); 1066 1067 if (credits == 0) 1068 return CPL_RET_BUF_DONE; 1069 BUG_ON(credits != 1); 1070 BUG_ON(ep->mpa_skb == NULL); 1071 kfree_skb(ep->mpa_skb); 1072 ep->mpa_skb = NULL; 1073 dst_confirm(ep->dst); 1074 if (state_read(&ep->com) == MPA_REP_SENT) { 1075 struct iwch_qp_attributes attrs; 1076 1077 /* bind QP to EP and move to RTS */ 1078 attrs.mpa_attr = ep->mpa_attr; 1079 attrs.max_ird = ep->ord; 1080 attrs.max_ord = ep->ord; 1081 attrs.llp_stream_handle = ep; 1082 attrs.next_state = IWCH_QP_STATE_RTS; 1083 1084 /* bind QP and TID with INIT_WR */ 1085 mask = IWCH_QP_ATTR_NEXT_STATE | 1086 IWCH_QP_ATTR_LLP_STREAM_HANDLE | 1087 IWCH_QP_ATTR_MPA_ATTR | 1088 IWCH_QP_ATTR_MAX_IRD | 1089 IWCH_QP_ATTR_MAX_ORD; 1090 1091 ep->com.rpl_err = iwch_modify_qp(ep->com.qp->rhp, 1092 ep->com.qp, mask, &attrs, 1); 1093 1094 if (!ep->com.rpl_err) { 1095 state_set(&ep->com, FPDU_MODE); 1096 established_upcall(ep); 1097 } 1098 1099 ep->com.rpl_done = 1; 1100 PDBG("waking up ep %p\n", ep); 1101 wake_up(&ep->com.waitq); 1102 } 1103 return CPL_RET_BUF_DONE; 1104} 1105 1106static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1107{ 1108 struct iwch_ep *ep = ctx; 1109 1110 PDBG("%s ep %p\n", __FUNCTION__, ep); 1111 1112 /* 1113 * We get 2 abort replies from the HW. The first one must 1114 * be ignored except for scribbling that we need one more. 1115 */ 1116 if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) { 1117 ep->flags |= ABORT_REQ_IN_PROGRESS; 1118 return CPL_RET_BUF_DONE; 1119 } 1120 1121 close_complete_upcall(ep); 1122 state_set(&ep->com, DEAD); 1123 release_ep_resources(ep); 1124 return CPL_RET_BUF_DONE; 1125} 1126 1127static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1128{ 1129 struct iwch_ep *ep = ctx; 1130 struct cpl_act_open_rpl *rpl = cplhdr(skb); 1131 1132 PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status, 1133 status2errno(rpl->status)); 1134 connect_reply_upcall(ep, status2errno(rpl->status)); 1135 state_set(&ep->com, DEAD); 1136 if (ep->com.tdev->type == T3B) 1137 release_tid(ep->com.tdev, GET_TID(rpl), NULL); 1138 cxgb3_free_atid(ep->com.tdev, ep->atid); 1139 dst_release(ep->dst); 1140 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 1141 put_ep(&ep->com); 1142 return CPL_RET_BUF_DONE; 1143} 1144 1145static int listen_start(struct iwch_listen_ep *ep) 1146{ 1147 struct sk_buff *skb; 1148 struct cpl_pass_open_req *req; 1149 1150 PDBG("%s ep %p\n", __FUNCTION__, ep); 1151 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1152 if (!skb) { 1153 printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n"); 1154 return -ENOMEM; 1155 } 1156 1157 req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req)); 1158 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1159 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid)); 1160 req->local_port = ep->com.local_addr.sin_port; 1161 req->local_ip = ep->com.local_addr.sin_addr.s_addr; 1162 req->peer_port = 0; 1163 req->peer_ip = 0; 1164 req->peer_netmask = 0; 1165 req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS); 1166 req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10)); 1167 req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK)); 1168 1169 skb->priority = 1; 1170 ep->com.tdev->send(ep->com.tdev, skb); 1171 return 0; 1172} 1173 1174static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1175{ 1176 struct iwch_listen_ep *ep = ctx; 1177 struct cpl_pass_open_rpl *rpl = cplhdr(skb); 1178 1179 PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep, 1180 rpl->status, status2errno(rpl->status)); 1181 ep->com.rpl_err = status2errno(rpl->status); 1182 ep->com.rpl_done = 1; 1183 wake_up(&ep->com.waitq); 1184 1185 return CPL_RET_BUF_DONE; 1186} 1187 1188static int listen_stop(struct iwch_listen_ep *ep) 1189{ 1190 struct sk_buff *skb; 1191 struct cpl_close_listserv_req *req; 1192 1193 PDBG("%s ep %p\n", __FUNCTION__, ep); 1194 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1195 if (!skb) { 1196 printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__); 1197 return -ENOMEM; 1198 } 1199 req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req)); 1200 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1201 req->cpu_idx = 0; 1202 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); 1203 skb->priority = 1; 1204 ep->com.tdev->send(ep->com.tdev, skb); 1205 return 0; 1206} 1207 1208static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb, 1209 void *ctx) 1210{ 1211 struct iwch_listen_ep *ep = ctx; 1212 struct cpl_close_listserv_rpl *rpl = cplhdr(skb); 1213 1214 PDBG("%s ep %p\n", __FUNCTION__, ep); 1215 ep->com.rpl_err = status2errno(rpl->status); 1216 ep->com.rpl_done = 1; 1217 wake_up(&ep->com.waitq); 1218 return CPL_RET_BUF_DONE; 1219} 1220 1221static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb) 1222{ 1223 struct cpl_pass_accept_rpl *rpl; 1224 unsigned int mtu_idx; 1225 u32 opt0h, opt0l, opt2; 1226 int wscale; 1227 1228 PDBG("%s ep %p\n", __FUNCTION__, ep); 1229 BUG_ON(skb_cloned(skb)); 1230 skb_trim(skb, sizeof(*rpl)); 1231 skb_get(skb); 1232 mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst)); 1233 wscale = compute_wscale(rcv_win); 1234 opt0h = V_NAGLE(0) | 1235 V_NO_CONG(nocong) | 1236 V_KEEP_ALIVE(1) | 1237 F_TCAM_BYPASS | 1238 V_WND_SCALE(wscale) | 1239 V_MSS_IDX(mtu_idx) | 1240 V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); 1241 opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); 1242 opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); 1243 1244 rpl = cplhdr(skb); 1245 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1246 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid)); 1247 rpl->peer_ip = peer_ip; 1248 rpl->opt0h = htonl(opt0h); 1249 rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT); 1250 rpl->opt2 = htonl(opt2); 1251 rpl->rsvd = rpl->opt2; 1252 skb->priority = CPL_PRIORITY_SETUP; 1253 l2t_send(ep->com.tdev, skb, ep->l2t); 1254 1255 return; 1256} 1257 1258static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip, 1259 struct sk_buff *skb) 1260{ 1261 PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid, 1262 peer_ip); 1263 BUG_ON(skb_cloned(skb)); 1264 skb_trim(skb, sizeof(struct cpl_tid_release)); 1265 skb_get(skb); 1266 1267 if (tdev->type == T3B) 1268 release_tid(tdev, hwtid, skb); 1269 else { 1270 struct cpl_pass_accept_rpl *rpl; 1271 1272 rpl = cplhdr(skb); 1273 skb->priority = CPL_PRIORITY_SETUP; 1274 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1275 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 1276 hwtid)); 1277 rpl->peer_ip = peer_ip; 1278 rpl->opt0h = htonl(F_TCAM_BYPASS); 1279 rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT); 1280 rpl->opt2 = 0; 1281 rpl->rsvd = rpl->opt2; 1282 tdev->send(tdev, skb); 1283 } 1284} 1285 1286static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1287{ 1288 struct iwch_ep *child_ep, *parent_ep = ctx; 1289 struct cpl_pass_accept_req *req = cplhdr(skb); 1290 unsigned int hwtid = GET_TID(req); 1291 struct dst_entry *dst; 1292 struct l2t_entry *l2t; 1293 struct rtable *rt; 1294 struct iff_mac tim; 1295 1296 PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid); 1297 1298 if (state_read(&parent_ep->com) != LISTEN) { 1299 printk(KERN_ERR "%s - listening ep not in LISTEN\n", 1300 __FUNCTION__); 1301 goto reject; 1302 } 1303 1304 /* 1305 * Find the netdev for this connection request. 1306 */ 1307 tim.mac_addr = req->dst_mac; 1308 tim.vlan_tag = ntohs(req->vlan_tag); 1309 if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) { 1310 printk(KERN_ERR 1311 "%s bad dst mac %02x %02x %02x %02x %02x %02x\n", 1312 __FUNCTION__, 1313 req->dst_mac[0], 1314 req->dst_mac[1], 1315 req->dst_mac[2], 1316 req->dst_mac[3], 1317 req->dst_mac[4], 1318 req->dst_mac[5]); 1319 goto reject; 1320 } 1321 1322 /* Find output route */ 1323 rt = find_route(tdev, 1324 req->local_ip, 1325 req->peer_ip, 1326 req->local_port, 1327 req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid))); 1328 if (!rt) { 1329 printk(KERN_ERR MOD "%s - failed to find dst entry!\n", 1330 __FUNCTION__); 1331 goto reject; 1332 } 1333 dst = &rt->u.dst; 1334 l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev); 1335 if (!l2t) { 1336 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 1337 __FUNCTION__); 1338 dst_release(dst); 1339 goto reject; 1340 } 1341 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 1342 if (!child_ep) { 1343 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 1344 __FUNCTION__); 1345 l2t_release(L2DATA(tdev), l2t); 1346 dst_release(dst); 1347 goto reject; 1348 } 1349 state_set(&child_ep->com, CONNECTING); 1350 child_ep->com.tdev = tdev; 1351 child_ep->com.cm_id = NULL; 1352 child_ep->com.local_addr.sin_family = PF_INET; 1353 child_ep->com.local_addr.sin_port = req->local_port; 1354 child_ep->com.local_addr.sin_addr.s_addr = req->local_ip; 1355 child_ep->com.remote_addr.sin_family = PF_INET; 1356 child_ep->com.remote_addr.sin_port = req->peer_port; 1357 child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip; 1358 get_ep(&parent_ep->com); 1359 child_ep->parent_ep = parent_ep; 1360 child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid)); 1361 child_ep->l2t = l2t; 1362 child_ep->dst = dst; 1363 child_ep->hwtid = hwtid; 1364 init_timer(&child_ep->timer); 1365 cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid); 1366 accept_cr(child_ep, req->peer_ip, skb); 1367 goto out; 1368reject: 1369 reject_cr(tdev, hwtid, req->peer_ip, skb); 1370out: 1371 return CPL_RET_BUF_DONE; 1372} 1373 1374static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1375{ 1376 struct iwch_ep *ep = ctx; 1377 struct cpl_pass_establish *req = cplhdr(skb); 1378 1379 PDBG("%s ep %p\n", __FUNCTION__, ep); 1380 ep->snd_seq = ntohl(req->snd_isn); 1381 1382 set_emss(ep, ntohs(req->tcp_opt)); 1383 1384 dst_confirm(ep->dst); 1385 state_set(&ep->com, MPA_REQ_WAIT); 1386 start_ep_timer(ep); 1387 1388 return CPL_RET_BUF_DONE; 1389} 1390 1391static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1392{ 1393 struct iwch_ep *ep = ctx; 1394 struct iwch_qp_attributes attrs; 1395 unsigned long flags; 1396 int disconnect = 1; 1397 int release = 0; 1398 1399 PDBG("%s ep %p\n", __FUNCTION__, ep); 1400 dst_confirm(ep->dst); 1401 1402 spin_lock_irqsave(&ep->com.lock, flags); 1403 switch (ep->com.state) { 1404 case MPA_REQ_WAIT: 1405 __state_set(&ep->com, CLOSING); 1406 break; 1407 case MPA_REQ_SENT: 1408 __state_set(&ep->com, CLOSING); 1409 connect_reply_upcall(ep, -ECONNRESET); 1410 break; 1411 case MPA_REQ_RCVD: 1412 1413 /* 1414 * We're gonna mark this puppy DEAD, but keep 1415 * the reference on it until the ULP accepts or 1416 * rejects the CR. 1417 */ 1418 __state_set(&ep->com, CLOSING); 1419 get_ep(&ep->com); 1420 break; 1421 case MPA_REP_SENT: 1422 __state_set(&ep->com, CLOSING); 1423 ep->com.rpl_done = 1; 1424 ep->com.rpl_err = -ECONNRESET; 1425 PDBG("waking up ep %p\n", ep); 1426 wake_up(&ep->com.waitq); 1427 break; 1428 case FPDU_MODE: 1429 start_ep_timer(ep); 1430 __state_set(&ep->com, CLOSING); 1431 attrs.next_state = IWCH_QP_STATE_CLOSING; 1432 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, 1433 IWCH_QP_ATTR_NEXT_STATE, &attrs, 1); 1434 peer_close_upcall(ep); 1435 break; 1436 case ABORTING: 1437 disconnect = 0; 1438 break; 1439 case CLOSING: 1440 __state_set(&ep->com, MORIBUND); 1441 disconnect = 0; 1442 break; 1443 case MORIBUND: 1444 stop_ep_timer(ep); 1445 if (ep->com.cm_id && ep->com.qp) { 1446 attrs.next_state = IWCH_QP_STATE_IDLE; 1447 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, 1448 IWCH_QP_ATTR_NEXT_STATE, &attrs, 1); 1449 } 1450 close_complete_upcall(ep); 1451 __state_set(&ep->com, DEAD); 1452 release = 1; 1453 disconnect = 0; 1454 break; 1455 case DEAD: 1456 disconnect = 0; 1457 break; 1458 default: 1459 BUG_ON(1); 1460 } 1461 spin_unlock_irqrestore(&ep->com.lock, flags); 1462 if (disconnect) 1463 iwch_ep_disconnect(ep, 0, GFP_KERNEL); 1464 if (release) 1465 release_ep_resources(ep); 1466 return CPL_RET_BUF_DONE; 1467} 1468 1469/* 1470 * Returns whether an ABORT_REQ_RSS message is a negative advice. 1471 */ 1472static int is_neg_adv_abort(unsigned int status) 1473{ 1474 return status == CPL_ERR_RTX_NEG_ADVICE || 1475 status == CPL_ERR_PERSIST_NEG_ADVICE; 1476} 1477 1478static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1479{ 1480 struct cpl_abort_req_rss *req = cplhdr(skb); 1481 struct iwch_ep *ep = ctx; 1482 struct cpl_abort_rpl *rpl; 1483 struct sk_buff *rpl_skb; 1484 struct iwch_qp_attributes attrs; 1485 int ret; 1486 int state; 1487 1488 /* 1489 * We get 2 peer aborts from the HW. The first one must 1490 * be ignored except for scribbling that we need one more. 1491 */ 1492 if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) { 1493 ep->flags |= PEER_ABORT_IN_PROGRESS; 1494 return CPL_RET_BUF_DONE; 1495 } 1496 1497 if (is_neg_adv_abort(req->status)) { 1498 PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep, 1499 ep->hwtid); 1500 t3_l2t_send_event(ep->com.tdev, ep->l2t); 1501 return CPL_RET_BUF_DONE; 1502 } 1503 1504 state = state_read(&ep->com); 1505 PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state); 1506 switch (state) { 1507 case CONNECTING: 1508 break; 1509 case MPA_REQ_WAIT: 1510 stop_ep_timer(ep); 1511 break; 1512 case MPA_REQ_SENT: 1513 stop_ep_timer(ep); 1514 connect_reply_upcall(ep, -ECONNRESET); 1515 break; 1516 case MPA_REP_SENT: 1517 ep->com.rpl_done = 1; 1518 ep->com.rpl_err = -ECONNRESET; 1519 PDBG("waking up ep %p\n", ep); 1520 wake_up(&ep->com.waitq); 1521 break; 1522 case MPA_REQ_RCVD: 1523 1524 /* 1525 * We're gonna mark this puppy DEAD, but keep 1526 * the reference on it until the ULP accepts or 1527 * rejects the CR. 1528 */ 1529 get_ep(&ep->com); 1530 break; 1531 case MORIBUND: 1532 case CLOSING: 1533 stop_ep_timer(ep); 1534 /*FALLTHROUGH*/ 1535 case FPDU_MODE: 1536 if (ep->com.cm_id && ep->com.qp) { 1537 attrs.next_state = IWCH_QP_STATE_ERROR; 1538 ret = iwch_modify_qp(ep->com.qp->rhp, 1539 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, 1540 &attrs, 1); 1541 if (ret) 1542 printk(KERN_ERR MOD 1543 "%s - qp <- error failed!\n", 1544 __FUNCTION__); 1545 } 1546 peer_abort_upcall(ep); 1547 break; 1548 case ABORTING: 1549 break; 1550 case DEAD: 1551 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__); 1552 return CPL_RET_BUF_DONE; 1553 default: 1554 BUG_ON(1); 1555 break; 1556 } 1557 dst_confirm(ep->dst); 1558 1559 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 1560 if (!rpl_skb) { 1561 printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 1562 __FUNCTION__); 1563 dst_release(ep->dst); 1564 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 1565 put_ep(&ep->com); 1566 return CPL_RET_BUF_DONE; 1567 } 1568 rpl_skb->priority = CPL_PRIORITY_DATA; 1569 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); 1570 rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL)); 1571 rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); 1572 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 1573 rpl->cmd = CPL_ABORT_NO_RST; 1574 ep->com.tdev->send(ep->com.tdev, rpl_skb); 1575 if (state != ABORTING) { 1576 state_set(&ep->com, DEAD); 1577 release_ep_resources(ep); 1578 } 1579 return CPL_RET_BUF_DONE; 1580} 1581 1582static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1583{ 1584 struct iwch_ep *ep = ctx; 1585 struct iwch_qp_attributes attrs; 1586 unsigned long flags; 1587 int release = 0; 1588 1589 PDBG("%s ep %p\n", __FUNCTION__, ep); 1590 BUG_ON(!ep); 1591 1592 /* The cm_id may be null if we failed to connect */ 1593 spin_lock_irqsave(&ep->com.lock, flags); 1594 switch (ep->com.state) { 1595 case CLOSING: 1596 __state_set(&ep->com, MORIBUND); 1597 break; 1598 case MORIBUND: 1599 stop_ep_timer(ep); 1600 if ((ep->com.cm_id) && (ep->com.qp)) { 1601 attrs.next_state = IWCH_QP_STATE_IDLE; 1602 iwch_modify_qp(ep->com.qp->rhp, 1603 ep->com.qp, 1604 IWCH_QP_ATTR_NEXT_STATE, 1605 &attrs, 1); 1606 } 1607 close_complete_upcall(ep); 1608 __state_set(&ep->com, DEAD); 1609 release = 1; 1610 break; 1611 case ABORTING: 1612 break; 1613 case DEAD: 1614 default: 1615 BUG_ON(1); 1616 break; 1617 } 1618 spin_unlock_irqrestore(&ep->com.lock, flags); 1619 if (release) 1620 release_ep_resources(ep); 1621 return CPL_RET_BUF_DONE; 1622} 1623 1624/* 1625 * T3A does 3 things when a TERM is received: 1626 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet 1627 * 2) generate an async event on the QP with the TERMINATE opcode 1628 * 3) post a TERMINATE opcde cqe into the associated CQ. 1629 * 1630 * For (1), we save the message in the qp for later consumer consumption. 1631 * For (2), we move the QP into TERMINATE, post a QP event and disconnect. 1632 * For (3), we toss the CQE in cxio_poll_cq(). 1633 * 1634 * terminate() handles case (1)... 1635 */ 1636static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1637{ 1638 struct iwch_ep *ep = ctx; 1639 1640 PDBG("%s ep %p\n", __FUNCTION__, ep); 1641 skb_pull(skb, sizeof(struct cpl_rdma_terminate)); 1642 PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len); 1643 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, 1644 skb->len); 1645 ep->com.qp->attr.terminate_msg_len = skb->len; 1646 ep->com.qp->attr.is_terminate_local = 0; 1647 return CPL_RET_BUF_DONE; 1648} 1649 1650static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 1651{ 1652 struct cpl_rdma_ec_status *rep = cplhdr(skb); 1653 struct iwch_ep *ep = ctx; 1654 1655 PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid, 1656 rep->status); 1657 if (rep->status) { 1658 struct iwch_qp_attributes attrs; 1659 1660 printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n", 1661 __FUNCTION__, ep->hwtid); 1662 stop_ep_timer(ep); 1663 attrs.next_state = IWCH_QP_STATE_ERROR; 1664 iwch_modify_qp(ep->com.qp->rhp, 1665 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, 1666 &attrs, 1); 1667 abort_connection(ep, NULL, GFP_KERNEL); 1668 } 1669 return CPL_RET_BUF_DONE; 1670} 1671 1672static void ep_timeout(unsigned long arg) 1673{ 1674 struct iwch_ep *ep = (struct iwch_ep *)arg; 1675 struct iwch_qp_attributes attrs; 1676 unsigned long flags; 1677 1678 spin_lock_irqsave(&ep->com.lock, flags); 1679 PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid, 1680 ep->com.state); 1681 switch (ep->com.state) { 1682 case MPA_REQ_SENT: 1683 connect_reply_upcall(ep, -ETIMEDOUT); 1684 break; 1685 case MPA_REQ_WAIT: 1686 break; 1687 case CLOSING: 1688 case MORIBUND: 1689 if (ep->com.cm_id && ep->com.qp) { 1690 attrs.next_state = IWCH_QP_STATE_ERROR; 1691 iwch_modify_qp(ep->com.qp->rhp, 1692 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, 1693 &attrs, 1); 1694 } 1695 break; 1696 default: 1697 BUG(); 1698 } 1699 __state_set(&ep->com, CLOSING); 1700 spin_unlock_irqrestore(&ep->com.lock, flags); 1701 abort_connection(ep, NULL, GFP_ATOMIC); 1702 put_ep(&ep->com); 1703} 1704 1705int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 1706{ 1707 int err; 1708 struct iwch_ep *ep = to_ep(cm_id); 1709 PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid); 1710 1711 if (state_read(&ep->com) == DEAD) { 1712 put_ep(&ep->com); 1713 return -ECONNRESET; 1714 } 1715 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1716 if (mpa_rev == 0) 1717 abort_connection(ep, NULL, GFP_KERNEL); 1718 else { 1719 err = send_mpa_reject(ep, pdata, pdata_len); 1720 err = iwch_ep_disconnect(ep, 0, GFP_KERNEL); 1721 } 1722 return 0; 1723} 1724 1725int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 1726{ 1727 int err; 1728 struct iwch_qp_attributes attrs; 1729 enum iwch_qp_attr_mask mask; 1730 struct iwch_ep *ep = to_ep(cm_id); 1731 struct iwch_dev *h = to_iwch_dev(cm_id->device); 1732 struct iwch_qp *qp = get_qhp(h, conn_param->qpn); 1733 1734 PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid); 1735 if (state_read(&ep->com) == DEAD) { 1736 put_ep(&ep->com); 1737 return -ECONNRESET; 1738 } 1739 1740 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1741 BUG_ON(!qp); 1742 1743 if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) || 1744 (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) { 1745 abort_connection(ep, NULL, GFP_KERNEL); 1746 return -EINVAL; 1747 } 1748 1749 cm_id->add_ref(cm_id); 1750 ep->com.cm_id = cm_id; 1751 ep->com.qp = qp; 1752 1753 ep->com.rpl_done = 0; 1754 ep->com.rpl_err = 0; 1755 ep->ird = conn_param->ird; 1756 ep->ord = conn_param->ord; 1757 PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord); 1758 get_ep(&ep->com); 1759 err = send_mpa_reply(ep, conn_param->private_data, 1760 conn_param->private_data_len); 1761 if (err) { 1762 ep->com.cm_id = NULL; 1763 ep->com.qp = NULL; 1764 cm_id->rem_ref(cm_id); 1765 abort_connection(ep, NULL, GFP_KERNEL); 1766 put_ep(&ep->com); 1767 return err; 1768 } 1769 1770 /* bind QP to EP and move to RTS */ 1771 attrs.mpa_attr = ep->mpa_attr; 1772 attrs.max_ird = ep->ord; 1773 attrs.max_ord = ep->ord; 1774 attrs.llp_stream_handle = ep; 1775 attrs.next_state = IWCH_QP_STATE_RTS; 1776 1777 /* bind QP and TID with INIT_WR */ 1778 mask = IWCH_QP_ATTR_NEXT_STATE | 1779 IWCH_QP_ATTR_LLP_STREAM_HANDLE | 1780 IWCH_QP_ATTR_MPA_ATTR | 1781 IWCH_QP_ATTR_MAX_IRD | 1782 IWCH_QP_ATTR_MAX_ORD; 1783 1784 err = iwch_modify_qp(ep->com.qp->rhp, 1785 ep->com.qp, mask, &attrs, 1); 1786 1787 if (err) { 1788 ep->com.cm_id = NULL; 1789 ep->com.qp = NULL; 1790 cm_id->rem_ref(cm_id); 1791 abort_connection(ep, NULL, GFP_KERNEL); 1792 } else { 1793 state_set(&ep->com, FPDU_MODE); 1794 established_upcall(ep); 1795 } 1796 put_ep(&ep->com); 1797 return err; 1798} 1799 1800int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 1801{ 1802 int err = 0; 1803 struct iwch_dev *h = to_iwch_dev(cm_id->device); 1804 struct iwch_ep *ep; 1805 struct rtable *rt; 1806 1807 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 1808 if (!ep) { 1809 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__); 1810 err = -ENOMEM; 1811 goto out; 1812 } 1813 init_timer(&ep->timer); 1814 ep->plen = conn_param->private_data_len; 1815 if (ep->plen) 1816 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 1817 conn_param->private_data, ep->plen); 1818 ep->ird = conn_param->ird; 1819 ep->ord = conn_param->ord; 1820 ep->com.tdev = h->rdev.t3cdev_p; 1821 1822 cm_id->add_ref(cm_id); 1823 ep->com.cm_id = cm_id; 1824 ep->com.qp = get_qhp(h, conn_param->qpn); 1825 BUG_ON(!ep->com.qp); 1826 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn, 1827 ep->com.qp, cm_id); 1828 1829 /* 1830 * Allocate an active TID to initiate a TCP connection. 1831 */ 1832 ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep); 1833 if (ep->atid == -1) { 1834 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__); 1835 err = -ENOMEM; 1836 goto fail2; 1837 } 1838 1839 /* find a route */ 1840 rt = find_route(h->rdev.t3cdev_p, 1841 cm_id->local_addr.sin_addr.s_addr, 1842 cm_id->remote_addr.sin_addr.s_addr, 1843 cm_id->local_addr.sin_port, 1844 cm_id->remote_addr.sin_port, IPTOS_LOWDELAY); 1845 if (!rt) { 1846 printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__); 1847 err = -EHOSTUNREACH; 1848 goto fail3; 1849 } 1850 ep->dst = &rt->u.dst; 1851 1852 /* get a l2t entry */ 1853 ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour, 1854 ep->dst->neighbour->dev); 1855 if (!ep->l2t) { 1856 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__); 1857 err = -ENOMEM; 1858 goto fail4; 1859 } 1860 1861 state_set(&ep->com, CONNECTING); 1862 ep->tos = IPTOS_LOWDELAY; 1863 ep->com.local_addr = cm_id->local_addr; 1864 ep->com.remote_addr = cm_id->remote_addr; 1865 1866 /* send connect request to rnic */ 1867 err = send_connect(ep); 1868 if (!err) 1869 goto out; 1870 1871 l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t); 1872fail4: 1873 dst_release(ep->dst); 1874fail3: 1875 cxgb3_free_atid(ep->com.tdev, ep->atid); 1876fail2: 1877 put_ep(&ep->com); 1878out: 1879 return err; 1880} 1881 1882int iwch_create_listen(struct iw_cm_id *cm_id, int backlog) 1883{ 1884 int err = 0; 1885 struct iwch_dev *h = to_iwch_dev(cm_id->device); 1886 struct iwch_listen_ep *ep; 1887 1888 1889 might_sleep(); 1890 1891 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 1892 if (!ep) { 1893 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__); 1894 err = -ENOMEM; 1895 goto fail1; 1896 } 1897 PDBG("%s ep %p\n", __FUNCTION__, ep); 1898 ep->com.tdev = h->rdev.t3cdev_p; 1899 cm_id->add_ref(cm_id); 1900 ep->com.cm_id = cm_id; 1901 ep->backlog = backlog; 1902 ep->com.local_addr = cm_id->local_addr; 1903 1904 /* 1905 * Allocate a server TID. 1906 */ 1907 ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep); 1908 if (ep->stid == -1) { 1909 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__); 1910 err = -ENOMEM; 1911 goto fail2; 1912 } 1913 1914 state_set(&ep->com, LISTEN); 1915 err = listen_start(ep); 1916 if (err) 1917 goto fail3; 1918 1919 /* wait for pass_open_rpl */ 1920 wait_event(ep->com.waitq, ep->com.rpl_done); 1921 err = ep->com.rpl_err; 1922 if (!err) { 1923 cm_id->provider_data = ep; 1924 goto out; 1925 } 1926fail3: 1927 cxgb3_free_stid(ep->com.tdev, ep->stid); 1928fail2: 1929 put_ep(&ep->com); 1930fail1: 1931out: 1932 return err; 1933} 1934 1935int iwch_destroy_listen(struct iw_cm_id *cm_id) 1936{ 1937 int err; 1938 struct iwch_listen_ep *ep = to_listen_ep(cm_id); 1939 1940 PDBG("%s ep %p\n", __FUNCTION__, ep); 1941 1942 might_sleep(); 1943 state_set(&ep->com, DEAD); 1944 ep->com.rpl_done = 0; 1945 ep->com.rpl_err = 0; 1946 err = listen_stop(ep); 1947 wait_event(ep->com.waitq, ep->com.rpl_done); 1948 cxgb3_free_stid(ep->com.tdev, ep->stid); 1949 err = ep->com.rpl_err; 1950 cm_id->rem_ref(cm_id); 1951 put_ep(&ep->com); 1952 return err; 1953} 1954 1955int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp) 1956{ 1957 int ret=0; 1958 unsigned long flags; 1959 int close = 0; 1960 1961 spin_lock_irqsave(&ep->com.lock, flags); 1962 1963 PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep, 1964 states[ep->com.state], abrupt); 1965 1966 if (ep->com.state == DEAD) { 1967 PDBG("%s already dead ep %p\n", __FUNCTION__, ep); 1968 goto out; 1969 } 1970 1971 if (abrupt) { 1972 if (ep->com.state != ABORTING) { 1973 ep->com.state = ABORTING; 1974 close = 1; 1975 } 1976 goto out; 1977 } 1978 1979 switch (ep->com.state) { 1980 case MPA_REQ_WAIT: 1981 case MPA_REQ_SENT: 1982 case MPA_REQ_RCVD: 1983 case MPA_REP_SENT: 1984 case FPDU_MODE: 1985 start_ep_timer(ep); 1986 ep->com.state = CLOSING; 1987 close = 1; 1988 break; 1989 case CLOSING: 1990 ep->com.state = MORIBUND; 1991 close = 1; 1992 break; 1993 case MORIBUND: 1994 break; 1995 default: 1996 BUG(); 1997 break; 1998 } 1999out: 2000 spin_unlock_irqrestore(&ep->com.lock, flags); 2001 if (close) { 2002 if (abrupt) 2003 ret = send_abort(ep, NULL, gfp); 2004 else 2005 ret = send_halfclose(ep, gfp); 2006 } 2007 return ret; 2008} 2009 2010int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, 2011 struct l2t_entry *l2t) 2012{ 2013 struct iwch_ep *ep = ctx; 2014 2015 if (ep->dst != old) 2016 return 0; 2017 2018 PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new, 2019 l2t); 2020 dst_hold(new); 2021 l2t_release(L2DATA(ep->com.tdev), ep->l2t); 2022 ep->l2t = l2t; 2023 dst_release(old); 2024 ep->dst = new; 2025 return 1; 2026} 2027 2028/* 2029 * All the CM events are handled on a work queue to have a safe context. 2030 */ 2031static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 2032{ 2033 struct iwch_ep_common *epc = ctx; 2034 2035 get_ep(epc); 2036 2037 /* 2038 * Save ctx and tdev in the skb->cb area. 2039 */ 2040 *((void **) skb->cb) = ctx; 2041 *((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev; 2042 2043 /* 2044 * Queue the skb and schedule the worker thread. 2045 */ 2046 skb_queue_tail(&rxq, skb); 2047 queue_work(workq, &skb_work); 2048 return 0; 2049} 2050 2051static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 2052{ 2053 struct cpl_set_tcb_rpl *rpl = cplhdr(skb); 2054 2055 if (rpl->status != CPL_ERR_NONE) { 2056 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " 2057 "for tid %u\n", rpl->status, GET_TID(rpl)); 2058 } 2059 return CPL_RET_BUF_DONE; 2060} 2061 2062int __init iwch_cm_init(void) 2063{ 2064 skb_queue_head_init(&rxq); 2065 2066 workq = create_singlethread_workqueue("iw_cxgb3"); 2067 if (!workq) 2068 return -ENOMEM; 2069 2070 /* 2071 * All upcalls from the T3 Core go to sched() to 2072 * schedule the processing on a work queue. 2073 */ 2074 t3c_handlers[CPL_ACT_ESTABLISH] = sched; 2075 t3c_handlers[CPL_ACT_OPEN_RPL] = sched; 2076 t3c_handlers[CPL_RX_DATA] = sched; 2077 t3c_handlers[CPL_TX_DMA_ACK] = sched; 2078 t3c_handlers[CPL_ABORT_RPL_RSS] = sched; 2079 t3c_handlers[CPL_ABORT_RPL] = sched; 2080 t3c_handlers[CPL_PASS_OPEN_RPL] = sched; 2081 t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched; 2082 t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched; 2083 t3c_handlers[CPL_PASS_ESTABLISH] = sched; 2084 t3c_handlers[CPL_PEER_CLOSE] = sched; 2085 t3c_handlers[CPL_CLOSE_CON_RPL] = sched; 2086 t3c_handlers[CPL_ABORT_REQ_RSS] = sched; 2087 t3c_handlers[CPL_RDMA_TERMINATE] = sched; 2088 t3c_handlers[CPL_RDMA_EC_STATUS] = sched; 2089 t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl; 2090 2091 /* 2092 * These are the real handlers that are called from a 2093 * work queue. 2094 */ 2095 work_handlers[CPL_ACT_ESTABLISH] = act_establish; 2096 work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl; 2097 work_handlers[CPL_RX_DATA] = rx_data; 2098 work_handlers[CPL_TX_DMA_ACK] = tx_ack; 2099 work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl; 2100 work_handlers[CPL_ABORT_RPL] = abort_rpl; 2101 work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl; 2102 work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl; 2103 work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req; 2104 work_handlers[CPL_PASS_ESTABLISH] = pass_establish; 2105 work_handlers[CPL_PEER_CLOSE] = peer_close; 2106 work_handlers[CPL_ABORT_REQ_RSS] = peer_abort; 2107 work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl; 2108 work_handlers[CPL_RDMA_TERMINATE] = terminate; 2109 work_handlers[CPL_RDMA_EC_STATUS] = ec_status; 2110 return 0; 2111} 2112 2113void __exit iwch_cm_term(void) 2114{ 2115 flush_workqueue(workq); 2116 destroy_workqueue(workq); 2117} 2118