/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/dev/cxgbe/iw_cxgbe/cm.c 283856 2015-05-31 23:47:08Z np $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))

#include "iw_cxgbe.h"
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>

static spinlock_t req_lock;
static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
static struct work_struct c4iw_task;
static struct workqueue_struct *c4iw_taskq;
static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void process_req(struct work_struct *ctx);
static void start_ep_timer(struct c4iw_ep *ep);
static void stop_ep_timer(struct c4iw_ep *ep);
static int set_tcpinfo(struct c4iw_ep *ep);
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc);
static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void *alloc_ep(int size, gfp_t flags);
void __free_ep(struct c4iw_ep_common *epc);
static struct rtentry *find_route(__be32 local_ip, __be32 peer_ip,
    __be16 local_port, __be16 peer_port, u8 tos);
static int close_socket(struct c4iw_ep_common *epc, int close);
static int shutdown_socket(struct c4iw_ep_common *epc);
static void abort_socket(struct c4iw_ep *ep);
static void send_mpa_req(struct c4iw_ep *ep);
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
static void close_complete_upcall(struct c4iw_ep *ep, int status);
static int abort_connection(struct c4iw_ep *ep);
static void peer_close_upcall(struct c4iw_ep *ep);
static void peer_abort_upcall(struct c4iw_ep *ep);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static void connect_request_upcall(struct c4iw_ep *ep);
static void established_upcall(struct c4iw_ep *ep);
static void process_mpa_reply(struct c4iw_ep *ep);
static void process_mpa_request(struct c4iw_ep *ep);
static void process_peer_close(struct c4iw_ep *ep);
static void process_conn_error(struct c4iw_ep *ep);
static void process_close_complete(struct c4iw_ep *ep);
static void ep_timeout(unsigned long arg);
static void init_sock(struct c4iw_ep_common *epc);
static void process_data(struct c4iw_ep *ep);
static void process_connected(struct c4iw_ep *ep);
static struct socket *dequeue_socket(struct socket *head,
    struct sockaddr_in **remote, struct c4iw_ep *child_ep);
static void process_newconn(struct c4iw_ep *parent_ep);
static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
static void process_socket_event(struct c4iw_ep *ep);
static void release_ep_resources(struct c4iw_ep *ep);

#define START_EP_TIMER(ep) \
	do { \
		CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
		    __func__, __LINE__, (ep)); \
		start_ep_timer(ep); \
	} while (0)

#define STOP_EP_TIMER(ep) \
	do { \
		CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
		    __func__, __LINE__, (ep)); \
		stop_ep_timer(ep); \
	} while (0)

#ifdef KTR
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
#endif

static void
process_req(struct work_struct *ctx)
{
	struct c4iw_ep_common *epc;

	spin_lock(&req_lock);
	while (!TAILQ_EMPTY(&req_list)) {
		epc = TAILQ_FIRST(&req_list);
		TAILQ_REMOVE(&req_list, epc, entry);
		epc->entry.tqe_prev = NULL;
		spin_unlock(&req_lock);
		if (epc->so)
			process_socket_event((struct c4iw_ep *)epc);
		c4iw_put_ep(epc);
		spin_lock(&req_lock);
	}
	spin_unlock(&req_lock);
}

/*
 * XXX: doesn't belong here in the iWARP driver.
 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
 * set.  Is this a valid assumption for active open?
 */
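/*
 * Snapshot the TCP state that the MPA/RDMA code needs (hardware tid, send
 * and receive sequence numbers, effective MSS) from the offloaded
 * connection's tcpcb/toepcb, under the inpcb write lock.
 */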
static int
set_tcpinfo(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct toepcb *toep;
	int rc = 0;

	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if ((tp->t_flags & TF_TOE) == 0) {
		rc = EINVAL;
		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
		    __func__, so, ep);
		goto done;
	}
	toep = TOEPCB(so);

	ep->hwtid = toep->tid;
	ep->snd_seq = tp->snd_nxt;
	ep->rcv_seq = tp->rcv_nxt;
	ep->emss = max(tp->t_maxseg, 128);
done:
	INP_WUNLOCK(inp);
	return (rc);
}

static struct rtentry *
find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
    __be16 peer_port, u8 tos)
{
	struct route iproute;
	struct sockaddr_in *dst = (struct sockaddr_in *)&iproute.ro_dst;

	CTR5(KTR_IW_CXGBE, "%s:frtB %x, %x, %d, %d", __func__, local_ip,
	    peer_ip, ntohs(local_port), ntohs(peer_port));
	bzero(&iproute, sizeof iproute);
	dst->sin_family = AF_INET;
	dst->sin_len = sizeof *dst;
	dst->sin_addr.s_addr = peer_ip;

	rtalloc(&iproute);
	CTR2(KTR_IW_CXGBE, "%s:frtE %p", __func__, (uint64_t)iproute.ro_rt);
	return iproute.ro_rt;
}

static int
close_socket(struct c4iw_ep_common *epc, int close)
{
	struct socket *so = epc->so;
	int rc;

	CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, so, epc,
	    states[epc->state]);

	SOCK_LOCK(so);
	soupcall_clear(so, SO_RCV);
	SOCK_UNLOCK(so);

	if (close)
		rc = soclose(so);
	else
		rc = soshutdown(so, SHUT_RDWR);
	epc->so = NULL;

	return (rc);
}

static int
shutdown_socket(struct c4iw_ep_common *epc)
{

	CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, epc->so, epc,
	    states[epc->state]);

	return (soshutdown(epc->so, SHUT_WR));
}

static void
abort_socket(struct c4iw_ep *ep)
{
	struct sockopt sopt;
	int rc;
	struct linger l;

	CTR4(KTR_IW_CXGBE, "%s ep %p so %p state %s", __func__, ep, ep->com.so,
	    states[ep->com.state]);

	l.l_onoff = 1;
	l.l_linger = 0;

	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	sopt.sopt_td = NULL;
	rc = sosetopt(ep->com.so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set linger to 0, no RST! err %d\n",
		    __func__, rc);
	}
}
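
/*
 * Handle a FIN from the peer: advance the connection state machine under
 * epc->mutex, then decide whether to initiate our own close (disconnect)
 * and/or drop the final reference (release).  This runs from the taskqueue
 * via process_socket_event(), never directly from the socket upcall.
 */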
static void
process_peer_close(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT CLOSING",
		    __func__, ep);
		__state_set(&ep->com, CLOSING);
		break;

	case MPA_REQ_SENT:
		CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT CLOSING",
		    __func__, ep);
		__state_set(&ep->com, DEAD);
		connect_reply_upcall(ep, -ECONNABORTED);

		disconnect = 0;
		STOP_EP_TIMER(ep);
		close_socket(&ep->com, 0);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		release = 1;
		break;

	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
		    __func__, ep);
		__state_set(&ep->com, CLOSING);
		c4iw_get_ep(&ep->com);
		break;

	case MPA_REP_SENT:
		CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
		    __func__, ep);
		__state_set(&ep->com, CLOSING);
		break;

	case FPDU_MODE:
		CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
		    __func__, ep);
		START_EP_TIMER(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		c4iw_modify_qp(ep->com.dev, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;

	case ABORTING:
		CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
		    __func__, ep);
		disconnect = 0;
		break;

	case CLOSING:
		CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
		    __func__, ep);
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;

	case MORIBUND:
		CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
		    ep);
		STOP_EP_TIMER(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_socket(&ep->com, 0);
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;

	case DEAD:
		CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
		    __func__, ep);
		disconnect = 0;
		break;

	default:
		panic("%s: ep %p state %d", __func__, ep,
		    ep->com.state);
		break;
	}

	mutex_unlock(&ep->com.mutex);

	if (disconnect) {
		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
	}
	if (release) {
		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
	return;
}
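
/*
 * Handle a socket error (so_error set, typically an RST from the peer):
 * move any attached QP to ERROR, deliver the upcall appropriate to the
 * current state, and tear the endpoint down unless an abort is already in
 * progress.
 */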
static void
process_conn_error(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int ret;
	int state;

	state = state_read(&ep->com);
	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
	    __func__, ep, ep->com.so, ep->com.so->so_error,
	    states[ep->com.state]);

	switch (state) {
	case MPA_REQ_WAIT:
		STOP_EP_TIMER(ep);
		break;

	case MPA_REQ_SENT:
		STOP_EP_TIMER(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;

	case MPA_REP_SENT:
		ep->com.rpl_err = ECONNRESET;
		CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
		break;

	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		c4iw_get_ep(&ep->com);
		break;

	case MORIBUND:
	case CLOSING:
		STOP_EP_TIMER(ep);
		/* FALLTHROUGH */
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
			    ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
			    &attrs, 1);
			if (ret)
				log(LOG_ERR,
				    "%s - qp <- error failed!\n",
				    __func__);
		}
		peer_abort_upcall(ep);
		break;

	case ABORTING:
		break;

	case DEAD:
		CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
		    __func__, ep->com.so->so_error);
		return;

	default:
		panic("%s: ep %p state %d", __func__, ep, state);
		break;
	}

	if (state != ABORTING) {
		CTR2(KTR_IW_CXGBE, "%s:pce1 %p", __func__, ep);
		close_socket(&ep->com, 1);
		state_set(&ep->com, DEAD);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
	return;
}

static void
process_close_complete(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);

	switch (ep->com.state) {
	case CLOSING:
		CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
		    __func__, ep);
		__state_set(&ep->com, MORIBUND);
		break;

	case MORIBUND:
		CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
		    ep);
		STOP_EP_TIMER(ep);

		if ((ep->com.cm_id) && (ep->com.qp)) {
			CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
			    __func__, ep);
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.dev,
			    ep->com.qp,
			    C4IW_QP_ATTR_NEXT_STATE,
			    &attrs, 1);
		}

		if (ep->parent_ep) {
			CTR2(KTR_IW_CXGBE, "%s:pcc3 %p", __func__, ep);
			close_socket(&ep->com, 1);
		} else {
			CTR2(KTR_IW_CXGBE, "%s:pcc4 %p", __func__, ep);
			close_socket(&ep->com, 0);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;

	case ABORTING:
		CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
		break;

	case DEAD:
	default:
		CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
		panic("%s:pcc6 %p DEAD", __func__, ep);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release) {
		CTR2(KTR_IW_CXGBE, "%s:pcc7 %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
	return;
}

static void
init_sock(struct c4iw_ep_common *epc)
{
	int rc;
	struct sockopt sopt;
	struct socket *so = epc->so;
	int on = 1;

	SOCK_LOCK(so);
	soupcall_set(so, SO_RCV, c4iw_so_upcall, epc);
	so->so_state |= SS_NBIO;
	SOCK_UNLOCK(so);
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;
	sopt.sopt_td = NULL;
	rc = sosetopt(so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
		    __func__, so, rc);
	}
}
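
/*
 * New data in the receive buffer.  Depending on the endpoint state this is
 * either the peer's MPA reply (active side), the peer's MPA request
 * (passive side), or unexpected streaming data.
 */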
static void
process_data(struct c4iw_ep *ep)
{
	struct sockaddr_in *local, *remote;

	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sb_cc %d", __func__,
	    ep->com.so, ep, states[ep->com.state], ep->com.so->so_rcv.sb_cc);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep);
		break;
	case MPA_REQ_WAIT:
		in_getsockaddr(ep->com.so, (struct sockaddr **)&local);
		in_getpeeraddr(ep->com.so, (struct sockaddr **)&remote);
		ep->com.local_addr = *local;
		ep->com.remote_addr = *remote;
		free(local, M_SONAME);
		free(remote, M_SONAME);
		process_mpa_request(ep);
		break;
	default:
		if (ep->com.so->so_rcv.sb_cc)
			log(LOG_ERR, "%s: Unexpected streaming data. "
			    "ep %p, state %d, so %p, so_state 0x%x, sb_cc %u\n",
			    __func__, ep, state_read(&ep->com), ep->com.so,
			    ep->com.so->so_state, ep->com.so->so_rcv.sb_cc);
		break;
	}
}

static void
process_connected(struct c4iw_ep *ep)
{

	if ((ep->com.so->so_state & SS_ISCONNECTED) && !ep->com.so->so_error)
		send_mpa_req(ep);
	else {
		connect_reply_upcall(ep, -ep->com.so->so_error);
		close_socket(&ep->com, 0);
		state_set(&ep->com, DEAD);
		c4iw_put_ep(&ep->com);
	}
}

static struct socket *
dequeue_socket(struct socket *head, struct sockaddr_in **remote,
    struct c4iw_ep *child_ep)
{
	struct socket *so;

	ACCEPT_LOCK();
	so = TAILQ_FIRST(&head->so_comp);
	if (!so) {
		ACCEPT_UNLOCK();
		return (NULL);
	}
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	SOCK_LOCK(so);
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;
	soref(so);
	soupcall_set(so, SO_RCV, c4iw_so_upcall, child_ep);
	so->so_state |= SS_NBIO;
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();
	soaccept(so, (struct sockaddr **)remote);

	return (so);
}

static void
process_newconn(struct c4iw_ep *parent_ep)
{
	struct socket *child_so;
	struct c4iw_ep *child_ep;
	struct sockaddr_in *remote;

	child_ep = alloc_ep(sizeof(*child_ep), M_NOWAIT);
	if (!child_ep) {
		CTR3(KTR_IW_CXGBE, "%s: parent so %p, parent ep %p, ENOMEM",
		    __func__, parent_ep->com.so, parent_ep);
		log(LOG_ERR, "%s: failed to allocate ep entry\n", __func__);
		return;
	}

	child_so = dequeue_socket(parent_ep->com.so, &remote, child_ep);
	if (!child_so) {
		CTR4(KTR_IW_CXGBE,
		    "%s: parent so %p, parent ep %p, child ep %p, dequeue err",
		    __func__, parent_ep->com.so, parent_ep, child_ep);
		log(LOG_ERR, "%s: failed to dequeue child socket\n", __func__);
		__free_ep(&child_ep->com);
		return;
	}

	CTR5(KTR_IW_CXGBE,
	    "%s: parent so %p, parent ep %p, child so %p, child ep %p",
	    __func__, parent_ep->com.so, parent_ep, child_so, child_ep);

	child_ep->com.local_addr = parent_ep->com.local_addr;
	child_ep->com.remote_addr = *remote;
	child_ep->com.dev = parent_ep->com.dev;
	child_ep->com.so = child_so;
	child_ep->com.cm_id = NULL;
	child_ep->com.thread = parent_ep->com.thread;
	child_ep->parent_ep = parent_ep;

	free(remote, M_SONAME);
	c4iw_get_ep(&parent_ep->com);
	init_timer(&child_ep->timer);
	state_set(&child_ep->com, MPA_REQ_WAIT);
	START_EP_TIMER(child_ep);

	/* maybe the request has already been queued up on the socket... */
	process_mpa_request(child_ep);
}
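
/*
 * Socket receive upcall.  It does no real work in the socket's lock
 * context: it takes a reference on the endpoint, queues it on req_list,
 * and kicks the taskqueue.  process_req() later dequeues the endpoint and
 * calls process_socket_event() from a sleepable context.  A non-NULL
 * tqe_prev means the endpoint is already queued, so back-to-back events
 * are coalesced rather than queued twice.
 */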
static int
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
	struct c4iw_ep *ep = arg;

	spin_lock(&req_lock);

	CTR6(KTR_IW_CXGBE,
	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
	    __func__, so, so->so_state, ep, states[ep->com.state],
	    ep->com.entry.tqe_prev);

	if (ep && ep->com.so && !ep->com.entry.tqe_prev) {
		KASSERT(ep->com.so == so, ("%s: XXX review.", __func__));
		c4iw_get_ep(&ep->com);
		TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
		queue_work(c4iw_taskq, &c4iw_task);
	}

	spin_unlock(&req_lock);
	return (SU_OK);
}

static void
process_socket_event(struct c4iw_ep *ep)
{
	int state = state_read(&ep->com);
	struct socket *so = ep->com.so;

	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
	    so->so_error, so->so_rcv.sb_state, ep, states[state]);

	if (state == CONNECTING) {
		process_connected(ep);
		return;
	}

	if (state == LISTEN) {
		process_newconn(ep);
		return;
	}

	/* connection error */
	if (so->so_error) {
		process_conn_error(ep);
		return;
	}

	/* peer close */
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state < CLOSING) {
		process_peer_close(ep);
		return;
	}

	/* close complete */
	if (so->so_state & SS_ISDISCONNECTED) {
		process_close_complete(ep);
		return;
	}

	/* rx data */
	process_data(ep);
}
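
/*
 * Module tunables.  Each knob below is both a loader tunable and a
 * read/write sysctl under hw.iw_cxgbe.
 */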
SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");

int db_delay_usecs = 1;
TUNABLE_INT("hw.iw_cxgbe.db_delay_usecs", &db_delay_usecs);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_delay_usecs, CTLFLAG_RW, &db_delay_usecs, 0,
    "Usecs to delay awaiting db fifo to drain");

static int dack_mode = 1;
TUNABLE_INT("hw.iw_cxgbe.dack_mode", &dack_mode);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RW, &dack_mode, 0,
    "Delayed ack mode (default = 1)");

int c4iw_max_read_depth = 8;
TUNABLE_INT("hw.iw_cxgbe.c4iw_max_read_depth", &c4iw_max_read_depth);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RW, &c4iw_max_read_depth, 0,
    "Per-connection max ORD/IRD (default = 8)");

static int enable_tcp_timestamps;
TUNABLE_INT("hw.iw_cxgbe.enable_tcp_timestamps", &enable_tcp_timestamps);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RW, &enable_tcp_timestamps, 0,
    "Enable tcp timestamps (default = 0)");

static int enable_tcp_sack;
TUNABLE_INT("hw.iw_cxgbe.enable_tcp_sack", &enable_tcp_sack);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RW, &enable_tcp_sack, 0,
    "Enable tcp SACK (default = 0)");

static int enable_tcp_window_scaling = 1;
TUNABLE_INT("hw.iw_cxgbe.enable_tcp_window_scaling", &enable_tcp_window_scaling);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RW, &enable_tcp_window_scaling, 0,
    "Enable tcp window scaling (default = 1)");

int c4iw_debug = 1;
TUNABLE_INT("hw.iw_cxgbe.c4iw_debug", &c4iw_debug);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RW, &c4iw_debug, 0,
    "Enable debug logging (default = 1)");

static int peer2peer;
TUNABLE_INT("hw.iw_cxgbe.peer2peer", &peer2peer);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RW, &peer2peer, 0,
    "Support peer2peer ULPs (default = 0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
TUNABLE_INT("hw.iw_cxgbe.p2p_type", &p2p_type);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RW, &p2p_type, 0,
    "RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
TUNABLE_INT("hw.iw_cxgbe.ep_timeout_secs", &ep_timeout_secs);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RW, &ep_timeout_secs, 0,
    "CM Endpoint operation timeout in seconds (default = 60)");

static int mpa_rev = 1;
TUNABLE_INT("hw.iw_cxgbe.mpa_rev", &mpa_rev);
#ifdef IW_CM_MPAV2
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
    "MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
#else
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
    "MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant (default = 1)");
#endif

static int markers_enabled;
TUNABLE_INT("hw.iw_cxgbe.markers_enabled", &markers_enabled);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RW, &markers_enabled, 0,
    "Enable MPA MARKERS (default(0) = disabled)");

static int crc_enabled = 1;
TUNABLE_INT("hw.iw_cxgbe.crc_enabled", &crc_enabled);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RW, &crc_enabled, 0,
    "Enable MPA CRC (default(1) = enabled)");

static int rcv_win = 256 * 1024;
TUNABLE_INT("hw.iw_cxgbe.rcv_win", &rcv_win);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RW, &rcv_win, 0,
    "TCP receive window in bytes (default = 256KB)");

static int snd_win = 128 * 1024;
TUNABLE_INT("hw.iw_cxgbe.snd_win", &snd_win);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RW, &snd_win, 0,
    "TCP send window in bytes (default = 128KB)");

int db_fc_threshold = 2000;
TUNABLE_INT("hw.iw_cxgbe.db_fc_threshold", &db_fc_threshold);
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_fc_threshold, CTLFLAG_RW, &db_fc_threshold, 0,
    "QP count/threshold that triggers automatic db flow control (default = 2000)");

static void
start_ep_timer(struct c4iw_ep *ep)
{

	if (timer_pending(&ep->timer)) {
		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
		    ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void
stop_ep_timer(struct c4iw_ep *ep)
{

	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
	}
}

static enum c4iw_ep_state
state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);

	return (state);
}

static void
__state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{

	epc->state = new;
}

static void
state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{

	mutex_lock(&epc->mutex);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}

static void *
alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc == NULL)
		return (NULL);

	kref_init(&epc->kref);
	mutex_init(&epc->mutex);
	c4iw_init_wr_wait(&epc->wr_wait);

	return (epc);
}

void
__free_ep(struct c4iw_ep_common *epc)
{
	CTR2(KTR_IW_CXGBE, "%s:feB %p", __func__, epc);
	KASSERT(!epc->so, ("%s warning ep->so %p \n", __func__, epc->so));
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list!\n", __func__, epc));
	free(epc, M_DEVBUF);
	CTR2(KTR_IW_CXGBE, "%s:feE %p", __func__, epc);
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;
	struct c4iw_ep_common *epc;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	epc = &ep->com;
	KASSERT(!epc->so, ("%s ep->so %p", __func__, epc->so));
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
	    __func__, epc));
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
}
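
/*
 * Send the MPA request that starts MPA negotiation on the active side.
 * The wire format is the fixed MPA header, then (for MPA v2) the
 * mpa_v2_conn_params carrying our IRD/ORD and RTR bits, then any ULP
 * private data.  The message is staged in a malloc'd buffer, copied into
 * an mbuf chain, and pushed out with sosend().
 */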
static void
send_mpa_req(struct c4iw_ep *ep)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	char mpa_rev_to_use = mpa_rev;
	int err;

	if (ep->retry_with_mpa_v1)
		mpa_rev_to_use = 1;
	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL) {
failed:
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0) |
	    (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;

	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size +=
		    htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_READ_RTR);
			}
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (ep->plen) {
			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params),
			    ep->mpa_pkt + sizeof(*mpa), ep->plen);
		}
	} else {
		if (ep->plen)
			memcpy(mpa->private_data,
			    ep->mpa_pkt + sizeof(*mpa), ep->plen);
		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
	}

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		goto failed;
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
	    ep->com.thread);
	if (err)
		goto failed;

	START_EP_TIMER(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
}
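
/*
 * Reject an MPA request: send an MPA reply with the MPA_REJECT flag set
 * (plus v2 parameters when the peer negotiated enhanced mode) and return
 * 0 or a negative errno.
 */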
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	int err;

	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
	    ep->plen);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpalen += sizeof(struct mpa_v2_conn_params);
		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
		    ep->mpa_attr.version, mpalen);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
		    htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
		    (peer2peer ? MPA_V2_PEER2PEER_MODEL : 0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
		    (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
		    MPA_V2_RDMA_WRITE_RTR : p2p_type ==
		    FW_RI_INIT_P2PTYPE_READ_REQ ?
		    MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params), pdata, plen);
		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
	if (!err)
		ep->snd_seq += mpalen;
	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
	return err;
}
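
/*
 * Accept an MPA request: send the MPA reply (echoing the negotiated
 * version, CRC and marker settings, and our IRD/ORD for v2) and move the
 * endpoint to MPA_REP_SENT before the data is handed to sosend().
 */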
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mbuf *m;
	struct mpa_v2_conn_params mpa_v2_params;
	int err;

	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
		    ep->mpa_attr.version);
		mpalen += sizeof(struct mpa_v2_conn_params);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
		    htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);

		if (peer2peer && (ep->mpa_attr.p2p_type !=
		    FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_READ_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
	    ep->com.thread);
	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
	return err;
}
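
/*
 * The *_upcall() helpers below translate socket/MPA events into iw_cm
 * events.  Upcalls that terminate the connection also drop the cm_id
 * reference and clear cm_id/qp so that no further events are delivered.
 */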
static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;

	if (ep->com.cm_id) {
		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
}

static int abort_connection(struct c4iw_ep *ep)
{
	int err;

	CTR2(KTR_IW_CXGBE, "%s:abB %p", __func__, ep);
	close_complete_upcall(ep, -ECONNRESET);
	state_set(&ep->com, ABORTING);
	abort_socket(ep);
	err = close_socket(&ep->com, 0);
	set_bit(ABORT_CONN, &ep->com.history);
	CTR2(KTR_IW_CXGBE, "%s:abE %p", __func__, ep);
	return err;
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;

	if (ep->com.cm_id) {
		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;

	if (ep->com.cm_id) {
		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s:cruB %p %d", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = (status == -ECONNABORTED) ? -ECONNRESET : status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
			    sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message) +
			    sizeof(struct mpa_v2_conn_params);
		} else {
			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message);
		}
	}

	if (ep->com.cm_id) {
		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
		set_bit(CONN_RPL_UPCALL, &ep->com.history);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}

	if (status == -ECONNABORTED) {
		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
		return;
	}

	if (status < 0) {
		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}

	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
}
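
/*
 * Deliver IW_CM_EVENT_CONNECT_REQUEST to the listening endpoint's ULP.
 * For MPA v2 the private data sits after both the MPA header and the v2
 * connection parameters, so the length and pointer are adjusted
 * accordingly.
 */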
static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
	    ep->tried_with_mpa_v1);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;
	event.so = ep->com.so;

	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
#ifdef IW_CM_MPAV2
		event.ord = ep->ord;
		event.ird = ep->ird;
#endif
		event.private_data_len = ep->plen -
		    sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
		    sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used.  Send max supported */
#ifdef IW_CM_MPAV2
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
#endif
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}

	c4iw_get_ep(&ep->com);
	ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
	    &event);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
#ifdef IW_CM_MPAV2
	event.ird = ep->ird;
	event.ord = ep->ord;
#endif
	if (ep->com.cm_id) {
		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
}

static void process_mpa_reply(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	struct mbuf *top, *m;
	int flags = MSG_DONTWAIT;
	struct uio uio;

	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	STOP_EP_TIMER(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	uio.uio_resid = 1000000;
	uio.uio_td = ep->com.thread;
	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);

	if (err) {
		if (err == EWOULDBLOCK) {
			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
			START_EP_TIMER(ep);
			return;
		}
		err = -err;
		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
		goto err;
	}

	if (ep->com.so->so_rcv.sb_mb) {
		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
		    __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
	}
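
	/*
	 * Append everything soreceive() returned to the accumulation
	 * buffer; the MPA reply may be split across several mbufs and may
	 * even arrive in pieces across multiple socket events.
	 */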
	m = top;

	do {
		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
		/*
		 * If we get more than the supported amount of private data
		 * then we must fail this connection.
		 */
		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
			    ep->mpa_pkt_len + m->m_len);
			err = (-EINVAL);
			goto err;
		}

		/*
		 * copy the new data into our accumulation buffer.
		 */
		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
		ep->mpa_pkt_len += m->m_len;
		if (!m->m_next)
			m = m->m_nextpkt;
		else
			m = m->m_next;
	} while (m);

	m_freem(top);
	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
		    mpa->revision, mpa_rev);
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
		    "Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}

	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.  We'll continue
	 * processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
		return;
	}

	if (mpa->flags & MPA_REJECT) {
		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
		ep->mpa_attr.enhanced_rdma_conn =
		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;

		if (ep->mpa_attr.enhanced_rdma_conn) {
			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
			mpa_v2_params = (struct mpa_v2_conn_params *)
			    (ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
			    MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
			    MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check.  Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
				CTR2(KTR_IW_CXGBE, "%s:pmre %p", __func__, ep);
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
				insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR) {
					CTR2(KTR_IW_CXGBE, "%s:pmrg %p",
					    __func__, ep);
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				} else if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_READ_RTR) {
					CTR2(KTR_IW_CXGBE, "%s:pmrh %p",
					    __func__, ep);
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_READ_REQ;
				}
			}
		}
	} else {
		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);

		if (mpa->revision == 1) {
			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);

			if (peer2peer) {
				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
				ep->mpa_attr.p2p_type = p2p_type;
			}
		}
	}

	if (set_tcpinfo(ep)) {
		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
		printf("%s set_tcpinfo error\n", __func__);
		goto err;
	}

	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	    ep->mpa_attr.p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);

	if (err) {
		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
		goto err;
	}

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD.  Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		    __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}
	goto out;
err:
	state_set(&ep->com, ABORTING);
	abort_connection(ep);
out:
	connect_reply_upcall(ep, err);
	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
	return;
}

static void
process_mpa_request(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	u16 plen;
	int flags = MSG_DONTWAIT;
	int rc;
	struct iovec iov;
	struct uio uio;
	enum c4iw_ep_state state = state_read(&ep->com);

	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);

	if (state != MPA_REQ_WAIT)
		return;

	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */

	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
	if (rc == EAGAIN)
		return;
	else if (rc) {
abort:
		STOP_EP_TIMER(ep);
		abort_connection(ep);
		return;
	}
	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
	    __func__, ep->com.so));
	ep->mpa_pkt_len += uio.uio_offset;

	/*
	 * If we get more than the supported amount of private data then we must
	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
	 * byte is filled by the soreceive above.
	 */

	/* Don't even have the MPA message.  Wait for more data to arrive. */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
		goto abort;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto abort;

	/*
	 * Fail if there's too much private data.
	 */
	plen = ntohs(mpa->private_data_size);
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto abort;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto abort;

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			struct mpa_v2_conn_params *mpa_v2_params;
			u16 ird, ord;

			mpa_v2_params = (void *)&ep->mpa_pkt[sizeof(*mpa)];
			ird = ntohs(mpa_v2_params->ird);
			ord = ntohs(mpa_v2_params->ord);

			ep->ird = ird & MPA_V2_IRD_ORD_MASK;
			ep->ord = ord & MPA_V2_IRD_ORD_MASK;
			if (ird & MPA_V2_PEER2PEER_MODEL && peer2peer) {
				if (ord & MPA_V2_RDMA_WRITE_RTR) {
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				} else if (ord & MPA_V2_RDMA_READ_RTR) {
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_READ_REQ;
				}
			}
		}
	} else if (mpa->revision == 1 && peer2peer)
		ep->mpa_attr.p2p_type = p2p_type;

	if (set_tcpinfo(ep))
		goto abort;

	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);
	STOP_EP_TIMER(ep);

	/* drive upcall */
	mutex_lock(&ep->parent_ep->com.mutex);
	if (ep->parent_ep->com.state != DEAD)
		connect_request_upcall(ep);
	else
		abort_connection(ep);
	mutex_unlock(&ep->parent_ep->com.mutex);
}

/*
 * Upcall from the adapter indicating data has been transmitted.  For us it's
 * just the single MPA request or reply.  We can now free the skb holding the
 * mpa message.
 */
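
/*
 * c4iw_reject_cr() and c4iw_accept_cr() below are the iw_cm reject and
 * accept entry points, invoked by the ULP in response to the
 * IW_CM_EVENT_CONNECT_REQUEST upcall.
 */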
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct c4iw_ep *ep = to_ep(cm_id);
	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);

	if (state_read(&ep->com) == DEAD) {
		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);

	if (mpa_rev == 0) {
		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
		abort_connection(ep);
	} else {
		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = soshutdown(ep->com.so, SHUT_RDWR);
	}
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:crc4 %p", __func__, ep);
	return 0;
}

int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);

	if (state_read(&ep->com) == DEAD) {
		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
		abort_connection(ep);
		err = -EINVAL;
		goto err;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);

		if (conn_param->ord > ep->ird) {
			CTR2(KTR_IW_CXGBE, "%s:cac4 %p", __func__, ep);
			ep->ird = conn_param->ird;
			ep->ord = conn_param->ord;
			send_mpa_reject(ep, conn_param->private_data,
			    conn_param->private_data_len);
			abort_connection(ep);
			err = -ENOMEM;
			goto err;
		}

		if (conn_param->ird > ep->ord) {
			CTR2(KTR_IW_CXGBE, "%s:cac5 %p", __func__, ep);

			if (!ep->ord) {
				CTR2(KTR_IW_CXGBE, "%s:cac6 %p", __func__, ep);
				conn_param->ird = 1;
			} else {
				CTR2(KTR_IW_CXGBE, "%s:cac7 %p", __func__, ep);
				abort_connection(ep);
				err = -ENOMEM;
				goto err;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version != 2) {
		CTR2(KTR_IW_CXGBE, "%s:cac8 %p", __func__, ep);

		if (peer2peer && ep->ird == 0) {
			CTR2(KTR_IW_CXGBE, "%s:cac9 %p", __func__, ep);
			ep->ird = 1;
		}
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;
	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	    C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD |
	    C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);

	if (state_read(&ep->com) == DEAD) {
		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
		abort_connection(ep);
		err = -EINVAL;
		goto err;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);

		if (conn_param->ord > ep->ird) {
			CTR2(KTR_IW_CXGBE, "%s:cac4 %p", __func__, ep);
			ep->ird = conn_param->ird;
			ep->ord = conn_param->ord;
			send_mpa_reject(ep, conn_param->private_data,
			    conn_param->private_data_len);
			abort_connection(ep);
			err = -ENOMEM;
			goto err;
		}

		if (conn_param->ird > ep->ord) {
			CTR2(KTR_IW_CXGBE, "%s:cac5 %p", __func__, ep);

			if (!ep->ord) {
				CTR2(KTR_IW_CXGBE, "%s:cac6 %p", __func__, ep);
				conn_param->ird = 1;
			} else {
				CTR2(KTR_IW_CXGBE, "%s:cac7 %p", __func__, ep);
				abort_connection(ep);
				err = -ENOMEM;
				goto err;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version != 2) {
		CTR2(KTR_IW_CXGBE, "%s:cac8 %p", __func__, ep);

		if (peer2peer && ep->ird == 0) {
			CTR2(KTR_IW_CXGBE, "%s:cac9 %p", __func__, ep);
			ep->ird = 1;
		}
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;
	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	    C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD |
	    C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
	if (err) {
		CTR2(KTR_IW_CXGBE, "%s:caca %p", __func__, ep);
		goto err1;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
	    conn_param->private_data_len);
	if (err) {
		CTR2(KTR_IW_CXGBE, "%s:cacb %p", __func__, ep);
		goto err1;
	}

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
	return err;
}

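/*
 * iwcm->connect.  Active open: allocate an endpoint, route to the peer,
 * check that the outbound interface is TOE capable, and start the
 * connection with soconnect().  The MPA request itself goes out later,
 * from the socket upcall path, once TCP reports the connection as
 * established.  Returns -errno on failure.
 */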
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep = NULL;
	struct rtentry *rt;
	struct toedev *tdev;

	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
		err = -EINVAL;
		goto out;
	}

	ep = alloc_ep(sizeof(*ep), M_NOWAIT);
	if (!ep) {
		CTR2(KTR_IW_CXGBE, "%s:cc2 %p", __func__, cm_id);
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;

	if (ep->plen) {
		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		    conn_param->private_data, ep->plen);
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0) {
		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
		ep->ord = 1;
	}

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);

	if (!ep->com.qp) {
		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
		err = -EINVAL;
		goto fail2;
	}
	ep->com.thread = curthread;
	ep->com.so = cm_id->so;

	init_sock(&ep->com);

	/* find a route */
	rt = find_route(
	    cm_id->local_addr.sin_addr.s_addr,
	    cm_id->remote_addr.sin_addr.s_addr,
	    cm_id->local_addr.sin_port,
	    cm_id->remote_addr.sin_port, 0);

	if (!rt) {
		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail2;
	}

	if (!(rt->rt_ifp->if_capenable & IFCAP_TOE)) {
		CTR2(KTR_IW_CXGBE, "%s:cc8 %p", __func__, ep);
		printf("%s - interface not TOE capable.\n", __func__);
		close_socket(&ep->com, 0);
		err = -ENOPROTOOPT;
		goto fail3;
	}

	tdev = TOEDEV(rt->rt_ifp);
	if (tdev == NULL) {
		CTR2(KTR_IW_CXGBE, "%s:cc9 %p", __func__, ep);
		printf("%s - No toedev for interface.\n", __func__);
		close_socket(&ep->com, 0);
		err = -ENOPROTOOPT;
		goto fail3;
	}
	RTFREE(rt);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	err = soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
	    ep->com.thread);
	if (!err) {
		CTR2(KTR_IW_CXGBE, "%s:cca %p", __func__, ep);
		goto out;
	}
	close_socket(&ep->com, 0);
	goto fail2;

fail3:
	CTR2(KTR_IW_CXGBE, "%s:ccb %p", __func__, ep);
	RTFREE(rt);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	CTR2(KTR_IW_CXGBE, "%s:ccE %p", __func__, ep);
	return err;
}

/*
 * iwcm->create_listen. Returns -errno on failure.
 */
int
c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int rc;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;
	struct socket *so = cm_id->so;

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	CTR5(KTR_IW_CXGBE, "%s: cm_id %p, lso %p, ep %p, inp %p", __func__,
	    cm_id, so, ep, so->so_pcb);
	if (ep == NULL) {
		log(LOG_ERR, "%s: failed to alloc memory for endpoint\n",
		    __func__);
		rc = ENOMEM;
		goto failed;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.thread = curthread;
	state_set(&ep->com, LISTEN);
	ep->com.so = so;
	init_sock(&ep->com);

	rc = solisten(so, ep->backlog, ep->com.thread);
	if (rc != 0) {
		log(LOG_ERR, "%s: failed to start listener: %d\n", __func__,
		    rc);
		close_socket(&ep->com, 0);
		cm_id->rem_ref(cm_id);
		c4iw_put_ep(&ep->com);
		goto failed;
	}

	cm_id->provider_data = ep;
	return (0);

failed:
	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, FAILED (%d)", __func__, cm_id, rc);
	return (-rc);
}

int
c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int rc;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	CTR4(KTR_IW_CXGBE, "%s: cm_id %p, so %p, inp %p", __func__, cm_id,
	    cm_id->so, cm_id->so->so_pcb);

	state_set(&ep->com, DEAD);
	rc = close_socket(&ep->com, 0);
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);

	return (rc);
}

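/*
 * Start an orderly close or an abort of the connection, depending on
 * 'abrupt'.  The endpoint state machine decides whether anything still
 * needs to be sent.  A fatal adapter error, or a failure from
 * abort_connection() or shutdown_socket(), releases the endpoint's
 * resources immediately.
 */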
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);

	rdev = &ep->com.dev->rdev;

	if (c4iw_fatal_error(rdev)) {
		CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
		fatal = 1;
		close_complete_upcall(ep, -EIO);
		ep->com.state = DEAD;
	}
	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
	    states[ep->com.state]);

	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			START_EP_TIMER(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;

	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				STOP_EP_TIMER(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;

	case MORIBUND:
	case ABORTING:
	case DEAD:
		CTR3(KTR_IW_CXGBE,
		    "%s ignoring disconnect ep %p state %u", __func__,
		    ep, ep->com.state);
		break;

	default:
		BUG();
		break;
	}

	mutex_unlock(&ep->com.mutex);

	if (close) {
		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);

		if (abrupt) {
			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
			set_bit(EP_DISC_ABORT, &ep->com.history);
			ret = abort_connection(ep);
		} else {
			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
			set_bit(EP_DISC_CLOSE, &ep->com.history);

			if (!ep->parent_ep)
				__state_set(&ep->com, MORIBUND);
			ret = shutdown_socket(&ep->com);
		}

		if (ret)
			fatal = 1;
	}

	if (fatal) {
		release_ep_resources(ep);
		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
	}
	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
	return ret;
}

#ifdef C4IW_EP_REDIRECT
int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
    struct l2t_entry *l2t)
{
	struct c4iw_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
	    l2t);
	dst_hold(new);
	cxgb4_l2t_release(ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}
#endif

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	CTR2(KTR_IW_CXGBE, "%s:etB %p", __func__, ep);
	spin_lock(&timeout_lock);

	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		list_add_tail(&ep->entry, &timeout_list);
		kickit = 1;
	}
	spin_unlock(&timeout_lock);

	if (kickit) {
		CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
		queue_work(c4iw_taskq, &c4iw_task);
	}
	CTR2(KTR_IW_CXGBE, "%s:etE %p", __func__, ep);
}

static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
{
	uint64_t val = be64toh(*rpl);
	int ret;
	struct c4iw_wr_wait *wr_waitp;

	ret = (int)((val >> 8) & 0xff);
	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
	if (wr_waitp)
		c4iw_wake_up(wr_waitp, ret ? -ret : 0);

	return (0);
}

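/*
 * Firmware CQE notification.  Copy the CQE out of the firmware reply and
 * hand it to the event dispatcher.
 */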
static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
{
	struct t4_cqe cqe = *(const struct t4_cqe *)(&rpl[0]);

	CTR2(KTR_IW_CXGBE, "%s rpl %p", __func__, rpl);
	c4iw_ev_dispatch(sc->iwarp_softc, &cqe);

	return (0);
}

static int terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rdma_terminate *rpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(rpl);
	struct c4iw_qp_attributes attrs;
	struct toepcb *toep = lookup_tid(sc, tid);
	struct socket *so = inp_inpcbtosocket(toep->inp);
	struct c4iw_ep *ep = so->so_rcv.sb_upcallarg;

	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		    ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.dev, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);

	return 0;
}

void
c4iw_cm_init_cpl(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, terminate);
	t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, fw6_wr_rpl);
	t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, fw6_cqe_handler);
	t4_register_an_handler(sc, c4iw_ev_handler);
}

void
c4iw_cm_term_cpl(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, NULL);
	t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, NULL);
	t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, NULL);
}

int __init c4iw_cm_init(void)
{

	TAILQ_INIT(&req_list);
	spin_lock_init(&req_lock);
	INIT_LIST_HEAD(&timeout_list);
	spin_lock_init(&timeout_lock);

	INIT_WORK(&c4iw_task, process_req);

	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
	if (!c4iw_taskq)
		return -ENOMEM;

	return 0;
}

void __exit c4iw_cm_term(void)
{
	WARN_ON(!TAILQ_EMPTY(&req_list));
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(c4iw_taskq);
	destroy_workqueue(c4iw_taskq);
}
#endif