Lines matching refs: ep (grep-style listing; each line is prefixed with its line number in drivers/infiniband/hw/cxgb4/cm.c, the Linux kernel's Chelsio cxgb4 iWARP connection manager)

144 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
163 static void deref_qp(struct c4iw_ep *ep)
165 c4iw_qp_rem_ref(&ep->com.qp->ibqp);
166 clear_bit(QP_REFERENCED, &ep->com.flags);
167 set_bit(QP_DEREFED, &ep->com.history);
170 static void ref_qp(struct c4iw_ep *ep)
172 set_bit(QP_REFERENCED, &ep->com.flags);
173 set_bit(QP_REFED, &ep->com.history);
174 c4iw_qp_add_ref(&ep->com.qp->ibqp);
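
The deref_qp()/ref_qp() pair above shows a convention used throughout this file: take a reference on the QP when the endpoint starts using it, record that state in com.flags so the kref release function further down (lines 392-393) knows whether a reference is outstanding, and leave a sticky bit in com.history as a post-mortem debug trail. A minimal userspace sketch of the same pairing, assuming C11 atomics as stand-ins for the kernel's kref and bitops; all names here are illustrative, not the driver's:

    #include <stdatomic.h>

    /* Bit positions mirroring the driver's flags/history enums. */
    enum { QP_REFERENCED = 0 };            /* com.flags: current state  */
    enum { QP_REFED = 0, QP_DEREFED = 1 }; /* com.history: sticky trail */

    struct qp { atomic_int refcnt; };
    struct ep {
        struct qp *qp;
        atomic_ulong flags;    /* cleared and set as state changes        */
        atomic_ulong history;  /* bits only ever set, for post-mortem use */
    };

    static void ref_qp(struct ep *ep)
    {
        atomic_fetch_or(&ep->flags, 1UL << QP_REFERENCED);
        atomic_fetch_or(&ep->history, 1UL << QP_REFED);
        atomic_fetch_add(&ep->qp->refcnt, 1);   /* c4iw_qp_add_ref() */
    }

    static void deref_qp(struct ep *ep)
    {
        atomic_fetch_sub(&ep->qp->refcnt, 1);   /* c4iw_qp_rem_ref() */
        atomic_fetch_and(&ep->flags, ~(1UL << QP_REFERENCED));
        atomic_fetch_or(&ep->history, 1UL << QP_DEREFED);
    }
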
177 static void start_ep_timer(struct c4iw_ep *ep)
179 pr_debug("ep %p\n", ep);
180 if (timer_pending(&ep->timer)) {
181 pr_err("%s timer already started! ep %p\n",
182 __func__, ep);
185 clear_bit(TIMEOUT, &ep->com.flags);
186 c4iw_get_ep(&ep->com);
187 ep->timer.expires = jiffies + ep_timeout_secs * HZ;
188 add_timer(&ep->timer);
191 static int stop_ep_timer(struct c4iw_ep *ep)
193 pr_debug("ep %p stopping\n", ep);
194 del_timer_sync(&ep->timer);
195 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
196 c4iw_put_ep(&ep->com);
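
stop_ep_timer() pairs with start_ep_timer() above: arming the timer takes an ep reference, and the TIMEOUT bit then decides, atomically, whether stop_ep_timer() or the expired timer callback (ep_timeout, lines 4326-4335 near the end of this listing) drops it. test_and_set_bit() makes that put happen exactly once however the race falls. A compact userspace sketch of the idempotent-stop idea, with hypothetical names:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { TIMEOUT = 1 };   /* flag bit; the value is illustrative */

    struct ep {
        atomic_ulong flags;
        atomic_int   refcnt;   /* the armed timer holds one reference */
    };

    /* Returns the previous state of the bit, like test_and_set_bit(). */
    static bool test_and_set_flag(struct ep *ep, int bit)
    {
        return atomic_fetch_or(&ep->flags, 1UL << bit) & (1UL << bit);
    }

    /* Both the canceller and the timer callback race to flip TIMEOUT;
     * whichever wins owns (and drops) the timer's reference, so the
     * reference taken when the timer was armed is put exactly once. */
    static int stop_ep_timer_sketch(struct ep *ep)
    {
        /* (del_timer_sync() would go here in the kernel version.) */
        if (!test_and_set_flag(ep, TIMEOUT)) {
            atomic_fetch_sub(&ep->refcnt, 1);   /* c4iw_put_ep() */
            return 0;                           /* stopped a live timer */
        }
        return 1;   /* timer already fired or was already stopped */
    }
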
248 static void set_emss(struct c4iw_ep *ep, u16 opt)
250 ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
251 ((AF_INET == ep->com.remote_addr.ss_family) ?
254 ep->mss = ep->emss;
256 ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
257 if (ep->emss < 128)
258 ep->emss = 128;
259 if (ep->emss & 7)
261 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
262 pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
263 ep->emss);
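
set_emss() derives the effective MSS from the firmware MTU table entry selected during TCP option negotiation: subtract the IP and TCP header sizes (v4 vs. v6), knock off the rounded-up timestamp option if it was negotiated, and clamp to a floor of 128 before warning about 8-byte misalignment. A worked example with typical numbers (1500-byte MTU, IPv4, timestamps on; TCPOLEN_TIMESTAMP is 10 as in the kernel headers):

    #include <stdio.h>

    #define TCPOLEN_TIMESTAMP 10
    #define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
        unsigned mtu  = 1500;                 /* mtus[TCPOPT_MSS_G(opt)]   */
        unsigned hdrs = 20 + 20;              /* iphdr + tcphdr; v6: 40+20 */
        unsigned emss = mtu - hdrs;           /* 1460                      */
        unsigned mss  = emss;
        int timestamps = 1;                   /* negotiated in TCP options */

        if (timestamps)
            emss -= ROUND_UP(TCPOLEN_TIMESTAMP, 4);   /* -12 -> 1448 */
        if (emss < 128)
            emss = 128;                       /* floor enforced by set_emss() */

        printf("mss %u emss %u%s\n", mss, emss,
               (emss & 7) ? " (misaligned)" : "");    /* 1448: 8-byte aligned */
        return 0;
    }
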
325 pr_debug("alloc ep %p\n", epc);
330 static void remove_ep_tid(struct c4iw_ep *ep)
334 xa_lock_irqsave(&ep->com.dev->hwtids, flags);
335 __xa_erase(&ep->com.dev->hwtids, ep->hwtid);
336 if (xa_empty(&ep->com.dev->hwtids))
337 wake_up(&ep->com.dev->wait);
338 xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
341 static int insert_ep_tid(struct c4iw_ep *ep)
346 xa_lock_irqsave(&ep->com.dev->hwtids, flags);
347 err = __xa_insert(&ep->com.dev->hwtids, ep->hwtid, ep, GFP_KERNEL);
348 xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
354 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
358 struct c4iw_ep *ep;
362 ep = xa_load(&dev->hwtids, tid);
363 if (ep)
364 c4iw_get_ep(&ep->com);
366 return ep;
370 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
375 struct c4iw_listen_ep *ep;
379 ep = xa_load(&dev->stids, stid);
380 if (ep)
381 c4iw_get_ep(&ep->com);
383 return ep;
388 struct c4iw_ep *ep;
390 ep = container_of(kref, struct c4iw_ep, com.kref);
391 pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
392 if (test_bit(QP_REFERENCED, &ep->com.flags))
393 deref_qp(ep);
394 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
395 if (ep->com.remote_addr.ss_family == AF_INET6) {
398 &ep->com.local_addr;
401 ep->com.dev->rdev.lldi.ports[0],
405 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
406 ep->com.local_addr.ss_family);
407 dst_release(ep->dst);
408 cxgb4_l2t_release(ep->l2t);
409 kfree_skb(ep->mpa_skb);
411 if (!skb_queue_empty(&ep->com.ep_skb_list))
412 skb_queue_purge(&ep->com.ep_skb_list);
413 c4iw_put_wr_wait(ep->com.wr_waitp);
414 kfree(ep);
417 static void release_ep_resources(struct c4iw_ep *ep)
419 set_bit(RELEASE_RESOURCES, &ep->com.flags);
424 * we have a race where one thread finds the ep ptr just
425 * before the other thread is freeing the ep memory.
427 if (ep->hwtid != -1)
428 remove_ep_tid(ep);
429 c4iw_put_ep(&ep->com);
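
The fragments from remove_ep_tid() through release_ep_resources() describe one lifecycle discipline: the hwtids table maps a hardware tid to its ep; lookups take a reference under the same lock removals take (hence "atomically lookup the ep ptr ... and grab a reference"); teardown unpublishes the tid before dropping what may be the final reference, closing the race mentioned in the comment at lines 424-425; and when the table empties, a waiter (driver unload) is woken. A self-contained pthread sketch of that discipline, with a toy fixed-size table standing in for the xarray and all names illustrative:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define MAX_TIDS 64

    struct ep { int refcnt; };              /* guarded by dev->lock below  */

    struct dev {                            /* lock/cond init omitted      */
        pthread_mutex_t lock;               /* plays xa_lock_irqsave()     */
        pthread_cond_t  empty;              /* plays dev->wait             */
        struct ep *tids[MAX_TIDS];
        int count;
    };

    static int insert_ep_tid(struct dev *dev, unsigned tid, struct ep *ep)
    {
        int err = 0;
        pthread_mutex_lock(&dev->lock);
        if (dev->tids[tid]) {
            err = -1;                       /* __xa_insert() refuses busy slots */
        } else {
            dev->tids[tid] = ep;
            dev->count++;
        }
        pthread_mutex_unlock(&dev->lock);
        return err;
    }

    /* Lookup and reference grab happen under one lock, so a found ep can
     * never be freed between the load and the get. */
    static struct ep *get_ep_from_tid(struct dev *dev, unsigned tid)
    {
        struct ep *ep;
        pthread_mutex_lock(&dev->lock);
        ep = dev->tids[tid];
        if (ep)
            ep->refcnt++;                   /* c4iw_get_ep() */
        pthread_mutex_unlock(&dev->lock);
        return ep;                          /* caller must put_ep() */
    }

    static void remove_ep_tid(struct dev *dev, unsigned tid)
    {
        pthread_mutex_lock(&dev->lock);
        if (dev->tids[tid]) {
            dev->tids[tid] = NULL;
            if (--dev->count == 0)
                pthread_cond_broadcast(&dev->empty);  /* unload waits here */
        }
        pthread_mutex_unlock(&dev->lock);
    }

    static void put_ep(struct dev *dev, struct ep *ep)
    {
        bool last;
        pthread_mutex_lock(&dev->lock);
        last = (--ep->refcnt == 0);
        pthread_mutex_unlock(&dev->lock);
        if (last)
            free(ep);                       /* _c4iw_free_ep() stand-in */
    }

    /* Teardown order matters: unpublish first so no new lookup can
     * succeed, then drop the reference, which may be the last one. */
    static void release_ep_resources(struct dev *dev, unsigned tid, struct ep *ep)
    {
        remove_ep_tid(dev, tid);
        put_ep(dev, ep);
    }
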
494 struct c4iw_ep *ep;
496 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
497 release_ep_resources(ep);
503 struct c4iw_ep *ep;
505 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
506 c4iw_put_ep(&ep->parent_ep->com);
507 release_ep_resources(ep);
513 * _put_ep_safe() in a safe context to free the ep resources. This is needed
517 static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
526 * Save ep in the skb->cb area, after where sched() will save the dev
529 *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
530 sched(ep->com.dev, skb);
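
_put_ep_safe() and queue_arp_failure_cpl() above use the skb control buffer as a mailbox: the ARP-failure path cannot release resources in its own context, so it hides the ep pointer at a fixed offset in skb->cb (past the two pointers sched() itself uses), requeues the skb, and the deferred handler reads the pointer back. A standalone sketch of that stash/unstash trick; memcpy() is the alignment-safe spelling of the cast the driver uses:

    #include <string.h>

    struct ep;                               /* only the pointer travels */

    /* Toy message with a scratch area, like skb->cb (48 bytes there). */
    struct msg { unsigned char cb[48]; };

    /* The first 2*sizeof(void *) bytes belong to the scheduler, so the
     * ep pointer is parked just past them, exactly as in the driver. */
    static void stash_ep(struct msg *m, struct ep *ep)
    {
        memcpy(m->cb + 2 * sizeof(void *), &ep, sizeof(ep));
    }

    static struct ep *unstash_ep(const struct msg *m)
    {
        struct ep *ep;
        memcpy(&ep, m->cb + 2 * sizeof(void *), sizeof(ep));
        return ep;
    }
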
536 struct c4iw_ep *ep = handle;
539 ep->hwtid);
541 __state_set(&ep->com, DEAD);
542 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
550 struct c4iw_ep *ep = handle;
553 connect_reply_upcall(ep, -EHOSTUNREACH);
554 __state_set(&ep->com, DEAD);
555 if (ep->com.remote_addr.ss_family == AF_INET6) {
557 (struct sockaddr_in6 *)&ep->com.local_addr;
558 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
561 xa_erase_irq(&ep->com.dev->atids, ep->atid);
562 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
563 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
573 struct c4iw_ep *ep = handle;
574 struct c4iw_rdev *rdev = &ep->com.dev->rdev;
582 __state_set(&ep->com, DEAD);
583 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
588 static int send_flowc(struct c4iw_ep *ep)
591 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
592 u16 vlan = ep->l2t->vlan;
614 FW_WR_FLOWID_V(ep->hwtid));
618 (ep->com.dev->rdev.lldi.pf));
620 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
622 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
624 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
626 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
628 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
630 flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
632 flowc->mnemval[7].val = cpu_to_be32(ep->emss);
634 flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale);
642 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
643 return c4iw_ofld_send(&ep->com.dev->rdev, skb);
646 static int send_halfclose(struct c4iw_ep *ep)
648 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
651 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
655 cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
658 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
661 static void read_tcb(struct c4iw_ep *ep)
671 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
674 INIT_TP_WR(req, ep->hwtid);
675 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid));
676 req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid));
679 * keep a ref on the ep so the tcb is not unlocked before this
682 c4iw_get_ep(&ep->com);
683 if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb)))
684 c4iw_put_ep(&ep->com);
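
read_tcb() shows the usual rule for fire-and-forget hardware requests: pin the ep with a reference before sending, and drop that reference either immediately on a synchronous send failure or in the reply handler (read_tcb_rpl, near the end of this listing, puts one ref "from get_ep_from_tid()" and one "from read_tcb()"). A minimal sketch with a stubbed-out transport; every name here is hypothetical:

    #include <stdatomic.h>

    struct ep { atomic_int refcnt; };

    static void get_ep(struct ep *ep) { atomic_fetch_add(&ep->refcnt, 1); }
    static void put_ep(struct ep *ep) { atomic_fetch_sub(&ep->refcnt, 1); }

    /* Stub for the offload send; nonzero means immediate failure and
     * the reply handler will never run. */
    static int send_get_tcb(unsigned tid) { (void)tid; return 0; }

    static void read_tcb_sketch(struct ep *ep, unsigned tid)
    {
        get_ep(ep);                 /* keep ep alive until the reply lands */
        if (send_get_tcb(tid))
            put_ep(ep);             /* no reply coming; undo the pin here  */
    }

    /* The reply path drops the pin once the TCB contents are consumed. */
    static void read_tcb_reply_sketch(struct ep *ep)
    {
        /* ... use the TCB fields ... */
        put_ep(ep);
    }
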
687 static int send_abort_req(struct c4iw_ep *ep)
690 struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
692 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
696 cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
697 ep, abort_arp_failure);
699 return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
702 static int send_abort(struct c4iw_ep *ep)
704 if (!ep->com.qp || !ep->com.qp->srq) {
705 send_abort_req(ep);
708 set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags);
709 read_tcb(ep);
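
send_abort() branches on whether the connection's QP uses an SRQ: without one it aborts immediately; with one, the driver must first recover SRQ state from the hardware TCB, so it flags ABORT_REQ_IN_PROGRESS and issues read_tcb(). The TCB reply handler at the end of this listing sees the flag and completes the abort with send_abort_req(). A small control-flow sketch, helpers stubbed and names hypothetical:

    #include <stdbool.h>

    struct ep { bool has_srq; bool abort_in_progress; };

    static int  send_abort_req(struct ep *ep) { (void)ep; return 0; }
    static void read_tcb(struct ep *ep)       { (void)ep; }

    static int send_abort_sketch(struct ep *ep)
    {
        if (!ep->has_srq)
            return send_abort_req(ep);   /* no SRQ state to recover: abort now */

        /* Defer: read the TCB first so cached SRQ buffers can be
         * completed; the TCB reply path finishes the abort when it
         * sees this flag. */
        ep->abort_in_progress = true;
        read_tcb(ep);                    /* async; abort continues in the rpl */
        return 0;
    }
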
713 static int send_connect(struct c4iw_ep *ep)
728 &ep->com.local_addr;
730 &ep->com.remote_addr;
732 &ep->com.local_addr;
734 &ep->com.remote_addr;
736 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
741 netdev = ep->com.dev->rdev.lldi.ports[0];
762 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
766 pr_debug("ep %p atid %u\n", ep, ep->atid);
773 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
775 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
777 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
784 win = ep->rcv_win >> 10;
793 L2T_IDX_V(ep->l2t->idx) |
794 TX_CHAN_V(ep->tx_chan) |
795 SMAC_SEL_V(ep->smac_idx) |
796 DSCP_V(ep->tos >> 2) |
801 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
817 params = cxgb4_select_ntuple(netdev, ep->l2t);
819 if (ep->com.remote_addr.ss_family == AF_INET6)
820 cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
823 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
825 if (ep->com.remote_addr.ss_family == AF_INET) {
850 ((ep->rss_qid<<14) | ep->atid)));
857 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
861 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
900 ((ep->rss_qid<<14)|ep->atid)));
909 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
911 ep->l2t));
914 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
931 set_bit(ACT_OPEN_REQ, &ep->com.history);
932 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
934 if (ret && ep->com.remote_addr.ss_family == AF_INET6)
935 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
940 static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
948 pr_debug("ep %p tid %u pd_len %d\n",
949 ep, ep->hwtid, ep->plen);
951 mpalen = sizeof(*mpa) + ep->plen;
957 connect_reply_upcall(ep, -ENOMEM);
960 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
968 FW_WR_FLOWID_V(ep->hwtid) |
983 ep->mpa_attr.recv_marker_enabled = 1;
985 ep->mpa_attr.recv_marker_enabled = 0;
990 mpa->private_data_size = htons(ep->plen);
993 ep->tried_with_mpa_v1 = 1;
994 ep->retry_with_mpa_v1 = 0;
1001 pr_debug("initiator ird %u ord %u\n", ep->ird,
1002 ep->ord);
1003 mpa_v2_params.ird = htons((u16)ep->ird);
1004 mpa_v2_params.ord = htons((u16)ep->ord);
1018 if (ep->plen)
1021 ep->mpa_pkt + sizeof(*mpa), ep->plen);
1023 if (ep->plen)
1025 ep->mpa_pkt + sizeof(*mpa), ep->plen);
1034 ep->mpa_skb = skb;
1035 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1038 start_ep_timer(ep);
1039 __state_set(&ep->com, MPA_REQ_SENT);
1040 ep->mpa_attr.initiator = 1;
1041 ep->snd_seq += mpalen;
1045 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
1053 pr_debug("ep %p tid %u pd_len %d\n",
1054 ep, ep->hwtid, ep->plen);
1057 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
1066 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1074 FW_WR_FLOWID_V(ep->hwtid) |
1085 mpa->revision = ep->mpa_attr.version;
1088 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1093 mpa_v2_params.ird = htons(((u16)ep->ird) |
1096 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
1105 if (ep->plen)
1118 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1120 ep->mpa_skb = skb;
1121 ep->snd_seq += mpalen;
1122 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1125 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
1133 pr_debug("ep %p tid %u pd_len %d\n",
1134 ep, ep->hwtid, ep->plen);
1137 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
1146 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1154 FW_WR_FLOWID_V(ep->hwtid) |
1165 if (ep->mpa_attr.crc_enabled)
1167 if (ep->mpa_attr.recv_marker_enabled)
1169 mpa->revision = ep->mpa_attr.version;
1172 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1177 mpa_v2_params.ird = htons((u16)ep->ird);
1178 mpa_v2_params.ord = htons((u16)ep->ord);
1179 if (peer2peer && (ep->mpa_attr.p2p_type !=
1194 if (ep->plen)
1208 ep->mpa_skb = skb;
1209 __state_set(&ep->com, MPA_REP_SENT);
1210 ep->snd_seq += mpalen;
1211 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1216 struct c4iw_ep *ep;
1224 ep = lookup_atid(t, atid);
1226 pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
1229 mutex_lock(&ep->com.mutex);
1230 dst_confirm(ep->dst);
1233 ep->hwtid = tid;
1234 cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
1235 insert_ep_tid(ep);
1237 ep->snd_seq = be32_to_cpu(req->snd_isn);
1238 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
1239 ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
1241 set_emss(ep, tcp_opt);
1244 xa_erase_irq(&ep->com.dev->atids, atid);
1246 set_bit(ACT_ESTAB, &ep->com.history);
1249 ret = send_flowc(ep);
1252 if (ep->retry_with_mpa_v1)
1253 ret = send_mpa_req(ep, skb, 1);
1255 ret = send_mpa_req(ep, skb, mpa_rev);
1258 mutex_unlock(&ep->com.mutex);
1261 mutex_unlock(&ep->com.mutex);
1262 connect_reply_upcall(ep, -ENOMEM);
1263 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1267 static void close_complete_upcall(struct c4iw_ep *ep, int status)
1271 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1275 if (ep->com.cm_id) {
1276 pr_debug("close complete delivered ep %p cm_id %p tid %u\n",
1277 ep, ep->com.cm_id, ep->hwtid);
1278 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1279 deref_cm_id(&ep->com);
1280 set_bit(CLOSE_UPCALL, &ep->com.history);
1284 static void peer_close_upcall(struct c4iw_ep *ep)
1288 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1291 if (ep->com.cm_id) {
1292 pr_debug("peer close delivered ep %p cm_id %p tid %u\n",
1293 ep, ep->com.cm_id, ep->hwtid);
1294 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1295 set_bit(DISCONN_UPCALL, &ep->com.history);
1299 static void peer_abort_upcall(struct c4iw_ep *ep)
1303 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1307 if (ep->com.cm_id) {
1308 pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep,
1309 ep->com.cm_id, ep->hwtid);
1310 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1311 deref_cm_id(&ep->com);
1312 set_bit(ABORT_UPCALL, &ep->com.history);
1316 static void connect_reply_upcall(struct c4iw_ep *ep, int status)
1320 pr_debug("ep %p tid %u status %d\n",
1321 ep, ep->hwtid, status);
1325 memcpy(&event.local_addr, &ep->com.local_addr,
1326 sizeof(ep->com.local_addr));
1327 memcpy(&event.remote_addr, &ep->com.remote_addr,
1328 sizeof(ep->com.remote_addr));
1331 if (!ep->tried_with_mpa_v1) {
1333 event.ord = ep->ird;
1334 event.ird = ep->ord;
1335 event.private_data_len = ep->plen -
1337 event.private_data = ep->mpa_pkt +
1342 event.ord = cur_max_read_depth(ep->com.dev);
1343 event.ird = cur_max_read_depth(ep->com.dev);
1344 event.private_data_len = ep->plen;
1345 event.private_data = ep->mpa_pkt +
1350 pr_debug("ep %p tid %u status %d\n", ep,
1351 ep->hwtid, status);
1352 set_bit(CONN_RPL_UPCALL, &ep->com.history);
1353 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1356 deref_cm_id(&ep->com);
1359 static int connect_request_upcall(struct c4iw_ep *ep)
1364 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1367 memcpy(&event.local_addr, &ep->com.local_addr,
1368 sizeof(ep->com.local_addr));
1369 memcpy(&event.remote_addr, &ep->com.remote_addr,
1370 sizeof(ep->com.remote_addr));
1371 event.provider_data = ep;
1372 if (!ep->tried_with_mpa_v1) {
1374 event.ord = ep->ord;
1375 event.ird = ep->ird;
1376 event.private_data_len = ep->plen -
1378 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1382 event.ord = cur_max_read_depth(ep->com.dev);
1383 event.ird = cur_max_read_depth(ep->com.dev);
1384 event.private_data_len = ep->plen;
1385 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1387 c4iw_get_ep(&ep->com);
1388 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1391 c4iw_put_ep(&ep->com);
1392 set_bit(CONNREQ_UPCALL, &ep->com.history);
1393 c4iw_put_ep(&ep->parent_ep->com);
1397 static void established_upcall(struct c4iw_ep *ep)
1401 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1404 event.ird = ep->ord;
1405 event.ord = ep->ird;
1406 if (ep->com.cm_id) {
1407 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1408 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1409 set_bit(ESTAB_UPCALL, &ep->com.history);
1413 static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
1419 pr_debug("ep %p tid %u credits %u\n",
1420 ep, ep->hwtid, credits);
1432 if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
1433 credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
1438 cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
1441 c4iw_ofld_send(&ep->com.dev->rdev, skb);
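
update_rx_credits() returns RX credits to the hardware. The hardware field that carries the receive buffer size at connection setup is narrow, so any part of the receive window above RCV_BUFSIZ_M * 1024 could not be expressed there and is repaid by inflating the returned credit count, which is what lines 1432-1433 do. A toy calculation; the 0x3ff mask is my reading of the 10-bit RCV_BUFSIZ field in the adapter headers, and the window value is made up:

    #include <stdio.h>

    #define RCV_BUFSIZ_M 0x3ffU   /* assumed 10-bit field, per t4 headers */

    int main(void)
    {
        unsigned rcv_win = 1024 * 1024;      /* hypothetical 1 MiB window */
        unsigned credits = 4096;             /* bytes just consumed       */

        /* Repay the overage the setup-time field could not describe. */
        if (rcv_win > RCV_BUFSIZ_M * 1024)
            credits += rcv_win - RCV_BUFSIZ_M * 1024;

        printf("returning %u credit bytes\n", credits);
        return 0;
    }
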
1459 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1471 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1477 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1485 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1487 ep->mpa_pkt_len += skb->len;
1492 if (ep->mpa_pkt_len < sizeof(*mpa))
1494 mpa = (struct mpa_message *) ep->mpa_pkt;
1521 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1526 ep->plen = (u8) plen;
1532 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1545 if (stop_ep_timer(ep))
1553 __state_set(&ep->com, FPDU_MODE);
1554 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1555 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1556 ep->mpa_attr.version = mpa->revision;
1557 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1560 ep->mpa_attr.enhanced_rdma_conn =
1562 if (ep->mpa_attr.enhanced_rdma_conn) {
1564 (ep->mpa_pkt + sizeof(*mpa));
1569 pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
1570 resp_ird, resp_ord, ep->ird, ep->ord);
1577 if (ep->ird < resp_ord) {
1579 ep->com.dev->rdev.lldi.max_ordird_qp)
1580 ep->ird = resp_ord;
1583 } else if (ep->ird > resp_ord) {
1584 ep->ird = resp_ord;
1586 if (ep->ord > resp_ird) {
1588 ep->ord = resp_ird;
1594 ep->ird = resp_ord;
1595 ep->ord = resp_ird;
1602 ep->mpa_attr.p2p_type =
1606 ep->mpa_attr.p2p_type =
1612 ep->mpa_attr.p2p_type = p2p_type;
1615 ep->mpa_attr.crc_enabled,
1616 ep->mpa_attr.recv_marker_enabled,
1617 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1618 ep->mpa_attr.p2p_type, p2p_type);
1626 if ((ep->mpa_attr.version == 2) && peer2peer &&
1627 (ep->mpa_attr.p2p_type != p2p_type)) {
1628 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1632 attrs.mpa_attr = ep->mpa_attr;
1633 attrs.max_ird = ep->ird;
1634 attrs.max_ord = ep->ord;
1635 attrs.llp_stream_handle = ep;
1643 err = c4iw_modify_qp(ep->com.qp->rhp,
1644 ep->com.qp, mask, &attrs, 1);
1658 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1677 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1685 stop_ep_timer(ep);
1689 connect_reply_upcall(ep, err);
1705 static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1711 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1717 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
1725 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1727 ep->mpa_pkt_len += skb->len;
1733 if (ep->mpa_pkt_len < sizeof(*mpa))
1737 mpa = (struct mpa_message *) ep->mpa_pkt;
1762 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
1764 ep->plen = (u8) plen;
1769 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1776 ep->mpa_attr.initiator = 0;
1777 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1778 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1779 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1780 ep->mpa_attr.version = mpa->revision;
1782 ep->tried_with_mpa_v1 = 1;
1783 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1786 ep->mpa_attr.enhanced_rdma_conn =
1788 if (ep->mpa_attr.enhanced_rdma_conn) {
1790 (ep->mpa_pkt + sizeof(*mpa));
1791 ep->ird = ntohs(mpa_v2_params->ird) &
1793 ep->ird = min_t(u32, ep->ird,
1794 cur_max_read_depth(ep->com.dev));
1795 ep->ord = ntohs(mpa_v2_params->ord) &
1797 ep->ord = min_t(u32, ep->ord,
1798 cur_max_read_depth(ep->com.dev));
1800 ep->ird, ep->ord);
1805 ep->mpa_attr.p2p_type =
1809 ep->mpa_attr.p2p_type =
1815 ep->mpa_attr.p2p_type = p2p_type;
1818 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1819 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1820 ep->mpa_attr.p2p_type);
1822 __state_set(&ep->com, MPA_REQ_RCVD);
1825 mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
1826 if (ep->parent_ep->com.state != DEAD) {
1827 if (connect_request_upcall(ep))
1832 mutex_unlock(&ep->parent_ep->com.mutex);
1836 mutex_unlock(&ep->parent_ep->com.mutex);
1839 (void)stop_ep_timer(ep);
1846 struct c4iw_ep *ep;
1853 ep = get_ep_from_tid(dev, tid);
1854 if (!ep)
1856 pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
1859 mutex_lock(&ep->com.mutex);
1861 switch (ep->com.state) {
1863 update_rx_credits(ep, dlen);
1864 ep->rcv_seq += dlen;
1865 disconnect = process_mpa_reply(ep, skb);
1868 update_rx_credits(ep, dlen);
1869 ep->rcv_seq += dlen;
1870 disconnect = process_mpa_request(ep, skb);
1875 update_rx_credits(ep, dlen);
1878 " qpid %u ep %p state %d tid %u status %d\n",
1879 __func__, ep->com.qp->wq.sq.qid, ep,
1880 ep->com.state, ep->hwtid, status);
1882 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1890 mutex_unlock(&ep->com.mutex);
1892 c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
1893 c4iw_put_ep(&ep->com);
1897 static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx)
1901 adapter_type = ep->com.dev->rdev.lldi.adapter_type;
1910 if (ep->com.qp->ibqp.uobject)
1911 t4_set_wq_in_error(&ep->com.qp->wq, srqidx);
1913 c4iw_flush_srqidx(ep->com.qp, srqidx);
1920 struct c4iw_ep *ep;
1925 ep = get_ep_from_tid(dev, tid);
1926 if (!ep) {
1931 if (ep->com.qp && ep->com.qp->srq) {
1933 complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx);
1936 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
1937 mutex_lock(&ep->com.mutex);
1938 switch (ep->com.state) {
1940 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
1941 __state_set(&ep->com, DEAD);
1945 pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
1948 mutex_unlock(&ep->com.mutex);
1951 close_complete_upcall(ep, -ECONNRESET);
1952 release_ep_resources(ep);
1954 c4iw_put_ep(&ep->com);
1958 static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1975 ep->com.dev->rdev.lldi.ports[0],
1976 ep->l2t));
1977 sin = (struct sockaddr_in *)&ep->com.local_addr;
1980 sin = (struct sockaddr_in *)&ep->com.remote_addr;
1990 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
1992 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
1999 win = ep->rcv_win >> 10;
2009 L2T_IDX_V(ep->l2t->idx) |
2010 TX_CHAN_V(ep->tx_chan) |
2011 SMAC_SEL_V(ep->smac_idx) |
2012 DSCP_V(ep->tos >> 2) |
2016 TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
2019 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
2028 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
2029 set_bit(ACT_OFLD_CONN, &ep->com.history);
2030 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
2061 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
2063 ep->snd_win = snd_win;
2064 ep->rcv_win = rcv_win;
2066 ep->snd_win, ep->rcv_win);
2071 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
2102 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
2104 if (!ep->l2t) {
2108 ep->mtu = pdev->mtu;
2109 ep->tx_chan = cxgb4_port_chan(pdev);
2110 ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
2113 ep->txq_idx = cxgb4_port_idx(pdev) * step;
2116 ep->ctrlq_idx = cxgb4_port_idx(pdev);
2117 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
2119 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
2123 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
2125 if (!ep->l2t)
2127 ep->mtu = dst_mtu(dst);
2128 ep->tx_chan = cxgb4_port_chan(pdev);
2129 ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
2132 ep->txq_idx = cxgb4_port_idx(pdev) * step;
2133 ep->ctrlq_idx = cxgb4_port_idx(pdev);
2136 ep->rss_qid = cdev->rdev.lldi.rxq_ids[
2138 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
2141 ep->retry_with_mpa_v1 = 0;
2142 ep->tried_with_mpa_v1 = 0;
2154 static int c4iw_reconnect(struct c4iw_ep *ep)
2159 &ep->com.cm_id->m_local_addr;
2161 &ep->com.cm_id->m_remote_addr;
2163 &ep->com.cm_id->m_local_addr;
2165 &ep->com.cm_id->m_remote_addr;
2169 pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id);
2170 c4iw_init_wr_wait(ep->com.wr_waitp);
2181 size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
2182 if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
2190 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
2191 if (ep->atid == -1) {
2196 err = xa_insert_irq(&ep->com.dev->atids, ep->atid, ep, GFP_KERNEL);
2201 if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
2202 ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
2206 raddr->sin_port, ep->com.cm_id->tos);
2210 ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
2216 ep->com.cm_id->tos,
2221 if (!ep->dst) {
2226 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
2227 ep->com.dev->rdev.lldi.adapter_type,
2228 ep->com.cm_id->tos);
2235 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
2236 ep->l2t->idx);
2238 state_set(&ep->com, CONNECTING);
2239 ep->tos = ep->com.cm_id->tos;
2242 err = send_connect(ep);
2246 cxgb4_l2t_release(ep->l2t);
2248 dst_release(ep->dst);
2250 xa_erase_irq(&ep->com.dev->atids, ep->atid);
2252 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2260 connect_reply_upcall(ep, -ECONNRESET);
2262 c4iw_put_ep(&ep->com);
2269 struct c4iw_ep *ep;
2281 ep = lookup_atid(t, atid);
2282 la = (struct sockaddr_in *)&ep->com.local_addr;
2283 ra = (struct sockaddr_in *)&ep->com.remote_addr;
2284 la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
2285 ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
2287 pr_debug("ep %p atid %u status %u errno %d\n", ep, atid,
2293 ep->stats.connect_neg_adv++;
2300 set_bit(ACT_OPEN_RPL, &ep->com.history);
2313 if (ep->com.local_addr.ss_family == AF_INET &&
2315 ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
2323 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2324 set_bit(ACT_RETRY_INUSE, &ep->com.history);
2325 if (ep->com.remote_addr.ss_family == AF_INET6) {
2328 &ep->com.local_addr;
2330 ep->com.dev->rdev.lldi.ports[0],
2334 xa_erase_irq(&ep->com.dev->atids, atid);
2336 dst_release(ep->dst);
2337 cxgb4_l2t_release(ep->l2t);
2338 c4iw_reconnect(ep);
2343 if (ep->com.local_addr.ss_family == AF_INET) {
2358 connect_reply_upcall(ep, status2errno(status));
2359 state_set(&ep->com, DEAD);
2361 if (ep->com.remote_addr.ss_family == AF_INET6) {
2363 (struct sockaddr_in6 *)&ep->com.local_addr;
2364 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
2368 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl),
2369 ep->com.local_addr.ss_family);
2371 xa_erase_irq(&ep->com.dev->atids, atid);
2373 dst_release(ep->dst);
2374 cxgb4_l2t_release(ep->l2t);
2375 c4iw_put_ep(&ep->com);
2384 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
2386 if (!ep) {
2390 pr_debug("ep %p status %d error %d\n", ep,
2392 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
2393 c4iw_put_ep(&ep->com);
2402 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
2404 if (!ep) {
2408 pr_debug("ep %p\n", ep);
2409 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status));
2410 c4iw_put_ep(&ep->com);
2415 static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2425 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
2427 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2428 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
2430 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
2437 win = ep->rcv_win >> 10;
2445 L2T_IDX_V(ep->l2t->idx) |
2446 TX_CHAN_V(ep->tx_chan) |
2447 SMAC_SEL_V(ep->smac_idx) |
2448 DSCP_V(ep->tos >> 2) |
2452 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
2480 INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
2491 INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
2496 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
2497 t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);
2499 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
2535 pr_err("%s - listening ep not in LISTEN\n", __func__);
2549 pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
2557 pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
2574 pr_err("%s - failed to allocate ep entry!\n", __func__);
2675 struct c4iw_ep *ep;
2681 ep = get_ep_from_tid(dev, tid);
2682 if (!ep)
2685 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2686 ep->snd_seq = be32_to_cpu(req->snd_isn);
2687 ep->rcv_seq = be32_to_cpu(req->rcv_isn);
2688 ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
2690 pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt);
2692 set_emss(ep, tcp_opt);
2694 dst_confirm(ep->dst);
2695 mutex_lock(&ep->com.mutex);
2696 ep->com.state = MPA_REQ_WAIT;
2697 start_ep_timer(ep);
2698 set_bit(PASS_ESTAB, &ep->com.history);
2699 ret = send_flowc(ep);
2700 mutex_unlock(&ep->com.mutex);
2702 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2703 c4iw_put_ep(&ep->com);
2711 struct c4iw_ep *ep;
2718 ep = get_ep_from_tid(dev, tid);
2719 if (!ep)
2722 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2723 dst_confirm(ep->dst);
2725 set_bit(PEER_CLOSE, &ep->com.history);
2726 mutex_lock(&ep->com.mutex);
2727 switch (ep->com.state) {
2729 __state_set(&ep->com, CLOSING);
2732 __state_set(&ep->com, CLOSING);
2733 connect_reply_upcall(ep, -ECONNRESET);
2743 __state_set(&ep->com, CLOSING);
2744 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
2745 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
2748 __state_set(&ep->com, CLOSING);
2749 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
2750 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
2753 start_ep_timer(ep);
2754 __state_set(&ep->com, CLOSING);
2756 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2759 peer_close_upcall(ep);
2767 __state_set(&ep->com, MORIBUND);
2771 (void)stop_ep_timer(ep);
2772 if (ep->com.cm_id && ep->com.qp) {
2774 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2777 close_complete_upcall(ep, 0);
2778 __state_set(&ep->com, DEAD);
2786 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
2788 mutex_unlock(&ep->com.mutex);
2790 c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2792 release_ep_resources(ep);
2793 c4iw_put_ep(&ep->com);
2797 static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep)
2799 complete_cached_srq_buffers(ep, ep->srqe_idx);
2800 if (ep->com.cm_id && ep->com.qp) {
2804 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2807 peer_abort_upcall(ep);
2808 release_ep_resources(ep);
2809 c4iw_put_ep(&ep->com);
2815 struct c4iw_ep *ep;
2826 ep = get_ep_from_tid(dev, tid);
2827 if (!ep)
2834 ep->hwtid, status, neg_adv_str(status));
2835 ep->stats.abort_neg_adv++;
2842 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid,
2843 ep->com.state);
2844 set_bit(PEER_ABORT, &ep->com.history);
2851 if (ep->com.state != MPA_REQ_SENT)
2852 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
2854 mutex_lock(&ep->com.mutex);
2855 switch (ep->com.state) {
2857 c4iw_put_ep(&ep->parent_ep->com);
2860 (void)stop_ep_timer(ep);
2863 (void)stop_ep_timer(ep);
2865 (mpa_rev == 2 && ep->tried_with_mpa_v1))
2866 connect_reply_upcall(ep, -ECONNRESET);
2878 ep->retry_with_mpa_v1 = 1;
2887 stop_ep_timer(ep);
2890 if (ep->com.qp && ep->com.qp->srq) {
2894 complete_cached_srq_buffers(ep, srqidx);
2896 /* Hold ep ref until finish_peer_abort() */
2897 c4iw_get_ep(&ep->com);
2898 __state_set(&ep->com, ABORTING);
2899 set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags);
2900 read_tcb(ep);
2906 if (ep->com.cm_id && ep->com.qp) {
2908 ret = c4iw_modify_qp(ep->com.qp->rhp,
2909 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2914 peer_abort_upcall(ep);
2920 mutex_unlock(&ep->com.mutex);
2923 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
2926 dst_confirm(ep->dst);
2927 if (ep->com.state != ABORTING) {
2928 __state_set(&ep->com, DEAD);
2930 if (!ep->retry_with_mpa_v1)
2933 mutex_unlock(&ep->com.mutex);
2935 rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
2941 cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);
2943 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2946 release_ep_resources(ep);
2947 else if (ep->retry_with_mpa_v1) {
2948 if (ep->com.remote_addr.ss_family == AF_INET6) {
2951 &ep->com.local_addr;
2953 ep->com.dev->rdev.lldi.ports[0],
2957 xa_erase_irq(&ep->com.dev->hwtids, ep->hwtid);
2958 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
2959 ep->com.local_addr.ss_family);
2960 dst_release(ep->dst);
2961 cxgb4_l2t_release(ep->l2t);
2962 c4iw_reconnect(ep);
2966 c4iw_put_ep(&ep->com);
2967 /* Dereferencing ep, referenced in peer_abort_intr() */
2968 c4iw_put_ep(&ep->com);
2974 struct c4iw_ep *ep;
2980 ep = get_ep_from_tid(dev, tid);
2981 if (!ep)
2984 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
2987 mutex_lock(&ep->com.mutex);
2988 set_bit(CLOSE_CON_RPL, &ep->com.history);
2989 switch (ep->com.state) {
2991 __state_set(&ep->com, MORIBUND);
2994 (void)stop_ep_timer(ep);
2995 if ((ep->com.cm_id) && (ep->com.qp)) {
2997 c4iw_modify_qp(ep->com.qp->rhp,
2998 ep->com.qp,
3002 close_complete_upcall(ep, 0);
3003 __state_set(&ep->com, DEAD);
3010 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
3013 mutex_unlock(&ep->com.mutex);
3015 release_ep_resources(ep);
3016 c4iw_put_ep(&ep->com);
3024 struct c4iw_ep *ep;
3027 ep = get_ep_from_tid(dev, tid);
3029 if (ep) {
3030 if (ep->com.qp) {
3032 ep->com.qp->wq.sq.qid);
3034 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
3041 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
3042 c4iw_put_ep(&ep->com);
3044 pr_warn("TERM received tid %u no ep/qp\n", tid);
3056 struct c4iw_ep *ep;
3062 ep = get_ep_from_tid(dev, tid);
3063 if (!ep)
3065 pr_debug("ep %p tid %u credits %u\n",
3066 ep, ep->hwtid, credits);
3068 pr_debug("0 credit ack ep %p tid %u state %u\n",
3069 ep, ep->hwtid, state_read(&ep->com));
3073 dst_confirm(ep->dst);
3074 if (ep->mpa_skb) {
3075 pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
3076 ep, ep->hwtid, state_read(&ep->com),
3077 ep->mpa_attr.initiator ? 1 : 0);
3078 mutex_lock(&ep->com.mutex);
3079 kfree_skb(ep->mpa_skb);
3080 ep->mpa_skb = NULL;
3081 if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
3082 stop_ep_timer(ep);
3083 mutex_unlock(&ep->com.mutex);
3086 c4iw_put_ep(&ep->com);
3093 struct c4iw_ep *ep = to_ep(cm_id);
3095 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
3097 mutex_lock(&ep->com.mutex);
3098 if (ep->com.state != MPA_REQ_RCVD) {
3099 mutex_unlock(&ep->com.mutex);
3100 c4iw_put_ep(&ep->com);
3103 set_bit(ULP_REJECT, &ep->com.history);
3107 abort = send_mpa_reject(ep, pdata, pdata_len);
3108 mutex_unlock(&ep->com.mutex);
3110 stop_ep_timer(ep);
3111 c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
3112 c4iw_put_ep(&ep->com);
3121 struct c4iw_ep *ep = to_ep(cm_id);
3126 pr_debug("ep %p tid %u\n", ep, ep->hwtid);
3128 mutex_lock(&ep->com.mutex);
3129 if (ep->com.state != MPA_REQ_RCVD) {
3139 set_bit(ULP_ACCEPT, &ep->com.history);
3140 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
3141 (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
3146 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
3147 if (conn_param->ord > ep->ird) {
3149 conn_param->ord = ep->ird;
3151 ep->ird = conn_param->ird;
3152 ep->ord = conn_param->ord;
3153 send_mpa_reject(ep, conn_param->private_data,
3159 if (conn_param->ird < ep->ord) {
3161 ep->ord <= h->rdev.lldi.max_ordird_qp) {
3162 conn_param->ird = ep->ord;
3169 ep->ird = conn_param->ird;
3170 ep->ord = conn_param->ord;
3172 if (ep->mpa_attr.version == 1) {
3173 if (peer2peer && ep->ird == 0)
3174 ep->ird = 1;
3177 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
3178 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
3179 ep->ird = 1;
3182 pr_debug("ird %d ord %d\n", ep->ird, ep->ord);
3184 ep->com.cm_id = cm_id;
3185 ref_cm_id(&ep->com);
3186 ep->com.qp = qp;
3187 ref_qp(ep);
3190 attrs.mpa_attr = ep->mpa_attr;
3191 attrs.max_ird = ep->ird;
3192 attrs.max_ord = ep->ord;
3193 attrs.llp_stream_handle = ep;
3203 err = c4iw_modify_qp(ep->com.qp->rhp,
3204 ep->com.qp, mask, &attrs, 1);
3208 set_bit(STOP_MPA_TIMER, &ep->com.flags);
3209 err = send_mpa_reply(ep, conn_param->private_data,
3214 __state_set(&ep->com, FPDU_MODE);
3215 established_upcall(ep);
3216 mutex_unlock(&ep->com.mutex);
3217 c4iw_put_ep(&ep->com);
3220 deref_cm_id(&ep->com);
3224 mutex_unlock(&ep->com.mutex);
3226 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
3227 c4iw_put_ep(&ep->com);
3300 struct c4iw_ep *ep;
3314 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3315 if (!ep) {
3316 pr_err("%s - cannot alloc ep\n", __func__);
3321 skb_queue_head_init(&ep->com.ep_skb_list);
3322 if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
3327 timer_setup(&ep->timer, ep_timeout, 0);
3328 ep->plen = conn_param->private_data_len;
3329 if (ep->plen)
3330 memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
3331 conn_param->private_data, ep->plen);
3332 ep->ird = conn_param->ird;
3333 ep->ord = conn_param->ord;
3335 if (peer2peer && ep->ord == 0)
3336 ep->ord = 1;
3338 ep->com.cm_id = cm_id;
3339 ref_cm_id(&ep->com);
3340 cm_id->provider_data = ep;
3341 ep->com.dev = dev;
3342 ep->com.qp = get_qhp(dev, conn_param->qpn);
3343 if (!ep->com.qp) {
3348 ref_qp(ep);
3350 ep->com.qp, cm_id);
3355 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
3356 if (ep->atid == -1) {
3361 err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL);
3365 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3366 sizeof(ep->com.local_addr));
3367 memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
3368 sizeof(ep->com.remote_addr));
3370 laddr = (struct sockaddr_in *)&ep->com.local_addr;
3371 raddr = (struct sockaddr_in *)&ep->com.remote_addr;
3372 laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3373 raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;
3392 ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
3415 ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
3422 if (!ep->dst) {
3428 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
3429 ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
3436 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
3437 ep->l2t->idx);
3439 state_set(&ep->com, CONNECTING);
3440 ep->tos = cm_id->tos;
3443 err = send_connect(ep);
3447 cxgb4_l2t_release(ep->l2t);
3449 dst_release(ep->dst);
3451 xa_erase_irq(&ep->com.dev->atids, ep->atid);
3453 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
3455 skb_queue_purge(&ep->com.ep_skb_list);
3456 deref_cm_id(&ep->com);
3458 c4iw_put_ep(&ep->com);
3463 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3467 &ep->com.local_addr;
3470 err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
3475 c4iw_init_wr_wait(ep->com.wr_waitp);
3476 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
3477 ep->stid, &sin6->sin6_addr,
3479 ep->com.dev->rdev.lldi.rxq_ids[0]);
3481 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3482 ep->com.wr_waitp,
3487 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3490 err, ep->stid,
3496 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3500 &ep->com.local_addr;
3505 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3507 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
3509 if (c4iw_fatal_error(&ep->com.dev->rdev)) {
3518 c4iw_init_wr_wait(ep->com.wr_waitp);
3519 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
3520 ep->stid, sin->sin_addr.s_addr, sin->sin_port,
3521 0, ep->com.dev->rdev.lldi.rxq_ids[0]);
3523 err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3524 ep->com.wr_waitp,
3531 , err, ep->stid,
3540 struct c4iw_listen_ep *ep;
3544 ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3545 if (!ep) {
3546 pr_err("%s - cannot alloc ep\n", __func__);
3550 skb_queue_head_init(&ep->com.ep_skb_list);
3551 pr_debug("ep %p\n", ep);
3552 ep->com.cm_id = cm_id;
3553 ref_cm_id(&ep->com);
3554 ep->com.dev = dev;
3555 ep->backlog = backlog;
3556 memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3557 sizeof(ep->com.local_addr));
3563 ep->com.local_addr.ss_family == AF_INET)
3564 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
3565 cm_id->m_local_addr.ss_family, ep);
3567 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
3568 cm_id->m_local_addr.ss_family, ep);
3570 if (ep->stid == -1) {
3575 err = xa_insert_irq(&dev->stids, ep->stid, ep, GFP_KERNEL);
3579 state_set(&ep->com, LISTEN);
3580 if (ep->com.local_addr.ss_family == AF_INET)
3581 err = create_server4(dev, ep);
3583 err = create_server6(dev, ep);
3585 cm_id->provider_data = ep;
3588 xa_erase_irq(&ep->com.dev->stids, ep->stid);
3590 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3591 ep->com.local_addr.ss_family);
3593 deref_cm_id(&ep->com);
3594 c4iw_put_ep(&ep->com);
3603 struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
3605 pr_debug("ep %p\n", ep);
3608 state_set(&ep->com, DEAD);
3609 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
3610 ep->com.local_addr.ss_family == AF_INET) {
3612 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3613 ep->com.dev->rdev.lldi.rxq_ids[0], false);
3616 c4iw_init_wr_wait(ep->com.wr_waitp);
3618 ep->com.dev->rdev.lldi.ports[0], ep->stid,
3619 ep->com.dev->rdev.lldi.rxq_ids[0],
3620 ep->com.local_addr.ss_family == AF_INET6);
3623 err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
3625 sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3626 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3629 xa_erase_irq(&ep->com.dev->stids, ep->stid);
3630 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3631 ep->com.local_addr.ss_family);
3633 deref_cm_id(&ep->com);
3634 c4iw_put_ep(&ep->com);
3638 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
3645 mutex_lock(&ep->com.mutex);
3647 pr_debug("ep %p state %s, abrupt %d\n", ep,
3648 states[ep->com.state], abrupt);
3651 * Ref the ep here in case we have fatal errors causing the
3652 * ep to be released and freed.
3654 c4iw_get_ep(&ep->com);
3656 rdev = &ep->com.dev->rdev;
3659 close_complete_upcall(ep, -EIO);
3660 ep->com.state = DEAD;
3662 switch (ep->com.state) {
3671 ep->com.state = ABORTING;
3673 ep->com.state = CLOSING;
3679 if (ep->mpa_skb &&
3680 test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
3681 clear_bit(STOP_MPA_TIMER, &ep->com.flags);
3682 stop_ep_timer(ep);
3684 start_ep_timer(ep);
3686 set_bit(CLOSE_SENT, &ep->com.flags);
3689 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3692 (void)stop_ep_timer(ep);
3693 ep->com.state = ABORTING;
3695 ep->com.state = MORIBUND;
3701 pr_debug("ignoring disconnect ep %p state %u\n",
3702 ep, ep->com.state);
3705 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
3711 set_bit(EP_DISC_ABORT, &ep->com.history);
3712 ret = send_abort(ep);
3714 set_bit(EP_DISC_CLOSE, &ep->com.history);
3715 ret = send_halfclose(ep);
3718 set_bit(EP_DISC_FAIL, &ep->com.history);
3720 stop_ep_timer(ep);
3721 close_complete_upcall(ep, -EIO);
3723 if (ep->com.qp) {
3727 ret = c4iw_modify_qp(ep->com.qp->rhp,
3728 ep->com.qp,
3738 mutex_unlock(&ep->com.mutex);
3739 c4iw_put_ep(&ep->com);
3741 release_ep_resources(ep);
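
c4iw_ep_disconnect() above reduces to one decision once the state machine allows a close: abrupt teardown sends a TCP RST via an abort work request (send_abort, line 3712), while graceful teardown sends a FIN via a half-close (send_halfclose, line 3715) and arms the ep timer so a peer that never answers is eventually aborted anyway; a failed send is treated as fatal (EP_DISC_FAIL) and resources are released directly. A sketch of just that decision, with stubbed helpers and illustrative names:

    #include <stdbool.h>

    struct ep { int state; };

    static int  send_abort(struct ep *ep)     { (void)ep; return 0; }
    static int  send_halfclose(struct ep *ep) { (void)ep; return 0; }
    static void start_timer(struct ep *ep)    { (void)ep; }

    /* Returns true when the caller must tear the ep down itself
     * because even the close/abort message could not be sent. */
    static bool disconnect_sketch(struct ep *ep, bool abrupt)
    {
        int ret;

        if (abrupt) {
            ret = send_abort(ep);
        } else {
            start_timer(ep);        /* bounds how long we wait for the peer */
            ret = send_halfclose(ep);
        }
        return ret != 0;            /* true: fatal, release resources now */
    }
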
3748 struct c4iw_ep *ep;
3751 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
3753 if (!ep)
3758 set_bit(ACT_RETRY_NOMEM, &ep->com.history);
3759 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3760 send_fw_act_open_req(ep, atid);
3765 set_bit(ACT_RETRY_INUSE, &ep->com.history);
3766 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3767 send_fw_act_open_req(ep, atid);
3781 connect_reply_upcall(ep, status2errno(req->retval));
3782 state_set(&ep->com, DEAD);
3783 if (ep->com.remote_addr.ss_family == AF_INET6) {
3785 (struct sockaddr_in6 *)&ep->com.local_addr;
3786 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3791 dst_release(ep->dst);
3792 cxgb4_l2t_release(ep->l2t);
3793 c4iw_put_ep(&ep->com);
3850 struct c4iw_ep *ep;
3854 ep = get_ep_from_tid(dev, tid);
3855 if (!ep)
3868 c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */
3869 c4iw_put_ep(&ep->com); /* from read_tcb() */
3873 if (++ep->rx_pdu_out_cnt >= 2) {
3877 read_tcb(ep);
3881 ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
3884 pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
3886 if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags))
3887 finish_peer_abort(dev, ep);
3888 else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags))
3889 send_abort_req(ep);
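
read_tcb_rpl() closes the loop opened by send_abort() and peer_abort(): the driver re-issues the TCB read while PDUs are still in flight (giving up after two retries, per the rx_pdu_out_cnt test above), extracts the SRQ read index from the returned TCB, and then lets the saved flag decide whether to finish a peer abort or to send the deferred local abort. A dispatch sketch, names hypothetical:

    #include <stdbool.h>

    struct ep {
        bool peer_abort_in_progress;   /* set in peer_abort()          */
        bool abort_req_in_progress;    /* set in send_abort()          */
        unsigned srqe_idx;             /* recovered from the TCB reply */
    };

    static void finish_peer_abort(struct ep *ep) { (void)ep; }
    static void send_abort_req(struct ep *ep)    { (void)ep; }

    static void read_tcb_reply_dispatch(struct ep *ep, unsigned srq_idx)
    {
        ep->srqe_idx = srq_idx;
        if (ep->peer_abort_in_progress)
            finish_peer_abort(ep);       /* complete the remote abort   */
        else if (ep->abort_req_in_progress)
            send_abort_req(ep);          /* now safe to abort locally   */
    }
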
4227 static void process_timeout(struct c4iw_ep *ep)
4232 mutex_lock(&ep->com.mutex);
4233 pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
4234 set_bit(TIMEDOUT, &ep->com.history);
4235 switch (ep->com.state) {
4237 connect_reply_upcall(ep, -ETIMEDOUT);
4246 if (ep->com.cm_id && ep->com.qp) {
4248 c4iw_modify_qp(ep->com.qp->rhp,
4249 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
4252 close_complete_upcall(ep, -ETIMEDOUT);
4258 * These states are expected if the ep timed out at the same
4265 WARN(1, "%s unexpected state ep %p tid %u state %u\n",
4266 __func__, ep, ep->hwtid, ep->com.state);
4269 mutex_unlock(&ep->com.mutex);
4271 c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
4272 c4iw_put_ep(&ep->com);
4277 struct c4iw_ep *ep;
4288 ep = list_entry(tmp, struct c4iw_ep, entry);
4289 process_timeout(ep);
4326 struct c4iw_ep *ep = from_timer(ep, t, timer);
4330 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
4334 if (!ep->entry.next) {
4335 list_add_tail(&ep->entry, &timeout_list);
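
ep_timeout() runs in timer (softirq) context, where it cannot take ep->com.mutex, so it only marks the endpoint: set TIMEOUT (again racing with stop_ep_timer() via test_and_set_bit), queue the ep on the global timeout_list if it is not already there (the !ep->entry.next test), and kick a worker whose handler, process_timeout() above, does the real work under the mutex. A sketch of that defer-to-worker shape; a plain boolean stands in for the kernel's list_head emptiness test:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct ep {
        struct ep *next;
        bool queued;                 /* mirrors the !ep->entry.next test */
        bool timed_out;              /* mirrors the TIMEOUT flag bit     */
    };

    static pthread_mutex_t timeout_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct ep *timeout_list;      /* drained by the worker thread */

    static void kick_worker(void) { /* queue_work() stand-in */ }

    /* Timer callback: may not sleep, so it only queues and wakes. */
    static void ep_timeout_sketch(struct ep *ep)
    {
        bool kickit = false;

        pthread_mutex_lock(&timeout_lock);
        if (!ep->timed_out) {            /* test_and_set_bit(TIMEOUT, ...) */
            ep->timed_out = true;
            if (!ep->queued) {           /* only insert if not queued yet  */
                ep->next = timeout_list;
                timeout_list = ep;
                ep->queued = true;
                kickit = true;
            }
        }
        pthread_mutex_unlock(&timeout_lock);
        if (kickit)
            kick_worker();               /* worker runs process_timeout() */
    }
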
4408 struct c4iw_ep *ep;
4411 ep = get_ep_from_tid(dev, tid);
4413 if (!ep) {
4420 ep->hwtid, req->status,
4424 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state);
4426 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);