/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/gfp.h>
#include <linux/mman.h>
#include <net/sock.h>
#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"
#include "cxio_wr.h"

static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
			  struct respQ_msg_t *rsp_msg,
			  enum ib_event_type ib_event,
			  int send_term)
{
	struct ib_event event;
	struct iwch_qp_attributes attrs;
	struct iwch_qp *qhp;

	spin_lock(&rnicp->lock);
	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));

	if (!qhp) {
		printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n",
		       __func__, CQE_STATUS(rsp_msg->cqe),
		       CQE_QPID(rsp_msg->cqe));
		spin_unlock(&rnicp->lock);
		return;
	}

	if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
	    (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
		PDBG("%s AE received after RTS - "
		     "qp state %d qpid 0x%x status 0x%x\n", __func__,
		     qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
		spin_unlock(&rnicp->lock);
		return;
	}

	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));

	atomic_inc(&qhp->refcnt);
	spin_unlock(&rnicp->lock);

	event.event = ib_event;
	event.device = chp->ibcq.device;
	if (ib_event == IB_EVENT_CQ_ERR)
		event.element.cq = &chp->ibcq;
	else
		event.element.qp = &qhp->ibqp;

	if (qhp->ibqp.event_handler)
		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

	if (qhp->attr.state == IWCH_QP_STATE_RTS) {
		attrs.next_state = IWCH_QP_STATE_TERMINATE;
		iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		if (send_term)
			iwch_post_terminate(qhp, rsp_msg);
	}

	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);
}

void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
{
	struct iwch_dev *rnicp;
	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
	struct iwch_cq *chp;
	struct iwch_qp *qhp;
	u32 cqid = RSPQ_CQID(rsp_msg);

	rnicp = (struct iwch_dev *) rdev_p->ulp;
	spin_lock(&rnicp->lock);
	chp = get_chp(rnicp, cqid);
	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
	if (!chp || !qhp) {
		printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x \n",
		       cqid, CQE_QPID(rsp_msg->cqe),
		       CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
		       CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
		       CQE_WRID_LOW(rsp_msg->cqe));
		spin_unlock(&rnicp->lock);
		goto out;
	}
	iwch_qp_add_ref(&qhp->ibqp);
	atomic_inc(&chp->refcnt);
	spin_unlock(&rnicp->lock);

	/*
	 * 1) completion of our sending a TERMINATE.
	 * 2) incoming TERMINATE message.
	 */
	if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
	    (CQE_STATUS(rsp_msg->cqe) == 0)) {
		if (SQ_TYPE(rsp_msg->cqe)) {
			PDBG("%s QPID 0x%x ep %p disconnecting\n",
			     __func__, qhp->wq.qpid, qhp->ep);
			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
		} else {
			PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__,
			     qhp->wq.qpid);
			post_qp_event(rnicp, chp, rsp_msg,
				      IB_EVENT_QP_REQ_ERR, 0);
			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
		}
		goto done;
	}

	/* Bad incoming Read request */
	if (SQ_TYPE(rsp_msg->cqe) &&
	    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
		goto done;
	}

	/* Bad incoming write */
	if (RQ_TYPE(rsp_msg->cqe) &&
	    (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
		goto done;
	}

	switch (CQE_STATUS(rsp_msg->cqe)) {

	/* Completion Events */
	case TPT_ERR_SUCCESS:

		/*
		 * Confirm the destination entry if this is a RECV completion.
		 */
		if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
			dst_confirm(qhp->ep->dst);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		break;

	case TPT_ERR_STAG:
	case TPT_ERR_PDID:
	case TPT_ERR_QPID:
	case TPT_ERR_ACCESS:
	case TPT_ERR_WRAP:
	case TPT_ERR_BOUND:
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
		break;

	/* Device Fatal Errors */
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
		break;

	/* QP Fatal Errors */
	case TPT_ERR_OUT_OF_RQE:
	case TPT_ERR_PBL_ADDR_BOUND:
	case TPT_ERR_CRC:
	case TPT_ERR_MARKER:
	case TPT_ERR_PDU_LEN_ERR:
	case TPT_ERR_DDP_VERSION:
	case TPT_ERR_RDMA_VERSION:
	case TPT_ERR_OPCODE:
	case TPT_ERR_DDP_QUEUE_NUM:
	case TPT_ERR_MSN:
	case TPT_ERR_TBIT:
	case TPT_ERR_MO:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_RQE_ADDR_BOUND:
	case TPT_ERR_IRD_OVERFLOW:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
		break;

	default:
		printk(KERN_ERR MOD "Unknown T3 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
		break;
	}
done:
	if (atomic_dec_and_test(&chp->refcnt))
		wake_up(&chp->wait);
	iwch_qp_rem_ref(&qhp->ibqp);
out:
	dev_kfree_skb_irq(skb);
}