/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"

/*
 * Get one CQ entry from cxio and map it to openib.
 *
 * Returns:
 *	0			CQ empty
 *	1			CQE returned
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			    struct ib_wc *wc)
{
	struct iwch_qp *qhp = NULL;
	struct t3_cqe cqe, *rd_cqe;
	struct t3_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

	rd_cqe = cxio_next_cqe(&chp->cq);

	if (!rd_cqe)
		return 0;

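	/*
	 * The CQE may not be affiliated with a live QP (e.g. the QP has
	 * already been destroyed).  In that case poll with a NULL wq so
	 * cxio_poll_cq() can still consume and discard the entry.
	 */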
	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
			   &credit);
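	/*
	 * T3A adapters do not return CQ credits automatically; hand back
	 * any credits accumulated while polling.
	 */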
	if (t3a_device(chp->rhp) && credit) {
		PDBG("%s updating %d cq credits on id %d\n", __FUNCTION__,
		     credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}

	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(cqe);

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __FUNCTION__,
	     CQE_QPID(cqe), CQE_TYPE(cqe),
	     CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
	     CQE_WRID_LOW(cqe), (unsigned long long) cookie);

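	/*
	 * A CQE type of 0 is a receive (RQ) completion; any other type
	 * completes a send-queue WR and is decoded by opcode below.
	 */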
	if (CQE_TYPE(cqe) == 0) {
		if (!CQE_STATUS(cqe))
			wc->byte_len = CQE_LEN(cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
	} else {
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		/* these aren't supported yet */
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
		case T3_LOCAL_INV:
		case T3_FAST_REGISTER:
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}

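	/*
	 * Translate the T3 completion status into the corresponding
	 * ib_wc status; WRs completed as part of a flush always report
	 * IB_WC_WR_FLUSH_ERR.
	 */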
	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_CRC:
		case TPT_ERR_MARKER:
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN:
		case TPT_ERR_TBIT:
		case TPT_ERR_MO:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
		case TPT_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected cqe_status 0x%x for "
			       "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}

int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
#ifdef DEBUG
		int i = 0;
#endif

		/*
		 * Because T3 can post CQEs that are _not_ associated
		 * with a WR, we might have to poll again after removing
		 * one of these.
		 */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
#ifdef DEBUG
			/* In debug builds, catch a runaway -EAGAIN loop. */
			BUG_ON(++i > 1000);
#endif
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);

	if (err < 0)
		return err;
	else
		return npolled;
}
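
/*
 * Usage sketch (illustrative, not part of the driver): a ULP drains
 * completions through ib_poll_cq(), which dispatches to iwch_poll_cq()
 * via the provider's method table.  The helper below and its name,
 * example_drain_cq(), are hypothetical and shown only to demonstrate
 * the return contract above: > 0 is the number of WCs filled in,
 * 0 means the CQ is empty, and < 0 is a fatal error.
 */
#if 0
static void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int n;

	/* Poll one WC at a time until the CQ is empty or errors out. */
	while ((n = ib_poll_cq(cq, 1, &wc)) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			printk(KERN_ERR "wr_id 0x%llx completed in error: %d\n",
			       (unsigned long long) wc.wr_id, wc.status);
	}
	if (n < 0)
		printk(KERN_ERR "ib_poll_cq failed: %d\n", n);
}
#endif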