/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if HAVE_CONFIG_H
#  include <config.h>
#endif /* HAVE_CONFIG_H */

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <netinet/in.h>
#include <string.h>

#include <infiniband/opcode.h>

#include "mlx4.h"
#include "doorbell.h"

enum {
	MLX4_CQ_DOORBELL			= 0x20
};

enum {
	CQ_OK					=  0,
	CQ_EMPTY				= -1,
	CQ_POLL_ERR				= -2
};

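/*
 * Command values written in the CQ arm doorbell: request a completion
 * event for the next solicited completion only, or for any next
 * completion (see mlx4_arm_cq() below).
 */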
#define MLX4_CQ_DB_REQ_NOT_SOL			(1 << 24)
#define MLX4_CQ_DB_REQ_NOT			(2 << 24)

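/*
 * Layout of a CQE's owner_sr_opcode byte: bit 7 is the ownership bit,
 * bit 6 distinguishes send from receive completions, and the low five
 * bits carry the completion opcode.
 */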
enum {
	MLX4_CQE_OWNER_MASK			= 0x80,
	MLX4_CQE_IS_SEND_MASK			= 0x40,
	MLX4_CQE_OPCODE_MASK			= 0x1f
};

enum {
	MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX4_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX4_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX4_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX4_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX4_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};

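/*
 * Hardware-format CQE.  Multi-byte fields are written by the HCA in
 * big-endian order, hence the ntohl()/ntohs() conversions when they are
 * read below.  struct mlx4_err_cqe overlays the same slot for
 * completions with error.
 */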
struct mlx4_cqe {
	uint32_t	my_qpn;
	uint32_t	immed_rss_invalid;
	uint32_t	g_mlpath_rqpn;
	uint8_t		sl;
	uint8_t		reserved1;
	uint16_t	rlid;
	uint32_t	reserved2;
	uint32_t	byte_cnt;
	uint16_t	wqe_index;
	uint16_t	checksum;
	uint8_t		reserved3[3];
	uint8_t		owner_sr_opcode;
};

struct mlx4_err_cqe {
	uint32_t	my_qpn;
	uint32_t	reserved1[5];
	uint16_t	wqe_index;
	uint8_t		vendor_err;
	uint8_t		syndrome;
	uint8_t		reserved2[3];
	uint8_t		owner_sr_opcode;
};

static struct mlx4_cqe *get_cqe(struct mlx4_cq *cq, int entry)
{
	return cq->buf.buf + entry * MLX4_CQ_ENTRY_SIZE;
}

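/*
 * A CQE is owned by software (i.e. is a valid, not-yet-consumed entry)
 * when its ownership bit matches the wrap-count parity of the index 'n':
 * hardware toggles the bit it writes on every pass through the buffer,
 * so stale entries from the previous pass fail the check below.
 */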
static void *get_sw_cqe(struct mlx4_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibv_cq.cqe);

	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibv_cq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_cq *cq)
{
	return get_sw_cqe(cq, cq->cons_index);
}

static void update_cons_index(struct mlx4_cq *cq)
{
	*cq->set_ci_db = htonl(cq->cons_index & 0xffffff);
}

static void mlx4_handle_error_cqe(struct mlx4_err_cqe *cqe, struct ibv_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR)
		printf(PFX "local QP operation err "
		       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
		       "opcode = %02x)\n",
		       ntohl(cqe->my_qpn), ntohs(cqe->wqe_index),
		       cqe->vendor_err,
		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IBV_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IBV_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IBV_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IBV_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IBV_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IBV_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IBV_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IBV_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IBV_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IBV_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IBV_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IBV_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IBV_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IBV_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err;
}

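/*
 * Consume one CQE.  Returns CQ_OK with *wc filled in, CQ_EMPTY if no
 * software-owned CQE is available, or CQ_POLL_ERR if the completion
 * refers to a QP or XRC SRQ that cannot be found.  *cur_qp caches the QP
 * of the previous completion so repeated completions on the same QP skip
 * the table lookup.
 */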
static int mlx4_poll_one(struct mlx4_cq *cq,
			 struct mlx4_qp **cur_qp,
			 struct ibv_wc *wc)
{
	struct mlx4_wq *wq;
	struct mlx4_cqe *cqe;
	struct mlx4_srq *srq = NULL;
	uint32_t qpn;
	uint32_t srqn;
	uint32_t g_mlpath_rqpn;
	uint16_t wqe_index;
	int is_error;
	int is_send;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return CQ_EMPTY;

	++cq->cons_index;

	VALGRIND_MAKE_MEM_DEFINED(cqe, sizeof *cqe);

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	qpn = ntohl(cqe->my_qpn);

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (qpn & MLX4_XRC_QPN_BIT && !is_send) {
		srqn = ntohl(cqe->g_mlpath_rqpn) & 0xffffff;
		/*
		 * We do not have to take the XRC SRQ table lock here,
		 * because CQs will be locked while XRC SRQs are removed
		 * from the table.
		 */
		srq = mlx4_find_xrc_srq(to_mctx(cq->ibv_cq.context), srqn);
		if (!srq)
			return CQ_POLL_ERR;
	} else if (!*cur_qp || (qpn & 0xffffff) != (*cur_qp)->ibv_qp.qp_num) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mlx4_find_qp(to_mctx(cq->ibv_cq.context),
				       qpn & 0xffffff);
		if (!*cur_qp)
			return CQ_POLL_ERR;
	}

	wc->qp_num = qpn & 0xffffff;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ntohs(cqe->wqe_index);
		wq->tail += (uint16_t) (wqe_index - (uint16_t) wq->tail);
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if (srq) {
		wqe_index = htons(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_index];
		mlx4_free_srq_wqe(srq, wqe_index);
	} else if ((*cur_qp)->ibv_qp.srq) {
		srq = to_msrq((*cur_qp)->ibv_qp.srq);
		wqe_index = htons(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_index];
		mlx4_free_srq_wqe(srq, wqe_index);
	} else {
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}

	if (is_error) {
		mlx4_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return CQ_OK;
	}

	wc->status = IBV_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IBV_WC_WITH_IMM;
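			/* fall through */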
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode    = IBV_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IBV_WC_WITH_IMM;
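			/* fall through */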
		case MLX4_OPCODE_SEND:
			wc->opcode    = IBV_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode    = IBV_WC_RDMA_READ;
			wc->byte_len  = ntohl(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode    = IBV_WC_COMP_SWAP;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode    = IBV_WC_FETCH_ADD;
			wc->byte_len  = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode    = IBV_WC_BIND_MW;
			break;
		default:
			/* assume it's a send completion */
			wc->opcode    = IBV_WC_SEND;
			break;
		}
	} else {
		wc->byte_len = ntohl(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode   = IBV_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IBV_WC_WITH_IMM;
			wc->imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode   = IBV_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode   = IBV_WC_RECV;
			wc->wc_flags = IBV_WC_WITH_IMM;
			wc->imm_data = cqe->immed_rss_invalid;
			break;
		}

		wc->slid	   = ntohs(cqe->rlid);
		wc->sl		   = cqe->sl >> 4;
		g_mlpath_rqpn	   = ntohl(cqe->g_mlpath_rqpn);
		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IBV_WC_GRH : 0;
		wc->pkey_index     = ntohl(cqe->immed_rss_invalid) & 0x7f;
	}

	return CQ_OK;
}

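/*
 * mlx4_poll_cq() backs ibv_poll_cq() for this provider: it drains up to
 * 'ne' completions into 'wc' under the CQ spinlock and updates the
 * consumer-index doorbell record once at the end.  It returns the number
 * of entries polled, or a negative value if a CQE refers to a QP or XRC
 * SRQ that is no longer in the context's tables.
 *
 * Illustrative consumer loop through the verbs API (not part of this
 * file; NUM_WC, handle_error() and handle_completion() are placeholders):
 *
 *	struct ibv_wc wc[NUM_WC];
 *	int n, i;
 *
 *	do {
 *		n = ibv_poll_cq(cq, NUM_WC, wc);
 *		for (i = 0; i < n; ++i) {
 *			if (wc[i].status != IBV_WC_SUCCESS)
 *				handle_error(&wc[i]);
 *			else
 *				handle_completion(&wc[i]);
 *		}
 *	} while (n > 0);
 */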
int mlx4_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
{
	struct mlx4_cq *cq = to_mcq(ibcq);
	struct mlx4_qp *qp = NULL;
	int npolled;
	int err = CQ_OK;

	pthread_spin_lock(&cq->lock);

	for (npolled = 0; npolled < ne; ++npolled) {
		err = mlx4_poll_one(cq, &qp, wc + npolled);
		if (err != CQ_OK)
			break;
	}

	if (npolled)
		update_cons_index(cq);

	pthread_spin_unlock(&cq->lock);

	return err == CQ_POLL_ERR ? err : npolled;
}

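/*
 * Request a completion event on this CQ.  The arm request encodes a 2-bit
 * sequence number (arm_sn, bumped by mlx4_cq_event() when an event is
 * delivered), the command (solicited-only or any completion) and the
 * current consumer index, first in the CQ's arm doorbell record and then,
 * after a write barrier, in the device's MMIO doorbell register.
 */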
int mlx4_arm_cq(struct ibv_cq *ibvcq, int solicited)
{
	struct mlx4_cq *cq = to_mcq(ibvcq);
	uint32_t doorbell[2];
	uint32_t sn;
	uint32_t ci;
	uint32_t cmd;

	sn  = cq->arm_sn & 3;
	ci  = cq->cons_index & 0xffffff;
	cmd = solicited ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT;

	*cq->arm_db = htonl(sn << 28 | cmd | ci);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = htonl(sn << 28 | cmd | cq->cqn);
	doorbell[1] = htonl(ci);

	mlx4_write64(doorbell, to_mctx(ibvcq->context), MLX4_CQ_DOORBELL);

	return 0;
}

void mlx4_cq_event(struct ibv_cq *cq)
{
	to_mcq(cq)->arm_sn++;
}

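/*
 * Remove all CQEs belonging to the given QPN (typically because the QP is
 * being moved to RESET or destroyed), returning any receive WQEs posted
 * to 'srq' back to the SRQ free list.  Remaining entries are copied on
 * top of the removed ones so the queue stays contiguous.  The caller must
 * hold cq->lock; mlx4_cq_clean() below is the locked wrapper.
 */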
void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
{
	struct mlx4_cqe *cqe, *dest;
	uint32_t prod_index;
	uint8_t owner_bit;
	int nfreed = 0;
	int is_xrc_srq = 0;

	if (srq && srq->ibv_srq.xrc_cq)
		is_xrc_srq = 1;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->cons_index + cq->ibv_cq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
		if (is_xrc_srq &&
		    (ntohl(cqe->g_mlpath_rqpn) & 0xffffff) == srq->srqn &&
		    !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)) {
			mlx4_free_srq_wqe(srq, ntohs(cqe->wqe_index));
			++nfreed;
		} else if ((ntohl(cqe->my_qpn) & 0xffffff) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_free_srq_wqe(srq, ntohs(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe);
			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		update_cons_index(cq);
	}
}

void mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
{
	pthread_spin_lock(&cq->lock);
	__mlx4_cq_clean(cq, qpn, srq);
	pthread_spin_unlock(&cq->lock);
}

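/*
 * Count the completions that hardware has written but software has not
 * yet consumed, by walking software-owned CQEs starting at the current
 * consumer index.  Used when resizing a CQ to check that the new size is
 * large enough.
 */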
int mlx4_get_outstanding_cqes(struct mlx4_cq *cq)
{
	uint32_t i;

	for (i = cq->cons_index; get_sw_cqe(cq, i); ++i)
		;

	return i - cq->cons_index;
}

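/*
 * After a CQ resize, copy the CQEs that are still outstanding from the
 * old buffer into 'buf' (the new buffer), stopping at the special RESIZE
 * CQE written by hardware and fixing up each entry's owner bit for the
 * new queue size.
 */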
void mlx4_cq_resize_copy_cqes(struct mlx4_cq *cq, void *buf, int old_cqe)
{
	struct mlx4_cqe *cqe;
	int i;

	i = cq->cons_index;
	cqe = get_cqe(cq, (i & old_cqe));

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->ibv_cq.cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		memcpy(buf + ((i + 1) & cq->ibv_cq.cqe) * MLX4_CQ_ENTRY_SIZE,
		       cqe, MLX4_CQ_ENTRY_SIZE);
		++i;
		cqe = get_cqe(cq, (i & old_cqe));
	}

	++cq->cons_index;
}

int mlx4_alloc_cq_buf(struct mlx4_device *dev, struct mlx4_buf *buf, int nent)
{
	if (mlx4_alloc_buf(buf, align(nent * MLX4_CQ_ENTRY_SIZE, dev->page_size),
			   dev->page_size))
		return -1;
	memset(buf->buf, 0, nent * MLX4_CQ_ENTRY_SIZE);

	return 0;
}