/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/cq.c 314776 2017-03-06 15:16:15Z np $");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>

#include "iw_cxgbe.h"
#include "user.h"

static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct wrqe *wr;

	wr_len = sizeof *res_wr + sizeof *res;
	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
	if (wr == NULL)
		return (-ENOMEM);
	res_wr = wrtod(wr);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			V_FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);

	t4_wrq_tx(sc, wr);

	c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

	kfree(cq->sw_queue);
	contigfree(cq->queue, cq->memsize, M_DEVBUF);
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return 0;
}

static int
create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
    struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct wrqe *wr;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	cq->queue = contigmalloc(cq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
	    PAGE_SIZE, 0);
	if (cq->queue == NULL) {
		ret = -ENOMEM;
		goto err3;
	}
	cq->dma_addr = vtophys(cq->queue);

	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
	if (wr == NULL) {
		ret = -ENOMEM;
		goto err4;
	}
	res_wr = wrtod(wr);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			V_FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	/*
	 * XXX: Always use the first queue id for IQANDSTINDEX.  Linux
	 * does the same.
	 */
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(sc->sge.ofld_rxq[0].iq.abs_id));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	t4_wrq_tx(sc, wr);

	CTR2(KTR_IW_CXGBE, "%s wait_event wr_wait %p", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
	    sc->sge_gts_reg);
	cq->rdev = rdev;

	if (user) {
		cq->ugts = (u64)((char*)rman_get_virtual(sc->udbs_res) +
		    (cq->cqid << rdev->cqshift));
		cq->ugts &= PAGE_MASK;
		CTR5(KTR_IW_CXGBE,
		    "%s: UGTS %p cqid %x cqshift %d page_mask %x", __func__,
		    cq->ugts, cq->cqid, rdev->cqshift, PAGE_MASK);
	}
	return 0;
err4:
	contigfree(cq->queue, cq->memsize, M_DEVBUF);
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}

static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
	    cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(FW_RI_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p rq.in_use %u skip count %u",
	    __func__, wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
	    cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
				 V_CQE_OPCODE(swcqe->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
	int in_use = wq->sq.in_use - count;

	BUG_ON(in_use < 0);
	while (in_use--) {
		swsqe->signaled = 0;
		insert_sq_cqe(wq, cq, swsqe);
		swsqe++;
		if (swsqe == (wq->sq.sw_sq + wq->sq.size))
			swsqe = wq->sq.sw_sq;
		flushed++;
	}
	return flushed;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void c4iw_flush_hw_cq(struct t4_cq *cq)
{
	struct t4_cqe *cqe = NULL, *swcqe;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s cq %p cqid 0x%x", __func__, cq, cq->cqid);
	ret = t4_next_hw_cqe(cq, &cqe);
	while (!ret) {
		CTR3(KTR_IW_CXGBE, "%s flushing hwcq cidx 0x%x swcq pidx 0x%x",
		    __func__, cq->cidx, cq->sw_pidx);
		swcqe = &cq->sw_queue[cq->sw_pidx];
		*swcqe = *cqe;
		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
		t4_swcq_produce(cq);
		t4_hwcq_consume(cq);
		ret = t4_next_hw_cqe(cq, &cqe);
	}
}

static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
				      wq->sq.oldest_read)) &&
		    (CQE_QPID(cqe) == wq->sq.qid))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
}

void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	CTR2(KTR_IW_CXGBE, "%s count zero %d", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
}

static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	u16 ptr = wq->sq.cidx;
	int count = wq->sq.in_use;
	int unsignaled = 0;

	swsqe = &wq->sq.sw_sq[ptr];
	while (count--)
		if (!swsqe->signaled) {
			if (++ptr == wq->sq.size)
				ptr = 0;
			swsqe = &wq->sq.sw_sq[ptr];
			unsignaled++;
		} else if (swsqe->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			CTR3(KTR_IW_CXGBE,
			    "%s moving cqe into swcq sq idx %u cq idx %u",
			    __func__, ptr, cq->sw_pidx);
			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->signaled = 0;
			wq->sq.in_use -= unsignaled;
			break;
		} else
			break;
}

static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
				 V_CQE_OPCODE(FW_RI_READ_REQ) |
				 V_CQE_TYPE(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

/*
 * Advance wq->sq.oldest_read to the next read WR in the SWSQ, or set it
 * to NULL if there is none.
 */
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0		    CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
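/*
 * A minimal sketch of the retry contract above, mirroring what
 * c4iw_poll_cq_one() and c4iw_poll_cq() below do with these return
 * values (hypothetical caller, for illustration only):
 *
 *	do {
 *		ret = poll_cq(wq, cq, &cqe, &flushed, &cookie, &credit);
 *	} while (ret == -EAGAIN);	(CQE skipped, poll the next one)
 *	if (ret == 0)
 *		report_completion(cookie);	(hypothetical consumer hook)
 */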
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	CTR6(KTR_IW_CXGBE,
	    "%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x", __func__,
	    CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe), CQE_GENBIT(hw_cqe),
	    CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe));
	CTR5(KTR_IW_CXGBE,
	    "%s opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x",
	    __func__, CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	    CQE_WRID_LOW(hw_cqe));

	/*
	 * Skip CQEs not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Special cqe for drain WR completions...
	 */
	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
		*cqe = *hw_cqe;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (!wq->sq.oldest_read) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = t4_wq_in_error(wq);
		t4_set_wq_in_error(wq);
		goto proc_cqe;
	}

	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */
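		/*
		 * Illustration of the arithmetic (a sketch, not from the
		 * spec): with only 4 MSN bits checked by HW, SENDs carrying
		 * MSN 0x11 and 0x21 look identical to the hardware (both
		 * end in 0x1), so the full expected MSN is tracked in
		 * wq->rq.msn and any mismatch below is completed with
		 * T4_ERR_MSN.
		 */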

		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely(CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
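	/*
	 * Example of case 1 (a sketch, not a trace): with WRs at sq
	 * indices 0 and 1 posted unsignaled and index 2 signaled, the
	 * only HW CQE arrives with CQE_WRID_SQ_IDX == 2 while sq.cidx
	 * is still 0.  The CQE is parked in sw_sq[2] below, and
	 * flush_completed_wrs() then moves it into the SW CQ, reaping
	 * the unsignaled WRs at indices 0 and 1 along the way.
	 */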
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		CTR2(KTR_IW_CXGBE,
		    "%s out of order completion going in sw_sq at idx %u",
		    __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
		CTR2(KTR_IW_CXGBE, "%s completing sq idx %u",
		     __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		t4_sq_consume(wq);
	} else {
		CTR2(KTR_IW_CXGBE, "%s completing rq idx %u",
		     __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		t4_rq_consume(wq);
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip sw cqe cidx %u",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip hw cqe cidx %u",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		CQ empty, nothing to return
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe cqe = {0, 0}, *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	CTR5(KTR_IW_CXGBE, "%s qpid 0x%x type %d opcode %d status 0x%x",
	    __func__, CQE_QPID(&cqe), CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
	    CQE_STATUS(&cqe));
	CTR5(KTR_IW_CXGBE, "%s len %u wrid hi 0x%x lo 0x%x cookie 0x%llx",
	    __func__, CQE_LEN(&cqe), CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
	    (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		case C4IW_DRAIN_OPCODE:
			wc->opcode = IB_WC_SEND;
			break;
		default:
			printf("Unexpected opcode %d "
			       "in the CQE received for QPID = 0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printf("Unexpected cqe_status 0x%x for QPID = 0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IB_WC_FATAL_ERR;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}

int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}

int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	CTR2(KTR_IW_CXGBE, "%s ib_cq %p", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
	kfree(chp);
	return 0;
}

struct ib_cq *
c4iw_create_cq(struct ib_device *ibdev, int entries, int vector,
    struct ib_ucontext *ib_context, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	CTR3(KTR_IW_CXGBE, "%s ib_dev %p entries %d", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = entries * 2;

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;
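
	/*
	 * Worked example of the sizing above (assuming a 32-byte
	 * struct t4_cqe): a request for 61 entries becomes 63 after
	 * the status-page and full-vs-empty adjustments, rounds up to
	 * 64, doubles to 128 hwentries, and needs 128 * 32 = 4096
	 * bytes, i.e. exactly one 4KB page.
	 */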

	/*
	 * memsize must be a multiple of the page size if it's a user CQ.
	 */
	if (ucontext) {
		memsize = roundup(memsize, PAGE_SIZE);
		hwentries = memsize / sizeof *chp->cq.queue;
		while (hwentries > T4_MAX_IQ_SIZE) {
			memsize -= PAGE_SIZE;
			hwentries = memsize / sizeof *chp->cq.queue;
		}
	}
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}

		memset(&uresp, 0, sizeof(uresp));
		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
					sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = vtophys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	CTR6(KTR_IW_CXGBE,
	    "%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx",
	    __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
	    (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}

int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}

int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	ret = t4_arm_cq(&chp->cq,
			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		ret = 0;
	return ret;
}
#endif