/*
 * Copyright (c) 2012 - 2019 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_mad.h"

/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();
	qp = rvt_lookup_qpn(rdi, &ibp->rvp, rvt_get_swqe_remote_qpn(swqe));
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

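	/*
	 * GSI QPs carry UD traffic, so treat IB_QPT_GSI as IB_QPT_UD
	 * when checking that the source and destination QP types match.
	 */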
	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = rvt_get_swqe_ah_attr(swqe);
	ppd = ppd_from_ibp(ibp);

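	/*
	 * For QPs other than QP0 and QP1, verify that the sender's and
	 * receiver's P_Keys match; on a mismatch, report the P_Key
	 * violation via qib_bad_pkey() and drop the packet.
	 */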
	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;

		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pkey(ibp, pkey1,
				     rdma_ah_get_sl(ah_attr),
				     sqp->ibqp.qp_num, qp->ibqp.qp_num,
				     cpu_to_be16(lid),
				     cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)rvt_get_swqe_remote_qkey(swqe) < 0 ?
			sqp->qkey : rvt_get_swqe_remote_qkey(swqe);
		if (unlikely(qkey != qp->qkey))
			goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

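	/*
	 * Take the destination QP's r_lock: qib_ud_rcv() may be
	 * delivering to the same QP from the receive interrupt
	 * handler concurrently.
	 */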
	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
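	/*
	 * RVT_R_REUSE_SGE means a previously fetched receive WQE was
	 * not consumed (its packet was dropped), so its SGE state is
	 * reused instead of fetching a new rwqe.
	 */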
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);

		qib_make_grh(ibp, &grh, grd, 0, 0);
		rvt_copy_sge(qp, &qp->r_sge, &grh,
			     sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
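	/*
	 * Walk the sender's SGE list and copy the payload into the
	 * receiver's posted buffer, advancing through each source
	 * segment and memory region mapping as it is consumed.
	 */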
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
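	/*
	 * Only generate a completion if the receive WQE is still
	 * marked valid; it may already have been completed elsewhere
	 * (e.g. flushed when the QP went into error).
	 */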
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		rvt_get_swqe_pkey_index(swqe) : 0;
	wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1));
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @flags: flags to modify and pass back to caller
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct rvt_swqe *wqe;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state; flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	/* see post_one_send() */
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = rvt_get_swqe_ah_attr(wqe);
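	/*
	 * DLIDs at or above the multicast base count as multicast,
	 * with one exception: the permissive LID (0xFFFF) is counted
	 * as unicast.
	 */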
	if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		if (rdma_ah_get_dlid(ah_attr) !=
				be16_to_cpu(IB_LID_PERMISSIVE))
			this_cpu_inc(ibp->pmastats->n_multicast_xmit);
		else
			this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	} else {
		this_cpu_inc(ibp->pmastats->n_unicast_xmit);
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(lid == ppd->lid)) {
			unsigned long tflags = *flags;
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&priv->s_dma_busy)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			*flags = tflags;
			rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}

	qp->s_cur = next_cur;
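	/*
	 * Pad the payload out to a 4-byte boundary: -len & 3 is the
	 * number of pad bytes, and nwords is the padded payload
	 * length in 32-bit words.
	 */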
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* Header size in 32-bit words: LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
					       rdma_ah_read_grh(ah_attr),
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &priv->s_hdr->u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs; the spec leaves that behavior unspecified.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &priv->s_hdr->u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= rdma_ah_get_sl(ah_attr) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(ah_attr)] << 12;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] =
			cpu_to_be16(rdma_ah_get_dlid(ah_attr));  /* DEST LID */
	priv->s_hdr->lrh[2] =
			cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		lid |= rdma_ah_get_path_bits(ah_attr) &
			((1 << ppd->lmc) - 1);
		priv->s_hdr->lrh[3] = cpu_to_be16(lid);
	} else
		priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
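	/*
	 * P_Key selection: SMI packets always use the default P_Key;
	 * GSI packets use the P_Key index carried in the WQE; all
	 * other UD QPs use the QP's own P_Key index.
	 */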
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     rvt_get_swqe_pkey_index(wqe) : qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = rdma_ah_get_dlid(ah_attr) >=
			be16_to_cpu(IB_MULTICAST_LID_BASE) &&
		rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
	ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] =
		cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
			    rvt_get_swqe_remote_qkey(wqe));
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return ret;
}

static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned i;

	pkey &= 0x7fff;	/* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here; this means the hardware failed to
	 * validate P_Keys.  Punt and return index 0.
	 */
	return 0;
}

/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct ib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
	}
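	/* DETH word 0 carries the Q_Key, word 1 the source QP number. */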
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;

	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.  The 4 extra bytes are
	 * the 32-bit ICRC at the end of the packet.
	 */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;

	tlen -= hdrsize + pad + 4;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;

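			/*
			 * The P_Key is the low 16 bits of BTH word 0;
			 * the assignment to u16 truncates away the
			 * opcode and flag bits above it.
			 */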
			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				qib_bad_pkey(ibp,
					     pkey1,
					     (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					     src_qp, qp->ibqp.qp_num,
					     hdr->lrh[3], hdr->lrh[1]);
				return;
			}
		}
		if (unlikely(qkey != qp->qkey))
			return;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		struct ib_smp *smp;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
			goto drop;
		smp = (struct ib_smp *) data;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;
	}

	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else
		goto drop;

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		     true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}