// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

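/* check the requested qp capabilities against the device limits */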
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
			 cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
			 cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
				 cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n",
				 cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n",
			 cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	switch (init->qp_type) {
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!init->recv_cq || !init->send_cq) {
		rxe_dbg_dev(rxe, "missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			rxe_dbg_dev(rxe, "invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (port->qp_gsi_index) {
			rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

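/* responder resources hold state for incoming RDMA read and atomic
 * requests; the array is sized by max_dest_rd_atomic when the qp is
 * modified (see rxe_qp_from_attr() below)
 */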
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct resp_res *res)
{
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(res);
		}
	}
}

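/* initialize per qp state that does not depend on the send or recv queues */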
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= IB_MTU_256;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->elem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	spin_lock_init(&qp->state_lock);

	spin_lock_init(&qp->sq.sq_lock);
	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->req_pkts);
	skb_queue_head_init(&qp->resp_pkts);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

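/* allocate the send queue; each wqe must be able to hold either the
 * largest allowed sg list or the max inline data, whichever is bigger
 */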
static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
		       struct ib_udata *udata,
		       struct rxe_create_qp_resp __user *uresp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	int wqe_size;
	int err;

	qp->sq.max_wr = init->cap.max_send_wr;
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
				      QUEUE_TYPE_FROM_CLIENT);
	if (!qp->sq.queue) {
		rxe_err_qp(qp, "Unable to allocate send queue\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* prepare info for caller to mmap send queue if user space qp */
	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);
	if (err) {
		rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
		goto err_free;
	}

	/* return actual capabilities to caller which may be larger
	 * than requested
	 */
	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	return 0;

err_free:
	vfree(qp->sq.queue->buf);
	kfree(qp->sq.queue);
	qp->sq.queue = NULL;
err_out:
	return err;
}

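/* initialize the requester side of the qp: the kernel UDP socket,
 * the source port, the send queue and the requester/completer tasks
 */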
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;

	/* if we don't finish qp create make sure queue is valid */
	skb_queue_head_init(&qp->req_pkts);

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);

	err = rxe_init_sq(qp, init, udata, uresp);
	if (err)
		return err;

	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	rxe_init_task(&qp->req.task, qp, rxe_requester);
	rxe_init_task(&qp->comp.task, qp, rxe_completer);

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

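/* allocate the recv queue; only used when the qp does not have an srq */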
static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
		       struct ib_udata *udata,
		       struct rxe_create_qp_resp __user *uresp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	int wqe_size;
	int err;

	qp->rq.max_wr = init->cap.max_recv_wr;
	qp->rq.max_sge = init->cap.max_recv_sge;
	wqe_size = sizeof(struct rxe_recv_wqe) +
				qp->rq.max_sge * sizeof(struct ib_sge);

	qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
				      QUEUE_TYPE_FROM_CLIENT);
	if (!qp->rq.queue) {
		rxe_err_qp(qp, "Unable to allocate recv queue\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* prepare info for caller to mmap recv queue if user space qp */
	err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
			   qp->rq.queue->buf, qp->rq.queue->buf_size,
			   &qp->rq.queue->ip);
	if (err) {
		rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
		goto err_free;
	}

	/* return actual capabilities to caller which may be larger
	 * than requested
	 */
	init->cap.max_recv_wr = qp->rq.max_wr;

	return 0;

err_free:
	vfree(qp->rq.queue->buf);
	kfree(qp->rq.queue);
	qp->rq.queue = NULL;
err_out:
	return err;
}

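/* initialize the responder side of the qp: the recv queue (unless an
 * srq is used) and the responder task
 */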
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;

	/* if we don't finish qp create make sure queue is valid */
	skb_queue_head_init(&qp->resp_pkts);

	if (!qp->srq) {
		err = rxe_init_rq(qp, init, udata, uresp);
		if (err)
			return err;
	}

	rxe_init_task(&qp->resp.task, qp, rxe_responder);

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	unsigned long flags;

	rxe_get(pd);
	rxe_get(rcq);
	rxe_get(scq);
	if (srq)
		rxe_get(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	atomic_inc(&rcq->num_wq);
	atomic_inc(&scq->num_wq);

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;
	spin_unlock_irqrestore(&qp->state_lock, flags);

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
	qp->sq.queue = NULL;
err1:
	atomic_dec(&rcq->num_wq);
	atomic_dec(&scq->num_wq);

	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_put(srq);
	rxe_put(scq);
	rxe_put(rcq);
	rxe_put(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

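/* check attributes passed to the modify qp verb against device limits */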
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_ACCESS_FLAGS) {
		if (!(qp_type(qp) == IB_QPT_RC || qp_type(qp) == IB_QPT_UC))
			goto err1;
		if (attr->qp_access_flags & ~RXE_ACCESS_SUPPORTED_QP)
			goto err1;
	}

	if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(qp, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			rxe_dbg_qp(qp, "invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			rxe_dbg_qp(qp, "invalid alt timeout %d > 31\n",
				 attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			rxe_dbg_qp(qp, "invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			rxe_dbg_qp(qp, "invalid max_rd_atomic %d > %d\n",
				 attr->max_rd_atomic,
				 rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			rxe_dbg_qp(qp, "invalid timeout %d > 31\n",
					attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);
	rxe_disable_task(&qp->comp.task);
	rxe_disable_task(&qp->req.task);

	/* drain work and packet queues */
	rxe_requester(qp);
	rxe_completer(qp);
	rxe_responder(qp);

	if (qp->rq.queue)
		rxe_queue_reset(qp->rq.queue);
	if (qp->sq.queue)
		rxe_queue_reset(qp->sq.queue);

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.wait_for_rnr_timer = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);
	rxe_enable_task(&qp->comp.task);
	rxe_enable_task(&qp->req.task);
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_sched_task(&qp->resp.task);
	rxe_sched_task(&qp->comp.task);
	rxe_sched_task(&qp->req.task);
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

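/* move the qp to the send queue drained state */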
static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
		       int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	qp->attr.sq_draining = 1;
	rxe_sched_task(&qp->comp.task);
	rxe_sched_task(&qp->req.task);
	spin_unlock_irqrestore(&qp->state_lock, flags);
}

/* caller should hold qp->state_lock */
static int __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr,
			    int mask)
{
	enum ib_qp_state cur_state;
	enum ib_qp_state new_state;

	cur_state = (mask & IB_QP_CUR_STATE) ?
				attr->cur_qp_state : qp->attr.qp_state;
	new_state = (mask & IB_QP_STATE) ?
				attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask))
		return -EINVAL;

	if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
		if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
			return -EINVAL;
	}

	return 0;
}

static const char *const qps2str[] = {
	[IB_QPS_RESET]	= "RESET",
	[IB_QPS_INIT]	= "INIT",
	[IB_QPS_RTR]	= "RTR",
	[IB_QPS_RTS]	= "RTS",
	[IB_QPS_SQD]	= "SQD",
	[IB_QPS_SQE]	= "SQE",
	[IB_QPS_ERR]	= "ERR",
};

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_STATE) {
		unsigned long flags;

		spin_lock_irqsave(&qp->state_lock, flags);
		err = __qp_chk_state(qp, attr, mask);
		if (!err) {
			qp->attr.qp_state = attr->qp_state;
			rxe_dbg_qp(qp, "state -> %s\n",
					qps2str[attr->qp_state]);
		}
		spin_unlock_irqrestore(&qp->state_lock, flags);

		if (err)
			return err;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			rxe_qp_reset(qp);
			break;
		case IB_QPS_SQD:
			rxe_qp_sqd(qp, attr, mask);
			break;
		case IB_QPS_ERR:
			rxe_qp_error(qp);
			break;
		default:
			break;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
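			/* e.g. attr->timeout = 14 gives 4096 ns << 14, about 67 ms */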
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		rxe_dbg_qp(qp, "set rnr retry count = %d\n", attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		rxe_dbg_qp(qp, "set resp psn = 0x%x\n", qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		rxe_dbg_qp(qp, "set min rnr timer = 0x%x\n",
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		rxe_dbg_qp(qp, "set req psn = 0x%x\n", qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	unsigned long flags;

	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	/* Applications that get this state typically spin on it.
	 * Yield the processor
	 */
	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp->attr.sq_draining) {
		spin_unlock_irqrestore(&qp->state_lock, flags);
		cond_resched();
	} else {
		spin_unlock_irqrestore(&qp->state_lock, flags);
	}

	return 0;
}

int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
	/* See IBA o10-2.2.3
	 * An attempt to destroy a QP while attached to a mcast group
	 * will fail immediately.
	 */
	if (atomic_read(&qp->mcg_num)) {
		rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n");
		return -EBUSY;
	}

	return 0;
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
	unsigned long flags;

	spin_lock_irqsave(&qp->state_lock, flags);
	qp->valid = 0;
	spin_unlock_irqrestore(&qp->state_lock, flags);
	qp->qp_timeout_jiffies = 0;

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	if (qp->resp.task.func)
		rxe_cleanup_task(&qp->resp.task);

	if (qp->req.task.func)
		rxe_cleanup_task(&qp->req.task);

	if (qp->comp.task.func)
		rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	rxe_requester(qp);
	rxe_completer(qp);
	rxe_responder(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_put(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq) {
		atomic_dec(&qp->scq->num_wq);
		rxe_put(qp->scq);
	}

	if (qp->rcq) {
		atomic_dec(&qp->rcq->num_wq);
		rxe_put(qp->rcq);
	}

	if (qp->pd)
		rxe_put(qp->pd);

	if (qp->resp.mr)
		rxe_put(qp->resp.mr);

	free_rd_atomic_resources(qp);

	if (qp->sk) {
		if (qp_type(qp) == IB_QPT_RC)
			sk_dst_reset(qp->sk->sk);

		kernel_sock_shutdown(qp->sk, SHUT_RDWR);
		sock_release(qp->sk);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}