1/*
2 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
4 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
5 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 *     Redistribution and use in source and binary forms, with or
14 *     without modification, are permitted provided that the following
15 *     conditions are met:
16 *
17 *      - Redistributions of source code must retain the above
18 *        copyright notice, this list of conditions and the following
19 *        disclaimer.
20 *
21 *      - Redistributions in binary form must reproduce the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer in the documentation and/or other materials
24 *        provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/file.h>
37#include <linux/fs.h>
38#include <linux/slab.h>
39
40#include <asm/uaccess.h>
41
42#include "uverbs.h"
43
44static struct lock_class_key pd_lock_key;
45static struct lock_class_key mr_lock_key;
46static struct lock_class_key cq_lock_key;
47static struct lock_class_key qp_lock_key;
48static struct lock_class_key ah_lock_key;
49static struct lock_class_key srq_lock_key;
50
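/*
 * INIT_UDATA packages the user-space input/output buffers that follow the
 * fixed command/response structures, so that device drivers can consume
 * their private command extensions through struct ib_udata.
 */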
51#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
52	do {								\
53		(udata)->inbuf  = (void __user *) (ibuf);		\
54		(udata)->outbuf = (void __user *) (obuf);		\
55		(udata)->inlen  = (ilen);				\
56		(udata)->outlen = (olen);				\
57	} while (0)
58
59/*
60 * The ib_uobject locking scheme is as follows:
61 *
62 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
63 *   needs to be held during all idr operations.  When an object is
64 *   looked up, a reference must be taken on the object's kref before
65 *   dropping this lock.
66 *
67 * - Each object also has an rwsem.  This rwsem must be held for
68 *   reading while an operation that uses the object is performed.
69 *   For example, while registering an MR, the associated PD's
70 *   uobject.mutex must be held for reading.  The rwsem must be held
71 *   for writing while initializing or destroying an object.
72 *
73 * - In addition, each object has a "live" flag.  If this flag is not
74 *   set, then lookups of the object will fail even if it is found in
75 *   the idr.  This handles a reader that blocks and does not acquire
76 *   the rwsem until after the object is destroyed.  The destroy
77 *   operation will set the live flag to 0 and then drop the rwsem;
78 *   this will allow the reader to acquire the rwsem, see that the
79 *   live flag is 0, and then drop the rwsem and its reference to
80 *   the object.  The underlying storage will not be freed until the last
81 *   reference to the object is dropped.
82 */
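/*
 * In sketch form, the read-side pattern used throughout the handlers below
 * is:
 *
 *	obj = idr_read_xxx(handle, file->ucontext);	(kref get + down_read)
 *	if (!obj)
 *		return -EINVAL;
 *	... use obj ...
 *	put_xxx_read(obj);				(up_read + kref_put)
 *
 * while destroy paths take the rwsem for writing, clear "live" on success,
 * and only then drop the idr entry and the final reference.
 */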
83
84static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
85		      struct ib_ucontext *context, struct lock_class_key *key)
86{
87	uobj->user_handle = user_handle;
88	uobj->context     = context;
89	kref_init(&uobj->ref);
90	init_rwsem(&uobj->mutex);
91	lockdep_set_class(&uobj->mutex, key);
92	uobj->live        = 0;
93}
94
95static void release_uobj(struct kref *kref)
96{
97	kfree(container_of(kref, struct ib_uobject, ref));
98}
99
100static void put_uobj(struct ib_uobject *uobj)
101{
102	kref_put(&uobj->ref, release_uobj);
103}
104
105static void put_uobj_read(struct ib_uobject *uobj)
106{
107	up_read(&uobj->mutex);
108	put_uobj(uobj);
109}
110
111static void put_uobj_write(struct ib_uobject *uobj)
112{
113	up_write(&uobj->mutex);
114	put_uobj(uobj);
115}
116
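/*
 * Allocate an idr entry for a new uobject.  idr_pre_get() preallocates
 * memory outside the spinlock; idr_get_new() can still return -EAGAIN if
 * that preallocation was consumed by a racing thread, so retry.
 */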
117static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
118{
119	int ret;
120
121retry:
122	if (!idr_pre_get(idr, GFP_KERNEL))
123		return -ENOMEM;
124
125	spin_lock(&ib_uverbs_idr_lock);
126	ret = idr_get_new(idr, uobj, &uobj->id);
127	spin_unlock(&ib_uverbs_idr_lock);
128
129	if (ret == -EAGAIN)
130		goto retry;
131
132	return ret;
133}
134
135void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
136{
137	spin_lock(&ib_uverbs_idr_lock);
138	idr_remove(idr, uobj->id);
139	spin_unlock(&ib_uverbs_idr_lock);
140}
141
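/*
 * Look up an object by handle and take a reference on it while holding
 * ib_uverbs_idr_lock.  The ucontext check prevents a process from using a
 * handle that belongs to a different uverbs context.
 */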
142static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
143					 struct ib_ucontext *context)
144{
145	struct ib_uobject *uobj;
146
147	spin_lock(&ib_uverbs_idr_lock);
148	uobj = idr_find(idr, id);
149	if (uobj) {
150		if (uobj->context == context)
151			kref_get(&uobj->ref);
152		else
153			uobj = NULL;
154	}
155	spin_unlock(&ib_uverbs_idr_lock);
156
157	return uobj;
158}
159
160static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
161					struct ib_ucontext *context, int nested)
162{
163	struct ib_uobject *uobj;
164
165	uobj = __idr_get_uobj(idr, id, context);
166	if (!uobj)
167		return NULL;
168
169	if (nested)
170		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
171	else
172		down_read(&uobj->mutex);
173	if (!uobj->live) {
174		put_uobj_read(uobj);
175		return NULL;
176	}
177
178	return uobj;
179}
180
181static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
182					 struct ib_ucontext *context)
183{
184	struct ib_uobject *uobj;
185
186	uobj = __idr_get_uobj(idr, id, context);
187	if (!uobj)
188		return NULL;
189
190	down_write(&uobj->mutex);
191	if (!uobj->live) {
192		put_uobj_write(uobj);
193		return NULL;
194	}
195
196	return uobj;
197}
198
199static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
200			  int nested)
201{
202	struct ib_uobject *uobj;
203
204	uobj = idr_read_uobj(idr, id, context, nested);
205	return uobj ? uobj->object : NULL;
206}
207
208static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
209{
210	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
211}
212
213static void put_pd_read(struct ib_pd *pd)
214{
215	put_uobj_read(pd->uobject);
216}
217
218static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
219{
220	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
221}
222
223static void put_cq_read(struct ib_cq *cq)
224{
225	put_uobj_read(cq->uobject);
226}
227
228static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
229{
230	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
231}
232
233static void put_ah_read(struct ib_ah *ah)
234{
235	put_uobj_read(ah->uobject);
236}
237
238static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
239{
240	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
241}
242
243static void put_qp_read(struct ib_qp *qp)
244{
245	put_uobj_read(qp->uobject);
246}
247
248static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
249{
250	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
251}
252
253static void put_srq_read(struct ib_srq *srq)
254{
255	put_uobj_read(srq->uobject);
256}
257
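/*
 * GET_CONTEXT creates the per-process ucontext: it asks the device for a
 * ucontext, sets up the asynchronous event file and its fd, and installs
 * the fd only after the response has been copied back, so a half-set-up
 * fd is never visible to user space on a failure path.
 */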
258ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
259			      const char __user *buf,
260			      int in_len, int out_len)
261{
262	struct ib_uverbs_get_context      cmd;
263	struct ib_uverbs_get_context_resp resp;
264	struct ib_udata                   udata;
265	struct ib_device                 *ibdev = file->device->ib_dev;
266	struct ib_ucontext		 *ucontext;
267	struct file			 *filp;
268	int ret;
269
270	if (out_len < sizeof resp)
271		return -ENOSPC;
272
273	if (copy_from_user(&cmd, buf, sizeof cmd))
274		return -EFAULT;
275
276	mutex_lock(&file->mutex);
277
278	if (file->ucontext) {
279		ret = -EINVAL;
280		goto err;
281	}
282
283	INIT_UDATA(&udata, buf + sizeof cmd,
284		   (unsigned long) cmd.response + sizeof resp,
285		   in_len - sizeof cmd, out_len - sizeof resp);
286
287	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
288	if (IS_ERR(ucontext)) {
289		ret = PTR_ERR(ucontext);
290		goto err;
291	}
292
293	ucontext->device = ibdev;
294	INIT_LIST_HEAD(&ucontext->pd_list);
295	INIT_LIST_HEAD(&ucontext->mr_list);
296	INIT_LIST_HEAD(&ucontext->mw_list);
297	INIT_LIST_HEAD(&ucontext->cq_list);
298	INIT_LIST_HEAD(&ucontext->qp_list);
299	INIT_LIST_HEAD(&ucontext->srq_list);
300	INIT_LIST_HEAD(&ucontext->ah_list);
301	ucontext->closing = 0;
302
303	resp.num_comp_vectors = file->device->num_comp_vectors;
304
305	ret = get_unused_fd();
306	if (ret < 0)
307		goto err_free;
308	resp.async_fd = ret;
309
310	filp = ib_uverbs_alloc_event_file(file, 1);
311	if (IS_ERR(filp)) {
312		ret = PTR_ERR(filp);
313		goto err_fd;
314	}
315
316	if (copy_to_user((void __user *) (unsigned long) cmd.response,
317			 &resp, sizeof resp)) {
318		ret = -EFAULT;
319		goto err_file;
320	}
321
322	file->async_file = filp->private_data;
323
324	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
325			      ib_uverbs_event_handler);
326	ret = ib_register_event_handler(&file->event_handler);
327	if (ret)
328		goto err_file;
329
330	kref_get(&file->async_file->ref);
331	kref_get(&file->ref);
332	file->ucontext = ucontext;
333
334	fd_install(resp.async_fd, filp);
335
336	mutex_unlock(&file->mutex);
337
338	return in_len;
339
340err_file:
341	fput(filp);
342
343err_fd:
344	put_unused_fd(resp.async_fd);
345
346err_free:
347	ibdev->dealloc_ucontext(ucontext);
348
349err:
350	mutex_unlock(&file->mutex);
351	return ret;
352}
353
354ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
355			       const char __user *buf,
356			       int in_len, int out_len)
357{
358	struct ib_uverbs_query_device      cmd;
359	struct ib_uverbs_query_device_resp resp;
360	struct ib_device_attr              attr;
361	int                                ret;
362
363	if (out_len < sizeof resp)
364		return -ENOSPC;
365
366	if (copy_from_user(&cmd, buf, sizeof cmd))
367		return -EFAULT;
368
369	ret = ib_query_device(file->device->ib_dev, &attr);
370	if (ret)
371		return ret;
372
373	memset(&resp, 0, sizeof resp);
374
375	resp.fw_ver 		       = attr.fw_ver;
376	resp.node_guid 		       = file->device->ib_dev->node_guid;
377	resp.sys_image_guid 	       = attr.sys_image_guid;
378	resp.max_mr_size 	       = attr.max_mr_size;
379	resp.page_size_cap 	       = attr.page_size_cap;
380	resp.vendor_id 		       = attr.vendor_id;
381	resp.vendor_part_id 	       = attr.vendor_part_id;
382	resp.hw_ver 		       = attr.hw_ver;
383	resp.max_qp 		       = attr.max_qp;
384	resp.max_qp_wr 		       = attr.max_qp_wr;
385	resp.device_cap_flags 	       = attr.device_cap_flags;
386	resp.max_sge 		       = attr.max_sge;
387	resp.max_sge_rd 	       = attr.max_sge_rd;
388	resp.max_cq 		       = attr.max_cq;
389	resp.max_cqe 		       = attr.max_cqe;
390	resp.max_mr 		       = attr.max_mr;
391	resp.max_pd 		       = attr.max_pd;
392	resp.max_qp_rd_atom 	       = attr.max_qp_rd_atom;
393	resp.max_ee_rd_atom 	       = attr.max_ee_rd_atom;
394	resp.max_res_rd_atom 	       = attr.max_res_rd_atom;
395	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
396	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
397	resp.atomic_cap 	       = attr.atomic_cap;
398	resp.max_ee 		       = attr.max_ee;
399	resp.max_rdd 		       = attr.max_rdd;
400	resp.max_mw 		       = attr.max_mw;
401	resp.max_raw_ipv6_qp 	       = attr.max_raw_ipv6_qp;
402	resp.max_raw_ethy_qp 	       = attr.max_raw_ethy_qp;
403	resp.max_mcast_grp 	       = attr.max_mcast_grp;
404	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
405	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
406	resp.max_ah 		       = attr.max_ah;
407	resp.max_fmr 		       = attr.max_fmr;
408	resp.max_map_per_fmr 	       = attr.max_map_per_fmr;
409	resp.max_srq 		       = attr.max_srq;
410	resp.max_srq_wr 	       = attr.max_srq_wr;
411	resp.max_srq_sge 	       = attr.max_srq_sge;
412	resp.max_pkeys 		       = attr.max_pkeys;
413	resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
414	resp.phys_port_cnt	       = file->device->ib_dev->phys_port_cnt;
415
416	if (copy_to_user((void __user *) (unsigned long) cmd.response,
417			 &resp, sizeof resp))
418		return -EFAULT;
419
420	return in_len;
421}
422
423ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
424			     const char __user *buf,
425			     int in_len, int out_len)
426{
427	struct ib_uverbs_query_port      cmd;
428	struct ib_uverbs_query_port_resp resp;
429	struct ib_port_attr              attr;
430	int                              ret;
431
432	if (out_len < sizeof resp)
433		return -ENOSPC;
434
435	if (copy_from_user(&cmd, buf, sizeof cmd))
436		return -EFAULT;
437
438	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
439	if (ret)
440		return ret;
441
442	memset(&resp, 0, sizeof resp);
443
444	resp.state 	     = attr.state;
445	resp.max_mtu 	     = attr.max_mtu;
446	resp.active_mtu      = attr.active_mtu;
447	resp.gid_tbl_len     = attr.gid_tbl_len;
448	resp.port_cap_flags  = attr.port_cap_flags;
449	resp.max_msg_sz      = attr.max_msg_sz;
450	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
451	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
452	resp.pkey_tbl_len    = attr.pkey_tbl_len;
453	resp.lid 	     = attr.lid;
454	resp.sm_lid 	     = attr.sm_lid;
455	resp.lmc 	     = attr.lmc;
456	resp.max_vl_num      = attr.max_vl_num;
457	resp.sm_sl 	     = attr.sm_sl;
458	resp.subnet_timeout  = attr.subnet_timeout;
459	resp.init_type_reply = attr.init_type_reply;
460	resp.active_width    = attr.active_width;
461	resp.active_speed    = attr.active_speed;
462	resp.phys_state      = attr.phys_state;
463
464	if (copy_to_user((void __user *) (unsigned long) cmd.response,
465			 &resp, sizeof resp))
466		return -EFAULT;
467
468	return in_len;
469}
470
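/*
 * The create-style handlers below (alloc_pd, reg_mr, create_cq, ...) share
 * one pattern: allocate the uobject, hold its rwsem for writing while the
 * kernel object is created and added to the idr, copy the response to user
 * space, link the uobject into the per-context list, and only then set
 * live = 1 and release the rwsem.  Error paths unwind these steps in
 * reverse.
 */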
471ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
472			   const char __user *buf,
473			   int in_len, int out_len)
474{
475	struct ib_uverbs_alloc_pd      cmd;
476	struct ib_uverbs_alloc_pd_resp resp;
477	struct ib_udata                udata;
478	struct ib_uobject             *uobj;
479	struct ib_pd                  *pd;
480	int                            ret;
481
482	if (out_len < sizeof resp)
483		return -ENOSPC;
484
485	if (copy_from_user(&cmd, buf, sizeof cmd))
486		return -EFAULT;
487
488	INIT_UDATA(&udata, buf + sizeof cmd,
489		   (unsigned long) cmd.response + sizeof resp,
490		   in_len - sizeof cmd, out_len - sizeof resp);
491
492	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
493	if (!uobj)
494		return -ENOMEM;
495
496	init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
497	down_write(&uobj->mutex);
498
499	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
500					    file->ucontext, &udata);
501	if (IS_ERR(pd)) {
502		ret = PTR_ERR(pd);
503		goto err;
504	}
505
506	pd->device  = file->device->ib_dev;
507	pd->uobject = uobj;
508	atomic_set(&pd->usecnt, 0);
509
510	uobj->object = pd;
511	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
512	if (ret)
513		goto err_idr;
514
515	memset(&resp, 0, sizeof resp);
516	resp.pd_handle = uobj->id;
517
518	if (copy_to_user((void __user *) (unsigned long) cmd.response,
519			 &resp, sizeof resp)) {
520		ret = -EFAULT;
521		goto err_copy;
522	}
523
524	mutex_lock(&file->mutex);
525	list_add_tail(&uobj->list, &file->ucontext->pd_list);
526	mutex_unlock(&file->mutex);
527
528	uobj->live = 1;
529
530	up_write(&uobj->mutex);
531
532	return in_len;
533
534err_copy:
535	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
536
537err_idr:
538	ib_dealloc_pd(pd);
539
540err:
541	put_uobj_write(uobj);
542	return ret;
543}
544
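/*
 * Destroy-style handlers take the uobject's rwsem for writing, call the
 * destroy verb, and clear "live" only if it succeeded, so concurrent
 * lookups start failing before the idr entry and list entry are removed
 * and the last reference is dropped.
 */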
545ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
546			     const char __user *buf,
547			     int in_len, int out_len)
548{
549	struct ib_uverbs_dealloc_pd cmd;
550	struct ib_uobject          *uobj;
551	int                         ret;
552
553	if (copy_from_user(&cmd, buf, sizeof cmd))
554		return -EFAULT;
555
556	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
557	if (!uobj)
558		return -EINVAL;
559
560	ret = ib_dealloc_pd(uobj->object);
561	if (!ret)
562		uobj->live = 0;
563
564	put_uobj_write(uobj);
565
566	if (ret)
567		return ret;
568
569	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
570
571	mutex_lock(&file->mutex);
572	list_del(&uobj->list);
573	mutex_unlock(&file->mutex);
574
575	put_uobj(uobj);
576
577	return in_len;
578}
579
580ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
581			 const char __user *buf, int in_len,
582			 int out_len)
583{
584	struct ib_uverbs_reg_mr      cmd;
585	struct ib_uverbs_reg_mr_resp resp;
586	struct ib_udata              udata;
587	struct ib_uobject           *uobj;
588	struct ib_pd                *pd;
589	struct ib_mr                *mr;
590	int                          ret;
591
592	if (out_len < sizeof resp)
593		return -ENOSPC;
594
595	if (copy_from_user(&cmd, buf, sizeof cmd))
596		return -EFAULT;
597
598	INIT_UDATA(&udata, buf + sizeof cmd,
599		   (unsigned long) cmd.response + sizeof resp,
600		   in_len - sizeof cmd, out_len - sizeof resp);
601
602	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
603		return -EINVAL;
604
605	/*
606	 * Local write permission is required if remote write or
607	 * remote atomic permission is also requested.
608	 */
609	if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
610	    !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
611		return -EINVAL;
612
613	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
614	if (!uobj)
615		return -ENOMEM;
616
617	init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
618	down_write(&uobj->mutex);
619
620	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
621	if (!pd) {
622		ret = -EINVAL;
623		goto err_free;
624	}
625
626	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
627				     cmd.access_flags, &udata);
628	if (IS_ERR(mr)) {
629		ret = PTR_ERR(mr);
630		goto err_put;
631	}
632
633	mr->device  = pd->device;
634	mr->pd      = pd;
635	mr->uobject = uobj;
636	atomic_inc(&pd->usecnt);
637	atomic_set(&mr->usecnt, 0);
638
639	uobj->object = mr;
640	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
641	if (ret)
642		goto err_unreg;
643
644	memset(&resp, 0, sizeof resp);
645	resp.lkey      = mr->lkey;
646	resp.rkey      = mr->rkey;
647	resp.mr_handle = uobj->id;
648
649	if (copy_to_user((void __user *) (unsigned long) cmd.response,
650			 &resp, sizeof resp)) {
651		ret = -EFAULT;
652		goto err_copy;
653	}
654
655	put_pd_read(pd);
656
657	mutex_lock(&file->mutex);
658	list_add_tail(&uobj->list, &file->ucontext->mr_list);
659	mutex_unlock(&file->mutex);
660
661	uobj->live = 1;
662
663	up_write(&uobj->mutex);
664
665	return in_len;
666
667err_copy:
668	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
669
670err_unreg:
671	ib_dereg_mr(mr);
672
673err_put:
674	put_pd_read(pd);
675
676err_free:
677	put_uobj_write(uobj);
678	return ret;
679}
680
681ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
682			   const char __user *buf, int in_len,
683			   int out_len)
684{
685	struct ib_uverbs_dereg_mr cmd;
686	struct ib_mr             *mr;
687	struct ib_uobject	 *uobj;
688	int                       ret = -EINVAL;
689
690	if (copy_from_user(&cmd, buf, sizeof cmd))
691		return -EFAULT;
692
693	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
694	if (!uobj)
695		return -EINVAL;
696
697	mr = uobj->object;
698
699	ret = ib_dereg_mr(mr);
700	if (!ret)
701		uobj->live = 0;
702
703	put_uobj_write(uobj);
704
705	if (ret)
706		return ret;
707
708	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
709
710	mutex_lock(&file->mutex);
711	list_del(&uobj->list);
712	mutex_unlock(&file->mutex);
713
714	put_uobj(uobj);
715
716	return in_len;
717}
718
719ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
720				      const char __user *buf, int in_len,
721				      int out_len)
722{
723	struct ib_uverbs_create_comp_channel	   cmd;
724	struct ib_uverbs_create_comp_channel_resp  resp;
725	struct file				  *filp;
726	int ret;
727
728	if (out_len < sizeof resp)
729		return -ENOSPC;
730
731	if (copy_from_user(&cmd, buf, sizeof cmd))
732		return -EFAULT;
733
734	ret = get_unused_fd();
735	if (ret < 0)
736		return ret;
737	resp.fd = ret;
738
739	filp = ib_uverbs_alloc_event_file(file, 0);
740	if (IS_ERR(filp)) {
741		put_unused_fd(resp.fd);
742		return PTR_ERR(filp);
743	}
744
745	if (copy_to_user((void __user *) (unsigned long) cmd.response,
746			 &resp, sizeof resp)) {
747		put_unused_fd(resp.fd);
748		fput(filp);
749		return -EFAULT;
750	}
751
752	fd_install(resp.fd, filp);
753	return in_len;
754}
755
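/*
 * A CQ may optionally be bound to a completion channel: when
 * cmd.comp_channel is non-negative, completion events for this CQ are
 * queued on that channel's event file (stored in cq->cq_context below).
 */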
756ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
757			    const char __user *buf, int in_len,
758			    int out_len)
759{
760	struct ib_uverbs_create_cq      cmd;
761	struct ib_uverbs_create_cq_resp resp;
762	struct ib_udata                 udata;
763	struct ib_ucq_object           *obj;
764	struct ib_uverbs_event_file    *ev_file = NULL;
765	struct ib_cq                   *cq;
766	int                             ret;
767
768	if (out_len < sizeof resp)
769		return -ENOSPC;
770
771	if (copy_from_user(&cmd, buf, sizeof cmd))
772		return -EFAULT;
773
774	INIT_UDATA(&udata, buf + sizeof cmd,
775		   (unsigned long) cmd.response + sizeof resp,
776		   in_len - sizeof cmd, out_len - sizeof resp);
777
778	if (cmd.comp_vector >= file->device->num_comp_vectors)
779		return -EINVAL;
780
781	obj = kmalloc(sizeof *obj, GFP_KERNEL);
782	if (!obj)
783		return -ENOMEM;
784
785	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
786	down_write(&obj->uobject.mutex);
787
788	if (cmd.comp_channel >= 0) {
789		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
790		if (!ev_file) {
791			ret = -EINVAL;
792			goto err;
793		}
794	}
795
796	obj->uverbs_file	   = file;
797	obj->comp_events_reported  = 0;
798	obj->async_events_reported = 0;
799	INIT_LIST_HEAD(&obj->comp_list);
800	INIT_LIST_HEAD(&obj->async_list);
801
802	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
803					     cmd.comp_vector,
804					     file->ucontext, &udata);
805	if (IS_ERR(cq)) {
806		ret = PTR_ERR(cq);
807		goto err_file;
808	}
809
810	cq->device        = file->device->ib_dev;
811	cq->uobject       = &obj->uobject;
812	cq->comp_handler  = ib_uverbs_comp_handler;
813	cq->event_handler = ib_uverbs_cq_event_handler;
814	cq->cq_context    = ev_file;
815	atomic_set(&cq->usecnt, 0);
816
817	obj->uobject.object = cq;
818	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
819	if (ret)
820		goto err_free;
821
822	memset(&resp, 0, sizeof resp);
823	resp.cq_handle = obj->uobject.id;
824	resp.cqe       = cq->cqe;
825
826	if (copy_to_user((void __user *) (unsigned long) cmd.response,
827			 &resp, sizeof resp)) {
828		ret = -EFAULT;
829		goto err_copy;
830	}
831
832	mutex_lock(&file->mutex);
833	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
834	mutex_unlock(&file->mutex);
835
836	obj->uobject.live = 1;
837
838	up_write(&obj->uobject.mutex);
839
840	return in_len;
841
842err_copy:
843	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
844
845err_free:
846	ib_destroy_cq(cq);
847
848err_file:
849	if (ev_file)
850		ib_uverbs_release_ucq(file, ev_file, obj);
851
852err:
853	put_uobj_write(&obj->uobject);
854	return ret;
855}
856
857ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
858			    const char __user *buf, int in_len,
859			    int out_len)
860{
861	struct ib_uverbs_resize_cq	cmd;
862	struct ib_uverbs_resize_cq_resp	resp;
863	struct ib_udata                 udata;
864	struct ib_cq			*cq;
865	int				ret = -EINVAL;
866
867	if (copy_from_user(&cmd, buf, sizeof cmd))
868		return -EFAULT;
869
870	INIT_UDATA(&udata, buf + sizeof cmd,
871		   (unsigned long) cmd.response + sizeof resp,
872		   in_len - sizeof cmd, out_len - sizeof resp);
873
874	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
875	if (!cq)
876		return -EINVAL;
877
878	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
879	if (ret)
880		goto out;
881
882	resp.cqe = cq->cqe;
883
884	if (copy_to_user((void __user *) (unsigned long) cmd.response,
885			 &resp, sizeof resp.cqe))
886		ret = -EFAULT;
887
888out:
889	put_cq_read(cq);
890
891	return ret ? ret : in_len;
892}
893
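/*
 * Marshal a kernel struct ib_wc into the fixed-layout struct ib_uverbs_wc
 * expected by user space; note that the QP pointer is translated into a
 * QP number.
 */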
894static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
895{
896	struct ib_uverbs_wc tmp;
897
898	tmp.wr_id		= wc->wr_id;
899	tmp.status		= wc->status;
900	tmp.opcode		= wc->opcode;
901	tmp.vendor_err		= wc->vendor_err;
902	tmp.byte_len		= wc->byte_len;
903	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
904	tmp.qp_num		= wc->qp->qp_num;
905	tmp.src_qp		= wc->src_qp;
906	tmp.wc_flags		= wc->wc_flags;
907	tmp.pkey_index		= wc->pkey_index;
908	tmp.slid		= wc->slid;
909	tmp.sl			= wc->sl;
910	tmp.dlid_path_bits	= wc->dlid_path_bits;
911	tmp.port_num		= wc->port_num;
912	tmp.reserved		= 0;
913
914	if (copy_to_user(dest, &tmp, sizeof tmp))
915		return -EFAULT;
916
917	return 0;
918}
919
920ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
921			  const char __user *buf, int in_len,
922			  int out_len)
923{
924	struct ib_uverbs_poll_cq       cmd;
925	struct ib_uverbs_poll_cq_resp  resp;
926	u8 __user                     *header_ptr;
927	u8 __user                     *data_ptr;
928	struct ib_cq                  *cq;
929	struct ib_wc                   wc;
930	int                            ret;
931
932	if (copy_from_user(&cmd, buf, sizeof cmd))
933		return -EFAULT;
934
935	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
936	if (!cq)
937		return -EINVAL;
938
939	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
940	header_ptr = (void __user *)(unsigned long) cmd.response;
941	data_ptr = header_ptr + sizeof resp;
942
943	memset(&resp, 0, sizeof resp);
944	while (resp.count < cmd.ne) {
945		ret = ib_poll_cq(cq, 1, &wc);
946		if (ret < 0)
947			goto out_put;
948		if (!ret)
949			break;
950
951		ret = copy_wc_to_user(data_ptr, &wc);
952		if (ret)
953			goto out_put;
954
955		data_ptr += sizeof(struct ib_uverbs_wc);
956		++resp.count;
957	}
958
959	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
960		ret = -EFAULT;
961		goto out_put;
962	}
963
964	ret = in_len;
965
966out_put:
967	put_cq_read(cq);
968	return ret;
969}
970
971ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
972				const char __user *buf, int in_len,
973				int out_len)
974{
975	struct ib_uverbs_req_notify_cq cmd;
976	struct ib_cq                  *cq;
977
978	if (copy_from_user(&cmd, buf, sizeof cmd))
979		return -EFAULT;
980
981	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
982	if (!cq)
983		return -EINVAL;
984
985	ib_req_notify_cq(cq, cmd.solicited_only ?
986			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
987
988	put_cq_read(cq);
989
990	return in_len;
991}
992
993ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
994			     const char __user *buf, int in_len,
995			     int out_len)
996{
997	struct ib_uverbs_destroy_cq      cmd;
998	struct ib_uverbs_destroy_cq_resp resp;
999	struct ib_uobject		*uobj;
1000	struct ib_cq               	*cq;
1001	struct ib_ucq_object        	*obj;
1002	struct ib_uverbs_event_file	*ev_file;
1003	int                        	 ret = -EINVAL;
1004
1005	if (copy_from_user(&cmd, buf, sizeof cmd))
1006		return -EFAULT;
1007
1008	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
1009	if (!uobj)
1010		return -EINVAL;
1011	cq      = uobj->object;
1012	ev_file = cq->cq_context;
1013	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);
1014
1015	ret = ib_destroy_cq(cq);
1016	if (!ret)
1017		uobj->live = 0;
1018
1019	put_uobj_write(uobj);
1020
1021	if (ret)
1022		return ret;
1023
1024	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
1025
1026	mutex_lock(&file->mutex);
1027	list_del(&uobj->list);
1028	mutex_unlock(&file->mutex);
1029
1030	ib_uverbs_release_ucq(file, ev_file, obj);
1031
1032	memset(&resp, 0, sizeof resp);
1033	resp.comp_events_reported  = obj->comp_events_reported;
1034	resp.async_events_reported = obj->async_events_reported;
1035
1036	put_uobj(uobj);
1037
1038	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1039			 &resp, sizeof resp))
1040		return -EFAULT;
1041
1042	return in_len;
1043}
1044
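/*
 * create_qp reads up to four handles: the PD, the send CQ, the receive CQ
 * and (optionally) an SRQ.  When the receive CQ differs from the send CQ
 * it is locked with SINGLE_DEPTH_NESTING (idr_read_cq(..., 1)) so that
 * lockdep accepts holding two CQ rwsems at once.
 */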
1045ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1046			    const char __user *buf, int in_len,
1047			    int out_len)
1048{
1049	struct ib_uverbs_create_qp      cmd;
1050	struct ib_uverbs_create_qp_resp resp;
1051	struct ib_udata                 udata;
1052	struct ib_uqp_object           *obj;
1053	struct ib_pd                   *pd;
1054	struct ib_cq                   *scq, *rcq;
1055	struct ib_srq                  *srq;
1056	struct ib_qp                   *qp;
1057	struct ib_qp_init_attr          attr;
1058	int ret;
1059
1060	if (out_len < sizeof resp)
1061		return -ENOSPC;
1062
1063	if (copy_from_user(&cmd, buf, sizeof cmd))
1064		return -EFAULT;
1065
1066	INIT_UDATA(&udata, buf + sizeof cmd,
1067		   (unsigned long) cmd.response + sizeof resp,
1068		   in_len - sizeof cmd, out_len - sizeof resp);
1069
1070	obj = kmalloc(sizeof *obj, GFP_KERNEL);
1071	if (!obj)
1072		return -ENOMEM;
1073
1074	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
1075	down_write(&obj->uevent.uobject.mutex);
1076
1077	srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
1078	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
1079	scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
1080	rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
1081		scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
1082
1083	if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
1084		ret = -EINVAL;
1085		goto err_put;
1086	}
1087
1088	attr.event_handler = ib_uverbs_qp_event_handler;
1089	attr.qp_context    = file;
1090	attr.send_cq       = scq;
1091	attr.recv_cq       = rcq;
1092	attr.srq           = srq;
1093	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
1094	attr.qp_type       = cmd.qp_type;
1095	attr.create_flags  = 0;
1096
1097	attr.cap.max_send_wr     = cmd.max_send_wr;
1098	attr.cap.max_recv_wr     = cmd.max_recv_wr;
1099	attr.cap.max_send_sge    = cmd.max_send_sge;
1100	attr.cap.max_recv_sge    = cmd.max_recv_sge;
1101	attr.cap.max_inline_data = cmd.max_inline_data;
1102
1103	obj->uevent.events_reported     = 0;
1104	INIT_LIST_HEAD(&obj->uevent.event_list);
1105	INIT_LIST_HEAD(&obj->mcast_list);
1106
1107	qp = pd->device->create_qp(pd, &attr, &udata);
1108	if (IS_ERR(qp)) {
1109		ret = PTR_ERR(qp);
1110		goto err_put;
1111	}
1112
1113	qp->device     	  = pd->device;
1114	qp->pd         	  = pd;
1115	qp->send_cq    	  = attr.send_cq;
1116	qp->recv_cq    	  = attr.recv_cq;
1117	qp->srq	       	  = attr.srq;
1118	qp->uobject       = &obj->uevent.uobject;
1119	qp->event_handler = attr.event_handler;
1120	qp->qp_context    = attr.qp_context;
1121	qp->qp_type	  = attr.qp_type;
1122	atomic_inc(&pd->usecnt);
1123	atomic_inc(&attr.send_cq->usecnt);
1124	atomic_inc(&attr.recv_cq->usecnt);
1125	if (attr.srq)
1126		atomic_inc(&attr.srq->usecnt);
1127
1128	obj->uevent.uobject.object = qp;
1129	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1130	if (ret)
1131		goto err_destroy;
1132
1133	memset(&resp, 0, sizeof resp);
1134	resp.qpn             = qp->qp_num;
1135	resp.qp_handle       = obj->uevent.uobject.id;
1136	resp.max_recv_sge    = attr.cap.max_recv_sge;
1137	resp.max_send_sge    = attr.cap.max_send_sge;
1138	resp.max_recv_wr     = attr.cap.max_recv_wr;
1139	resp.max_send_wr     = attr.cap.max_send_wr;
1140	resp.max_inline_data = attr.cap.max_inline_data;
1141
1142	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1143			 &resp, sizeof resp)) {
1144		ret = -EFAULT;
1145		goto err_copy;
1146	}
1147
1148	put_pd_read(pd);
1149	put_cq_read(scq);
1150	if (rcq != scq)
1151		put_cq_read(rcq);
1152	if (srq)
1153		put_srq_read(srq);
1154
1155	mutex_lock(&file->mutex);
1156	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1157	mutex_unlock(&file->mutex);
1158
1159	obj->uevent.uobject.live = 1;
1160
1161	up_write(&obj->uevent.uobject.mutex);
1162
1163	return in_len;
1164
1165err_copy:
1166	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1167
1168err_destroy:
1169	ib_destroy_qp(qp);
1170
1171err_put:
1172	if (pd)
1173		put_pd_read(pd);
1174	if (scq)
1175		put_cq_read(scq);
1176	if (rcq && rcq != scq)
1177		put_cq_read(rcq);
1178	if (srq)
1179		put_srq_read(srq);
1180
1181	put_uobj_write(&obj->uevent.uobject);
1182	return ret;
1183}
1184
1185ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
1186			   const char __user *buf, int in_len,
1187			   int out_len)
1188{
1189	struct ib_uverbs_query_qp      cmd;
1190	struct ib_uverbs_query_qp_resp resp;
1191	struct ib_qp                   *qp;
1192	struct ib_qp_attr              *attr;
1193	struct ib_qp_init_attr         *init_attr;
1194	int                            ret;
1195
1196	if (copy_from_user(&cmd, buf, sizeof cmd))
1197		return -EFAULT;
1198
1199	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
1200	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1201	if (!attr || !init_attr) {
1202		ret = -ENOMEM;
1203		goto out;
1204	}
1205
1206	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1207	if (!qp) {
1208		ret = -EINVAL;
1209		goto out;
1210	}
1211
1212	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1213
1214	put_qp_read(qp);
1215
1216	if (ret)
1217		goto out;
1218
1219	memset(&resp, 0, sizeof resp);
1220
1221	resp.qp_state               = attr->qp_state;
1222	resp.cur_qp_state           = attr->cur_qp_state;
1223	resp.path_mtu               = attr->path_mtu;
1224	resp.path_mig_state         = attr->path_mig_state;
1225	resp.qkey                   = attr->qkey;
1226	resp.rq_psn                 = attr->rq_psn;
1227	resp.sq_psn                 = attr->sq_psn;
1228	resp.dest_qp_num            = attr->dest_qp_num;
1229	resp.qp_access_flags        = attr->qp_access_flags;
1230	resp.pkey_index             = attr->pkey_index;
1231	resp.alt_pkey_index         = attr->alt_pkey_index;
1232	resp.sq_draining            = attr->sq_draining;
1233	resp.max_rd_atomic          = attr->max_rd_atomic;
1234	resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
1235	resp.min_rnr_timer          = attr->min_rnr_timer;
1236	resp.port_num               = attr->port_num;
1237	resp.timeout                = attr->timeout;
1238	resp.retry_cnt              = attr->retry_cnt;
1239	resp.rnr_retry              = attr->rnr_retry;
1240	resp.alt_port_num           = attr->alt_port_num;
1241	resp.alt_timeout            = attr->alt_timeout;
1242
1243	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
1244	resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
1245	resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
1246	resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
1247	resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
1248	resp.dest.dlid              = attr->ah_attr.dlid;
1249	resp.dest.sl                = attr->ah_attr.sl;
1250	resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
1251	resp.dest.static_rate       = attr->ah_attr.static_rate;
1252	resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
1253	resp.dest.port_num          = attr->ah_attr.port_num;
1254
1255	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
1256	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
1257	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
1258	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
1259	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
1260	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
1261	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
1262	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
1263	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
1264	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
1265	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;
1266
1267	resp.max_send_wr            = init_attr->cap.max_send_wr;
1268	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
1269	resp.max_send_sge           = init_attr->cap.max_send_sge;
1270	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
1271	resp.max_inline_data        = init_attr->cap.max_inline_data;
1272	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1273
1274	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1275			 &resp, sizeof resp))
1276		ret = -EFAULT;
1277
1278out:
1279	kfree(attr);
1280	kfree(init_attr);
1281
1282	return ret ? ret : in_len;
1283}
1284
1285ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1286			    const char __user *buf, int in_len,
1287			    int out_len)
1288{
1289	struct ib_uverbs_modify_qp cmd;
1290	struct ib_udata            udata;
1291	struct ib_qp              *qp;
1292	struct ib_qp_attr         *attr;
1293	int                        ret;
1294
1295	if (copy_from_user(&cmd, buf, sizeof cmd))
1296		return -EFAULT;
1297
1298	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
1299		   out_len);
1300
1301	attr = kmalloc(sizeof *attr, GFP_KERNEL);
1302	if (!attr)
1303		return -ENOMEM;
1304
1305	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1306	if (!qp) {
1307		ret = -EINVAL;
1308		goto out;
1309	}
1310
1311	attr->qp_state 		  = cmd.qp_state;
1312	attr->cur_qp_state 	  = cmd.cur_qp_state;
1313	attr->path_mtu 		  = cmd.path_mtu;
1314	attr->path_mig_state 	  = cmd.path_mig_state;
1315	attr->qkey 		  = cmd.qkey;
1316	attr->rq_psn 		  = cmd.rq_psn;
1317	attr->sq_psn 		  = cmd.sq_psn;
1318	attr->dest_qp_num 	  = cmd.dest_qp_num;
1319	attr->qp_access_flags 	  = cmd.qp_access_flags;
1320	attr->pkey_index 	  = cmd.pkey_index;
1321	attr->alt_pkey_index 	  = cmd.alt_pkey_index;
1322	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
1323	attr->max_rd_atomic 	  = cmd.max_rd_atomic;
1324	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
1325	attr->min_rnr_timer 	  = cmd.min_rnr_timer;
1326	attr->port_num 		  = cmd.port_num;
1327	attr->timeout 		  = cmd.timeout;
1328	attr->retry_cnt 	  = cmd.retry_cnt;
1329	attr->rnr_retry 	  = cmd.rnr_retry;
1330	attr->alt_port_num 	  = cmd.alt_port_num;
1331	attr->alt_timeout 	  = cmd.alt_timeout;
1332
1333	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
1334	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
1335	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
1336	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
1337	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
1338	attr->ah_attr.dlid 	    	    = cmd.dest.dlid;
1339	attr->ah_attr.sl   	    	    = cmd.dest.sl;
1340	attr->ah_attr.src_path_bits 	    = cmd.dest.src_path_bits;
1341	attr->ah_attr.static_rate   	    = cmd.dest.static_rate;
1342	attr->ah_attr.ah_flags 	    	    = cmd.dest.is_global ? IB_AH_GRH : 0;
1343	attr->ah_attr.port_num 	    	    = cmd.dest.port_num;
1344
1345	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
1346	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
1347	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
1348	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
1349	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
1350	attr->alt_ah_attr.dlid 	    	    = cmd.alt_dest.dlid;
1351	attr->alt_ah_attr.sl   	    	    = cmd.alt_dest.sl;
1352	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
1353	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
1354	attr->alt_ah_attr.ah_flags 	    = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
1355	attr->alt_ah_attr.port_num 	    = cmd.alt_dest.port_num;
1356
1357	ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);
1358
1359	put_qp_read(qp);
1360
1361	if (ret)
1362		goto out;
1363
1364	ret = in_len;
1365
1366out:
1367	kfree(attr);
1368
1369	return ret;
1370}
1371
1372ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
1373			     const char __user *buf, int in_len,
1374			     int out_len)
1375{
1376	struct ib_uverbs_destroy_qp      cmd;
1377	struct ib_uverbs_destroy_qp_resp resp;
1378	struct ib_uobject		*uobj;
1379	struct ib_qp               	*qp;
1380	struct ib_uqp_object        	*obj;
1381	int                        	 ret = -EINVAL;
1382
1383	if (copy_from_user(&cmd, buf, sizeof cmd))
1384		return -EFAULT;
1385
1386	memset(&resp, 0, sizeof resp);
1387
1388	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
1389	if (!uobj)
1390		return -EINVAL;
1391	qp  = uobj->object;
1392	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
1393
1394	if (!list_empty(&obj->mcast_list)) {
1395		put_uobj_write(uobj);
1396		return -EBUSY;
1397	}
1398
1399	ret = ib_destroy_qp(qp);
1400	if (!ret)
1401		uobj->live = 0;
1402
1403	put_uobj_write(uobj);
1404
1405	if (ret)
1406		return ret;
1407
1408	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
1409
1410	mutex_lock(&file->mutex);
1411	list_del(&uobj->list);
1412	mutex_unlock(&file->mutex);
1413
1414	ib_uverbs_release_uevent(file, &obj->uevent);
1415
1416	resp.events_reported = obj->uevent.events_reported;
1417
1418	put_uobj(uobj);
1419
1420	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1421			 &resp, sizeof resp))
1422		return -EFAULT;
1423
1424	return in_len;
1425}
1426
1427ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
1428			    const char __user *buf, int in_len,
1429			    int out_len)
1430{
1431	struct ib_uverbs_post_send      cmd;
1432	struct ib_uverbs_post_send_resp resp;
1433	struct ib_uverbs_send_wr       *user_wr;
1434	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
1435	struct ib_qp                   *qp;
1436	int                             i, sg_ind;
1437	int				is_ud;
1438	ssize_t                         ret = -EINVAL;
1439
1440	if (copy_from_user(&cmd, buf, sizeof cmd))
1441		return -EFAULT;
1442
1443	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
1444	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
1445		return -EINVAL;
1446
1447	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
1448		return -EINVAL;
1449
1450	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
1451	if (!user_wr)
1452		return -ENOMEM;
1453
1454	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1455	if (!qp)
1456		goto out;
1457
1458	is_ud = qp->qp_type == IB_QPT_UD;
1459	sg_ind = 0;
1460	last = NULL;
1461	for (i = 0; i < cmd.wr_count; ++i) {
1462		if (copy_from_user(user_wr,
1463				   buf + sizeof cmd + i * cmd.wqe_size,
1464				   cmd.wqe_size)) {
1465			ret = -EFAULT;
1466			goto out_put;
1467		}
1468
1469		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
1470			ret = -EINVAL;
1471			goto out_put;
1472		}
1473
1474		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
1475			       user_wr->num_sge * sizeof (struct ib_sge),
1476			       GFP_KERNEL);
1477		if (!next) {
1478			ret = -ENOMEM;
1479			goto out_put;
1480		}
1481
1482		if (!last)
1483			wr = next;
1484		else
1485			last->next = next;
1486		last = next;
1487
1488		next->next       = NULL;
1489		next->wr_id      = user_wr->wr_id;
1490		next->num_sge    = user_wr->num_sge;
1491		next->opcode     = user_wr->opcode;
1492		next->send_flags = user_wr->send_flags;
1493
1494		if (is_ud) {
1495			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
1496						     file->ucontext);
1497			if (!next->wr.ud.ah) {
1498				ret = -EINVAL;
1499				goto out_put;
1500			}
1501			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
1502			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
1503		} else {
1504			switch (next->opcode) {
1505			case IB_WR_RDMA_WRITE_WITH_IMM:
1506				next->ex.imm_data =
1507					(__be32 __force) user_wr->ex.imm_data;
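				/* fall through to pick up the rdma fields */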
1508			case IB_WR_RDMA_WRITE:
1509			case IB_WR_RDMA_READ:
1510				next->wr.rdma.remote_addr =
1511					user_wr->wr.rdma.remote_addr;
1512				next->wr.rdma.rkey        =
1513					user_wr->wr.rdma.rkey;
1514				break;
1515			case IB_WR_SEND_WITH_IMM:
1516				next->ex.imm_data =
1517					(__be32 __force) user_wr->ex.imm_data;
1518				break;
1519			case IB_WR_SEND_WITH_INV:
1520				next->ex.invalidate_rkey =
1521					user_wr->ex.invalidate_rkey;
1522				break;
1523			case IB_WR_ATOMIC_CMP_AND_SWP:
1524			case IB_WR_ATOMIC_FETCH_AND_ADD:
1525				next->wr.atomic.remote_addr =
1526					user_wr->wr.atomic.remote_addr;
1527				next->wr.atomic.compare_add =
1528					user_wr->wr.atomic.compare_add;
1529				next->wr.atomic.swap = user_wr->wr.atomic.swap;
1530				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
1531				break;
1532			default:
1533				break;
1534			}
1535		}
1536
1537		if (next->num_sge) {
1538			next->sg_list = (void *) next +
1539				ALIGN(sizeof *next, sizeof (struct ib_sge));
1540			if (copy_from_user(next->sg_list,
1541					   buf + sizeof cmd +
1542					   cmd.wr_count * cmd.wqe_size +
1543					   sg_ind * sizeof (struct ib_sge),
1544					   next->num_sge * sizeof (struct ib_sge))) {
1545				ret = -EFAULT;
1546				goto out_put;
1547			}
1548			sg_ind += next->num_sge;
1549		} else
1550			next->sg_list = NULL;
1551	}
1552
1553	resp.bad_wr = 0;
1554	ret = qp->device->post_send(qp, wr, &bad_wr);
1555	if (ret)
1556		for (next = wr; next; next = next->next) {
1557			++resp.bad_wr;
1558			if (next == bad_wr)
1559				break;
1560		}
1561
1562	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1563			 &resp, sizeof resp))
1564		ret = -EFAULT;
1565
1566out_put:
1567	put_qp_read(qp);
1568
1569	while (wr) {
1570		if (is_ud && wr->wr.ud.ah)
1571			put_ah_read(wr->wr.ud.ah);
1572		next = wr->next;
1573		kfree(wr);
1574		wr = next;
1575	}
1576
1577out:
1578	kfree(user_wr);
1579
1580	return ret ? ret : in_len;
1581}
1582
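/*
 * Rebuild a kernel ib_recv_wr chain from the user buffer: wr_count work
 * requests of wqe_size bytes each, followed by a flat array of
 * ib_uverbs_sge entries.  Each WR and its scatter/gather list are
 * allocated together in a single kmalloc.
 */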
1583static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
1584						    int in_len,
1585						    u32 wr_count,
1586						    u32 sge_count,
1587						    u32 wqe_size)
1588{
1589	struct ib_uverbs_recv_wr *user_wr;
1590	struct ib_recv_wr        *wr = NULL, *last, *next;
1591	int                       sg_ind;
1592	int                       i;
1593	int                       ret;
1594
1595	if (in_len < wqe_size * wr_count +
1596	    sge_count * sizeof (struct ib_uverbs_sge))
1597		return ERR_PTR(-EINVAL);
1598
1599	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
1600		return ERR_PTR(-EINVAL);
1601
1602	user_wr = kmalloc(wqe_size, GFP_KERNEL);
1603	if (!user_wr)
1604		return ERR_PTR(-ENOMEM);
1605
1606	sg_ind = 0;
1607	last = NULL;
1608	for (i = 0; i < wr_count; ++i) {
1609		if (copy_from_user(user_wr, buf + i * wqe_size,
1610				   wqe_size)) {
1611			ret = -EFAULT;
1612			goto err;
1613		}
1614
1615		if (user_wr->num_sge + sg_ind > sge_count) {
1616			ret = -EINVAL;
1617			goto err;
1618		}
1619
1620		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
1621			       user_wr->num_sge * sizeof (struct ib_sge),
1622			       GFP_KERNEL);
1623		if (!next) {
1624			ret = -ENOMEM;
1625			goto err;
1626		}
1627
1628		if (!last)
1629			wr = next;
1630		else
1631			last->next = next;
1632		last = next;
1633
1634		next->next       = NULL;
1635		next->wr_id      = user_wr->wr_id;
1636		next->num_sge    = user_wr->num_sge;
1637
1638		if (next->num_sge) {
1639			next->sg_list = (void *) next +
1640				ALIGN(sizeof *next, sizeof (struct ib_sge));
1641			if (copy_from_user(next->sg_list,
1642					   buf + wr_count * wqe_size +
1643					   sg_ind * sizeof (struct ib_sge),
1644					   next->num_sge * sizeof (struct ib_sge))) {
1645				ret = -EFAULT;
1646				goto err;
1647			}
1648			sg_ind += next->num_sge;
1649		} else
1650			next->sg_list = NULL;
1651	}
1652
1653	kfree(user_wr);
1654	return wr;
1655
1656err:
1657	kfree(user_wr);
1658
1659	while (wr) {
1660		next = wr->next;
1661		kfree(wr);
1662		wr = next;
1663	}
1664
1665	return ERR_PTR(ret);
1666}
1667
1668ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
1669			    const char __user *buf, int in_len,
1670			    int out_len)
1671{
1672	struct ib_uverbs_post_recv      cmd;
1673	struct ib_uverbs_post_recv_resp resp;
1674	struct ib_recv_wr              *wr, *next, *bad_wr;
1675	struct ib_qp                   *qp;
1676	ssize_t                         ret = -EINVAL;
1677
1678	if (copy_from_user(&cmd, buf, sizeof cmd))
1679		return -EFAULT;
1680
1681	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
1682				       in_len - sizeof cmd, cmd.wr_count,
1683				       cmd.sge_count, cmd.wqe_size);
1684	if (IS_ERR(wr))
1685		return PTR_ERR(wr);
1686
1687	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1688	if (!qp)
1689		goto out;
1690
1691	resp.bad_wr = 0;
1692	ret = qp->device->post_recv(qp, wr, &bad_wr);
1693
1694	put_qp_read(qp);
1695
1696	if (ret)
1697		for (next = wr; next; next = next->next) {
1698			++resp.bad_wr;
1699			if (next == bad_wr)
1700				break;
1701		}
1702
1703	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1704			 &resp, sizeof resp))
1705		ret = -EFAULT;
1706
1707out:
1708	while (wr) {
1709		next = wr->next;
1710		kfree(wr);
1711		wr = next;
1712	}
1713
1714	return ret ? ret : in_len;
1715}
1716
1717ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
1718				const char __user *buf, int in_len,
1719				int out_len)
1720{
1721	struct ib_uverbs_post_srq_recv      cmd;
1722	struct ib_uverbs_post_srq_recv_resp resp;
1723	struct ib_recv_wr                  *wr, *next, *bad_wr;
1724	struct ib_srq                      *srq;
1725	ssize_t                             ret = -EINVAL;
1726
1727	if (copy_from_user(&cmd, buf, sizeof cmd))
1728		return -EFAULT;
1729
1730	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
1731				       in_len - sizeof cmd, cmd.wr_count,
1732				       cmd.sge_count, cmd.wqe_size);
1733	if (IS_ERR(wr))
1734		return PTR_ERR(wr);
1735
1736	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
1737	if (!srq)
1738		goto out;
1739
1740	resp.bad_wr = 0;
1741	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
1742
1743	put_srq_read(srq);
1744
1745	if (ret)
1746		for (next = wr; next; next = next->next) {
1747			++resp.bad_wr;
1748			if (next == bad_wr)
1749				break;
1750		}
1751
1752	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1753			 &resp, sizeof resp))
1754		ret = -EFAULT;
1755
1756out:
1757	while (wr) {
1758		next = wr->next;
1759		kfree(wr);
1760		wr = next;
1761	}
1762
1763	return ret ? ret : in_len;
1764}
1765
1766ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
1767			    const char __user *buf, int in_len,
1768			    int out_len)
1769{
1770	struct ib_uverbs_create_ah	 cmd;
1771	struct ib_uverbs_create_ah_resp	 resp;
1772	struct ib_uobject		*uobj;
1773	struct ib_pd			*pd;
1774	struct ib_ah			*ah;
1775	struct ib_ah_attr		attr;
1776	int ret;
1777
1778	if (out_len < sizeof resp)
1779		return -ENOSPC;
1780
1781	if (copy_from_user(&cmd, buf, sizeof cmd))
1782		return -EFAULT;
1783
1784	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
1785	if (!uobj)
1786		return -ENOMEM;
1787
1788	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
1789	down_write(&uobj->mutex);
1790
1791	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1792	if (!pd) {
1793		ret = -EINVAL;
1794		goto err;
1795	}
1796
1797	attr.dlid 	       = cmd.attr.dlid;
1798	attr.sl 	       = cmd.attr.sl;
1799	attr.src_path_bits     = cmd.attr.src_path_bits;
1800	attr.static_rate       = cmd.attr.static_rate;
1801	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
1802	attr.port_num 	       = cmd.attr.port_num;
1803	attr.grh.flow_label    = cmd.attr.grh.flow_label;
1804	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
1805	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
1806	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
1807	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
1808
1809	ah = ib_create_ah(pd, &attr);
1810	if (IS_ERR(ah)) {
1811		ret = PTR_ERR(ah);
1812		goto err_put;
1813	}
1814
1815	ah->uobject  = uobj;
1816	uobj->object = ah;
1817
1818	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
1819	if (ret)
1820		goto err_destroy;
1821
1822	resp.ah_handle = uobj->id;
1823
1824	if (copy_to_user((void __user *) (unsigned long) cmd.response,
1825			 &resp, sizeof resp)) {
1826		ret = -EFAULT;
1827		goto err_copy;
1828	}
1829
1830	put_pd_read(pd);
1831
1832	mutex_lock(&file->mutex);
1833	list_add_tail(&uobj->list, &file->ucontext->ah_list);
1834	mutex_unlock(&file->mutex);
1835
1836	uobj->live = 1;
1837
1838	up_write(&uobj->mutex);
1839
1840	return in_len;
1841
1842err_copy:
1843	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
1844
1845err_destroy:
1846	ib_destroy_ah(ah);
1847
1848err_put:
1849	put_pd_read(pd);
1850
1851err:
1852	put_uobj_write(uobj);
1853	return ret;
1854}
1855
1856ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
1857			     const char __user *buf, int in_len, int out_len)
1858{
1859	struct ib_uverbs_destroy_ah cmd;
1860	struct ib_ah		   *ah;
1861	struct ib_uobject	   *uobj;
1862	int			    ret;
1863
1864	if (copy_from_user(&cmd, buf, sizeof cmd))
1865		return -EFAULT;
1866
1867	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
1868	if (!uobj)
1869		return -EINVAL;
1870	ah = uobj->object;
1871
1872	ret = ib_destroy_ah(ah);
1873	if (!ret)
1874		uobj->live = 0;
1875
1876	put_uobj_write(uobj);
1877
1878	if (ret)
1879		return ret;
1880
1881	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
1882
1883	mutex_lock(&file->mutex);
1884	list_del(&uobj->list);
1885	mutex_unlock(&file->mutex);
1886
1887	put_uobj(uobj);
1888
1889	return in_len;
1890}
1891
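/*
 * Multicast attachments are tracked in the QP's uobject (mcast_list) so
 * that duplicate attach requests are ignored, detach can find the entry
 * to free, and destroy_qp can refuse with -EBUSY while attachments remain.
 */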
1892ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
1893			       const char __user *buf, int in_len,
1894			       int out_len)
1895{
1896	struct ib_uverbs_attach_mcast cmd;
1897	struct ib_qp                 *qp;
1898	struct ib_uqp_object         *obj;
1899	struct ib_uverbs_mcast_entry *mcast;
1900	int                           ret;
1901
1902	if (copy_from_user(&cmd, buf, sizeof cmd))
1903		return -EFAULT;
1904
1905	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1906	if (!qp)
1907		return -EINVAL;
1908
1909	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
1910
1911	list_for_each_entry(mcast, &obj->mcast_list, list)
1912		if (cmd.mlid == mcast->lid &&
1913		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
1914			ret = 0;
1915			goto out_put;
1916		}
1917
1918	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
1919	if (!mcast) {
1920		ret = -ENOMEM;
1921		goto out_put;
1922	}
1923
1924	mcast->lid = cmd.mlid;
1925	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
1926
1927	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
1928	if (!ret)
1929		list_add_tail(&mcast->list, &obj->mcast_list);
1930	else
1931		kfree(mcast);
1932
1933out_put:
1934	put_qp_read(qp);
1935
1936	return ret ? ret : in_len;
1937}
1938
1939ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
1940			       const char __user *buf, int in_len,
1941			       int out_len)
1942{
1943	struct ib_uverbs_detach_mcast cmd;
1944	struct ib_uqp_object         *obj;
1945	struct ib_qp                 *qp;
1946	struct ib_uverbs_mcast_entry *mcast;
1947	int                           ret = -EINVAL;
1948
1949	if (copy_from_user(&cmd, buf, sizeof cmd))
1950		return -EFAULT;
1951
1952	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1953	if (!qp)
1954		return -EINVAL;
1955
1956	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
1957	if (ret)
1958		goto out_put;
1959
1960	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
1961
1962	list_for_each_entry(mcast, &obj->mcast_list, list)
1963		if (cmd.mlid == mcast->lid &&
1964		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
1965			list_del(&mcast->list);
1966			kfree(mcast);
1967			break;
1968		}
1969
1970out_put:
1971	put_qp_read(qp);
1972
1973	return ret ? ret : in_len;
1974}
1975
1976ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
1977			     const char __user *buf, int in_len,
1978			     int out_len)
1979{
1980	struct ib_uverbs_create_srq      cmd;
1981	struct ib_uverbs_create_srq_resp resp;
1982	struct ib_udata                  udata;
1983	struct ib_uevent_object         *obj;
1984	struct ib_pd                    *pd;
1985	struct ib_srq                   *srq;
1986	struct ib_srq_init_attr          attr;
1987	int ret;
1988
1989	if (out_len < sizeof resp)
1990		return -ENOSPC;
1991
1992	if (copy_from_user(&cmd, buf, sizeof cmd))
1993		return -EFAULT;
1994
1995	INIT_UDATA(&udata, buf + sizeof cmd,
1996		   (unsigned long) cmd.response + sizeof resp,
1997		   in_len - sizeof cmd, out_len - sizeof resp);
1998
1999	obj = kmalloc(sizeof *obj, GFP_KERNEL);
2000	if (!obj)
2001		return -ENOMEM;
2002
2003	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
2004	down_write(&obj->uobject.mutex);
2005
2006	pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
2007	if (!pd) {
2008		ret = -EINVAL;
2009		goto err;
2010	}
2011
2012	attr.event_handler  = ib_uverbs_srq_event_handler;
2013	attr.srq_context    = file;
2014	attr.attr.max_wr    = cmd.max_wr;
2015	attr.attr.max_sge   = cmd.max_sge;
2016	attr.attr.srq_limit = cmd.srq_limit;
2017
2018	obj->events_reported     = 0;
2019	INIT_LIST_HEAD(&obj->event_list);
2020
2021	srq = pd->device->create_srq(pd, &attr, &udata);
2022	if (IS_ERR(srq)) {
2023		ret = PTR_ERR(srq);
2024		goto err_put;
2025	}
2026
2027	srq->device    	   = pd->device;
2028	srq->pd        	   = pd;
2029	srq->uobject       = &obj->uobject;
2030	srq->event_handler = attr.event_handler;
2031	srq->srq_context   = attr.srq_context;
2032	atomic_inc(&pd->usecnt);
2033	atomic_set(&srq->usecnt, 0);
2034
2035	obj->uobject.object = srq;
2036	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject);
2037	if (ret)
2038		goto err_destroy;
2039
2040	memset(&resp, 0, sizeof resp);
2041	resp.srq_handle = obj->uobject.id;
2042	resp.max_wr     = attr.attr.max_wr;
2043	resp.max_sge    = attr.attr.max_sge;
2044
2045	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2046			 &resp, sizeof resp)) {
2047		ret = -EFAULT;
2048		goto err_copy;
2049	}
2050
2051	put_pd_read(pd);
2052
2053	mutex_lock(&file->mutex);
2054	list_add_tail(&obj->uobject.list, &file->ucontext->srq_list);
2055	mutex_unlock(&file->mutex);
2056
2057	obj->uobject.live = 1;
2058
2059	up_write(&obj->uobject.mutex);
2060
2061	return in_len;
2062
2063err_copy:
2064	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject);
2065
2066err_destroy:
2067	ib_destroy_srq(srq);
2068
2069err_put:
2070	put_pd_read(pd);
2071
2072err:
2073	put_uobj_write(&obj->uobject);
2074	return ret;
2075}
2076
2077ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
2078			     const char __user *buf, int in_len,
2079			     int out_len)
2080{
2081	struct ib_uverbs_modify_srq cmd;
2082	struct ib_udata             udata;
2083	struct ib_srq              *srq;
2084	struct ib_srq_attr          attr;
2085	int                         ret;
2086
2087	if (copy_from_user(&cmd, buf, sizeof cmd))
2088		return -EFAULT;
2089
2090	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2091		   out_len);
2092
2093	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2094	if (!srq)
2095		return -EINVAL;
2096
2097	attr.max_wr    = cmd.max_wr;
2098	attr.srq_limit = cmd.srq_limit;
2099
2100	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
2101
2102	put_srq_read(srq);
2103
2104	return ret ? ret : in_len;
2105}
2106
2107ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
2108			    const char __user *buf,
2109			    int in_len, int out_len)
2110{
2111	struct ib_uverbs_query_srq      cmd;
2112	struct ib_uverbs_query_srq_resp resp;
2113	struct ib_srq_attr              attr;
2114	struct ib_srq                   *srq;
2115	int                             ret;
2116
2117	if (out_len < sizeof resp)
2118		return -ENOSPC;
2119
2120	if (copy_from_user(&cmd, buf, sizeof cmd))
2121		return -EFAULT;
2122
2123	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2124	if (!srq)
2125		return -EINVAL;
2126
2127	ret = ib_query_srq(srq, &attr);
2128
2129	put_srq_read(srq);
2130
2131	if (ret)
2132		return ret;
2133
2134	memset(&resp, 0, sizeof resp);
2135
2136	resp.max_wr    = attr.max_wr;
2137	resp.max_sge   = attr.max_sge;
2138	resp.srq_limit = attr.srq_limit;
2139
2140	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2141			 &resp, sizeof resp))
2142		return -EFAULT;
2143
2144	return in_len;
2145}
2146
2147ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
2148			      const char __user *buf, int in_len,
2149			      int out_len)
2150{
2151	struct ib_uverbs_destroy_srq      cmd;
2152	struct ib_uverbs_destroy_srq_resp resp;
2153	struct ib_uobject		 *uobj;
2154	struct ib_srq               	 *srq;
2155	struct ib_uevent_object        	 *obj;
2156	int                         	  ret = -EINVAL;
2157
2158	if (copy_from_user(&cmd, buf, sizeof cmd))
2159		return -EFAULT;
2160
2161	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
2162	if (!uobj)
2163		return -EINVAL;
2164	srq = uobj->object;
2165	obj = container_of(uobj, struct ib_uevent_object, uobject);
2166
2167	ret = ib_destroy_srq(srq);
2168	if (!ret)
2169		uobj->live = 0;
2170
2171	put_uobj_write(uobj);
2172
2173	if (ret)
2174		return ret;
2175
2176	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
2177
2178	mutex_lock(&file->mutex);
2179	list_del(&uobj->list);
2180	mutex_unlock(&file->mutex);
2181
2182	ib_uverbs_release_uevent(file, obj);
2183
2184	memset(&resp, 0, sizeof resp);
2185	resp.events_reported = obj->events_reported;
2186
2187	put_uobj(uobj);
2188
2189	if (copy_to_user((void __user *) (unsigned long) cmd.response,
2190			 &resp, sizeof resp))
2191		ret = -EFAULT;
2192
2193	return ret ? ret : in_len;
2194}
2195