1/*-
2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
3 *
4 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
5 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
6 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
7 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
8 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
9 *
10 * This software is available to you under a choice of one of two
11 * licenses.  You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 *     Redistribution and use in source and binary forms, with or
17 *     without modification, are permitted provided that the following
18 *     conditions are met:
19 *
20 *      - Redistributions of source code must retain the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer.
23 *
24 *      - Redistributions in binary form must reproduce the above
25 *        copyright notice, this list of conditions and the following
26 *        disclaimer in the documentation and/or other materials
27 *        provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
37 */
38
39#include <sys/cdefs.h>
40__FBSDID("$FreeBSD$");
41
42#include <linux/module.h>
43#include <linux/device.h>
44#include <linux/err.h>
45#include <linux/fs.h>
46#include <linux/poll.h>
47#include <linux/sched.h>
48#include <linux/file.h>
49#include <linux/cdev.h>
50#include <linux/slab.h>
51#include <linux/pci.h>
52
53#include <asm/uaccess.h>
54
55#include <rdma/ib.h>
56
57#include "uverbs.h"
58
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * Static char-device region: IB_UVERBS_MAX_DEVICES minors starting at
 * major 231, minor 192.  Additional devices overflow into a dynamically
 * allocated major (see find_overflow_devnum()).
 */
enum {
	IB_UVERBS_MAJOR       = 231,
	IB_UVERBS_BASE_MINOR  = 192,
	IB_UVERBS_MAX_DEVICES = 32
};

#define IB_UVERBS_BASE_DEV	MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

static struct class *uverbs_class;

/* One lock guards every uverbs object IDR table below. */
DEFINE_SPINLOCK(ib_uverbs_idr_lock);
DEFINE_IDR(ib_uverbs_pd_idr);
DEFINE_IDR(ib_uverbs_mr_idr);
DEFINE_IDR(ib_uverbs_mw_idr);
DEFINE_IDR(ib_uverbs_ah_idr);
DEFINE_IDR(ib_uverbs_cq_idr);
DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
DEFINE_IDR(ib_uverbs_xrcd_idr);
DEFINE_IDR(ib_uverbs_rule_idr);
DEFINE_IDR(ib_uverbs_wq_idr);
DEFINE_IDR(ib_uverbs_rwq_ind_tbl_idr);

/*
 * NOTE(review): map_lock presumably serializes allocation of minor
 * numbers in dev_map/overflow_map — the allocation path is not in this
 * chunk; confirm against ib_uverbs_add_one().
 */
static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
88
/*
 * Dispatch table for the "classic" write() commands, indexed by the
 * IB_USER_VERBS_CMD_* opcode extracted from the command header.
 * Unset slots are NULL and rejected in ib_uverbs_write().
 */
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
				     struct ib_device *ib_dev,
				     const char __user *buf, int in_len,
				     int out_len) = {
	[IB_USER_VERBS_CMD_GET_CONTEXT]		= ib_uverbs_get_context,
	[IB_USER_VERBS_CMD_QUERY_DEVICE]	= ib_uverbs_query_device,
	[IB_USER_VERBS_CMD_QUERY_PORT]		= ib_uverbs_query_port,
	[IB_USER_VERBS_CMD_ALLOC_PD]		= ib_uverbs_alloc_pd,
	[IB_USER_VERBS_CMD_DEALLOC_PD]		= ib_uverbs_dealloc_pd,
	[IB_USER_VERBS_CMD_REG_MR]		= ib_uverbs_reg_mr,
	[IB_USER_VERBS_CMD_REREG_MR]		= ib_uverbs_rereg_mr,
	[IB_USER_VERBS_CMD_DEREG_MR]		= ib_uverbs_dereg_mr,
	[IB_USER_VERBS_CMD_ALLOC_MW]		= ib_uverbs_alloc_mw,
	[IB_USER_VERBS_CMD_DEALLOC_MW]		= ib_uverbs_dealloc_mw,
	[IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
	[IB_USER_VERBS_CMD_CREATE_CQ]		= ib_uverbs_create_cq,
	[IB_USER_VERBS_CMD_RESIZE_CQ]		= ib_uverbs_resize_cq,
	[IB_USER_VERBS_CMD_POLL_CQ]		= ib_uverbs_poll_cq,
	[IB_USER_VERBS_CMD_REQ_NOTIFY_CQ]	= ib_uverbs_req_notify_cq,
	[IB_USER_VERBS_CMD_DESTROY_CQ]		= ib_uverbs_destroy_cq,
	[IB_USER_VERBS_CMD_CREATE_QP]		= ib_uverbs_create_qp,
	[IB_USER_VERBS_CMD_QUERY_QP]		= ib_uverbs_query_qp,
	[IB_USER_VERBS_CMD_MODIFY_QP]		= ib_uverbs_modify_qp,
	[IB_USER_VERBS_CMD_DESTROY_QP]		= ib_uverbs_destroy_qp,
	[IB_USER_VERBS_CMD_POST_SEND]		= ib_uverbs_post_send,
	[IB_USER_VERBS_CMD_POST_RECV]		= ib_uverbs_post_recv,
	[IB_USER_VERBS_CMD_POST_SRQ_RECV]	= ib_uverbs_post_srq_recv,
	[IB_USER_VERBS_CMD_CREATE_AH]		= ib_uverbs_create_ah,
	[IB_USER_VERBS_CMD_DESTROY_AH]		= ib_uverbs_destroy_ah,
	[IB_USER_VERBS_CMD_ATTACH_MCAST]	= ib_uverbs_attach_mcast,
	[IB_USER_VERBS_CMD_DETACH_MCAST]	= ib_uverbs_detach_mcast,
	[IB_USER_VERBS_CMD_CREATE_SRQ]		= ib_uverbs_create_srq,
	[IB_USER_VERBS_CMD_MODIFY_SRQ]		= ib_uverbs_modify_srq,
	[IB_USER_VERBS_CMD_QUERY_SRQ]		= ib_uverbs_query_srq,
	[IB_USER_VERBS_CMD_DESTROY_SRQ]		= ib_uverbs_destroy_srq,
	[IB_USER_VERBS_CMD_OPEN_XRCD]		= ib_uverbs_open_xrcd,
	[IB_USER_VERBS_CMD_CLOSE_XRCD]		= ib_uverbs_close_xrcd,
	[IB_USER_VERBS_CMD_CREATE_XSRQ]		= ib_uverbs_create_xsrq,
	[IB_USER_VERBS_CMD_OPEN_QP]		= ib_uverbs_open_qp,
};
129
/*
 * Dispatch table for extended (IB_USER_VERBS_CMD_FLAG_EXTENDED)
 * commands; these take separate core and provider udata buffers
 * instead of a flat user buffer.  Unset slots are NULL and rejected
 * with -ENOSYS in ib_uverbs_write().
 */
static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
				    struct ib_device *ib_dev,
				    struct ib_udata *ucore,
				    struct ib_udata *uhw) = {
	[IB_USER_VERBS_EX_CMD_CREATE_FLOW]	= ib_uverbs_ex_create_flow,
	[IB_USER_VERBS_EX_CMD_DESTROY_FLOW]	= ib_uverbs_ex_destroy_flow,
	[IB_USER_VERBS_EX_CMD_QUERY_DEVICE]	= ib_uverbs_ex_query_device,
	[IB_USER_VERBS_EX_CMD_CREATE_CQ]	= ib_uverbs_ex_create_cq,
	[IB_USER_VERBS_EX_CMD_CREATE_QP]        = ib_uverbs_ex_create_qp,
	[IB_USER_VERBS_EX_CMD_CREATE_WQ]        = ib_uverbs_ex_create_wq,
	[IB_USER_VERBS_EX_CMD_MODIFY_WQ]        = ib_uverbs_ex_modify_wq,
	[IB_USER_VERBS_EX_CMD_DESTROY_WQ]       = ib_uverbs_ex_destroy_wq,
	[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
	[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
};

/* ib_client add/remove callbacks, registered in uverbs_client below. */
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
148
149int uverbs_dealloc_mw(struct ib_mw *mw)
150{
151	struct ib_pd *pd = mw->pd;
152	int ret;
153
154	ret = mw->device->dealloc_mw(mw);
155	if (!ret)
156		atomic_dec(&pd->usecnt);
157	return ret;
158}
159
/* kobject release callback: the last reference to the device is gone. */
static void ib_uverbs_release_dev(struct kobject *kobj)
{
	struct ib_uverbs_device *dev =
		container_of(kobj, struct ib_uverbs_device, kobj);

	/* No SRCU readers can remain once the final kobject ref dropped. */
	cleanup_srcu_struct(&dev->disassociate_srcu);
	kfree(dev);
}
168
/* kobject type for struct ib_uverbs_device; only a release hook. */
static struct kobj_type ib_uverbs_dev_ktype = {
	.release = ib_uverbs_release_dev,
};
172
/* kref release for an event file; callers must hold no ev_file locks. */
static void ib_uverbs_release_event_file(struct kref *ref)
{
	struct ib_uverbs_event_file *file =
		container_of(ref, struct ib_uverbs_event_file, ref);

	kfree(file);
}
180
/*
 * Free every completion and async event still queued for a CQ uobject.
 * @ev_file may be NULL (CQ created without a completion channel); in
 * that case only the async list is drained.  Also drops the reference
 * the CQ held on its completion channel.
 */
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
			  struct ib_uverbs_event_file *ev_file,
			  struct ib_ucq_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	if (ev_file) {
		spin_lock_irq(&ev_file->lock);
		list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
			list_del(&evt->list);
			kfree(evt);
		}
		spin_unlock_irq(&ev_file->lock);

		kref_put(&ev_file->ref, ib_uverbs_release_event_file);
	}

	/* Async events always live on the file-wide async event file. */
	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);
}
205
/*
 * Drain and free all async events still queued for a QP/SRQ/WQ uobject
 * from the file's async event file.
 */
void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
			      struct ib_uevent_object *uobj)
{
	struct ib_uverbs_event *evt, *tmp;

	spin_lock_irq(&file->async_file->lock);
	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irq(&file->async_file->lock);
}
218
219static void ib_uverbs_detach_umcast(struct ib_qp *qp,
220				    struct ib_uqp_object *uobj)
221{
222	struct ib_uverbs_mcast_entry *mcast, *tmp;
223
224	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
225		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
226		list_del(&mcast->list);
227		kfree(mcast);
228	}
229}
230
/*
 * Tear down every uverbs object still owned by @context, in dependency
 * order (AH, MW, flow, QP, RWQ indirection table, WQ, SRQ, CQ, MR,
 * XRCD, PD), then ask the driver to free the ucontext itself.  The
 * ordering is load-bearing: e.g. MWs must go before the QPs/MRs they
 * may be bound to, and PDs must go last since everything references
 * them.  Returns the driver's dealloc_ucontext() result.
 */
static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
				      struct ib_ucontext *context)
{
	struct ib_uobject *uobj, *tmp;

	/* Tell concurrent verbs paths the context is going away. */
	context->closing = 1;

	list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
		struct ib_ah *ah = uobj->object;

		idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
		ib_destroy_ah(ah);
		kfree(uobj);
	}

	/* Remove MWs before QPs, in order to support type 2A MWs. */
	list_for_each_entry_safe(uobj, tmp, &context->mw_list, list) {
		struct ib_mw *mw = uobj->object;

		idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
		uverbs_dealloc_mw(mw);
		kfree(uobj);
	}

	list_for_each_entry_safe(uobj, tmp, &context->rule_list, list) {
		struct ib_flow *flow_id = uobj->object;

		idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
		ib_destroy_flow(flow_id);
		kfree(uobj);
	}

	list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
		struct ib_qp *qp = uobj->object;
		struct ib_uqp_object *uqp =
			container_of(uobj, struct ib_uqp_object, uevent.uobject);

		idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
		/* XRC target QPs share a real QP; only detach on the owner. */
		if (qp == qp->real_qp)
			ib_uverbs_detach_umcast(qp, uqp);
		ib_destroy_qp(qp);
		ib_uverbs_release_uevent(file, &uqp->uevent);
		kfree(uqp);
	}

	/* Indirection tables reference WQs, so destroy them first. */
	list_for_each_entry_safe(uobj, tmp, &context->rwq_ind_tbl_list, list) {
		struct ib_rwq_ind_table *rwq_ind_tbl = uobj->object;
		struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;

		idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
		ib_destroy_rwq_ind_table(rwq_ind_tbl);
		kfree(ind_tbl);
		kfree(uobj);
	}

	list_for_each_entry_safe(uobj, tmp, &context->wq_list, list) {
		struct ib_wq *wq = uobj->object;
		struct ib_uwq_object *uwq =
			container_of(uobj, struct ib_uwq_object, uevent.uobject);

		idr_remove_uobj(&ib_uverbs_wq_idr, uobj);
		ib_destroy_wq(wq);
		ib_uverbs_release_uevent(file, &uwq->uevent);
		kfree(uwq);
	}

	list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
		struct ib_srq *srq = uobj->object;
		struct ib_uevent_object *uevent =
			container_of(uobj, struct ib_uevent_object, uobject);

		idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
		ib_destroy_srq(srq);
		ib_uverbs_release_uevent(file, uevent);
		kfree(uevent);
	}

	list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
		struct ib_cq *cq = uobj->object;
		struct ib_uverbs_event_file *ev_file = cq->cq_context;
		struct ib_ucq_object *ucq =
			container_of(uobj, struct ib_ucq_object, uobject);

		idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
		ib_destroy_cq(cq);
		ib_uverbs_release_ucq(file, ev_file, ucq);
		kfree(ucq);
	}

	list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
		struct ib_mr *mr = uobj->object;

		idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
		ib_dereg_mr(mr);
		kfree(uobj);
	}

	/* XRCDs are shared across files; their tree needs the mutex. */
	mutex_lock(&file->device->xrcd_tree_mutex);
	list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) {
		struct ib_xrcd *xrcd = uobj->object;
		struct ib_uxrcd_object *uxrcd =
			container_of(uobj, struct ib_uxrcd_object, uobject);

		idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
		ib_uverbs_dealloc_xrcd(file->device, xrcd);
		kfree(uxrcd);
	}
	mutex_unlock(&file->device->xrcd_tree_mutex);

	list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) {
		struct ib_pd *pd = uobj->object;

		idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
		ib_dealloc_pd(pd);
		kfree(uobj);
	}

	put_pid(context->tgid);

	return context->device->dealloc_ucontext(context);
}
352
/* Wake whoever waits in remove_one: the device refcount hit zero. */
static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
	complete(&dev->comp);
}
357
/*
 * kref release for a uverbs file.  Drops the module reference taken at
 * open time (only held when the driver cannot disassociate contexts),
 * releases the device refcount, and frees the file.
 */
static void ib_uverbs_release_file(struct kref *ref)
{
	struct ib_uverbs_file *file =
		container_of(ref, struct ib_uverbs_file, ref);
	struct ib_device *ib_dev;
	int srcu_key;

	/* ib_dev may already be NULL if the device was disassociated. */
	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (ib_dev && !ib_dev->disassociate_ucontext)
		module_put(ib_dev->owner);
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

	if (atomic_dec_and_test(&file->device->refcount))
		ib_uverbs_comp_dev(file->device);

	kfree(file);
}
377
/*
 * read() on a completion-channel or async event file.  Blocks (unless
 * O_NONBLOCK) until an event is queued or the device is disassociated.
 * Copies exactly one event descriptor to user space; returns its size,
 * -EAGAIN, -ERESTARTSYS, -EINVAL (buffer too small), -EFAULT, or -EIO
 * after disassociation.
 */
static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct ib_uverbs_event_file *file = filp->private_data;
	struct ib_uverbs_event *event;
	int eventsz;
	int ret = 0;

	spin_lock_irq(&file->lock);

	/* Sleep with the lock dropped; re-check the list after waking. */
	while (list_empty(&file->event_list)) {
		spin_unlock_irq(&file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     (!list_empty(&file->event_list) ||
			/* The barriers built into wait_event_interruptible()
			 * and wake_up() guarantee this will see the null set
			 * without using RCU
			 */
					     !file->uverbs_file->device->ib_dev)))
			return -ERESTARTSYS;

		/* If device was disassociated and no event exists set an error */
		if (list_empty(&file->event_list) &&
		    !file->uverbs_file->device->ib_dev)
			return -EIO;

		spin_lock_irq(&file->lock);
	}

	event = list_entry(file->event_list.next, struct ib_uverbs_event, list);

	/* Async and completion channels carry different descriptor sizes. */
	if (file->is_async)
		eventsz = sizeof (struct ib_uverbs_async_event_desc);
	else
		eventsz = sizeof (struct ib_uverbs_comp_event_desc);

	if (eventsz > count) {
		ret   = -EINVAL;
		event = NULL;	/* leave the event queued */
	} else {
		list_del(file->event_list.next);
		if (event->counter) {
			/* Account the delivery on the owning uobject. */
			++(*event->counter);
			list_del(&event->obj_list);
		}
	}

	spin_unlock_irq(&file->lock);

	if (event) {
		if (copy_to_user(buf, event, eventsz))
			ret = -EFAULT;
		else
			ret = eventsz;
	}

	/* kfree(NULL) is a no-op on the -EINVAL path. */
	kfree(event);

	return ret;
}
442
443static unsigned int ib_uverbs_event_poll(struct file *filp,
444					 struct poll_table_struct *wait)
445{
446	unsigned int pollflags = 0;
447	struct ib_uverbs_event_file *file = filp->private_data;
448
449	poll_wait(filp, &file->poll_wait, wait);
450
451	spin_lock_irq(&file->lock);
452	if (!list_empty(&file->event_list))
453		pollflags = POLLIN | POLLRDNORM;
454	spin_unlock_irq(&file->lock);
455
456	return pollflags;
457}
458
/* fasync() hook: register/unregister SIGIO delivery for the event file. */
static int ib_uverbs_event_fasync(int fd, struct file *filp, int on)
{
	struct ib_uverbs_event_file *file = filp->private_data;

	return fasync_helper(fd, filp, on, &file->async_queue);
}
465
/*
 * release() for an event file: mark it closed, drop all queued events,
 * unlink it from the device list and (for the async file) unregister
 * the IB event handler — unless disassociation already did all that.
 * Finally drops the refs on the owning uverbs file and on itself.
 */
static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_event_file *file = filp->private_data;
	struct ib_uverbs_event *entry, *tmp;
	int closed_already = 0;

	mutex_lock(&file->uverbs_file->device->lists_mutex);
	spin_lock_irq(&file->lock);
	closed_already = file->is_closed;
	file->is_closed = 1;
	list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
		if (entry->counter)
			list_del(&entry->obj_list);
		kfree(entry);
	}
	spin_unlock_irq(&file->lock);
	if (!closed_already) {
		list_del(&file->list);
		if (file->is_async)
			ib_unregister_event_handler(&file->uverbs_file->
				event_handler);
	}
	mutex_unlock(&file->uverbs_file->device->lists_mutex);

	kref_put(&file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&file->ref, ib_uverbs_release_event_file);

	return 0;
}
495
/* File operations for completion-channel / async event files. */
static const struct file_operations uverbs_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = ib_uverbs_event_read,
	.poll    = ib_uverbs_event_poll,
	.release = ib_uverbs_event_close,
	.fasync  = ib_uverbs_event_fasync,
	.llseek	 = no_llseek,
};
504
/*
 * CQ completion callback (may run in interrupt context): queue a
 * completion event on the CQ's channel and wake readers.  Silently
 * drops the event if the channel is gone, closed, or allocation fails
 * — userspace re-polls the CQ, so a lost wakeup event is recoverable.
 */
void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_uverbs_event_file    *file = cq_context;
	struct ib_ucq_object	       *uobj;
	struct ib_uverbs_event	       *entry;
	unsigned long			flags;

	if (!file)
		return;

	spin_lock_irqsave(&file->lock, flags);
	if (file->is_closed) {
		spin_unlock_irqrestore(&file->lock, flags);
		return;
	}

	/* GFP_ATOMIC: we may be called from interrupt context. */
	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->lock, flags);
		return;
	}

	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

	entry->desc.comp.cq_handle = cq->uobject->user_handle;
	entry->counter		   = &uobj->comp_events_reported;

	list_add_tail(&entry->list, &file->event_list);
	list_add_tail(&entry->obj_list, &uobj->comp_list);
	spin_unlock_irqrestore(&file->lock, flags);

	wake_up_interruptible(&file->poll_wait);
	kill_fasync(&file->async_queue, SIGIO, POLL_IN);
}
539
/*
 * Queue an async event (@element/@event) on the file's async event
 * file and wake readers.  @obj_list/@counter tie the event to its
 * owning uobject for cleanup accounting; both may be NULL for
 * port-level events.  Events are silently dropped if the async file is
 * closed or allocation fails.
 */
static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
				    __u64 element, __u64 event,
				    struct list_head *obj_list,
				    u32 *counter)
{
	struct ib_uverbs_event *entry;
	unsigned long flags;

	spin_lock_irqsave(&file->async_file->lock, flags);
	if (file->async_file->is_closed) {
		spin_unlock_irqrestore(&file->async_file->lock, flags);
		return;
	}

	/* GFP_ATOMIC: may be invoked from interrupt context. */
	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
	if (!entry) {
		spin_unlock_irqrestore(&file->async_file->lock, flags);
		return;
	}

	entry->desc.async.element    = element;
	entry->desc.async.event_type = event;
	entry->desc.async.reserved   = 0;
	entry->counter               = counter;

	list_add_tail(&entry->list, &file->async_file->event_list);
	if (obj_list)
		list_add_tail(&entry->obj_list, obj_list);
	spin_unlock_irqrestore(&file->async_file->lock, flags);

	wake_up_interruptible(&file->async_file->poll_wait);
	kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN);
}
573
/* Async CQ event (e.g. CQ error): forward to the owning uverbs file. */
void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
						  struct ib_ucq_object, uobject);

	ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
				event->event, &uobj->async_list,
				&uobj->async_events_reported);
}
583
/* Async QP event: forward to userspace via the file's async channel. */
void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	/* for XRC target qp's, check that qp is live */
	if (!event->element.qp->uobject || !event->element.qp->uobject->live)
		return;

	uobj = container_of(event->element.qp->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
599
/* Async WQ event: forward to userspace via the file's async channel. */
void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
						  struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
609
/* Async SRQ event (e.g. limit reached): forward to userspace. */
void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
	struct ib_uevent_object *uobj;

	uobj = container_of(event->element.srq->uobject,
			    struct ib_uevent_object, uobject);

	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
				event->event, &uobj->event_list,
				&uobj->events_reported);
}
621
/*
 * Device/port-level async events (registered per uverbs file): no
 * owning uobject, so no obj_list/counter accounting.
 */
void ib_uverbs_event_handler(struct ib_event_handler *handler,
			     struct ib_event *event)
{
	struct ib_uverbs_file *file =
		container_of(handler, struct ib_uverbs_file, event_handler);

	ib_uverbs_async_handler(file, event->element.port_num, event->event,
				NULL, NULL);
}
631
/* Drop the file's reference to its async event file and forget it. */
void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
	kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
	file->async_file = NULL;
}
637
/*
 * Allocate an event file (completion channel, or — when @is_async —
 * the per-file async event file, which also registers the IB event
 * handler).  Returns a new struct file or ERR_PTR().  On success the
 * event file holds a reference on @uverbs_file and is linked on the
 * device's event-file list for disassociation handling.
 */
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
					struct ib_device	*ib_dev,
					int is_async)
{
	struct ib_uverbs_event_file *ev_file;
	struct file *filp;
	int ret;

	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
	if (!ev_file)
		return ERR_PTR(-ENOMEM);

	kref_init(&ev_file->ref);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	ev_file->uverbs_file = uverbs_file;
	kref_get(&ev_file->uverbs_file->ref);
	ev_file->async_queue = NULL;
	ev_file->is_closed   = 0;

	/*
	 * fops_get() can't fail here, because we're coming from a
	 * system call on a uverbs file, which will already have a
	 * module reference.
	 */
	filp = alloc_file(FMODE_READ, fops_get(&uverbs_event_fops));
	if (IS_ERR(filp))
		goto err_put_refs;
	filp->private_data = ev_file;

	mutex_lock(&uverbs_file->device->lists_mutex);
	list_add_tail(&ev_file->list,
		      &uverbs_file->device->uverbs_events_file_list);
	mutex_unlock(&uverbs_file->device->lists_mutex);

	if (is_async) {
		WARN_ON(uverbs_file->async_file);
		uverbs_file->async_file = ev_file;
		kref_get(&uverbs_file->async_file->ref);
		INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
				      ib_dev,
				      ib_uverbs_event_handler);
		ret = ib_register_event_handler(&uverbs_file->event_handler);
		if (ret)
			goto err_put_file;

		/* At that point async file stuff was fully set */
		ev_file->is_async = 1;
	}

	return filp;

err_put_file:
	/* fput() releases the file, which drops ev_file's own refs. */
	fput(filp);
	kref_put(&uverbs_file->async_file->ref, ib_uverbs_release_event_file);
	uverbs_file->async_file = NULL;
	return ERR_PTR(ret);

err_put_refs:
	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
	kref_put(&ev_file->ref, ib_uverbs_release_event_file);
	return filp;	/* filp is the ERR_PTR from alloc_file() */
}
702
/*
 * Look up a completion event file by FD.  If lookup is successful,
 * takes a ref to the event file struct that it returns; if
 * unsuccessful, returns NULL.
 */
struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd)
{
	struct ib_uverbs_event_file *ev_file = NULL;
	struct fd f = fdget(fd);

	if (!f.file)
		return NULL;

	/* Reject FDs that are not uverbs event files at all. */
	if (f.file->f_op != &uverbs_event_fops)
		goto out;

	ev_file = f.file->private_data;
	/* Async event files cannot be used as completion channels. */
	if (ev_file->is_async) {
		ev_file = NULL;
		goto out;
	}

	kref_get(&ev_file->ref);

out:
	fdput(f);
	return ev_file;
}
731
732static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
733{
734	u64 mask;
735
736	if (command <= IB_USER_VERBS_CMD_OPEN_QP)
737		mask = ib_dev->uverbs_cmd_mask;
738	else
739		mask = ib_dev->uverbs_ex_cmd_mask;
740
741	if (mask & ((u64)1 << command))
742		return 0;
743
744	return -1;
745}
746
/*
 * write() entry point: parse the command header, validate it against
 * the device's capability mask and the size fields, then dispatch to
 * either the classic table (flags == 0) or the extended table
 * (IB_USER_VERBS_CMD_FLAG_EXTENDED).  The whole call runs inside an
 * SRCU read section so device disassociation can drain it.
 */
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_device *ib_dev;
	struct ib_uverbs_cmd_hdr hdr;
	__u32 command;
	__u32 flags;
	int srcu_key;
	ssize_t ret;

	/* Reject writes coming through unexpected paths (e.g. splice). */
	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
		return -EACCES;

	if (count < sizeof hdr)
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof hdr))
		return -EFAULT;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;	/* device disassociated */
		goto out;
	}

	/* Only the command and flags bit-fields may be set. */
	if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
				   IB_USER_VERBS_CMD_COMMAND_MASK)) {
		ret = -EINVAL;
		goto out;
	}

	command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
	if (verify_command_mask(ib_dev, command)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* Everything except GET_CONTEXT requires a context first. */
	if (!file->ucontext &&
	    command != IB_USER_VERBS_CMD_GET_CONTEXT) {
		ret = -EINVAL;
		goto out;
	}

	flags = (hdr.command &
		 IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;

	if (!flags) {
		/* Classic command: flat in/out buffer, sizes in 4-byte words. */
		if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
		    !uverbs_cmd_table[command]) {
			ret = -EINVAL;
			goto out;
		}

		if (hdr.in_words * 4 != count) {
			ret = -EINVAL;
			goto out;
		}

		ret = uverbs_cmd_table[command](file, ib_dev,
						 buf + sizeof(hdr),
						 hdr.in_words * 4,
						 hdr.out_words * 4);

	} else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
		struct ib_uverbs_ex_cmd_hdr ex_hdr;
		struct ib_udata ucore;
		struct ib_udata uhw;
		size_t written_count = count;	/* reported back on success */

		if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
		    !uverbs_ex_cmd_table[command]) {
			ret = -ENOSYS;
			goto out;
		}

		if (!file->ucontext) {
			ret = -EINVAL;
			goto out;
		}

		if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
			ret = -EINVAL;
			goto out;
		}

		if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) {
			ret = -EFAULT;
			goto out;
		}

		/* Skip both headers; extended sizes are in 8-byte words. */
		count -= sizeof(hdr) + sizeof(ex_hdr);
		buf += sizeof(hdr) + sizeof(ex_hdr);

		if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count) {
			ret = -EINVAL;
			goto out;
		}

		if (ex_hdr.cmd_hdr_reserved) {
			ret = -EINVAL;
			goto out;
		}

		if (ex_hdr.response) {
			/* A response pointer requires a non-zero out size... */
			if (!hdr.out_words && !ex_hdr.provider_out_words) {
				ret = -EINVAL;
				goto out;
			}

			if (!access_ok(VERIFY_WRITE,
				       (void __user *) (unsigned long) ex_hdr.response,
				       (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
				ret = -EFAULT;
				goto out;
			}
		} else {
			/* ...and no response pointer requires zero out size. */
			if (hdr.out_words || ex_hdr.provider_out_words) {
				ret = -EINVAL;
				goto out;
			}
		}

		/* ucore covers the core payload, uhw the provider tail. */
		INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
				       hdr.in_words * 8, hdr.out_words * 8);

		INIT_UDATA_BUF_OR_NULL(&uhw,
				       buf + ucore.inlen,
				       (unsigned long) ex_hdr.response + ucore.outlen,
				       ex_hdr.provider_in_words * 8,
				       ex_hdr.provider_out_words * 8);

		ret = uverbs_ex_cmd_table[command](file,
						   ib_dev,
						   &ucore,
						   &uhw);
		if (!ret)
			ret = written_count;
	} else {
		ret = -ENOSYS;
	}

out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}
895
/*
 * mmap() entry point: delegate to the driver's mmap method under SRCU.
 * Fails with -EIO after disassociation and -ENODEV before a ucontext
 * has been created.
 */
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_device *ib_dev;
	int ret = 0;
	int srcu_key;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ib_dev = srcu_dereference(file->device->ib_dev,
				  &file->device->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto out;
	}

	if (!file->ucontext)
		ret = -ENODEV;
	else
		ret = ib_dev->mmap(file->ucontext, vma);
out:
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}
919
920/*
921 * ib_uverbs_open() does not need the BKL:
922 *
923 *  - the ib_uverbs_device structures are properly reference counted and
924 *    everything else is purely local to the file being created, so
925 *    races against other open calls are not a problem;
926 *  - there is no ioctl method to race against;
927 *  - the open method will either immediately run -ENXIO, or all
928 *    required initialization will be done.
929 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_device *dev;
	struct ib_uverbs_file *file;
	struct ib_device *ib_dev;
	int ret;
	int module_dependent;
	int srcu_key;

	/* refcount == 0 means the device is mid-removal: refuse open. */
	dev = container_of(inode->i_cdev->si_drv1, struct ib_uverbs_device, cdev);
	if (!atomic_inc_not_zero(&dev->refcount))
		return -ENXIO;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	mutex_lock(&dev->lists_mutex);
	ib_dev = srcu_dereference(dev->ib_dev,
				  &dev->disassociate_srcu);
	if (!ib_dev) {
		ret = -EIO;
		goto err;
	}

	/* In case IB device supports disassociate ucontext, there is no hard
	 * dependency between uverbs device and its low level device.
	 */
	module_dependent = !(ib_dev->disassociate_ucontext);

	if (module_dependent) {
		if (!try_module_get(ib_dev->owner)) {
			ret = -ENODEV;
			goto err;
		}
	}

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file) {
		ret = -ENOMEM;
		if (module_dependent)
			goto err_module;

		goto err;
	}

	file->device	 = dev;
	file->ucontext	 = NULL;	/* created lazily by GET_CONTEXT */
	file->async_file = NULL;
	kref_init(&file->ref);
	mutex_init(&file->mutex);
	mutex_init(&file->cleanup_mutex);

	filp->private_data = file;
	kobject_get(&dev->kobj);
	/* Visible on the device list so disassociation can find us. */
	list_add_tail(&file->list, &dev->uverbs_file_list);
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return nonseekable_open(inode, filp);

err_module:
	module_put(ib_dev->owner);

err:
	mutex_unlock(&dev->lists_mutex);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
	/* Undo the atomic_inc_not_zero() taken above. */
	if (atomic_dec_and_test(&dev->refcount))
		ib_uverbs_comp_dev(dev);

	return ret;
}
999
/*
 * release() for a uverbs file: tear down the ucontext (if not already
 * done by disassociation — cleanup_mutex serializes the two paths),
 * unlink from the device list, and drop the async-file, file, and
 * device-kobject references.
 */
static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
	struct ib_uverbs_file *file = filp->private_data;
	struct ib_uverbs_device *dev = file->device;

	mutex_lock(&file->cleanup_mutex);
	if (file->ucontext) {
		ib_uverbs_cleanup_ucontext(file, file->ucontext);
		file->ucontext = NULL;
	}
	mutex_unlock(&file->cleanup_mutex);

	mutex_lock(&file->device->lists_mutex);
	if (!file->is_closed) {
		list_del(&file->list);
		file->is_closed = 1;
	}
	mutex_unlock(&file->device->lists_mutex);

	if (file->async_file)
		kref_put(&file->async_file->ref, ib_uverbs_release_event_file);

	kref_put(&file->ref, ib_uverbs_release_file);
	kobject_put(&dev->kobj);

	return 0;
}
1027
/* fops for devices whose driver provides no mmap method. */
static const struct file_operations uverbs_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
};
1035
/* fops for devices whose driver supports mmap (e.g. doorbell pages). */
static const struct file_operations uverbs_mmap_fops = {
	.owner	 = THIS_MODULE,
	.write	 = ib_uverbs_write,
	.mmap    = ib_uverbs_mmap,
	.open	 = ib_uverbs_open,
	.release = ib_uverbs_close,
	.llseek	 = no_llseek,
};
1044
/* IB core client: creates/destroys a uverbs char dev per IB device. */
static struct ib_client uverbs_client = {
	.name   = "uverbs",
	.add    = ib_uverbs_add_one,
	.remove = ib_uverbs_remove_one
};
1050
/* sysfs "ibdev": name of the underlying IB device, or -ENODEV. */
static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	int ret = -ENODEV;
	int srcu_key;
	struct ib_uverbs_device *dev = dev_get_drvdata(device);
	struct ib_device *ib_dev;

	if (!dev)
		return -ENODEV;

	/* SRCU guards against reading a disassociated ib_dev. */
	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)
		ret = sprintf(buf, "%s\n", ib_dev->name);
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
1071
1072static ssize_t show_dev_abi_version(struct device *device,
1073				    struct device_attribute *attr, char *buf)
1074{
1075	struct ib_uverbs_device *dev = dev_get_drvdata(device);
1076	int ret = -ENODEV;
1077	int srcu_key;
1078	struct ib_device *ib_dev;
1079
1080	if (!dev)
1081		return -ENODEV;
1082	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
1083	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
1084	if (ib_dev)
1085		ret = sprintf(buf, "%d\n", ib_dev->uverbs_abi_ver);
1086	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
1087
1088	return ret;
1089}
1090static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
1091
/* Class-level attribute exposing the compiled-in uverbs ABI version. */
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_VERBS_ABI_VERSION));

/* Dynamically allocated major number and minor bitmap used once the
 * static IB_UVERBS_BASE_DEV range is exhausted (see find_overflow_devnum). */
static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
1097
1098/*
1099 * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
1100 * requesting a new major number and doubling the number of max devices we
1101 * support. It's stupid, but simple.
1102 */
1103static int find_overflow_devnum(void)
1104{
1105	int ret;
1106
1107	if (!overflow_maj) {
1108		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
1109					  "infiniband_verbs");
1110		if (ret) {
1111			pr_err("user_verbs: couldn't register dynamic device number\n");
1112			return ret;
1113		}
1114	}
1115
1116	ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
1117	if (ret >= IB_UVERBS_MAX_DEVICES)
1118		return -1;
1119
1120	return ret;
1121}
1122
1123static ssize_t
1124show_dev_device(struct device *device, struct device_attribute *attr, char *buf)
1125{
1126	struct ib_uverbs_device *dev = dev_get_drvdata(device);
1127
1128	if (!dev || !dev->ib_dev->dma_device)
1129		return -ENODEV;
1130
1131	return sprintf(buf, "0x%04x\n",
1132	    ((struct pci_dev *)dev->ib_dev->dma_device)->device);
1133}
1134static DEVICE_ATTR(device, S_IRUGO, show_dev_device, NULL);
1135
1136static ssize_t
1137show_dev_vendor(struct device *device, struct device_attribute *attr, char *buf)
1138{
1139	struct ib_uverbs_device *dev = dev_get_drvdata(device);
1140
1141	if (!dev || !dev->ib_dev->dma_device)
1142		return -ENODEV;
1143
1144	return sprintf(buf, "0x%04x\n",
1145	    ((struct pci_dev *)dev->ib_dev->dma_device)->vendor);
1146}
1147static DEVICE_ATTR(vendor, S_IRUGO, show_dev_vendor, NULL);
1148
1149struct attribute *device_attrs[] =
1150{
1151	&dev_attr_device.attr,
1152	&dev_attr_vendor.attr,
1153	NULL
1154};
1155
1156static struct attribute_group device_group = {
1157        .name  = "device",
1158        .attrs  = device_attrs
1159};
1160
/*
 * IB client "add" callback: create the per-device uverbs state, allocate a
 * char device minor, and publish the /dev node plus its sysfs attributes.
 * On any failure the device is silently skipped (the callback returns void).
 */
static void ib_uverbs_add_one(struct ib_device *device)
{
	int devnum;
	dev_t base;
	struct ib_uverbs_device *uverbs_dev;
	int ret;

	/* Only devices that support user contexts are exposed to userspace. */
	if (!device->alloc_ucontext)
		return;

	uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
	if (!uverbs_dev)
		return;

	ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
	if (ret) {
		kfree(uverbs_dev);
		return;
	}

	/* refcount starts at 1 for the registration itself; comp is
	 * completed when it drops to zero (see ib_uverbs_remove_one). */
	atomic_set(&uverbs_dev->refcount, 1);
	init_completion(&uverbs_dev->comp);
	uverbs_dev->xrcd_tree = RB_ROOT;
	mutex_init(&uverbs_dev->xrcd_tree_mutex);
	kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
	mutex_init(&uverbs_dev->lists_mutex);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
	INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);

	/* Claim a minor: first from the static range, else from the
	 * dynamically allocated overflow major.  map_lock must be dropped
	 * around find_overflow_devnum() because it may sleep. */
	spin_lock(&map_lock);
	devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
	if (devnum >= IB_UVERBS_MAX_DEVICES) {
		spin_unlock(&map_lock);
		devnum = find_overflow_devnum();
		if (devnum < 0)
			goto err;

		spin_lock(&map_lock);
		/* devnum stays the raw overflow_map bit index; the public
		 * devnum is offset so remove_one can tell the maps apart. */
		uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
		base = devnum + overflow_maj;
		set_bit(devnum, overflow_map);
	} else {
		uverbs_dev->devnum = devnum;
		base = devnum + IB_UVERBS_BASE_DEV;
		set_bit(devnum, dev_map);
	}
	spin_unlock(&map_lock);

	rcu_assign_pointer(uverbs_dev->ib_dev, device);
	uverbs_dev->num_comp_vectors = device->num_comp_vectors;

	/* Choose the fops table based on whether the driver supports mmap. */
	cdev_init(&uverbs_dev->cdev, NULL);
	uverbs_dev->cdev.owner = THIS_MODULE;
	uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
	uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
	kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
	if (cdev_add(&uverbs_dev->cdev, base, 1))
		goto err_cdev;

	uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
					uverbs_dev->cdev.dev, uverbs_dev,
					"uverbs%d", uverbs_dev->devnum);
	if (IS_ERR(uverbs_dev->dev))
		goto err_cdev;

	if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
		goto err_class;
	if (sysfs_create_group(&uverbs_dev->dev->kobj, &device_group))
		goto err_class;

	ib_set_client_data(device, &uverbs_client, uverbs_dev);

	return;

err_class:
	device_destroy(uverbs_class, uverbs_dev->cdev.dev);

err_cdev:
	cdev_del(&uverbs_dev->cdev);
	/* devnum still holds the raw bit index of whichever map we used. */
	if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
		clear_bit(devnum, dev_map);
	else
		clear_bit(devnum, overflow_map);

err:
	/* Drop the registration reference and wait for it to hit zero
	 * before releasing the kobject (and with it the structure). */
	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	wait_for_completion(&uverbs_dev->comp);
	kobject_put(&uverbs_dev->kobj);
	return;
}
1254
/*
 * Disassociate all open files from a departing ib_device: wait for in-flight
 * commands, deliver IB_EVENT_DEVICE_FATAL, detach every ucontext from its HW
 * resources, and wake/poison all event files.  After this returns, userspace
 * holders see errors but the ib_device itself may be freed.
 */
static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
					struct ib_device *ib_dev)
{
	struct ib_uverbs_file *file;
	struct ib_uverbs_event_file *event_file;
	struct ib_event event;

	/* Pending running commands to terminate */
	synchronize_srcu(&uverbs_dev->disassociate_srcu);
	event.event = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;
	event.device = ib_dev;

	mutex_lock(&uverbs_dev->lists_mutex);
	while (!list_empty(&uverbs_dev->uverbs_file_list)) {
		struct ib_ucontext *ucontext;
		file = list_first_entry(&uverbs_dev->uverbs_file_list,
					struct ib_uverbs_file, list);
		/* Mark closed and unlink so a concurrent ib_uverbs_close
		 * skips its own list_del; hold a ref across the unlocked
		 * region below. */
		file->is_closed = 1;
		list_del(&file->list);
		kref_get(&file->ref);
		mutex_unlock(&uverbs_dev->lists_mutex);


		/* Steal the ucontext under cleanup_mutex so only one side
		 * (us or ib_uverbs_close) performs the cleanup. */
		mutex_lock(&file->cleanup_mutex);
		ucontext = file->ucontext;
		file->ucontext = NULL;
		mutex_unlock(&file->cleanup_mutex);

		/* At this point ib_uverbs_close cannot be running
		 * ib_uverbs_cleanup_ucontext
		 */
		if (ucontext) {
			/* We must release the mutex before going ahead and
			 * calling disassociate_ucontext. disassociate_ucontext
			 * might end up indirectly calling uverbs_close,
			 * for example due to freeing the resources
			 * (e.g mmput).
			 */
			ib_uverbs_event_handler(&file->event_handler, &event);
			ib_dev->disassociate_ucontext(ucontext);
			ib_uverbs_cleanup_ucontext(file, ucontext);
		}

		mutex_lock(&uverbs_dev->lists_mutex);
		kref_put(&file->ref, ib_uverbs_release_file);
	}

	/* Poison every event file so readers/pollers return immediately. */
	while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
		event_file = list_first_entry(&uverbs_dev->
					      uverbs_events_file_list,
					      struct ib_uverbs_event_file,
					      list);
		spin_lock_irq(&event_file->lock);
		event_file->is_closed = 1;
		spin_unlock_irq(&event_file->lock);

		list_del(&event_file->list);
		if (event_file->is_async) {
			/* The async handler references the ib_device; it must
			 * be unhooked before the device goes away. */
			ib_unregister_event_handler(&event_file->uverbs_file->
						    event_handler);
			event_file->uverbs_file->event_handler.device = NULL;
		}

		/* Wake blocked poll()/read() waiters and SIGIO listeners. */
		wake_up_interruptible(&event_file->poll_wait);
		kill_fasync(&event_file->async_queue, SIGIO, POLL_IN);
	}
	mutex_unlock(&uverbs_dev->lists_mutex);
}
1324
/*
 * IB client "remove" callback: tear down the sysfs/cdev presence so no new
 * opens can occur, then either disassociate active clients (if the driver
 * supports it) or block until every open file is closed.
 */
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_uverbs_device *uverbs_dev = client_data;
	int wait_clients = 1;

	if (!uverbs_dev)
		return;

	/* Remove sysfs entries and the char device first so userspace can
	 * no longer open new files against this device. */
	sysfs_remove_group(&uverbs_dev->dev->kobj, &device_group);
	dev_set_drvdata(uverbs_dev->dev, NULL);
	device_destroy(uverbs_class, uverbs_dev->cdev.dev);
	cdev_del(&uverbs_dev->cdev);

	/* Return the minor to whichever bitmap it came from (devnum >=
	 * IB_UVERBS_MAX_DEVICES means the overflow range). */
	if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
		clear_bit(uverbs_dev->devnum, dev_map);
	else
		clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);

	if (device->disassociate_ucontext) {
		/* We disassociate HW resources and immediately return.
		 * Userspace will see a EIO errno for all future access.
		 * Upon returning, ib_device may be freed internally and is not
		 * valid any more.
		 * uverbs_device is still available until all clients close
		 * their files, then the uverbs device ref count will be zero
		 * and its resources will be freed.
		 * Note: At this point no more files can be opened since the
		 * cdev was deleted, however active clients can still issue
		 * commands and close their open files.
		 */
		rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
		ib_uverbs_free_hw_resources(uverbs_dev, device);
		wait_clients = 0;
	}

	/* Drop the registration reference; without disassociation support we
	 * must wait for all clients before the kobject can be released. */
	if (atomic_dec_and_test(&uverbs_dev->refcount))
		ib_uverbs_comp_dev(uverbs_dev);
	if (wait_clients)
		wait_for_completion(&uverbs_dev->comp);
	kobject_put(&uverbs_dev->kobj);
}
1366
1367static char *uverbs_devnode(struct device *dev, umode_t *mode)
1368{
1369	if (mode)
1370		*mode = 0666;
1371	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
1372}
1373
1374static int __init ib_uverbs_init(void)
1375{
1376	int ret;
1377
1378	ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
1379				     "infiniband_verbs");
1380	if (ret) {
1381		pr_err("user_verbs: couldn't register device number\n");
1382		goto out;
1383	}
1384
1385	uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
1386	if (IS_ERR(uverbs_class)) {
1387		ret = PTR_ERR(uverbs_class);
1388		pr_err("user_verbs: couldn't create class infiniband_verbs\n");
1389		goto out_chrdev;
1390	}
1391
1392	uverbs_class->devnode = uverbs_devnode;
1393
1394	ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
1395	if (ret) {
1396		pr_err("user_verbs: couldn't create abi_version attribute\n");
1397		goto out_class;
1398	}
1399
1400	ret = ib_register_client(&uverbs_client);
1401	if (ret) {
1402		pr_err("user_verbs: couldn't register client\n");
1403		goto out_class;
1404	}
1405
1406	return 0;
1407
1408out_class:
1409	class_destroy(uverbs_class);
1410
1411out_chrdev:
1412	unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
1413
1414out:
1415	return ret;
1416}
1417
/*
 * Module exit: undo ib_uverbs_init in reverse order, release the overflow
 * char-device range if it was ever allocated, and destroy the per-object
 * idr tables (defined elsewhere in this module).
 */
static void __exit ib_uverbs_cleanup(void)
{
	/* Unregistering the client triggers remove_one for each device. */
	ib_unregister_client(&uverbs_client);
	class_destroy(uverbs_class);
	unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
	/* Only allocated lazily by find_overflow_devnum(). */
	if (overflow_maj)
		unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
	idr_destroy(&ib_uverbs_pd_idr);
	idr_destroy(&ib_uverbs_mr_idr);
	idr_destroy(&ib_uverbs_mw_idr);
	idr_destroy(&ib_uverbs_ah_idr);
	idr_destroy(&ib_uverbs_cq_idr);
	idr_destroy(&ib_uverbs_qp_idr);
	idr_destroy(&ib_uverbs_srq_idr);
}
1433
/* FreeBSD linker-ordered module hooks; SI_ORDER_FIFTH presumably sequences
 * uverbs after the IB core modules — confirm against ibcore's order. */
module_init_order(ib_uverbs_init, SI_ORDER_FIFTH);
module_exit_order(ib_uverbs_cleanup, SI_ORDER_FIFTH);
1436