/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void *            data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool		  going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock. A special case is when the
 * device_mutex is locked. In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);


static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((char *) device + mandatory_table[i].offset)) {
			pr_warn("Device %s is missing mandatory function %s\n",
				device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}


static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
	if (dev->reg_state == IB_DEV_UNREGISTERED) {
		/*
		 * In the IB_DEV_UNINITIALIZED state the cache and port table
		 * have not been created yet, so free them only once the
		 * device has reached the UNREGISTERED state.
		 */
		ib_cache_release_one(dev);
		kfree(dev->port_immutable);
	}
	kfree(dev);
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	device->dev.parent = &linux_root_device;
	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);
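
/*
 * Usage sketch (illustrative only, not part of this file's logic): a
 * low-level driver typically embeds struct ib_device at the start of its
 * own device structure and sizes the allocation accordingly.  The "foo"
 * names below are hypothetical.
 *
 *	struct foo_ib_dev {
 *		struct ib_device	ibdev;	(must be the first member)
 *		void			*priv;
 *	};
 *
 *	struct foo_ib_dev *fdev;
 *
 *	fdev = (struct foo_ib_dev *)ib_alloc_device(sizeof(*fdev));
 *	if (fdev == NULL)
 *		return (-ENOMEM);
 *	...
 *	ib_dealloc_device(&fdev->ibdev);
 */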

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		pr_warn("Couldn't allocate client context for %s/%s\n",
			device->name, client->name);
		return -ENOMEM;
	}

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
			    rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/**
	 * device->port_immutable is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1-based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
{
	if (dev->get_dev_fw_str)
		dev->get_dev_fw_str(dev, str, str_len);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;
	struct ib_client *client;
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	ret = read_port_immutable(device);
	if (ret) {
		pr_warn("Couldn't create per-port immutable data for %s\n",
			device->name);
		goto out;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
		goto port_cleanup;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->query_device(device, &device->attrs, &uhw);
	if (ret) {
		pr_warn("Couldn't query the device attributes\n");
		goto cache_cleanup;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		pr_warn("Couldn't register device %s with driver model\n",
			device->name);
		goto cache_cleanup;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
	mutex_unlock(&device_mutex);
	return 0;

cache_cleanup:
	ib_cache_cleanup_one(device);
	ib_cache_release_one(device);
port_cleanup:
	kfree(device->port_immutable);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
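
/*
 * Registration sketch (illustrative only): before calling
 * ib_register_device() a driver is expected to fill in the mandatory
 * verbs checked by ib_device_check_mandatory() and may use a "%d"
 * template in the name so that alloc_name() picks a free unit number.
 * The "foo" callbacks below are hypothetical.
 *
 *	strlcpy(fdev->ibdev.name, "foo%d", IB_DEVICE_NAME_MAX);
 *	fdev->ibdev.query_device = foo_query_device;
 *	fdev->ibdev.query_port = foo_query_port;
 *	fdev->ibdev.get_port_immutable = foo_port_immutable;
 *	... remaining mandatory verbs ...
 *
 *	ret = ib_register_device(&fdev->ibdev, NULL);
 *	if (ret != 0)
 *		goto err_free;
 */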

/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		context->going_down = true;
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	downgrade_write(&lists_rwsem);

	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	mutex_unlock(&device_mutex);

	ib_device_unregister_sysfs(device);
	ib_cache_cleanup_one(device);

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
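
/*
 * Client sketch (illustrative only): an upper-level module registers a
 * struct ib_client with add/remove callbacks; add() is invoked for every
 * device already registered and for each device registered later.  The
 * "foo" names are hypothetical.
 *
 *	static void foo_add_one(struct ib_device *device);
 *	static void foo_remove_one(struct ib_device *device, void *client_data);
 *
 *	static struct ib_client foo_client = {
 *		.name	= "foo",
 *		.add	= foo_add_one,
 *		.remove	= foo_remove_one
 *	};
 *
 *	ret = ib_register_client(&foo_client);
 *	...
 *	ib_unregister_client(&foo_client);
 */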

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			pr_warn("No client context found for %s/%s\n",
				device->name, client->name);
			continue;
		}

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_del(&found_context->list);
		kfree(found_context);
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	pr_warn("No client context found for %s/%s\n",
		device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
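
/*
 * Per-device context sketch (illustrative only): a client usually
 * allocates its per-device state in its add() callback, publishes it with
 * ib_set_client_data() and receives it back in later callbacks, or looks
 * it up with ib_get_client_data().  The "foo" names are hypothetical.
 *
 *	static void foo_add_one(struct ib_device *device)
 *	{
 *		struct foo_dev_data *fd = kzalloc(sizeof(*fd), GFP_KERNEL);
 *
 *		if (fd == NULL)
 *			return;
 *		ib_set_client_data(device, &foo_client, fd);
 *	}
 *
 *	static void foo_remove_one(struct ib_device *device, void *client_data)
 *	{
 *		struct foo_dev_data *fd = client_data;
 *
 *		kfree(fd);
 *	}
 *
 *	elsewhere: fd = ib_get_client_data(device, &foo_client);
 */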

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);
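
/*
 * Event handler sketch (illustrative only): consumers typically use the
 * INIT_IB_EVENT_HANDLER() helper from <rdma/ib_verbs.h> to bind a handler
 * to a device before registering it.  Because the handler may run in
 * interrupt context it must not sleep.  The "foo" names are hypothetical.
 *
 *	static void foo_event_handler(struct ib_event_handler *handler,
 *				      struct ib_event *event)
 *	{
 *		... react to event->event, e.g. IB_EVENT_PORT_ACTIVE ...
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&fd->event_handler, device, foo_event_handler);
 *	ib_register_event_handler(&fd->event_handler);
 */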

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
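
/*
 * Dispatch sketch (illustrative only): when a low-level driver detects an
 * asynchronous event, e.g. a port coming up, it fills in a struct ib_event
 * and hands it to ib_dispatch_event().  The code below assumes a
 * hypothetical "fdev" driver device and "port" number.
 *
 *	struct ib_event event;
 *
 *	event.device = &fdev->ibdev;
 *	event.element.port_num = port;
 *	event.event = IB_EVENT_PORT_ACTIVE;
 *	ib_dispatch_event(&event);
 */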

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = ib_query_gid(device, port_num, 0, &gid, NULL);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);
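
/*
 * Query sketch (illustrative only): a caller checks whether a port is
 * usable by examining the returned attributes; "device" and "port" are
 * assumed to be valid here.
 *
 *	struct ib_port_attr attr;
 *
 *	if (ib_query_port(device, port, &attr) == 0 &&
 *	    attr.state == IB_PORT_ACTIVE)
 *		... the port is up; its LID, MTU, etc. are in attr ...
 */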

/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr)
{
	if (rdma_cap_roce_gid_table(device, port_num))
		return ib_get_cached_gid(device, port_num, index, gid, attr);

	if (attr)
		return -EINVAL;

	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each matching RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev which are related
 * to a netdevice and calls the callback for each port for which the
 * filter function returns non-zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev && (idev->if_flags & IFF_DYING)) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each matching RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices which are related
 * to a netdevice and calls the callback for each port for which the
 * filter function returns non-zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}

/**
 * ib_cache_gid_del_all_by_netdev - delete GIDs belonging to a netdevice
 *
 * @ndev: Pointer to netdevice
 */
void ib_cache_gid_del_all_by_netdev(struct net_device *ndev)
{
	struct ib_device *ib_dev;
	u8 port;

	down_read(&lists_rwsem);
	list_for_each_entry(ib_dev, &device_list, core_list) {
		for (port = rdma_start_port(ib_dev);
		     port <= rdma_end_port(ib_dev);
		     port++) {
			if (rdma_protocol_roce(ib_dev, port) == 0)
				continue;
			(void) ib_cache_gid_del_all_netdev_gids(ib_dev, port, ndev);
		}
	}
	up_read(&lists_rwsem);
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	if (!device->modify_port)
		return -ENOSYS;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
				   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: Type of GID.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		enum ib_gid_type gid_type, struct net_device *ndev,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (rdma_cap_roce_gid_table(device, port)) {
			if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
							ndev, index)) {
				*port_num = port;
				return 0;
			}
		}

		if (gid_type != IB_GID_TYPE_IB)
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is a full-member pkey, take it */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* no full-member pkey found; if a limited-member one exists, take it */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
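
/*
 * P_Key lookup sketch (illustrative only): callers commonly resolve the
 * table index of the default full-membership P_Key (0xffff); a
 * limited-membership entry (0x7fff) is returned only when no full-member
 * entry matches.
 *
 *	u16 pkey_index;
 *
 *	if (ib_find_pkey(device, port, 0xffff, &pkey_index) == 0)
 *		... pkey_index now indexes the default P_Key ...
 */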

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:	An RDMA device on which the request has been received.
 * @port:	Port number on the RDMA device.
 * @pkey:	The Pkey the request came on.
 * @gid:	A GID that the net_dev uses to communicate.
 * @addr:	Contains the IP address that the request specified as its
 *		destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
			mp_ncpus * 4 /* WQ_UNBOUND_MAX_ACTIVE */);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_sysfs;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ib_cache_setup();

	return 0;

err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_sysfs:
	class_unregister(&ib_class);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

/*
 * Typical loading and unloading order values and their use:
 *
 * SI_ORDER_FIRST (default for module_init):
 *      Core modules (PCI, infiniband)
 * SI_ORDER_SECOND (default for module_exit):
 *      Infiniband core modules (CM)
 * SI_ORDER_THIRD:
 * SI_ORDER_FOURTH:
 *      Infiniband core modules (CMA)
 * SI_ORDER_FIFTH:
 *      Infiniband user-space modules (UCM,UCMA,UMAD,UVERBS,IPOIB)
 * SI_ORDER_SIXTH:
 *      Network HW driver modules
 * SI_ORDER_SEVENTH:
 *      Infiniband HW driver modules
 */
module_init_order(ib_core_init, SI_ORDER_FIRST);
module_exit_order(ib_core_cleanup, SI_ORDER_FIRST);

MODULE_VERSION(ibcore, 1);
MODULE_DEPEND(ibcore, linuxkpi, 1, 1, 1);
